diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..30d45bd --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +vendor/ +**/docker-compose.yml diff --git a/.github/workflows/SetPageFileSize.ps1 b/.github/workflows/SetPageFileSize.ps1 new file mode 100644 index 0000000..8eaf70f --- /dev/null +++ b/.github/workflows/SetPageFileSize.ps1 @@ -0,0 +1,196 @@ +<# +# MIT License (MIT) Copyright (c) 2020 Maxim Lobanov and contributors +# Source: https://github.com/al-cheb/configure-pagefile-action/blob/master/scripts/SetPageFileSize.ps1 +.SYNOPSIS + Configure Pagefile on Windows machine +.NOTES + Author: Aleksandr Chebotov + +.EXAMPLE + SetPageFileSize.ps1 -MinimumSize 4GB -MaximumSize 8GB -DiskRoot "D:" +#> + +param( + [System.UInt64] $MinimumSize = 16gb , + [System.UInt64] $MaximumSize = 16gb , + [System.String] $DiskRoot = "D:" +) + +# https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/NativeMethods.cs,619688d876febbe1 +# https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/mm/modwrite/create.htm +# https://referencesource.microsoft.com/#mscorlib/microsoft/win32/safehandles/safefilehandle.cs,9b08210f3be75520 +# https://referencesource.microsoft.com/#mscorlib/system/security/principal/tokenaccesslevels.cs,6eda91f498a38586 +# https://www.autoitscript.com/forum/topic/117993-api-ntcreatepagingfile/ + +$source = @' +using System; +using System.ComponentModel; +using System.Diagnostics; +using System.Runtime.InteropServices; +using System.Security.Principal; +using System.Text; +using Microsoft.Win32; +using Microsoft.Win32.SafeHandles; + +namespace Util +{ + class NativeMethods + { + [StructLayout(LayoutKind.Sequential)] + internal struct LUID + { + internal uint LowPart; + internal uint HighPart; + } + + [StructLayout(LayoutKind.Sequential)] + internal struct LUID_AND_ATTRIBUTES + { + internal LUID Luid; + internal uint Attributes; + } + + [StructLayout(LayoutKind.Sequential)] + internal struct TOKEN_PRIVILEGE + 
{ + internal uint PrivilegeCount; + internal LUID_AND_ATTRIBUTES Privilege; + + internal static readonly uint Size = (uint)Marshal.SizeOf(typeof(TOKEN_PRIVILEGE)); + } + + [StructLayoutAttribute(LayoutKind.Sequential, CharSet = CharSet.Unicode)] + internal struct UNICODE_STRING + { + internal UInt16 length; + internal UInt16 maximumLength; + internal string buffer; + } + + [DllImport("kernel32.dll", SetLastError=true)] + internal static extern IntPtr LocalFree(IntPtr handle); + + [DllImport("advapi32.dll", ExactSpelling = true, CharSet = CharSet.Unicode, SetLastError = true, PreserveSig = false)] + internal static extern bool LookupPrivilegeValueW( + [In] string lpSystemName, + [In] string lpName, + [Out] out LUID luid + ); + + [DllImport("advapi32.dll", SetLastError = true, PreserveSig = false)] + internal static extern bool AdjustTokenPrivileges( + [In] SafeCloseHandle tokenHandle, + [In] bool disableAllPrivileges, + [In] ref TOKEN_PRIVILEGE newState, + [In] uint bufferLength, + [Out] out TOKEN_PRIVILEGE previousState, + [Out] out uint returnLength + ); + + [DllImport("advapi32.dll", CharSet = CharSet.Auto, SetLastError = true, PreserveSig = false)] + internal static extern bool OpenProcessToken( + [In] IntPtr processToken, + [In] int desiredAccess, + [Out] out SafeCloseHandle tokenHandle + ); + + [DllImport("ntdll.dll", CharSet = CharSet.Unicode, SetLastError = true, CallingConvention = CallingConvention.StdCall)] + internal static extern Int32 NtCreatePagingFile( + [In] ref UNICODE_STRING pageFileName, + [In] ref Int64 minimumSize, + [In] ref Int64 maximumSize, + [In] UInt32 flags + ); + + [DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + internal static extern uint QueryDosDeviceW( + string lpDeviceName, + StringBuilder lpTargetPath, + int ucchMax + ); + } + + public sealed class SafeCloseHandle: SafeHandleZeroOrMinusOneIsInvalid + { + [DllImport("kernel32.dll", ExactSpelling = true, SetLastError = true)] + internal extern static 
bool CloseHandle(IntPtr handle); + + private SafeCloseHandle() : base(true) + { + } + + public SafeCloseHandle(IntPtr preexistingHandle, bool ownsHandle) : base(ownsHandle) + { + SetHandle(preexistingHandle); + } + + override protected bool ReleaseHandle() + { + return CloseHandle(handle); + } + } + + public class PageFile + { + public static void SetPageFileSize(long minimumValue, long maximumValue, string lpDeviceName) + { + SetPageFilePrivilege(); + StringBuilder lpTargetPath = new StringBuilder(260); + + UInt32 resultQueryDosDevice = NativeMethods.QueryDosDeviceW(lpDeviceName, lpTargetPath, lpTargetPath.Capacity); + if (resultQueryDosDevice == 0) + { + throw new Win32Exception(Marshal.GetLastWin32Error()); + } + + string pageFilePath = lpTargetPath.ToString() + "\\pagefile.sys"; + + NativeMethods.UNICODE_STRING pageFileName = new NativeMethods.UNICODE_STRING + { + length = (ushort)(pageFilePath.Length * 2), + maximumLength = (ushort)(2 * (pageFilePath.Length + 1)), + buffer = pageFilePath + }; + + Int32 resultNtCreatePagingFile = NativeMethods.NtCreatePagingFile(ref pageFileName, ref minimumValue, ref maximumValue, 0); + if (resultNtCreatePagingFile != 0) + { + throw new Win32Exception(Marshal.GetLastWin32Error()); + } + + Console.WriteLine("PageFile: {0} / {1} bytes for {2}", minimumValue, maximumValue, pageFilePath); + } + + static void SetPageFilePrivilege() + { + const int SE_PRIVILEGE_ENABLED = 0x00000002; + const int AdjustPrivileges = 0x00000020; + const int Query = 0x00000008; + + NativeMethods.LUID luid; + NativeMethods.LookupPrivilegeValueW(null, "SeCreatePagefilePrivilege", out luid); + + SafeCloseHandle hToken; + NativeMethods.OpenProcessToken( + Process.GetCurrentProcess().Handle, + AdjustPrivileges | Query, + out hToken + ); + + NativeMethods.TOKEN_PRIVILEGE previousState; + NativeMethods.TOKEN_PRIVILEGE newState; + uint previousSize = 0; + newState.PrivilegeCount = 1; + newState.Privilege.Luid = luid; + newState.Privilege.Attributes = 
SE_PRIVILEGE_ENABLED; + + NativeMethods.AdjustTokenPrivileges(hToken, false, ref newState, NativeMethods.TOKEN_PRIVILEGE.Size, out previousState, out previousSize); + } + } +} +'@ + +Add-Type -TypeDefinition $source + +# Set SetPageFileSize +[Util.PageFile]::SetPageFileSize($minimumSize, $maximumSize, $diskRoot) \ No newline at end of file diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml new file mode 100644 index 0000000..d928a7a --- /dev/null +++ b/.github/workflows/deploy.yaml @@ -0,0 +1,109 @@ +name: Build and upload assets +on: + release: + types: [ published ] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: ubuntu-latest + TARGET: linux/x86_64 + - os: ubuntu-latest + TARGET: linux/aarch64 + - os: ubuntu-latest + TARGET: windows/x64 + - os: macos-latest + TARGET: macos/x64 + name: Building, ${{ matrix.TARGET }} + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: 1.21 + + - name: Update sources + if: matrix.TARGET == 'linux/aarch64' || matrix.TARGET == 'windows/x64' + run: sudo apt-get update -y + + - name: Install compilers + if: matrix.TARGET == 'linux/aarch64' || matrix.TARGET == 'windows/x64' + run: sudo apt-get install gcc-aarch64-linux-gnu gcc-mingw-w64-x86-64-win32 -y + + - name: Build on Linux for ${{ matrix.TARGET }} + if: matrix.TARGET == 'linux/x86_64' + run: | + + # `-extldflags=-static` - means static link everything, + # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net" + # `-s -w` strips the binary to produce smaller size binaries + go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/... 
+ archive="bin/spectred-${{ github.event.release.tag_name }}-linux-x86_64.zip" + asset_name="spectred-${{ github.event.release.tag_name }}-linux-x86_64.zip" + zip -r "${archive}" ./bin/* + echo "archive=${archive}" >> $GITHUB_ENV + echo "asset_name=${asset_name}" >> $GITHUB_ENV + + - name: Build on Linux for ${{ matrix.TARGET }} + if: matrix.TARGET == 'linux/aarch64' + env: + CGO_ENABLED: 1 + CC: aarch64-linux-gnu-gcc + GOOS: linux + GOARCH: arm64 + run: | + + # `-extldflags=-static` - means static link everything, + # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net" + # `-s -w` strips the binary to produce smaller size binaries + go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/... + archive="bin/spectred-${{ github.event.release.tag_name }}-linux-aarch64.zip" + asset_name="spectred-${{ github.event.release.tag_name }}-linux-aarch64.zip" + zip -r "${archive}" ./bin/* + echo "archive=${archive}" >> $GITHUB_ENV + echo "asset_name=${asset_name}" >> $GITHUB_ENV + + - name: Build on Linux for ${{ matrix.TARGET }} + if: matrix.TARGET == 'windows/x64' + env: + CGO_ENABLED: 1 + CC: x86_64-w64-mingw32-gcc + GOOS: windows + GOARCH: amd64 + run: | + + # `-extldflags=-static` - means static link everything, + # `-tags netgo,osusergo` means use pure go replacements for "os/user" and "net" + # `-s -w` strips the binary to produce smaller size binaries + go build -v -ldflags="-s -w -extldflags=-static" -tags netgo,osusergo -o ./bin/ . ./cmd/... + archive="bin/spectred-${{ github.event.release.tag_name }}-windows-x64.zip" + asset_name="spectred-${{ github.event.release.tag_name }}-windows-x64.zip" + zip -r "${archive}" ./bin/* + echo "archive=${archive}" >> $GITHUB_ENV + echo "asset_name=${asset_name}" >> $GITHUB_ENV + + - name: Build on Linux for ${{ matrix.TARGET }} + if: matrix.TARGET == 'macos/x64' + run: | + go build -v -ldflags="-s -w" -o ./bin/ . ./cmd/... 
+ archive="bin/spectred-${{ github.event.release.tag_name }}-macos-x64.zip" + asset_name="spectred-${{ github.event.release.tag_name }}-macos-x64.zip" + zip -r "${archive}" ./bin/* + echo "archive=${archive}" >> $GITHUB_ENV + echo "asset_name=${asset_name}" >> $GITHUB_ENV + + - name: Upload release asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ github.event.release.upload_url }} + asset_path: "./${{ env.archive }}" + asset_name: "${{ env.asset_name }}" + asset_content_type: application/zip diff --git a/.github/workflows/race.yaml b/.github/workflows/race.yaml new file mode 100644 index 0000000..f07a380 --- /dev/null +++ b/.github/workflows/race.yaml @@ -0,0 +1,48 @@ +name: Race + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + +jobs: + race_test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + branch: [ stable, latest ] + name: Race detection on ${{ matrix.branch }} + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: 1.21 + + - name: Set scheduled branch name + shell: bash + if: github.event_name == 'schedule' + run: | + if [ "${{ matrix.branch }}" == "stable" ]; then + echo "run_on=master" >> $GITHUB_ENV + fi + if [ "${{ matrix.branch }}" == "latest" ]; then + echo "run_on=dev" >> $GITHUB_ENV + fi + + - name: Set manual branch name + shell: bash + if: github.event_name == 'workflow_dispatch' + run: echo "run_on=${{ github.ref }}" >> $GITHUB_ENV + + - name: Test with race detector + shell: bash + run: | + git checkout "${{ env.run_on }}" + git status + go test -timeout 20m -race ./... 
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml new file mode 100644 index 0000000..af81f5c --- /dev/null +++ b/.github/workflows/tests.yaml @@ -0,0 +1,68 @@ +name: Tests +on: + push: + pull_request: + + # edtited - because base branch can be modified + # synchronize - update commits on PR + types: [opened, synchronize, edited] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, windows-latest, macos-latest ] + name: Tests, ${{ matrix.os }} + steps: + - name: Fix CRLF on Windows + if: runner.os == 'Windows' + run: git config --global core.autocrlf false + + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + + # Increase the pagefile size on Windows to aviod running out of memory + - name: Increase pagefile size on Windows + if: runner.os == 'Windows' + run: powershell -command .github\workflows\SetPageFileSize.ps1 + + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: 1.21 + + # Source: https://github.com/actions/cache/blob/main/examples.md#go---modules + - name: Go Cache + uses: actions/cache@v4 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Test + shell: bash + run: ./build_and_test.sh + + stability-test-fast: + runs-on: ubuntu-latest + name: Fast stability tests + steps: + - name: Setup Go + uses: actions/setup-go@v4 + with: + go-version: 1.21 + + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install spectred + run: go install ./... 
+ + - name: Run fast stability tests + working-directory: stability-tests + run: ./install_and_test.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5ff50d5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,62 @@ +# Temp files +*~ + +# Databases +spectred.db +*-shm +*-wal + +# Log files +*.log + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Real binaries, build with `go build .` +spectred +cmd/gencerts/gencerts +cmd/spectrectl/spectrectl +cmd/spectreminer/spectreminer +*.exe +*.exe~ + +# Output of the go coverage tool +*.out + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + + +# IDE +.idea +.vscode +debug +debug.test +__debug_bin + +# CI +version.txt +coverage.txt + +testdbs/ +coverage.tmp diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..9d3b003 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,71 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our +project and our community a harassment-free experience for everyone, +regardless of age, body size, disability, ethnicity, gender identity +and expression, level of experience, nationality, personal appearance, +race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive +environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual + attention or advances +* Trolling, insulting/derogatory comments, and personal or political + attacks +* Public or private harassment +* Publishing others' private information, such as a physical or + electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in + a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of +acceptable behavior and are expected to take appropriate and fair +corrective action in response to any instances of unacceptable +behavior. + +Project maintainers have the right and responsibility to remove, edit, +or reject comments, commits, code, wiki edits, issues, and other +contributions that are not aligned to this Code of Conduct, or to ban +temporarily or permanently any contributor for other behaviors that +they deem inappropriate, threatening, offensive, or harmful. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by contacting the project maintainers on this +[Google form][gform]. The project maintainers will review and +investigate all complaints, and will respond in a way that it deems +appropriate to the circumstances. The project maintainers are obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted +separately. 
+ +Project maintainers who do not follow or enforce the Code of Conduct in +good faith may face temporary or permanent repercussions as determined +by other members of the project. + +## Attribution + +This Code of Conduct is adapted from the +[Contributor Covenant][homepage], version 1.4, available at +[http://contributor-covenant.org/version/1/4][version] + +[gform]: https://forms.gle/rfRZWxP6wv2VsLAH8 +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..e0ea043 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,21 @@ +# Contributing to Spectred + +Any contribution to Spectred is very welcome. + +## Getting started + +If you want to start contributing to Spectred and don't know where to +start, you can pick an issue from the [list](https://github.com/spectre-project/spectred/issues). + +If you want to make a big change it's better to discuss it first by +opening an issue or talk about it in development [Discord](https://discord.spectre-network.org/) +to avoid duplicate work. + +## Pull Request process + +Any pull request should be opened against the development branch +`dev`. + +All pull requests should pass the checks written in +`build_and_test.sh`, so it's recommended to run this script before +submitting your PR. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..796a87a --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +ISC License + +Copyright (c) 2024-2024 The Spectre developers +Copyright (c) 2018-2019 The kaspanet developers +Copyright (c) 2013-2018 The btcsuite developers +Copyright (c) 2015-2016 The Decred developers +Copyright (c) 2013-2014 Conformal Systems LLC. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..24a74ce --- /dev/null +++ b/README.md @@ -0,0 +1,129 @@ +# Spectred + +[![Build Status](https://github.com/spectre-project/spectred/actions/workflows/tests.yaml/badge.svg)](https://github.com/spectre-project/spectred/actions/workflows/tests.yaml) +[![GitHub release](https://img.shields.io/github/v/release/spectre-project/spectred.svg)](https://github.com/spectre-project/spectred/releases) +[![GitHub license](https://img.shields.io/github/license/spectre-project/spectred.svg)](https://github.com/spectre-project/spectred/blob/main/LICENSE) +[![GitHub downloads](https://img.shields.io/github/downloads/spectre-project/spectred/total.svg)](https://github.com/spectre-project/spectred/releases) + +Spectred is the reference full node Spectre implementation written in +Go (golang). It is a [DAG](https://en.wikipedia.org/wiki/Directed_acyclic_graph) +as a proof-of-work cryptocurrency with instant confirmations and +sub-second block times. It is based on [the PHANTOM protocol](https://eprint.iacr.org/2018/104.pdf), a +generalization of Nakamoto consensus. + +## Overview + +Spectre is a fork of [Kaspa](https://github.com/kaspanet/kaspad) +introducing CPU-only mining algorithm [SpectreX](https://github.com/spectre-project/go-spectrex). 
+ +SpectreX is based on [AstroBWTv3](https://github.com/deroproject/derohe/tree/main/astrobwt/astrobwtv3) +and proof-of-work calculation is done in the following steps: + +* Step 1: SHA-3 +* Step 2: AstroBWTv3 +* Step 3: HeavyHash + +Spectre will add full non-disclosable privacy and anonymous +transactions in future implemented with the GhostFACE protocol +build by a team of anonymous crypto algorithm researchers and +engineers. Simple and plain goal: + +* PHANTOM Protocol + GhostDAG + GhostFACE = Spectre + +Spectre will become a ghostchain; nothing more, nothing less. Design +decisions have been made already and more details about the GhostFACE +protocol will be released at a later stage. Sneak peak: It will use +[Pedersen Commitments](https://github.com/threehook/go-pedersen-commitment) +as it allows perfect integration with the Spectre UTXO model and +allows perfect hiding. ElGamal will be used for TX signature signing +as it has a superior TPS (transactions per second) performance. Any PRs +are welcome and can be made with anonymous accounts. No pre-mine, no +shit, pure privacy is a hit! + +## Comparison + +Why another fork? Kaspa is great but we love privacy, Monero and DERO +are great but we love speed! So lets join the cool things from both. 
+We decided to take Kaspa as codebase, quick comparison: + +Feature | Spectre | Kaspa | Monero | DERO +-----------------------------|----------|------------|---------|----------- +PoW Algorithm | SpectreX | kHeavyHash | RandomX | AstroBWTv3 +Balance Encryption | Future | No | Yes | Yes +Transaction Encryption | Future | No | Yes | Yes +Message Encyrption | Future | No | No | Yes +Untraceable Transactions | Future | No | Yes | Yes +Untraceable Mining | Yes | No | No | Yes +Built-in multicore CPU-miner | Yes | No | Yes | Yes +High BPS | Yes | Yes | No | No +High TPS | Yes | Yes | No | No + +Untraceable Mining is already achieved with AstroBWTv3 and a multicore +miner is already being shipped with Spectre, working on ARM/x86. There +is already a proof-of-concept Rust [AstroBWT](https://github.com/Slixe/astrobwt) +implementation currently under review and investigation to merge it +into Spectre Rust codebase. We leave it up to the community to build +an highly optimized CPU-miner. + +## Mathematics + +We love numbers, you will find a lot of mathematical constants in the +source code, in the genesis hash, genesis paylod, genesis merkle hash +and more. Mathematical constants like [Pi](https://en.wikipedia.org/wiki/Pi), +[E](https://en.wikipedia.org/wiki/E_(mathematical_constant)) and +several prime numbers used as starting values for nonce or difficulty. +The first released version is `0.3.14`, the famous Pi divided by 10. + +## Installation + +### Install from Binaries + +Pre-compiled binaries for Linux `x86_64`, Windows `x64` and macOS `x64` +as universal binary can be downloaded at: [https://github.com/spectre-project/spectred/releases](https://github.com/spectre-project/spectred/releases) + +### Build from Source + +Go 1.19 or later is required. Install Go according to the installation +instructions at [http://golang.org/doc/install](http://golang.org/doc/install). 
+Ensure Go was installed properly and is a supported version: + +```bash +go version +``` + +Run the following commands to obtain and install spectred including +all dependencies: + +```bash +git clone https://github.com/spectre-project/spectred +cd spectred +go install . ./cmd/... +``` + +Spectred (and utilities) should now be installed in +`$(go env GOPATH)/bin`. If you did not already add the `bin` directory +to your system path during Go installation, you are encouraged to do +so now. + +### Getting Started + +Spectred has several configuration options available to tweak how it +runs, but all of the basic operations work with zero configuration. + +```bash +spectred +``` + +## Discord + +Join our [Discord](https://discord.spectre-network.org/) server and +discuss with us. Don't forget: We love privacy! + +## Issue Tracker + +The [integrated github issue tracker](https://github.com/spectre-project/spectred/issues) +is used for this project. + +## License + +Spectred is licensed under the copyfree [ISC License](https://choosealicense.com/licenses/isc/). 
diff --git a/app/app.go b/app/app.go new file mode 100644 index 0000000..5e91ffe --- /dev/null +++ b/app/app.go @@ -0,0 +1,189 @@ +package app + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/os/execenv" + "github.com/spectre-project/spectred/infrastructure/os/limits" + "github.com/spectre-project/spectred/infrastructure/os/signal" + "github.com/spectre-project/spectred/infrastructure/os/winservice" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" + "github.com/spectre-project/spectred/version" +) + +const ( + leveldbCacheSizeMiB = 256 + defaultDataDirname = "datadir2" +) + +var desiredLimits = &limits.DesiredLimits{ + FileLimitWant: 2048, + FileLimitMin: 1024, +} + +var serviceDescription = &winservice.ServiceDescription{ + Name: "spectredsvc", + DisplayName: "Spectred Service", + Description: "Downloads and stays synchronized with the Spectre blockDAG and " + + "provides DAG services to applications.", +} + +type spectredApp struct { + cfg *config.Config +} + +// StartApp starts the spectred app, and blocks until it finishes running +func StartApp() error { + execenv.Initialize(desiredLimits) + + // Load configuration and parse command line. This function also + // initializes logging and configures it accordingly. + cfg, err := config.LoadConfig() + if err != nil { + fmt.Fprintln(os.Stderr, err) + return err + } + defer logger.BackendLog.Close() + defer panics.HandlePanic(log, "MAIN", nil) + + app := &spectredApp{cfg: cfg} + + // Call serviceMain on Windows to handle running as a service. When + // the return isService flag is true, exit now since we ran as a + // service. 
Otherwise, just fall through to normal operation. + if runtime.GOOS == "windows" { + isService, err := winservice.WinServiceMain(app.main, serviceDescription, cfg) + if err != nil { + return err + } + if isService { + return nil + } + } + + return app.main(nil) +} + +func (app *spectredApp) main(startedChan chan<- struct{}) error { + // Get a channel that will be closed when a shutdown signal has been + // triggered either from an OS signal such as SIGINT (Ctrl+C) or from + // another subsystem such as the RPC server. + interrupt := signal.InterruptListener() + defer log.Info("Shutdown complete") + + // Show version at startup. + log.Infof("Version %s", version.Version()) + + // Enable http profiling server if requested. + if app.cfg.Profile != "" { + profiling.Start(app.cfg.Profile, log) + } + profiling.TrackHeap(app.cfg.AppDir, log) + + // Return now if an interrupt signal was triggered. + if signal.InterruptRequested(interrupt) { + return nil + } + + if app.cfg.ResetDatabase { + err := removeDatabase(app.cfg) + if err != nil { + log.Error(err) + return err + } + } + + // Open the database + databaseContext, err := openDB(app.cfg) + if err != nil { + log.Errorf("Loading database failed: %+v", err) + return err + } + + defer func() { + log.Infof("Gracefully shutting down the database...") + err := databaseContext.Close() + if err != nil { + log.Errorf("Failed to close the database: %s", err) + } + }() + + // Return now if an interrupt signal was triggered. + if signal.InterruptRequested(interrupt) { + return nil + } + + // Create componentManager and start it. 
+ componentManager, err := NewComponentManager(app.cfg, databaseContext, interrupt) + if err != nil { + log.Errorf("Unable to start spectred: %+v", err) + return err + } + + defer func() { + log.Infof("Gracefully shutting down spectred...") + + shutdownDone := make(chan struct{}) + go func() { + componentManager.Stop() + shutdownDone <- struct{}{} + }() + + const shutdownTimeout = 2 * time.Minute + + select { + case <-shutdownDone: + case <-time.After(shutdownTimeout): + log.Criticalf("Graceful shutdown timed out %s. Terminating...", shutdownTimeout) + } + log.Infof("Spectred shutdown complete") + }() + + componentManager.Start() + + if startedChan != nil { + startedChan <- struct{}{} + } + + // Wait until the interrupt signal is received from an OS signal or + // shutdown is requested through one of the subsystems such as the RPC + // server. + <-interrupt + return nil +} + +// dbPath returns the path to the block database given a database type. +func databasePath(cfg *config.Config) string { + return filepath.Join(cfg.AppDir, defaultDataDirname) +} + +func removeDatabase(cfg *config.Config) error { + dbPath := databasePath(cfg) + return os.RemoveAll(dbPath) +} + +func openDB(cfg *config.Config) (database.Database, error) { + dbPath := databasePath(cfg) + + err := checkDatabaseVersion(dbPath) + if err != nil { + return nil, err + } + + log.Infof("Loading database from '%s'", dbPath) + db, err := ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB) + if err != nil { + return nil, err + } + + return db, nil +} diff --git a/app/appmessage/README.md b/app/appmessage/README.md new file mode 100644 index 0000000..aa42f0c --- /dev/null +++ b/app/appmessage/README.md @@ -0,0 +1,73 @@ +# wire + +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/spectre-project/spectred/wire) + +Package wire implements the spectre wire protocol. 
+ +## Spectre Message Overview + +The spectre protocol consists of exchanging messages between peers. +Each message is preceded by a header which identifies information +about it such as which spectre network it is a part of, its type, how +big it is, and a checksum to verify validity. All encoding and +decoding of message headers is handled by this package. + +To accomplish this, there is a generic interface for spectre messages +named `Message` which allows messages of any type to be read, written, +or passed around through channels, functions, etc. In addition, +concrete implementations of most all spectre messages are provided. +All of the details of marshalling and unmarshalling to and from the +wire using spectre encoding are handled so the caller doesn't have +to concern themselves with the specifics. + +## Reading Messages Example + +In order to unmarshal spectre messages from the wire, use the +`ReadMessage` function. It accepts any `io.Reader`, but typically +this will be a `net.Conn` to a remote node running a spectre peer. +Example syntax is: + +```Go +// Use the most recent protocol version supported by the package and the +// main spectre network. +pver := wire.ProtocolVersion +spectrenet := wire.Mainnet + +// Reads and validates the next spectre message from conn using the +// protocol version pver and the spectre network spectrenet. The returns +// are a appmessage.Message, a []byte which contains the unmarshalled +// raw payload, and a possible error. +msg, rawPayload, err := wire.ReadMessage(conn, pver, spectrenet) +if err != nil { + // Log and handle the error +} +``` + +See the package documentation for details on determining the message +type. + +## Writing Messages Example + +In order to marshal spectre messages to the wire, use the +`WriteMessage` function. It accepts any `io.Writer`, but typically +this will be a `net.Conn` to a remote node running a spectre peer. 
+Example syntax to request addresses from a remote peer is:
+
+```Go
+// Use the most recent protocol version supported by the package and the
+// main spectre network.
+pver := wire.ProtocolVersion
+spectrenet := wire.Mainnet
+
+// Create a new getaddr spectre message.
+msg := wire.NewMsgGetAddr()
+
+// Writes a spectre message msg to conn using the protocol version
+// pver, and the spectre network spectrenet. The return is a possible
+// error.
+err := wire.WriteMessage(conn, msg, pver, spectrenet)
+if err != nil {
+	// Log and handle the error
+}
+```
diff --git a/app/appmessage/base_message.go b/app/appmessage/base_message.go
new file mode 100644
index 0000000..bb2d425
--- /dev/null
+++ b/app/appmessage/base_message.go
@@ -0,0 +1,24 @@
+package appmessage
+
+import "time"
+
+type baseMessage struct {
+	messageNumber uint64
+	receivedAt    time.Time
+}
+
+func (b *baseMessage) MessageNumber() uint64 {
+	return b.messageNumber
+}
+
+func (b *baseMessage) SetMessageNumber(messageNumber uint64) {
+	b.messageNumber = messageNumber
+}
+
+func (b *baseMessage) ReceivedAt() time.Time {
+	return b.receivedAt
+}
+
+func (b *baseMessage) SetReceivedAt(receivedAt time.Time) {
+	b.receivedAt = receivedAt
+}
diff --git a/app/appmessage/common.go b/app/appmessage/common.go
new file mode 100644
index 0000000..e11f14c
--- /dev/null
+++ b/app/appmessage/common.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2013-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package appmessage
+
+import (
+	"github.com/pkg/errors"
+)
+
+// MaxInvPerMsg is the maximum number of inventory vectors that can be in any type of spectre inv message.
+const MaxInvPerMsg = 1 << 17
+
+// errNonCanonicalVarInt is the common format string used for non-canonically
+// encoded variable length integer errors.
+var errNonCanonicalVarInt = "non-canonical varint %x - discriminant %x must " + + "encode a value greater than %x" + +// errNoEncodingForType signifies that there's no encoding for the given type. +var errNoEncodingForType = errors.New("there's no encoding for this type") diff --git a/app/appmessage/common_test.go b/app/appmessage/common_test.go new file mode 100644 index 0000000..22aeb81 --- /dev/null +++ b/app/appmessage/common_test.go @@ -0,0 +1,44 @@ +package appmessage + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// mainnetGenesisHash is the hash of the first block in the block DAG for the +// main network (genesis block). +var mainnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, + 0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b, + 0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1, + 0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63, +}) + +// simnetGenesisHash is the hash of the first block in the block DAG for the +// simulation test network. +var simnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x9d, 0x89, 0xb0, 0x6e, 0xb3, 0x47, 0xb5, 0x6e, + 0xcd, 0x6c, 0x63, 0x99, 0x45, 0x91, 0xd5, 0xce, + 0x9b, 0x43, 0x05, 0xc1, 0xa5, 0x5e, 0x2a, 0xda, + 0x90, 0x4c, 0xf0, 0x6c, 0x4d, 0x5f, 0xd3, 0x62, +}) + +// mainnetGenesisMerkleRoot is the hash of the first transaction in the genesis +// block for the main network. 
+var mainnetGenesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, + 0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f, + 0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a, + 0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b, +}) + +var exampleAcceptedIDMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, + 0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87, + 0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, + 0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F, +}) + +var exampleUTXOCommitment = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, + 0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87, + 0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, + 0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F, +}) diff --git a/app/appmessage/doc.go b/app/appmessage/doc.go new file mode 100644 index 0000000..56d17c2 --- /dev/null +++ b/app/appmessage/doc.go @@ -0,0 +1,133 @@ +/* +Package appmessage implements the spectre appmessage protocol. + +At a high level, this package provides support for marshalling and unmarshalling +supported spectre messages to and from the appmessage. This package does not deal +with the specifics of message handling such as what to do when a message is +received. This provides the caller with a high level of flexibility. + +# Spectre Message Overview + +The spectre protocol consists of exchanging messages between peers. Each +message is preceded by a header which identifies information about it such as +which spectre network it is a part of, its type, how big it is, and a checksum +to verify validity. All encoding and decoding of message headers is handled by +this package. 
+ +To accomplish this, there is a generic interface for spectre messages named +Message which allows messages of any type to be read, written, or passed around +through channels, functions, etc. In addition, concrete implementations of most +of the currently supported spectre messages are provided. For these supported +messages, all of the details of marshalling and unmarshalling to and from the +appmessage using spectre encoding are handled so the caller doesn't have to concern +themselves with the specifics. + +# Message Interaction + +The following provides a quick summary of how the spectre messages are intended +to interact with one another. As stated above, these interactions are not +directly handled by this package. + +The initial handshake consists of two peers sending each other a version message +(MsgVersion) followed by responding with a verack message (MsgVerAck). Both +peers use the information in the version message (MsgVersion) to negotiate +things such as protocol version and supported services with each other. Once +the initial handshake is complete, the following chart indicates message +interactions in no particular order. + + Peer A Sends Peer B Responds + ---------------------------------------------------------------------------- + getaddr message (MsgRequestAddresses) addr message (MsgAddresses) + getblockinvs message (MsgGetBlockInvs) inv message (MsgInv) + inv message (MsgInv) getdata message (MsgGetData) + getdata message (MsgGetData) block message (MsgBlock) -or- + tx message (MsgTx) -or- + notfound message (MsgNotFound) + ping message (MsgPing) pong message (MsgPong) + +# Common Parameters + +There are several common parameters that arise when using this package to read +and write spectre messages. The following sections provide a quick overview of +these parameters so the next sections can build on them. 
+ +# Protocol Version + +The protocol version should be negotiated with the remote peer at a higher +level than this package via the version (MsgVersion) message exchange, however, +this package provides the appmessage.ProtocolVersion constant which indicates the +latest protocol version this package supports and is typically the value to use +for all outbound connections before a potentially lower protocol version is +negotiated. + +# Spectre Network + +The spectre network is a magic number which is used to identify the start of a +message and which spectre network the message applies to. This package provides +the following constants: + + appmessage.Mainnet + appmessage.Testnet (Test network) + appmessage.Simnet (Simulation test network) + appmessage.Devnet (Development network) + +# Determining Message Type + +As discussed in the spectre message overview section, this package reads +and writes spectre messages using a generic interface named Message. In +order to determine the actual concrete type of the message, use a type +switch or type assertion. An example of a type switch follows: + + // Assumes msg is already a valid concrete message such as one created + // via NewMsgVersion or read via ReadMessage. + switch msg := msg.(type) { + case *appmessage.MsgVersion: + // The message is a pointer to a MsgVersion struct. + fmt.Printf("Protocol version: %d", msg.ProtocolVersion) + case *appmessage.MsgBlock: + // The message is a pointer to a MsgBlock struct. + fmt.Printf("Number of tx in block: %d", msg.Header.TxnCount) + } + +# Reading Messages + +In order to unmarshall spectre messages from the appmessage, use the ReadMessage +function. It accepts any io.Reader, but typically this will be a net.Conn to +a remote node running a spectre peer. Example syntax is: + + // Reads and validates the next spectre message from conn using the + // protocol version pver and the spectre network spectreNet. 
The returns + // are a appmessage.Message, a []byte which contains the unmarshalled + // raw payload, and a possible error. + msg, rawPayload, err := appmessage.ReadMessage(conn, pver, spectreNet) + if err != nil { + // Log and handle the error + } + +# Writing Messages + +In order to marshall spectre messages to the appmessage, use the WriteMessage +function. It accepts any io.Writer, but typically this will be a net.Conn to +a remote node running a spectre peer. Example syntax to request addresses +from a remote peer is: + + // Create a new getaddr spectre message. + msg := appmessage.NewMsgRequestAddresses() + + // Writes a spectre message msg to conn using the protocol version + // pver, and the spectre network spectreNet. The return is a possible + // error. + err := appmessage.WriteMessage(conn, msg, pver, spectreNet) + if err != nil { + // Log and handle the error + } + +# Errors + +Errors returned by this package are either the raw errors provided by underlying +calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and +io.ErrShortWrite, or of type appmessage.MessageError. This allows the caller to +differentiate between general IO errors and malformed messages through type +assertions. 
+*/ +package appmessage diff --git a/app/appmessage/domainconverters.go b/app/appmessage/domainconverters.go new file mode 100644 index 0000000..b16af97 --- /dev/null +++ b/app/appmessage/domainconverters.go @@ -0,0 +1,602 @@ +package appmessage + +import ( + "encoding/hex" + "math/big" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" + "github.com/spectre-project/spectred/util/mstime" +) + +// DomainBlockToMsgBlock converts an externalapi.DomainBlock to MsgBlock +func DomainBlockToMsgBlock(domainBlock *externalapi.DomainBlock) *MsgBlock { + msgTxs := make([]*MsgTx, 0, len(domainBlock.Transactions)) + for _, domainTransaction := range domainBlock.Transactions { + msgTxs = append(msgTxs, DomainTransactionToMsgTx(domainTransaction)) + } + return &MsgBlock{ + Header: *DomainBlockHeaderToBlockHeader(domainBlock.Header), + Transactions: msgTxs, + } +} + +// DomainBlockHeaderToBlockHeader converts an externalapi.BlockHeader to MsgBlockHeader +func DomainBlockHeaderToBlockHeader(domainBlockHeader externalapi.BlockHeader) *MsgBlockHeader { + return &MsgBlockHeader{ + Version: domainBlockHeader.Version(), + Parents: domainBlockHeader.Parents(), + HashMerkleRoot: domainBlockHeader.HashMerkleRoot(), + AcceptedIDMerkleRoot: domainBlockHeader.AcceptedIDMerkleRoot(), + UTXOCommitment: domainBlockHeader.UTXOCommitment(), + Timestamp: mstime.UnixMilliseconds(domainBlockHeader.TimeInMilliseconds()), + Bits: domainBlockHeader.Bits(), + Nonce: domainBlockHeader.Nonce(), + BlueScore: domainBlockHeader.BlueScore(), + DAAScore: domainBlockHeader.DAAScore(), + BlueWork: 
domainBlockHeader.BlueWork(), + PruningPoint: domainBlockHeader.PruningPoint(), + } +} + +// MsgBlockToDomainBlock converts a MsgBlock to externalapi.DomainBlock +func MsgBlockToDomainBlock(msgBlock *MsgBlock) *externalapi.DomainBlock { + transactions := make([]*externalapi.DomainTransaction, 0, len(msgBlock.Transactions)) + for _, msgTx := range msgBlock.Transactions { + transactions = append(transactions, MsgTxToDomainTransaction(msgTx)) + } + + return &externalapi.DomainBlock{ + Header: BlockHeaderToDomainBlockHeader(&msgBlock.Header), + Transactions: transactions, + } +} + +// BlockHeaderToDomainBlockHeader converts a MsgBlockHeader to externalapi.BlockHeader +func BlockHeaderToDomainBlockHeader(blockHeader *MsgBlockHeader) externalapi.BlockHeader { + return blockheader.NewImmutableBlockHeader( + blockHeader.Version, + blockHeader.Parents, + blockHeader.HashMerkleRoot, + blockHeader.AcceptedIDMerkleRoot, + blockHeader.UTXOCommitment, + blockHeader.Timestamp.UnixMilliseconds(), + blockHeader.Bits, + blockHeader.Nonce, + blockHeader.DAAScore, + blockHeader.BlueScore, + blockHeader.BlueWork, + blockHeader.PruningPoint, + ) +} + +// DomainTransactionToMsgTx converts an externalapi.DomainTransaction into an MsgTx +func DomainTransactionToMsgTx(domainTransaction *externalapi.DomainTransaction) *MsgTx { + txIns := make([]*TxIn, 0, len(domainTransaction.Inputs)) + for _, input := range domainTransaction.Inputs { + txIns = append(txIns, domainTransactionInputToTxIn(input)) + } + + txOuts := make([]*TxOut, 0, len(domainTransaction.Outputs)) + for _, output := range domainTransaction.Outputs { + txOuts = append(txOuts, domainTransactionOutputToTxOut(output)) + } + + return &MsgTx{ + Version: domainTransaction.Version, + TxIn: txIns, + TxOut: txOuts, + LockTime: domainTransaction.LockTime, + SubnetworkID: domainTransaction.SubnetworkID, + Gas: domainTransaction.Gas, + Payload: domainTransaction.Payload, + } +} + +func domainTransactionOutputToTxOut(domainTransactionOutput 
*externalapi.DomainTransactionOutput) *TxOut { + return &TxOut{ + Value: domainTransactionOutput.Value, + ScriptPubKey: domainTransactionOutput.ScriptPublicKey, + } +} + +func domainTransactionInputToTxIn(domainTransactionInput *externalapi.DomainTransactionInput) *TxIn { + return &TxIn{ + PreviousOutpoint: *domainOutpointToOutpoint(domainTransactionInput.PreviousOutpoint), + SignatureScript: domainTransactionInput.SignatureScript, + Sequence: domainTransactionInput.Sequence, + SigOpCount: domainTransactionInput.SigOpCount, + } +} + +func domainOutpointToOutpoint(domainOutpoint externalapi.DomainOutpoint) *Outpoint { + return NewOutpoint( + &domainOutpoint.TransactionID, + domainOutpoint.Index) +} + +// MsgTxToDomainTransaction converts an MsgTx into externalapi.DomainTransaction +func MsgTxToDomainTransaction(msgTx *MsgTx) *externalapi.DomainTransaction { + transactionInputs := make([]*externalapi.DomainTransactionInput, 0, len(msgTx.TxIn)) + for _, txIn := range msgTx.TxIn { + transactionInputs = append(transactionInputs, txInToDomainTransactionInput(txIn)) + } + + transactionOutputs := make([]*externalapi.DomainTransactionOutput, 0, len(msgTx.TxOut)) + for _, txOut := range msgTx.TxOut { + transactionOutputs = append(transactionOutputs, txOutToDomainTransactionOutput(txOut)) + } + + payload := make([]byte, 0) + if msgTx.Payload != nil { + payload = msgTx.Payload + } + + return &externalapi.DomainTransaction{ + Version: msgTx.Version, + Inputs: transactionInputs, + Outputs: transactionOutputs, + LockTime: msgTx.LockTime, + SubnetworkID: msgTx.SubnetworkID, + Gas: msgTx.Gas, + Payload: payload, + } +} + +func txOutToDomainTransactionOutput(txOut *TxOut) *externalapi.DomainTransactionOutput { + return &externalapi.DomainTransactionOutput{ + Value: txOut.Value, + ScriptPublicKey: txOut.ScriptPubKey, + } +} + +func txInToDomainTransactionInput(txIn *TxIn) *externalapi.DomainTransactionInput { + return &externalapi.DomainTransactionInput{ + PreviousOutpoint: 
*outpointToDomainOutpoint(&txIn.PreviousOutpoint), //TODO + SignatureScript: txIn.SignatureScript, + SigOpCount: txIn.SigOpCount, + Sequence: txIn.Sequence, + } +} + +func outpointToDomainOutpoint(outpoint *Outpoint) *externalapi.DomainOutpoint { + return &externalapi.DomainOutpoint{ + TransactionID: outpoint.TxID, + Index: outpoint.Index, + } +} + +// RPCTransactionToDomainTransaction converts RPCTransactions to DomainTransactions +func RPCTransactionToDomainTransaction(rpcTransaction *RPCTransaction) (*externalapi.DomainTransaction, error) { + inputs := make([]*externalapi.DomainTransactionInput, len(rpcTransaction.Inputs)) + for i, input := range rpcTransaction.Inputs { + previousOutpoint, err := RPCOutpointToDomainOutpoint(input.PreviousOutpoint) + if err != nil { + return nil, err + } + signatureScript, err := hex.DecodeString(input.SignatureScript) + if err != nil { + return nil, err + } + inputs[i] = &externalapi.DomainTransactionInput{ + PreviousOutpoint: *previousOutpoint, + SignatureScript: signatureScript, + Sequence: input.Sequence, + SigOpCount: input.SigOpCount, + } + } + outputs := make([]*externalapi.DomainTransactionOutput, len(rpcTransaction.Outputs)) + for i, output := range rpcTransaction.Outputs { + scriptPublicKey, err := hex.DecodeString(output.ScriptPublicKey.Script) + if err != nil { + return nil, err + } + outputs[i] = &externalapi.DomainTransactionOutput{ + Value: output.Amount, + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version}, + } + } + + subnetworkID, err := subnetworks.FromString(rpcTransaction.SubnetworkID) + if err != nil { + return nil, err + } + payload, err := hex.DecodeString(rpcTransaction.Payload) + if err != nil { + return nil, err + } + + return &externalapi.DomainTransaction{ + Version: rpcTransaction.Version, + Inputs: inputs, + Outputs: outputs, + LockTime: rpcTransaction.LockTime, + SubnetworkID: *subnetworkID, + Gas: rpcTransaction.Gas, + Payload: payload, 
+ }, nil +} + +// RPCOutpointToDomainOutpoint converts RPCOutpoint to DomainOutpoint +func RPCOutpointToDomainOutpoint(outpoint *RPCOutpoint) (*externalapi.DomainOutpoint, error) { + transactionID, err := transactionid.FromString(outpoint.TransactionID) + if err != nil { + return nil, err + } + return &externalapi.DomainOutpoint{ + TransactionID: *transactionID, + Index: outpoint.Index, + }, nil +} + +// RPCUTXOEntryToUTXOEntry converts RPCUTXOEntry to UTXOEntry +func RPCUTXOEntryToUTXOEntry(entry *RPCUTXOEntry) (externalapi.UTXOEntry, error) { + script, err := hex.DecodeString(entry.ScriptPublicKey.Script) + if err != nil { + return nil, err + } + + return utxo.NewUTXOEntry( + entry.Amount, + &externalapi.ScriptPublicKey{ + Script: script, + Version: entry.ScriptPublicKey.Version, + }, + entry.IsCoinbase, + entry.BlockDAAScore, + ), nil +} + +// DomainTransactionToRPCTransaction converts DomainTransactions to RPCTransactions +func DomainTransactionToRPCTransaction(transaction *externalapi.DomainTransaction) *RPCTransaction { + inputs := make([]*RPCTransactionInput, len(transaction.Inputs)) + for i, input := range transaction.Inputs { + transactionID := input.PreviousOutpoint.TransactionID.String() + previousOutpoint := &RPCOutpoint{ + TransactionID: transactionID, + Index: input.PreviousOutpoint.Index, + } + signatureScript := hex.EncodeToString(input.SignatureScript) + inputs[i] = &RPCTransactionInput{ + PreviousOutpoint: previousOutpoint, + SignatureScript: signatureScript, + Sequence: input.Sequence, + SigOpCount: input.SigOpCount, + } + } + outputs := make([]*RPCTransactionOutput, len(transaction.Outputs)) + for i, output := range transaction.Outputs { + scriptPublicKey := hex.EncodeToString(output.ScriptPublicKey.Script) + outputs[i] = &RPCTransactionOutput{ + Amount: output.Value, + ScriptPublicKey: &RPCScriptPublicKey{Script: scriptPublicKey, Version: output.ScriptPublicKey.Version}, + } + } + subnetworkID := transaction.SubnetworkID.String() + payload := 
hex.EncodeToString(transaction.Payload) + return &RPCTransaction{ + Version: transaction.Version, + Inputs: inputs, + Outputs: outputs, + LockTime: transaction.LockTime, + SubnetworkID: subnetworkID, + Gas: transaction.Gas, + Payload: payload, + } +} + +// OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs converts +// OutpointAndUTXOEntryPairs to domain OutpointAndUTXOEntryPairs +func OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs( + outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) []*externalapi.OutpointAndUTXOEntryPair { + + domainOutpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs)) + for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs { + domainOutpointAndUTXOEntryPairs[i] = outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair(outpointAndUTXOEntryPair) + } + return domainOutpointAndUTXOEntryPairs +} + +func outpointAndUTXOEntryPairToDomainOutpointAndUTXOEntryPair( + outpointAndUTXOEntryPair *OutpointAndUTXOEntryPair) *externalapi.OutpointAndUTXOEntryPair { + return &externalapi.OutpointAndUTXOEntryPair{ + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: outpointAndUTXOEntryPair.Outpoint.TxID, + Index: outpointAndUTXOEntryPair.Outpoint.Index, + }, + UTXOEntry: utxo.NewUTXOEntry( + outpointAndUTXOEntryPair.UTXOEntry.Amount, + outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey, + outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase, + outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore, + ), + } +} + +// DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs converts +// domain OutpointAndUTXOEntryPairs to OutpointAndUTXOEntryPairs +func DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs( + outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) []*OutpointAndUTXOEntryPair { + + domainOutpointAndUTXOEntryPairs := make([]*OutpointAndUTXOEntryPair, len(outpointAndUTXOEntryPairs)) + for i, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs { 
+ domainOutpointAndUTXOEntryPairs[i] = &OutpointAndUTXOEntryPair{ + Outpoint: &Outpoint{ + TxID: outpointAndUTXOEntryPair.Outpoint.TransactionID, + Index: outpointAndUTXOEntryPair.Outpoint.Index, + }, + UTXOEntry: &UTXOEntry{ + Amount: outpointAndUTXOEntryPair.UTXOEntry.Amount(), + ScriptPublicKey: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey(), + IsCoinbase: outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase(), + BlockDAAScore: outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore(), + }, + } + } + return domainOutpointAndUTXOEntryPairs +} + +// DomainBlockToRPCBlock converts DomainBlocks to RPCBlocks +func DomainBlockToRPCBlock(block *externalapi.DomainBlock) *RPCBlock { + parents := make([]*RPCBlockLevelParents, len(block.Header.Parents())) + for i, blockLevelParents := range block.Header.Parents() { + parents[i] = &RPCBlockLevelParents{ + ParentHashes: hashes.ToStrings(blockLevelParents), + } + } + header := &RPCBlockHeader{ + Version: uint32(block.Header.Version()), + Parents: parents, + HashMerkleRoot: block.Header.HashMerkleRoot().String(), + AcceptedIDMerkleRoot: block.Header.AcceptedIDMerkleRoot().String(), + UTXOCommitment: block.Header.UTXOCommitment().String(), + Timestamp: block.Header.TimeInMilliseconds(), + Bits: block.Header.Bits(), + Nonce: block.Header.Nonce(), + DAAScore: block.Header.DAAScore(), + BlueScore: block.Header.BlueScore(), + BlueWork: block.Header.BlueWork().Text(16), + PruningPoint: block.Header.PruningPoint().String(), + } + transactions := make([]*RPCTransaction, len(block.Transactions)) + for i, transaction := range block.Transactions { + transactions[i] = DomainTransactionToRPCTransaction(transaction) + } + return &RPCBlock{ + Header: header, + Transactions: transactions, + } +} + +// RPCBlockToDomainBlock converts `block` into a DomainBlock +func RPCBlockToDomainBlock(block *RPCBlock) (*externalapi.DomainBlock, error) { + parents := make([]externalapi.BlockLevelParents, len(block.Header.Parents)) + for i, blockLevelParents := range 
block.Header.Parents { + parents[i] = make(externalapi.BlockLevelParents, len(blockLevelParents.ParentHashes)) + for j, parentHash := range blockLevelParents.ParentHashes { + var err error + parents[i][j], err = externalapi.NewDomainHashFromString(parentHash) + if err != nil { + return nil, err + } + } + } + hashMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.HashMerkleRoot) + if err != nil { + return nil, err + } + acceptedIDMerkleRoot, err := externalapi.NewDomainHashFromString(block.Header.AcceptedIDMerkleRoot) + if err != nil { + return nil, err + } + utxoCommitment, err := externalapi.NewDomainHashFromString(block.Header.UTXOCommitment) + if err != nil { + return nil, err + } + blueWork, success := new(big.Int).SetString(block.Header.BlueWork, 16) + if !success { + return nil, errors.Errorf("failed to parse blue work: %s", block.Header.BlueWork) + } + pruningPoint, err := externalapi.NewDomainHashFromString(block.Header.PruningPoint) + if err != nil { + return nil, err + } + header := blockheader.NewImmutableBlockHeader( + uint16(block.Header.Version), + parents, + hashMerkleRoot, + acceptedIDMerkleRoot, + utxoCommitment, + block.Header.Timestamp, + block.Header.Bits, + block.Header.Nonce, + block.Header.DAAScore, + block.Header.BlueScore, + blueWork, + pruningPoint) + transactions := make([]*externalapi.DomainTransaction, len(block.Transactions)) + for i, transaction := range block.Transactions { + domainTransaction, err := RPCTransactionToDomainTransaction(transaction) + if err != nil { + return nil, err + } + transactions[i] = domainTransaction + } + return &externalapi.DomainBlock{ + Header: header, + Transactions: transactions, + }, nil +} + +// BlockWithTrustedDataToDomainBlockWithTrustedData converts *MsgBlockWithTrustedData to *externalapi.BlockWithTrustedData +func BlockWithTrustedDataToDomainBlockWithTrustedData(block *MsgBlockWithTrustedData) *externalapi.BlockWithTrustedData { + daaWindow := 
make([]*externalapi.TrustedDataDataDAAHeader, len(block.DAAWindow)) + for i, daaBlock := range block.DAAWindow { + daaWindow[i] = &externalapi.TrustedDataDataDAAHeader{ + Header: BlockHeaderToDomainBlockHeader(&daaBlock.Block.Header), + GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData), + } + } + + ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData)) + for i, datum := range block.GHOSTDAGData { + ghostdagData[i] = &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: datum.Hash, + GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData), + } + } + + return &externalapi.BlockWithTrustedData{ + Block: MsgBlockToDomainBlock(block.Block), + DAAWindow: daaWindow, + GHOSTDAGData: ghostdagData, + } +} + +// TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader converts *TrustedDataDAAHeader to *externalapi.TrustedDataDataDAAHeader +func TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(daaBlock *TrustedDataDAAHeader) *externalapi.TrustedDataDataDAAHeader { + return &externalapi.TrustedDataDataDAAHeader{ + Header: BlockHeaderToDomainBlockHeader(daaBlock.Header), + GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(daaBlock.GHOSTDAGData), + } +} + +// GHOSTDAGHashPairToDomainGHOSTDAGHashPair converts *BlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair +func GHOSTDAGHashPairToDomainGHOSTDAGHashPair(datum *BlockGHOSTDAGDataHashPair) *externalapi.BlockGHOSTDAGDataHashPair { + return &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: datum.Hash, + GHOSTDAGData: ghostdagDataToDomainGHOSTDAGData(datum.GHOSTDAGData), + } +} + +func ghostdagDataToDomainGHOSTDAGData(data *BlockGHOSTDAGData) *externalapi.BlockGHOSTDAGData { + bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(data.BluesAnticoneSizes)) + for _, pair := range data.BluesAnticoneSizes { + bluesAnticoneSizes[*pair.BlueHash] = pair.AnticoneSize + } + return externalapi.NewBlockGHOSTDAGData( + data.BlueScore, + data.BlueWork, + 
data.SelectedParent, + data.MergeSetBlues, + data.MergeSetReds, + bluesAnticoneSizes, + ) +} + +func domainGHOSTDAGDataGHOSTDAGData(data *externalapi.BlockGHOSTDAGData) *BlockGHOSTDAGData { + bluesAnticoneSizes := make([]*BluesAnticoneSizes, 0, len(data.BluesAnticoneSizes())) + for blueHash, anticoneSize := range data.BluesAnticoneSizes() { + blueHashCopy := blueHash + bluesAnticoneSizes = append(bluesAnticoneSizes, &BluesAnticoneSizes{ + BlueHash: &blueHashCopy, + AnticoneSize: anticoneSize, + }) + } + + return &BlockGHOSTDAGData{ + BlueScore: data.BlueScore(), + BlueWork: data.BlueWork(), + SelectedParent: data.SelectedParent(), + MergeSetBlues: data.MergeSetBlues(), + MergeSetReds: data.MergeSetReds(), + BluesAnticoneSizes: bluesAnticoneSizes, + } +} + +// DomainBlockWithTrustedDataToBlockWithTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData +func DomainBlockWithTrustedDataToBlockWithTrustedData(block *externalapi.BlockWithTrustedData) *MsgBlockWithTrustedData { + daaWindow := make([]*TrustedDataDataDAABlock, len(block.DAAWindow)) + for i, daaBlock := range block.DAAWindow { + daaWindow[i] = &TrustedDataDataDAABlock{ + Block: &MsgBlock{ + Header: *DomainBlockHeaderToBlockHeader(daaBlock.Header), + }, + GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData), + } + } + + ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(block.GHOSTDAGData)) + for i, datum := range block.GHOSTDAGData { + ghostdagData[i] = &BlockGHOSTDAGDataHashPair{ + Hash: datum.Hash, + GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData), + } + } + + return &MsgBlockWithTrustedData{ + Block: DomainBlockToMsgBlock(block.Block), + DAAScore: block.Block.Header.DAAScore(), + DAAWindow: daaWindow, + GHOSTDAGData: ghostdagData, + } +} + +// DomainBlockWithTrustedDataToBlockWithTrustedDataV4 converts a set of *externalapi.DomainBlock, daa window indices and ghostdag data indices +// to *MsgBlockWithTrustedDataV4 +func 
DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block *externalapi.DomainBlock, daaWindowIndices, ghostdagDataIndices []uint64) *MsgBlockWithTrustedDataV4 { + return &MsgBlockWithTrustedDataV4{ + Block: DomainBlockToMsgBlock(block), + DAAWindowIndices: daaWindowIndices, + GHOSTDAGDataIndices: ghostdagDataIndices, + } +} + +// DomainTrustedDataToTrustedData converts *externalapi.BlockWithTrustedData to *MsgBlockWithTrustedData +func DomainTrustedDataToTrustedData(domainDAAWindow []*externalapi.TrustedDataDataDAAHeader, domainGHOSTDAGData []*externalapi.BlockGHOSTDAGDataHashPair) *MsgTrustedData { + daaWindow := make([]*TrustedDataDAAHeader, len(domainDAAWindow)) + for i, daaBlock := range domainDAAWindow { + daaWindow[i] = &TrustedDataDAAHeader{ + Header: DomainBlockHeaderToBlockHeader(daaBlock.Header), + GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(daaBlock.GHOSTDAGData), + } + } + + ghostdagData := make([]*BlockGHOSTDAGDataHashPair, len(domainGHOSTDAGData)) + for i, datum := range domainGHOSTDAGData { + ghostdagData[i] = &BlockGHOSTDAGDataHashPair{ + Hash: datum.Hash, + GHOSTDAGData: domainGHOSTDAGDataGHOSTDAGData(datum.GHOSTDAGData), + } + } + + return &MsgTrustedData{ + DAAWindow: daaWindow, + GHOSTDAGData: ghostdagData, + } +} + +// MsgPruningPointProofToDomainPruningPointProof converts *MsgPruningPointProof to *externalapi.PruningPointProof +func MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage *MsgPruningPointProof) *externalapi.PruningPointProof { + headers := make([][]externalapi.BlockHeader, len(pruningPointProofMessage.Headers)) + for blockLevel, blockLevelParents := range pruningPointProofMessage.Headers { + headers[blockLevel] = make([]externalapi.BlockHeader, len(blockLevelParents)) + for i, header := range blockLevelParents { + headers[blockLevel][i] = BlockHeaderToDomainBlockHeader(header) + } + } + return &externalapi.PruningPointProof{ + Headers: headers, + } +} + +// DomainPruningPointProofToMsgPruningPointProof converts 
*externalapi.PruningPointProof to *MsgPruningPointProof +func DomainPruningPointProofToMsgPruningPointProof(pruningPointProof *externalapi.PruningPointProof) *MsgPruningPointProof { + headers := make([][]*MsgBlockHeader, len(pruningPointProof.Headers)) + for blockLevel, blockLevelParents := range pruningPointProof.Headers { + headers[blockLevel] = make([]*MsgBlockHeader, len(blockLevelParents)) + for i, header := range blockLevelParents { + headers[blockLevel][i] = DomainBlockHeaderToBlockHeader(header) + } + } + return &MsgPruningPointProof{ + Headers: headers, + } +} diff --git a/app/appmessage/error.go b/app/appmessage/error.go new file mode 100644 index 0000000..32b9f4c --- /dev/null +++ b/app/appmessage/error.go @@ -0,0 +1,51 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "fmt" +) + +// MessageError describes an issue with a message. +// An example of some potential issues are messages from the wrong spectre +// network, invalid commands, mismatched checksums, and exceeding max payloads. +// +// This provides a mechanism for the caller to type assert the error to +// differentiate between general io errors such as io.EOF and issues that +// resulted from malformed messages. +type MessageError struct { + Func string // Function name + Description string // Human readable description of the issue +} + +// Error satisfies the error interface and prints human-readable errors. +func (e *MessageError) Error() string { + if e.Func != "" { + return fmt.Sprintf("%s: %s", e.Func, e.Description) + } + return e.Description +} + +// messageError creates an error for the given function and description. 
+func messageError(f string, desc string) *MessageError { + return &MessageError{Func: f, Description: desc} +} + +// RPCError represents an error arriving from the RPC +type RPCError struct { + Message string +} + +func (err RPCError) Error() string { + return err.Message +} + +// RPCErrorf formats according to a format specifier and returns the string +// as an RPCError. +func RPCErrorf(format string, args ...interface{}) *RPCError { + return &RPCError{ + Message: fmt.Sprintf(format, args...), + } +} diff --git a/app/appmessage/fixedIO_test.go b/app/appmessage/fixedIO_test.go new file mode 100644 index 0000000..24175e9 --- /dev/null +++ b/app/appmessage/fixedIO_test.go @@ -0,0 +1,77 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "bytes" + "io" +) + +// fixedWriter implements the io.Writer interface and intentionally allows +// testing of error paths by forcing short writes. +type fixedWriter struct { + b []byte + pos int +} + +// Write writes the contents of p to w. When the contents of p would cause +// the writer to exceed the maximum allowed size of the fixed writer, +// io.ErrShortWrite is returned and the writer is left unchanged. +// +// This satisfies the io.Writer interface. +func (w *fixedWriter) Write(p []byte) (n int, err error) { + lenp := len(p) + if w.pos+lenp > cap(w.b) { + return 0, io.ErrShortWrite + } + n = lenp + w.pos += copy(w.b[w.pos:], p) + return +} + +// Bytes returns the bytes already written to the fixed writer. +func (w *fixedWriter) Bytes() []byte { + return w.b +} + +// newFixedWriter returns a new io.Writer that will error once more bytes than +// the specified max have been written.
+func newFixedWriter(max int) io.Writer { + b := make([]byte, max) + fw := fixedWriter{b, 0} + return &fw +} + +// fixedReader implements the io.Reader interface and intentionally allows +// testing of error paths by forcing short reads. +type fixedReader struct { + buf []byte + pos int + iobuf *bytes.Buffer +} + +// Read reads the next len(p) bytes from the fixed reader. When the number of +// bytes read would exceed the maximum number of allowed bytes to be read from +// the fixed writer, an error is returned. +// +// This satisfies the io.Reader interface. +func (fr *fixedReader) Read(p []byte) (n int, err error) { + n, err = fr.iobuf.Read(p) + fr.pos += n + return +} + +// newFixedReader returns a new io.Reader that will error once more bytes than +// the specified max have been read. +func newFixedReader(max int, buf []byte) io.Reader { + b := make([]byte, max) + if buf != nil { + copy(b[:], buf) + } + + iobuf := bytes.NewBuffer(b) + fr := fixedReader{b, 0, iobuf} + return &fr +} diff --git a/app/appmessage/message.go b/app/appmessage/message.go new file mode 100644 index 0000000..fb3b3e8 --- /dev/null +++ b/app/appmessage/message.go @@ -0,0 +1,315 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "fmt" + "time" +) + +// MaxMessagePayload is the maximum bytes a message can be regardless of other +// individual limits imposed by messages themselves. +const MaxMessagePayload = 1024 * 1024 * 32 // 32MB + +// MessageCommand is a number in the header of a message that represents its type.
+type MessageCommand uint32 + +func (cmd MessageCommand) String() string { + cmdString, ok := ProtocolMessageCommandToString[cmd] + if !ok { + cmdString, ok = RPCMessageCommandToString[cmd] + } + if !ok { + cmdString = "unknown command" + } + return fmt.Sprintf("%s [code %d]", cmdString, uint8(cmd)) +} + +// Commands used in spectre message headers which describe the type of message. +const ( + // protocol + CmdVersion MessageCommand = iota + CmdVerAck + CmdRequestAddresses + CmdAddresses + CmdRequestHeaders + CmdBlock + CmdTx + CmdPing + CmdPong + CmdRequestBlockLocator + CmdBlockLocator + CmdInvRelayBlock + CmdRequestRelayBlocks + CmdInvTransaction + CmdRequestTransactions + CmdDoneHeaders + CmdTransactionNotFound + CmdReject + CmdRequestNextHeaders + CmdRequestPruningPointUTXOSet + CmdPruningPointUTXOSetChunk + CmdUnexpectedPruningPoint + CmdIBDBlockLocator + CmdIBDBlockLocatorHighestHash + CmdIBDBlockLocatorHighestHashNotFound + CmdBlockHeaders + CmdRequestNextPruningPointUTXOSetChunk + CmdDonePruningPointUTXOSetChunks + CmdBlockWithTrustedData + CmdDoneBlocksWithTrustedData + CmdRequestPruningPointAndItsAnticone + CmdIBDBlock + CmdRequestIBDBlocks + CmdPruningPoints + CmdRequestPruningPointProof + CmdPruningPointProof + CmdReady + CmdTrustedData + CmdBlockWithTrustedDataV4 + CmdRequestNextPruningPointAndItsAnticoneBlocks + CmdRequestIBDChainBlockLocator + CmdIBDChainBlockLocator + CmdRequestAnticone + + // rpc + CmdGetCurrentNetworkRequestMessage + CmdGetCurrentNetworkResponseMessage + CmdSubmitBlockRequestMessage + CmdSubmitBlockResponseMessage + CmdGetBlockTemplateRequestMessage + CmdGetBlockTemplateResponseMessage + CmdGetBlockTemplateTransactionMessage + CmdNotifyBlockAddedRequestMessage + CmdNotifyBlockAddedResponseMessage + CmdBlockAddedNotificationMessage + CmdGetPeerAddressesRequestMessage + CmdGetPeerAddressesResponseMessage + CmdGetSelectedTipHashRequestMessage + CmdGetSelectedTipHashResponseMessage + CmdGetMempoolEntryRequestMessage + 
CmdGetMempoolEntryResponseMessage + CmdGetConnectedPeerInfoRequestMessage + CmdGetConnectedPeerInfoResponseMessage + CmdAddPeerRequestMessage + CmdAddPeerResponseMessage + CmdSubmitTransactionRequestMessage + CmdSubmitTransactionResponseMessage + CmdNotifyVirtualSelectedParentChainChangedRequestMessage + CmdNotifyVirtualSelectedParentChainChangedResponseMessage + CmdVirtualSelectedParentChainChangedNotificationMessage + CmdGetBlockRequestMessage + CmdGetBlockResponseMessage + CmdGetSubnetworkRequestMessage + CmdGetSubnetworkResponseMessage + CmdGetVirtualSelectedParentChainFromBlockRequestMessage + CmdGetVirtualSelectedParentChainFromBlockResponseMessage + CmdGetBlocksRequestMessage + CmdGetBlocksResponseMessage + CmdGetBlockCountRequestMessage + CmdGetBlockCountResponseMessage + CmdGetBlockDAGInfoRequestMessage + CmdGetBlockDAGInfoResponseMessage + CmdResolveFinalityConflictRequestMessage + CmdResolveFinalityConflictResponseMessage + CmdNotifyFinalityConflictsRequestMessage + CmdNotifyFinalityConflictsResponseMessage + CmdFinalityConflictNotificationMessage + CmdFinalityConflictResolvedNotificationMessage + CmdGetMempoolEntriesRequestMessage + CmdGetMempoolEntriesResponseMessage + CmdShutDownRequestMessage + CmdShutDownResponseMessage + CmdGetHeadersRequestMessage + CmdGetHeadersResponseMessage + CmdNotifyUTXOsChangedRequestMessage + CmdNotifyUTXOsChangedResponseMessage + CmdUTXOsChangedNotificationMessage + CmdStopNotifyingUTXOsChangedRequestMessage + CmdStopNotifyingUTXOsChangedResponseMessage + CmdGetUTXOsByAddressesRequestMessage + CmdGetUTXOsByAddressesResponseMessage + CmdGetBalanceByAddressRequestMessage + CmdGetBalanceByAddressResponseMessage + CmdGetVirtualSelectedParentBlueScoreRequestMessage + CmdGetVirtualSelectedParentBlueScoreResponseMessage + CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage + CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage + CmdVirtualSelectedParentBlueScoreChangedNotificationMessage + CmdBanRequestMessage 
+ CmdBanResponseMessage + CmdUnbanRequestMessage + CmdUnbanResponseMessage + CmdGetInfoRequestMessage + CmdGetInfoResponseMessage + CmdNotifyPruningPointUTXOSetOverrideRequestMessage + CmdNotifyPruningPointUTXOSetOverrideResponseMessage + CmdPruningPointUTXOSetOverrideNotificationMessage + CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage + CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage + CmdEstimateNetworkHashesPerSecondRequestMessage + CmdEstimateNetworkHashesPerSecondResponseMessage + CmdNotifyVirtualDaaScoreChangedRequestMessage + CmdNotifyVirtualDaaScoreChangedResponseMessage + CmdVirtualDaaScoreChangedNotificationMessage + CmdGetBalancesByAddressesRequestMessage + CmdGetBalancesByAddressesResponseMessage + CmdNotifyNewBlockTemplateRequestMessage + CmdNotifyNewBlockTemplateResponseMessage + CmdNewBlockTemplateNotificationMessage + CmdGetMempoolEntriesByAddressesRequestMessage + CmdGetMempoolEntriesByAddressesResponseMessage + CmdGetCoinSupplyRequestMessage + CmdGetCoinSupplyResponseMessage +) + +// ProtocolMessageCommandToString maps all MessageCommands to their string representation +var ProtocolMessageCommandToString = map[MessageCommand]string{ + CmdVersion: "Version", + CmdVerAck: "VerAck", + CmdRequestAddresses: "RequestAddresses", + CmdAddresses: "Addresses", + CmdRequestHeaders: "CmdRequestHeaders", + CmdBlock: "Block", + CmdTx: "Tx", + CmdPing: "Ping", + CmdPong: "Pong", + CmdRequestBlockLocator: "RequestBlockLocator", + CmdBlockLocator: "BlockLocator", + CmdInvRelayBlock: "InvRelayBlock", + CmdRequestRelayBlocks: "RequestRelayBlocks", + CmdInvTransaction: "InvTransaction", + CmdRequestTransactions: "RequestTransactions", + CmdDoneHeaders: "DoneHeaders", + CmdTransactionNotFound: "TransactionNotFound", + CmdReject: "Reject", + CmdRequestNextHeaders: "RequestNextHeaders", + CmdRequestPruningPointUTXOSet: "RequestPruningPointUTXOSet", + CmdPruningPointUTXOSetChunk: "PruningPointUTXOSetChunk", + CmdUnexpectedPruningPoint: 
"UnexpectedPruningPoint", + CmdIBDBlockLocator: "IBDBlockLocator", + CmdIBDBlockLocatorHighestHash: "IBDBlockLocatorHighestHash", + CmdIBDBlockLocatorHighestHashNotFound: "IBDBlockLocatorHighestHashNotFound", + CmdBlockHeaders: "BlockHeaders", + CmdRequestNextPruningPointUTXOSetChunk: "RequestNextPruningPointUTXOSetChunk", + CmdDonePruningPointUTXOSetChunks: "DonePruningPointUTXOSetChunks", + CmdBlockWithTrustedData: "BlockWithTrustedData", + CmdDoneBlocksWithTrustedData: "DoneBlocksWithTrustedData", + CmdRequestPruningPointAndItsAnticone: "RequestPruningPointAndItsAnticoneHeaders", + CmdIBDBlock: "IBDBlock", + CmdRequestIBDBlocks: "RequestIBDBlocks", + CmdPruningPoints: "PruningPoints", + CmdRequestPruningPointProof: "RequestPruningPointProof", + CmdPruningPointProof: "PruningPointProof", + CmdReady: "Ready", + CmdTrustedData: "TrustedData", + CmdBlockWithTrustedDataV4: "BlockWithTrustedDataV4", + CmdRequestNextPruningPointAndItsAnticoneBlocks: "RequestNextPruningPointAndItsAnticoneBlocks", + CmdRequestIBDChainBlockLocator: "RequestIBDChainBlockLocator", + CmdIBDChainBlockLocator: "IBDChainBlockLocator", + CmdRequestAnticone: "RequestAnticone", +} + +// RPCMessageCommandToString maps all MessageCommands to their string representation +var RPCMessageCommandToString = map[MessageCommand]string{ + CmdGetCurrentNetworkRequestMessage: "GetCurrentNetworkRequest", + CmdGetCurrentNetworkResponseMessage: "GetCurrentNetworkResponse", + CmdSubmitBlockRequestMessage: "SubmitBlockRequest", + CmdSubmitBlockResponseMessage: "SubmitBlockResponse", + CmdGetBlockTemplateRequestMessage: "GetBlockTemplateRequest", + CmdGetBlockTemplateResponseMessage: "GetBlockTemplateResponse", + CmdGetBlockTemplateTransactionMessage: "CmdGetBlockTemplateTransaction", + CmdNotifyBlockAddedRequestMessage: "NotifyBlockAddedRequest", + CmdNotifyBlockAddedResponseMessage: "NotifyBlockAddedResponse", + CmdBlockAddedNotificationMessage: "BlockAddedNotification", + CmdGetPeerAddressesRequestMessage: 
"GetPeerAddressesRequest", + CmdGetPeerAddressesResponseMessage: "GetPeerAddressesResponse", + CmdGetSelectedTipHashRequestMessage: "GetSelectedTipHashRequest", + CmdGetSelectedTipHashResponseMessage: "GetSelectedTipHashResponse", + CmdGetMempoolEntryRequestMessage: "GetMempoolEntryRequest", + CmdGetMempoolEntryResponseMessage: "GetMempoolEntryResponse", + CmdGetConnectedPeerInfoRequestMessage: "GetConnectedPeerInfoRequest", + CmdGetConnectedPeerInfoResponseMessage: "GetConnectedPeerInfoResponse", + CmdAddPeerRequestMessage: "AddPeerRequest", + CmdAddPeerResponseMessage: "AddPeerResponse", + CmdSubmitTransactionRequestMessage: "SubmitTransactionRequest", + CmdSubmitTransactionResponseMessage: "SubmitTransactionResponse", + CmdNotifyVirtualSelectedParentChainChangedRequestMessage: "NotifyVirtualSelectedParentChainChangedRequest", + CmdNotifyVirtualSelectedParentChainChangedResponseMessage: "NotifyVirtualSelectedParentChainChangedResponse", + CmdVirtualSelectedParentChainChangedNotificationMessage: "VirtualSelectedParentChainChangedNotification", + CmdGetBlockRequestMessage: "GetBlockRequest", + CmdGetBlockResponseMessage: "GetBlockResponse", + CmdGetSubnetworkRequestMessage: "GetSubnetworkRequest", + CmdGetSubnetworkResponseMessage: "GetSubnetworkResponse", + CmdGetVirtualSelectedParentChainFromBlockRequestMessage: "GetVirtualSelectedParentChainFromBlockRequest", + CmdGetVirtualSelectedParentChainFromBlockResponseMessage: "GetVirtualSelectedParentChainFromBlockResponse", + CmdGetBlocksRequestMessage: "GetBlocksRequest", + CmdGetBlocksResponseMessage: "GetBlocksResponse", + CmdGetBlockCountRequestMessage: "GetBlockCountRequest", + CmdGetBlockCountResponseMessage: "GetBlockCountResponse", + CmdGetBlockDAGInfoRequestMessage: "GetBlockDAGInfoRequest", + CmdGetBlockDAGInfoResponseMessage: "GetBlockDAGInfoResponse", + CmdResolveFinalityConflictRequestMessage: "ResolveFinalityConflictRequest", + CmdResolveFinalityConflictResponseMessage: "ResolveFinalityConflictResponse", 
+ CmdNotifyFinalityConflictsRequestMessage: "NotifyFinalityConflictsRequest", + CmdNotifyFinalityConflictsResponseMessage: "NotifyFinalityConflictsResponse", + CmdFinalityConflictNotificationMessage: "FinalityConflictNotification", + CmdFinalityConflictResolvedNotificationMessage: "FinalityConflictResolvedNotification", + CmdGetMempoolEntriesRequestMessage: "GetMempoolEntriesRequest", + CmdGetMempoolEntriesResponseMessage: "GetMempoolEntriesResponse", + CmdGetHeadersRequestMessage: "GetHeadersRequest", + CmdGetHeadersResponseMessage: "GetHeadersResponse", + CmdNotifyUTXOsChangedRequestMessage: "NotifyUTXOsChangedRequest", + CmdNotifyUTXOsChangedResponseMessage: "NotifyUTXOsChangedResponse", + CmdUTXOsChangedNotificationMessage: "UTXOsChangedNotification", + CmdStopNotifyingUTXOsChangedRequestMessage: "StopNotifyingUTXOsChangedRequest", + CmdStopNotifyingUTXOsChangedResponseMessage: "StopNotifyingUTXOsChangedResponse", + CmdGetUTXOsByAddressesRequestMessage: "GetUTXOsByAddressesRequest", + CmdGetUTXOsByAddressesResponseMessage: "GetUTXOsByAddressesResponse", + CmdGetBalanceByAddressRequestMessage: "GetBalanceByAddressRequest", + CmdGetBalanceByAddressResponseMessage: "GetBalancesByAddressResponse", + CmdGetVirtualSelectedParentBlueScoreRequestMessage: "GetVirtualSelectedParentBlueScoreRequest", + CmdGetVirtualSelectedParentBlueScoreResponseMessage: "GetVirtualSelectedParentBlueScoreResponse", + CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: "NotifyVirtualSelectedParentBlueScoreChangedRequest", + CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage: "NotifyVirtualSelectedParentBlueScoreChangedResponse", + CmdVirtualSelectedParentBlueScoreChangedNotificationMessage: "VirtualSelectedParentBlueScoreChangedNotification", + CmdBanRequestMessage: "BanRequest", + CmdBanResponseMessage: "BanResponse", + CmdUnbanRequestMessage: "UnbanRequest", + CmdUnbanResponseMessage: "UnbanResponse", + CmdGetInfoRequestMessage: "GetInfoRequest", + 
CmdGetInfoResponseMessage: "GeInfoResponse", + CmdNotifyPruningPointUTXOSetOverrideRequestMessage: "NotifyPruningPointUTXOSetOverrideRequest", + CmdNotifyPruningPointUTXOSetOverrideResponseMessage: "NotifyPruningPointUTXOSetOverrideResponse", + CmdPruningPointUTXOSetOverrideNotificationMessage: "PruningPointUTXOSetOverrideNotification", + CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: "StopNotifyingPruningPointUTXOSetOverrideRequest", + CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage: "StopNotifyingPruningPointUTXOSetOverrideResponse", + CmdEstimateNetworkHashesPerSecondRequestMessage: "EstimateNetworkHashesPerSecondRequest", + CmdEstimateNetworkHashesPerSecondResponseMessage: "EstimateNetworkHashesPerSecondResponse", + CmdNotifyVirtualDaaScoreChangedRequestMessage: "NotifyVirtualDaaScoreChangedRequest", + CmdNotifyVirtualDaaScoreChangedResponseMessage: "NotifyVirtualDaaScoreChangedResponse", + CmdVirtualDaaScoreChangedNotificationMessage: "VirtualDaaScoreChangedNotification", + CmdGetBalancesByAddressesRequestMessage: "GetBalancesByAddressesRequest", + CmdGetBalancesByAddressesResponseMessage: "GetBalancesByAddressesResponse", + CmdNotifyNewBlockTemplateRequestMessage: "NotifyNewBlockTemplateRequest", + CmdNotifyNewBlockTemplateResponseMessage: "NotifyNewBlockTemplateResponse", + CmdNewBlockTemplateNotificationMessage: "NewBlockTemplateNotification", + CmdGetMempoolEntriesByAddressesRequestMessage: "GetMempoolEntriesByAddressesRequest", + CmdGetMempoolEntriesByAddressesResponseMessage: "GetMempoolEntriesByAddressesResponse", + CmdGetCoinSupplyRequestMessage: "GetCoinSupplyRequest", + CmdGetCoinSupplyResponseMessage: "GetCoinSupplyResponse", +} + +// Message is an interface that describes a spectre message. A type that +// implements Message has complete control over the representation of its data +// and may therefore contain additional or fewer fields than those which +// are used directly in the protocol encoded message. 
+type Message interface { + Command() MessageCommand + MessageNumber() uint64 + SetMessageNumber(index uint64) + ReceivedAt() time.Time + SetReceivedAt(receivedAt time.Time) +} diff --git a/app/appmessage/p2p_blockheaders.go b/app/appmessage/p2p_blockheaders.go new file mode 100644 index 0000000..ab36a80 --- /dev/null +++ b/app/appmessage/p2p_blockheaders.go @@ -0,0 +1,19 @@ +package appmessage + +// BlockHeadersMessage represents a spectre BlockHeaders message +type BlockHeadersMessage struct { + baseMessage + BlockHeaders []*MsgBlockHeader +} + +// Command returns the protocol command string for the message +func (msg *BlockHeadersMessage) Command() MessageCommand { + return CmdBlockHeaders +} + +// NewBlockHeadersMessage returns a new spectre BlockHeaders message +func NewBlockHeadersMessage(blockHeaders []*MsgBlockHeader) *BlockHeadersMessage { + return &BlockHeadersMessage{ + BlockHeaders: blockHeaders, + } +} diff --git a/app/appmessage/p2p_msgaddresses.go b/app/appmessage/p2p_msgaddresses.go new file mode 100644 index 0000000..29e216a --- /dev/null +++ b/app/appmessage/p2p_msgaddresses.go @@ -0,0 +1,30 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +// MaxAddressesPerMsg is the maximum number of addresses that can be in a single +// spectre Addresses message (MsgAddresses). +const MaxAddressesPerMsg = 1000 + +// MsgAddresses implements the Message interface and represents a spectre +// Addresses message. +type MsgAddresses struct { + baseMessage + AddressList []*NetAddress +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgAddresses) Command() MessageCommand { + return CmdAddresses +} + +// NewMsgAddresses returns a new spectre Addresses message that conforms to the +// Message interface. See MsgAddresses for details. 
+func NewMsgAddresses(addressList []*NetAddress) *MsgAddresses { + return &MsgAddresses{ + AddressList: addressList, + } +} diff --git a/app/appmessage/p2p_msgblock.go b/app/appmessage/p2p_msgblock.go new file mode 100644 index 0000000..b3bf57d --- /dev/null +++ b/app/appmessage/p2p_msgblock.go @@ -0,0 +1,74 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// defaultTransactionAlloc is the default size used for the backing array +// for transactions. The transaction array will dynamically grow as needed, but +// this figure is intended to provide enough space for the number of +// transactions in the vast majority of blocks without needing to grow the +// backing array multiple times. +const defaultTransactionAlloc = 2048 + +// TxLoc holds locator data for the offset and length of where a transaction is +// located within a MsgBlock data buffer. +type TxLoc struct { + TxStart int + TxLen int +} + +// MsgBlock implements the Message interface and represents a spectre +// block message. It is used to deliver block and transaction information in +// response to a getdata message (MsgGetData) for a given block hash. +type MsgBlock struct { + baseMessage + Header MsgBlockHeader + Transactions []*MsgTx +} + +// AddTransaction adds a transaction to the message. +func (msg *MsgBlock) AddTransaction(tx *MsgTx) { + msg.Transactions = append(msg.Transactions, tx) +} + +// ClearTransactions removes all transactions from the message. +func (msg *MsgBlock) ClearTransactions() { + msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc) +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. 
+func (msg *MsgBlock) Command() MessageCommand { + return CmdBlock +} + +// MaxPayloadLength returns the maximum length the payload can be for the +// receiver. This is part of the Message interface implementation. +func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 { + return MaxMessagePayload +} + +// ConvertToPartial clears out all the payloads of the subnetworks that are +// incompatible with the given subnetwork ID. +// Note: this operation modifies the block in place. +func (msg *MsgBlock) ConvertToPartial(subnetworkID *externalapi.DomainSubnetworkID) { + for _, tx := range msg.Transactions { + if !tx.SubnetworkID.Equal(subnetworkID) { + tx.Payload = []byte{} + } + } +} + +// NewMsgBlock returns a new spectre block message that conforms to the +// Message interface. See MsgBlock for details. +func NewMsgBlock(blockHeader *MsgBlockHeader) *MsgBlock { + return &MsgBlock{ + Header: *blockHeader, + Transactions: make([]*MsgTx, 0, defaultTransactionAlloc), + } +} diff --git a/app/appmessage/p2p_msgblock_test.go b/app/appmessage/p2p_msgblock_test.go new file mode 100644 index 0000000..57444a7 --- /dev/null +++ b/app/appmessage/p2p_msgblock_test.go @@ -0,0 +1,246 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "math" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/spectre-project/spectred/util/mstime" + + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestBlock tests the MsgBlock API. +func TestBlock(t *testing.T) { + pver := uint32(4) + + // Block 1 header. 
+ parents := blockOne.Header.Parents + hashMerkleRoot := blockOne.Header.HashMerkleRoot + acceptedIDMerkleRoot := blockOne.Header.AcceptedIDMerkleRoot + utxoCommitment := blockOne.Header.UTXOCommitment + bits := blockOne.Header.Bits + nonce := blockOne.Header.Nonce + daaScore := blockOne.Header.DAAScore + blueScore := blockOne.Header.BlueScore + blueWork := blockOne.Header.BlueWork + pruningPoint := blockOne.Header.PruningPoint + bh := NewBlockHeader(1, parents, hashMerkleRoot, acceptedIDMerkleRoot, utxoCommitment, bits, nonce, + daaScore, blueScore, blueWork, pruningPoint) + + // Ensure the command is expected value. + wantCmd := MessageCommand(5) + msg := NewMsgBlock(bh) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgBlock: wrong command - got %v want %v", + cmd, wantCmd) + } + + // Ensure max payload is expected value for latest protocol version. + wantPayload := uint32(1024 * 1024 * 32) + maxPayload := msg.MaxPayloadLength(pver) + if maxPayload != wantPayload { + t.Errorf("MaxPayloadLength: wrong max payload length for "+ + "protocol version %d - got %v, want %v", pver, + maxPayload, wantPayload) + } + + // Ensure we get the same block header data back out. + if !reflect.DeepEqual(&msg.Header, bh) { + t.Errorf("NewMsgBlock: wrong block header - got %v, want %v", + spew.Sdump(&msg.Header), spew.Sdump(bh)) + } + + // Ensure transactions are added properly. + tx := blockOne.Transactions[0].Copy() + msg.AddTransaction(tx) + if !reflect.DeepEqual(msg.Transactions, blockOne.Transactions) { + t.Errorf("AddTransaction: wrong transactions - got %v, want %v", + spew.Sdump(msg.Transactions), + spew.Sdump(blockOne.Transactions)) + } + + // Ensure transactions are properly cleared. 
+ msg.ClearTransactions() + if len(msg.Transactions) != 0 { + t.Errorf("ClearTransactions: wrong transactions - got %v, want %v", + len(msg.Transactions), 0) + } +} + +func TestConvertToPartial(t *testing.T) { + localSubnetworkID := &externalapi.DomainSubnetworkID{0x12} + + transactions := []struct { + subnetworkID *externalapi.DomainSubnetworkID + payload []byte + expectedPayloadLength int + }{ + { + subnetworkID: &subnetworks.SubnetworkIDNative, + payload: []byte{}, + expectedPayloadLength: 0, + }, + { + subnetworkID: &subnetworks.SubnetworkIDRegistry, + payload: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, + expectedPayloadLength: 0, + }, + { + subnetworkID: localSubnetworkID, + payload: []byte{0x01}, + expectedPayloadLength: 1, + }, + { + subnetworkID: &externalapi.DomainSubnetworkID{0x34}, + payload: []byte{0x02}, + expectedPayloadLength: 0, + }, + } + + block := MsgBlock{} + payload := []byte{1} + for _, transaction := range transactions { + block.Transactions = append(block.Transactions, NewSubnetworkMsgTx(1, nil, nil, transaction.subnetworkID, 0, payload)) + } + + block.ConvertToPartial(localSubnetworkID) + + for _, testTransaction := range transactions { + var subnetworkTx *MsgTx + for _, blockTransaction := range block.Transactions { + if blockTransaction.SubnetworkID.Equal(testTransaction.subnetworkID) { + subnetworkTx = blockTransaction + } + } + if subnetworkTx == nil { + t.Errorf("ConvertToPartial: subnetworkID '%s' not found in block!", testTransaction.subnetworkID) + continue + } + + payloadLength := len(subnetworkTx.Payload) + if payloadLength != testTransaction.expectedPayloadLength { + t.Errorf("ConvertToPartial: unexpected payload length for subnetwork '%s': expected: %d, got: %d", + testTransaction.subnetworkID, testTransaction.expectedPayloadLength, payloadLength) + } + } +} + +// blockOne is the first block in the mainnet block DAG. 
+var blockOne = MsgBlock{ + Header: MsgBlockHeader{ + Version: 0, + Parents: []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}}, + HashMerkleRoot: mainnetGenesisMerkleRoot, + AcceptedIDMerkleRoot: exampleAcceptedIDMerkleRoot, + UTXOCommitment: exampleUTXOCommitment, + Timestamp: mstime.UnixMilliseconds(0x17315ed0f99), + Bits: 0x1d00ffff, // 486604799 + Nonce: 0x9962e301, // 2573394689 + }, + Transactions: []*MsgTx{ + NewNativeMsgTx(1, + []*TxIn{ + { + PreviousOutpoint: Outpoint{ + TxID: externalapi.DomainTransactionID{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{ + 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, + }, + Sequence: math.MaxUint64, + }, + }, + []*TxOut{ + { + Value: 0x12a05f200, + ScriptPubKey: &externalapi.ScriptPublicKey{ + Script: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, + 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, + 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, + 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, + 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, + 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, + 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, + 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, + 0xee, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + Version: 0}, + }, + }), + }, +} + +// Block one serialized bytes. 
+var blockOneBytes = []byte{ + 0x00, 0x00, // Version 0 + 0x02, // NumParentBlocks + 0xdc, 0x5f, 0x5b, 0x5b, 0x1d, 0xc2, 0xa7, 0x25, // mainnetGenesisHash + 0x49, 0xd5, 0x1d, 0x4d, 0xee, 0xd7, 0xa4, 0x8b, + 0xaf, 0xd3, 0x14, 0x4b, 0x56, 0x78, 0x98, 0xb1, + 0x8c, 0xfd, 0x9f, 0x69, 0xdd, 0xcf, 0xbb, 0x63, + 0xf6, 0x7a, 0xd7, 0x69, 0x5d, 0x9b, 0x66, 0x2a, // simnetGenesisHash + 0x72, 0xff, 0x3d, 0x8e, 0xdb, 0xbb, 0x2d, 0xe0, + 0xbf, 0xa6, 0x7b, 0x13, 0x97, 0x4b, 0xb9, 0x91, + 0x0d, 0x11, 0x6d, 0x5c, 0xbd, 0x86, 0x3e, 0x68, + 0x4a, 0x5e, 0x1e, 0x4b, 0xaa, 0xb8, 0x9f, 0x3a, // HashMerkleRoot + 0x32, 0x51, 0x8a, 0x88, 0xc3, 0x1b, 0xc8, 0x7f, + 0x61, 0x8f, 0x76, 0x67, 0x3e, 0x2c, 0xc7, 0x7a, + 0xb2, 0x12, 0x7b, 0x7a, 0xfd, 0xed, 0xa3, 0x3b, + 0x09, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // AcceptedIDMerkleRoot + 0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87, + 0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, + 0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F, + 0x10, 0x3B, 0xC7, 0xE3, 0x67, 0x11, 0x7B, 0x3C, // UTXOCommitment + 0x30, 0xC1, 0xF8, 0xFD, 0xD0, 0xD9, 0x72, 0x87, + 0x7F, 0x16, 0xC5, 0x96, 0x2E, 0x8B, 0xD9, 0x63, + 0x65, 0x9C, 0x79, 0x3C, 0xE3, 0x70, 0xD9, 0x5F, + 0x99, 0x0f, 0xed, 0x15, 0x73, 0x01, 0x00, 0x00, // Timestamp + 0xff, 0xff, 0x00, 0x1d, // Bits + 0x01, 0xe3, 0x62, 0x99, 0x00, 0x00, 0x00, 0x00, // Fake Nonce + 0x01, // TxnCount + 0x00, 0x00, 0x00, 0x00, // Version + 0x01, // Varint for number of transaction inputs + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash + 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0x07, // Varint for length of signature script + 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script (coinbase) + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Sequence + 0x01, // Varint for number of transaction outputs + 0x00, 0xf2, 0x05, 0x2a, 0x01, 
0x00, 0x00, 0x00, // Transaction amount + 0x43, // Varint for length of scriptPubKey + 0x41, // OP_DATA_65 + 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, + 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, + 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, + 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, + 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, + 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, + 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, + 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, + 0xee, // 65-byte uncompressed public key + 0xac, // OP_CHECKSIG + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Lock time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, // SubnetworkID +} + +// Transaction location information for block one transactions. +var blockOneTxLocs = []TxLoc{ + {TxStart: 186, TxLen: 162}, +} diff --git a/app/appmessage/p2p_msgblockheader.go b/app/appmessage/p2p_msgblockheader.go new file mode 100644 index 0000000..80b7940 --- /dev/null +++ b/app/appmessage/p2p_msgblockheader.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "math/big" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/mstime" +) + +// BaseBlockHeaderPayload is the base number of bytes a block header can be, +// not including the list of parent block headers. +// Version 4 bytes + Timestamp 8 bytes + Bits 4 bytes + Nonce 8 bytes + +// + NumParentBlocks 1 byte + HashMerkleRoot hash + +// + AcceptedIDMerkleRoot hash + UTXOCommitment hash. 
+// To get total size of block header len(ParentHashes) * externalapi.DomainHashSize should be
+// added to this value
+const BaseBlockHeaderPayload = 25 + 3*(externalapi.DomainHashSize)
+
+// MaxNumParentBlocks is the maximum number of parent blocks a block can reference.
+// Currently set to 255 as the maximum number NumParentBlocks can be due to it being a byte
+const MaxNumParentBlocks = 255
+
+// MaxBlockHeaderPayload is the maximum number of bytes a block header can be.
+// BaseBlockHeaderPayload + up to MaxNumParentBlocks hashes of parent blocks
+const MaxBlockHeaderPayload = BaseBlockHeaderPayload + (MaxNumParentBlocks * externalapi.DomainHashSize)
+
+// MsgBlockHeader defines information about a block and is used in the spectre
+// block (MsgBlock) and headers (MsgHeader) messages.
+type MsgBlockHeader struct {
+	baseMessage
+
+	// Version of the block. This is not the same as the protocol version.
+	Version uint16
+
+	// Parents are the parent block hashes of the block in the DAG per superblock level.
+	Parents []externalapi.BlockLevelParents
+
+	// HashMerkleRoot is the merkle tree reference to hash of all transactions for the block.
+	HashMerkleRoot *externalapi.DomainHash
+
+	// AcceptedIDMerkleRoot is merkle tree reference to hash all transactions
+	// accepted from the block.Blues
+	AcceptedIDMerkleRoot *externalapi.DomainHash
+
+	// UTXOCommitment is an ECMH UTXO commitment to the block UTXO.
+	UTXOCommitment *externalapi.DomainHash
+
+	// Time the block was created.
+	Timestamp mstime.Time
+
+	// Difficulty target for the block.
+	Bits uint32
+
+	// Nonce used to generate the block.
+	Nonce uint64
+
+	// DAAScore is the DAA score of the block.
+	DAAScore uint64
+
+	BlueScore uint64
+
+	// BlueWork is the blue work of the block.
+	BlueWork *big.Int
+
+	PruningPoint *externalapi.DomainHash
+}
+
+// BlockHash computes the block identifier hash for the given block header.
+func (h *MsgBlockHeader) BlockHash() *externalapi.DomainHash {
+	return consensushashing.HeaderHash(BlockHeaderToDomainBlockHeader(h))
+}
+
+// NewBlockHeader returns a new MsgBlockHeader using the provided version, previous
+// block hash, hash merkle root, accepted ID merkle root, difficulty bits, and nonce used to generate the
+// block with defaults or calculated values for the remaining fields.
+func NewBlockHeader(version uint16, parents []externalapi.BlockLevelParents, hashMerkleRoot *externalapi.DomainHash,
+	acceptedIDMerkleRoot *externalapi.DomainHash, utxoCommitment *externalapi.DomainHash, bits uint32, nonce,
+	daaScore, blueScore uint64, blueWork *big.Int, pruningPoint *externalapi.DomainHash) *MsgBlockHeader {
+
+	// Limit the timestamp to one millisecond precision since the protocol
+	// doesn't support better.
+	return &MsgBlockHeader{
+		Version:              version,
+		Parents:              parents,
+		HashMerkleRoot:       hashMerkleRoot,
+		AcceptedIDMerkleRoot: acceptedIDMerkleRoot,
+		UTXOCommitment:       utxoCommitment,
+		Timestamp:            mstime.Now(),
+		Bits:                 bits,
+		Nonce:                nonce,
+		DAAScore:             daaScore,
+		BlueScore:            blueScore,
+		BlueWork:             blueWork,
+		PruningPoint:         pruningPoint,
+	}
+}
diff --git a/app/appmessage/p2p_msgblockheader_test.go b/app/appmessage/p2p_msgblockheader_test.go
new file mode 100644
index 0000000..ef3c154
--- /dev/null
+++ b/app/appmessage/p2p_msgblockheader_test.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2013-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package appmessage
+
+import (
+	"math/big"
+	"reflect"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+// TestBlockHeader tests the MsgBlockHeader API.
+func TestBlockHeader(t *testing.T) { + nonce := uint64(0xba4d87a69924a93d) + + parents := []externalapi.BlockLevelParents{[]*externalapi.DomainHash{mainnetGenesisHash, simnetGenesisHash}} + + merkleHash := mainnetGenesisMerkleRoot + acceptedIDMerkleRoot := exampleAcceptedIDMerkleRoot + bits := uint32(0x1d00ffff) + daaScore := uint64(123) + blueScore := uint64(456) + blueWork := big.NewInt(789) + pruningPoint := simnetGenesisHash + bh := NewBlockHeader(1, parents, merkleHash, acceptedIDMerkleRoot, exampleUTXOCommitment, bits, nonce, + daaScore, blueScore, blueWork, pruningPoint) + + // Ensure we get the same data back out. + if !reflect.DeepEqual(bh.Parents, parents) { + t.Errorf("NewBlockHeader: wrong parents - got %v, want %v", + spew.Sprint(bh.Parents), spew.Sprint(parents)) + } + if bh.HashMerkleRoot != merkleHash { + t.Errorf("NewBlockHeader: wrong merkle root - got %v, want %v", + spew.Sprint(bh.HashMerkleRoot), spew.Sprint(merkleHash)) + } + if bh.Bits != bits { + t.Errorf("NewBlockHeader: wrong bits - got %v, want %v", + bh.Bits, bits) + } + if bh.Nonce != nonce { + t.Errorf("NewBlockHeader: wrong nonce - got %v, want %v", + bh.Nonce, nonce) + } + if bh.DAAScore != daaScore { + t.Errorf("NewBlockHeader: wrong daaScore - got %v, want %v", + bh.DAAScore, daaScore) + } + if bh.BlueScore != blueScore { + t.Errorf("NewBlockHeader: wrong blueScore - got %v, want %v", + bh.BlueScore, blueScore) + } + if bh.BlueWork != blueWork { + t.Errorf("NewBlockHeader: wrong blueWork - got %v, want %v", + bh.BlueWork, blueWork) + } + if !bh.PruningPoint.Equal(pruningPoint) { + t.Errorf("NewBlockHeader: wrong pruningPoint - got %v, want %v", + bh.PruningPoint, pruningPoint) + } +} diff --git a/app/appmessage/p2p_msgblocklocator.go b/app/appmessage/p2p_msgblocklocator.go new file mode 100644 index 0000000..e228584 --- /dev/null +++ b/app/appmessage/p2p_msgblocklocator.go @@ -0,0 +1,31 @@ +package appmessage + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MaxBlockLocatorsPerMsg is the maximum number of block locator hashes allowed +// per message. +const MaxBlockLocatorsPerMsg = 500 + +// MsgBlockLocator implements the Message interface and represents a spectre +// locator message. It is used to find the blockLocator of a peer that is +// syncing with you. +type MsgBlockLocator struct { + baseMessage + BlockLocatorHashes []*externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgBlockLocator) Command() MessageCommand { + return CmdBlockLocator +} + +// NewMsgBlockLocator returns a new spectre locator message that conforms to +// the Message interface. See MsgBlockLocator for details. +func NewMsgBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgBlockLocator { + return &MsgBlockLocator{ + BlockLocatorHashes: locatorHashes, + } +} diff --git a/app/appmessage/p2p_msgblocklocator_test.go b/app/appmessage/p2p_msgblocklocator_test.go new file mode 100644 index 0000000..5b80193 --- /dev/null +++ b/app/appmessage/p2p_msgblocklocator_test.go @@ -0,0 +1,35 @@ +package appmessage + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/davecgh/go-spew/spew" +) + +// TestBlockLocator tests the MsgBlockLocator API. +func TestBlockLocator(t *testing.T) { + hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" + locatorHash, err := externalapi.NewDomainHashFromString(hashStr) + if err != nil { + t.Errorf("NewHashFromStr: %v", err) + } + + msg := NewMsgBlockLocator([]*externalapi.DomainHash{locatorHash}) + + // Ensure the command is expected value. 
+ wantCmd := MessageCommand(10) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgBlockLocator: wrong command - got %v want %v", + cmd, wantCmd) + } + + // Ensure block locator hashes are added properly. + if msg.BlockLocatorHashes[0] != locatorHash { + t.Errorf("AddBlockLocatorHash: wrong block locator added - "+ + "got %v, want %v", + spew.Sprint(msg.BlockLocatorHashes[0]), + spew.Sprint(locatorHash)) + } +} diff --git a/app/appmessage/p2p_msgblockwithtrusteddata.go b/app/appmessage/p2p_msgblockwithtrusteddata.go new file mode 100644 index 0000000..2fa9b97 --- /dev/null +++ b/app/appmessage/p2p_msgblockwithtrusteddata.go @@ -0,0 +1,55 @@ +package appmessage + +import ( + "math/big" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgBlockWithTrustedData represents a spectre BlockWithTrustedData message +type MsgBlockWithTrustedData struct { + baseMessage + + Block *MsgBlock + DAAScore uint64 + DAAWindow []*TrustedDataDataDAABlock + GHOSTDAGData []*BlockGHOSTDAGDataHashPair +} + +// Command returns the protocol command string for the message +func (msg *MsgBlockWithTrustedData) Command() MessageCommand { + return CmdBlockWithTrustedData +} + +// NewMsgBlockWithTrustedData returns a new MsgBlockWithTrustedData. 
+func NewMsgBlockWithTrustedData() *MsgBlockWithTrustedData { + return &MsgBlockWithTrustedData{} +} + +// TrustedDataDataDAABlock is an appmessage representation of externalapi.TrustedDataDataDAABlock +type TrustedDataDataDAABlock struct { + Block *MsgBlock + GHOSTDAGData *BlockGHOSTDAGData +} + +// BlockGHOSTDAGData is an appmessage representation of externalapi.BlockGHOSTDAGData +type BlockGHOSTDAGData struct { + BlueScore uint64 + BlueWork *big.Int + SelectedParent *externalapi.DomainHash + MergeSetBlues []*externalapi.DomainHash + MergeSetReds []*externalapi.DomainHash + BluesAnticoneSizes []*BluesAnticoneSizes +} + +// BluesAnticoneSizes is an appmessage representation of the BluesAnticoneSizes part of GHOSTDAG data. +type BluesAnticoneSizes struct { + BlueHash *externalapi.DomainHash + AnticoneSize externalapi.KType +} + +// BlockGHOSTDAGDataHashPair is an appmessage representation of externalapi.BlockGHOSTDAGDataHashPair +type BlockGHOSTDAGDataHashPair struct { + Hash *externalapi.DomainHash + GHOSTDAGData *BlockGHOSTDAGData +} diff --git a/app/appmessage/p2p_msgblockwithtrusteddatav4.go b/app/appmessage/p2p_msgblockwithtrusteddatav4.go new file mode 100644 index 0000000..909dec6 --- /dev/null +++ b/app/appmessage/p2p_msgblockwithtrusteddatav4.go @@ -0,0 +1,20 @@ +package appmessage + +// MsgBlockWithTrustedDataV4 represents a spectre BlockWithTrustedDataV4 message +type MsgBlockWithTrustedDataV4 struct { + baseMessage + + Block *MsgBlock + DAAWindowIndices []uint64 + GHOSTDAGDataIndices []uint64 +} + +// Command returns the protocol command string for the message +func (msg *MsgBlockWithTrustedDataV4) Command() MessageCommand { + return CmdBlockWithTrustedDataV4 +} + +// NewMsgBlockWithTrustedDataV4 returns a new MsgBlockWithTrustedDataV4. 
+func NewMsgBlockWithTrustedDataV4() *MsgBlockWithTrustedDataV4 {
+	return &MsgBlockWithTrustedDataV4{}
+}
diff --git a/app/appmessage/p2p_msgdoneblockswithmetadata.go b/app/appmessage/p2p_msgdoneblockswithmetadata.go
new file mode 100644
index 0000000..b7d2e50
--- /dev/null
+++ b/app/appmessage/p2p_msgdoneblockswithmetadata.go
@@ -0,0 +1,21 @@
+package appmessage
+
+// MsgDoneBlocksWithTrustedData implements the Message interface and represents a spectre
+// DoneBlocksWithTrustedData message
+//
+// This message has no payload.
+type MsgDoneBlocksWithTrustedData struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgDoneBlocksWithTrustedData) Command() MessageCommand {
+	return CmdDoneBlocksWithTrustedData
+}
+
+// NewMsgDoneBlocksWithTrustedData returns a new spectre DoneBlocksWithTrustedData message that conforms to the
+// Message interface.
+func NewMsgDoneBlocksWithTrustedData() *MsgDoneBlocksWithTrustedData {
+	return &MsgDoneBlocksWithTrustedData{}
+}
diff --git a/app/appmessage/p2p_msgdoneheaders.go b/app/appmessage/p2p_msgdoneheaders.go
new file mode 100644
index 0000000..f9d217e
--- /dev/null
+++ b/app/appmessage/p2p_msgdoneheaders.go
@@ -0,0 +1,22 @@
+package appmessage
+
+// MsgDoneHeaders implements the Message interface and represents a spectre
+// DoneHeaders message. It is used to notify the IBD syncing peer that the
+// syncer sent all the requested headers.
+//
+// This message has no payload.
+type MsgDoneHeaders struct {
+	baseMessage
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgDoneHeaders) Command() MessageCommand {
+	return CmdDoneHeaders
+}
+
+// NewMsgDoneHeaders returns a new spectre DoneHeaders message that conforms to the
+// Message interface.
+func NewMsgDoneHeaders() *MsgDoneHeaders { + return &MsgDoneHeaders{} +} diff --git a/app/appmessage/p2p_msgdonepruningpointutxosetchunks.go b/app/appmessage/p2p_msgdonepruningpointutxosetchunks.go new file mode 100644 index 0000000..630fa79 --- /dev/null +++ b/app/appmessage/p2p_msgdonepruningpointutxosetchunks.go @@ -0,0 +1,16 @@ +package appmessage + +// MsgDonePruningPointUTXOSetChunks represents a spectre DonePruningPointUTXOSetChunks message +type MsgDonePruningPointUTXOSetChunks struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *MsgDonePruningPointUTXOSetChunks) Command() MessageCommand { + return CmdDonePruningPointUTXOSetChunks +} + +// NewMsgDonePruningPointUTXOSetChunks returns a new MsgDonePruningPointUTXOSetChunks. +func NewMsgDonePruningPointUTXOSetChunks() *MsgDonePruningPointUTXOSetChunks { + return &MsgDonePruningPointUTXOSetChunks{} +} diff --git a/app/appmessage/p2p_msgdrequestpruningpointanditsanticoneheaders.go b/app/appmessage/p2p_msgdrequestpruningpointanditsanticoneheaders.go new file mode 100644 index 0000000..cf226cb --- /dev/null +++ b/app/appmessage/p2p_msgdrequestpruningpointanditsanticoneheaders.go @@ -0,0 +1,16 @@ +package appmessage + +// MsgRequestPruningPointAndItsAnticone represents a spectre RequestPruningPointAndItsAnticone message +type MsgRequestPruningPointAndItsAnticone struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *MsgRequestPruningPointAndItsAnticone) Command() MessageCommand { + return CmdRequestPruningPointAndItsAnticone +} + +// NewMsgRequestPruningPointAndItsAnticone returns a new MsgRequestPruningPointAndItsAnticone. 
+func NewMsgRequestPruningPointAndItsAnticone() *MsgRequestPruningPointAndItsAnticone { + return &MsgRequestPruningPointAndItsAnticone{} +} diff --git a/app/appmessage/p2p_msgibdblock.go b/app/appmessage/p2p_msgibdblock.go new file mode 100644 index 0000000..3267eaf --- /dev/null +++ b/app/appmessage/p2p_msgibdblock.go @@ -0,0 +1,31 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +// MsgIBDBlock implements the Message interface and represents a spectre +// ibdblock message. It is used to deliver block and transaction information in +// response to a RequestIBDBlocks message (MsgRequestIBDBlocks). +type MsgIBDBlock struct { + baseMessage + *MsgBlock +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgIBDBlock) Command() MessageCommand { + return CmdIBDBlock +} + +// MaxPayloadLength returns the maximum length the payload can be for the +// receiver. This is part of the Message interface implementation. +func (msg *MsgIBDBlock) MaxPayloadLength(pver uint32) uint32 { + return MaxMessagePayload +} + +// NewMsgIBDBlock returns a new spectre ibdblock message that conforms to the +// Message interface. See MsgIBDBlock for details. 
+func NewMsgIBDBlock(msgBlock *MsgBlock) *MsgIBDBlock { + return &MsgIBDBlock{MsgBlock: msgBlock} +} diff --git a/app/appmessage/p2p_msgibdblocklocator.go b/app/appmessage/p2p_msgibdblocklocator.go new file mode 100644 index 0000000..2a9750a --- /dev/null +++ b/app/appmessage/p2p_msgibdblocklocator.go @@ -0,0 +1,27 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgIBDBlockLocator represents a spectre ibdBlockLocator message +type MsgIBDBlockLocator struct { + baseMessage + TargetHash *externalapi.DomainHash + BlockLocatorHashes []*externalapi.DomainHash +} + +// Command returns the protocol command string for the message +func (msg *MsgIBDBlockLocator) Command() MessageCommand { + return CmdIBDBlockLocator +} + +// NewMsgIBDBlockLocator returns a new spectre ibdBlockLocator message +func NewMsgIBDBlockLocator(targetHash *externalapi.DomainHash, + blockLocatorHashes []*externalapi.DomainHash) *MsgIBDBlockLocator { + + return &MsgIBDBlockLocator{ + TargetHash: targetHash, + BlockLocatorHashes: blockLocatorHashes, + } +} diff --git a/app/appmessage/p2p_msgibdblocklocatorhighesthash.go b/app/appmessage/p2p_msgibdblocklocatorhighesthash.go new file mode 100644 index 0000000..4152fdb --- /dev/null +++ b/app/appmessage/p2p_msgibdblocklocatorhighesthash.go @@ -0,0 +1,23 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgIBDBlockLocatorHighestHash represents a spectre BlockLocatorHighestHash message +type MsgIBDBlockLocatorHighestHash struct { + baseMessage + HighestHash *externalapi.DomainHash +} + +// Command returns the protocol command string for the message +func (msg *MsgIBDBlockLocatorHighestHash) Command() MessageCommand { + return CmdIBDBlockLocatorHighestHash +} + +// NewMsgIBDBlockLocatorHighestHash returns a new BlockLocatorHighestHash message +func NewMsgIBDBlockLocatorHighestHash(highestHash *externalapi.DomainHash) 
*MsgIBDBlockLocatorHighestHash { + return &MsgIBDBlockLocatorHighestHash{ + HighestHash: highestHash, + } +} diff --git a/app/appmessage/p2p_msgibdblocklocatorhighesthashnotfound.go b/app/appmessage/p2p_msgibdblocklocatorhighesthashnotfound.go new file mode 100644 index 0000000..34f4b77 --- /dev/null +++ b/app/appmessage/p2p_msgibdblocklocatorhighesthashnotfound.go @@ -0,0 +1,16 @@ +package appmessage + +// MsgIBDBlockLocatorHighestHashNotFound represents a spectre BlockLocatorHighestHashNotFound message +type MsgIBDBlockLocatorHighestHashNotFound struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *MsgIBDBlockLocatorHighestHashNotFound) Command() MessageCommand { + return CmdIBDBlockLocatorHighestHashNotFound +} + +// NewMsgIBDBlockLocatorHighestHashNotFound returns a new IBDBlockLocatorHighestHashNotFound message +func NewMsgIBDBlockLocatorHighestHashNotFound() *MsgIBDBlockLocatorHighestHashNotFound { + return &MsgIBDBlockLocatorHighestHashNotFound{} +} diff --git a/app/appmessage/p2p_msgibdchainblocklocator.go b/app/appmessage/p2p_msgibdchainblocklocator.go new file mode 100644 index 0000000..489a475 --- /dev/null +++ b/app/appmessage/p2p_msgibdchainblocklocator.go @@ -0,0 +1,27 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgIBDChainBlockLocator implements the Message interface and represents a spectre +// locator message. It is used to find the blockLocator of a peer that is +// syncing with you. +type MsgIBDChainBlockLocator struct { + baseMessage + BlockLocatorHashes []*externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgIBDChainBlockLocator) Command() MessageCommand { + return CmdIBDChainBlockLocator +} + +// NewMsgIBDChainBlockLocator returns a new spectre locator message that conforms to +// the Message interface. 
See MsgIBDChainBlockLocator for details.
+func NewMsgIBDChainBlockLocator(locatorHashes []*externalapi.DomainHash) *MsgIBDChainBlockLocator {
+	return &MsgIBDChainBlockLocator{
+		BlockLocatorHashes: locatorHashes,
+	}
+}
diff --git a/app/appmessage/p2p_msginvrelayblock.go b/app/appmessage/p2p_msginvrelayblock.go
new file mode 100644
index 0000000..6e51f06
--- /dev/null
+++ b/app/appmessage/p2p_msginvrelayblock.go
@@ -0,0 +1,27 @@
+package appmessage
+
+import (
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+// MsgInvRelayBlock implements the Message interface and represents a spectre
+// block inventory message. It is used to notify the network about new block
+// by sending their hash, and let the receiving node decide if it needs it.
+type MsgInvRelayBlock struct {
+	baseMessage
+	Hash *externalapi.DomainHash
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation.
+func (msg *MsgInvRelayBlock) Command() MessageCommand {
+	return CmdInvRelayBlock
+}
+
+// NewMsgInvBlock returns a new spectre invrelblk message that conforms to
+// the Message interface. See MsgInvRelayBlock for details.
+func NewMsgInvBlock(hash *externalapi.DomainHash) *MsgInvRelayBlock {
+	return &MsgInvRelayBlock{
+		Hash: hash,
+	}
+}
diff --git a/app/appmessage/p2p_msginvtransaction.go b/app/appmessage/p2p_msginvtransaction.go
new file mode 100644
index 0000000..b5ebf78
--- /dev/null
+++ b/app/appmessage/p2p_msginvtransaction.go
@@ -0,0 +1,31 @@
+package appmessage
+
+import (
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+// MaxInvPerTxInvMsg is the maximum number of hashes that can
+// be in a single CmdInvTransaction message.
+const MaxInvPerTxInvMsg = MaxInvPerMsg
+
+// MsgInvTransaction implements the Message interface and represents a spectre
+// TxInv message.
It is used to notify the network about new transactions +// by sending their ID, and let the receiving node decide if it needs it. +type MsgInvTransaction struct { + baseMessage + TxIDs []*externalapi.DomainTransactionID +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgInvTransaction) Command() MessageCommand { + return CmdInvTransaction +} + +// NewMsgInvTransaction returns a new spectre TxInv message that conforms to +// the Message interface. See MsgInvTransaction for details. +func NewMsgInvTransaction(ids []*externalapi.DomainTransactionID) *MsgInvTransaction { + return &MsgInvTransaction{ + TxIDs: ids, + } +} diff --git a/app/appmessage/p2p_msgping.go b/app/appmessage/p2p_msgping.go new file mode 100644 index 0000000..9709770 --- /dev/null +++ b/app/appmessage/p2p_msgping.go @@ -0,0 +1,37 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +// MsgPing implements the Message interface and represents a spectre ping +// message. +// +// For versions BIP0031Version and earlier, it is used primarily to confirm +// that a connection is still valid. A transmission error is typically +// interpreted as a closed connection and that the peer should be removed. +// For versions AFTER BIP0031Version it contains an identifier which can be +// returned in the pong message to determine network timing. +// +// The payload for this message just consists of a nonce used for identifying +// it later. +type MsgPing struct { + baseMessage + // Unique value associated with message that is used to identify + // specific ping message. + Nonce uint64 +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. 
+func (msg *MsgPing) Command() MessageCommand { + return CmdPing +} + +// NewMsgPing returns a new spectre ping message that conforms to the Message +// interface. See MsgPing for details. +func NewMsgPing(nonce uint64) *MsgPing { + return &MsgPing{ + Nonce: nonce, + } +} diff --git a/app/appmessage/p2p_msgping_test.go b/app/appmessage/p2p_msgping_test.go new file mode 100644 index 0000000..c3a8bd3 --- /dev/null +++ b/app/appmessage/p2p_msgping_test.go @@ -0,0 +1,27 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "testing" +) + +// TestPing tests the MsgPing API against the latest protocol version. +func TestPing(t *testing.T) { + // Ensure we get the same nonce back out. + nonce := uint64(0x61c2c5535902862) + msg := NewMsgPing(nonce) + if msg.Nonce != nonce { + t.Errorf("NewMsgPing: wrong nonce - got %v, want %v", + msg.Nonce, nonce) + } + + // Ensure the command is expected value. + wantCmd := MessageCommand(7) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgPing: wrong command - got %v want %v", + cmd, wantCmd) + } +} diff --git a/app/appmessage/p2p_msgpong.go b/app/appmessage/p2p_msgpong.go new file mode 100644 index 0000000..c267b4a --- /dev/null +++ b/app/appmessage/p2p_msgpong.go @@ -0,0 +1,31 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +// MsgPong implements the Message interface and represents a spectre pong +// message which is used primarily to confirm that a connection is still valid +// in response to a spectre ping message (MsgPing). +// +// This message was not added until protocol versions AFTER BIP0031Version. +type MsgPong struct { + baseMessage + // Unique value associated with message that is used to identify + // specific ping message. 
+ Nonce uint64 +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgPong) Command() MessageCommand { + return CmdPong +} + +// NewMsgPong returns a new spectre pong message that conforms to the Message +// interface. See MsgPong for details. +func NewMsgPong(nonce uint64) *MsgPong { + return &MsgPong{ + Nonce: nonce, + } +} diff --git a/app/appmessage/p2p_msgpong_test.go b/app/appmessage/p2p_msgpong_test.go new file mode 100644 index 0000000..1fa0010 --- /dev/null +++ b/app/appmessage/p2p_msgpong_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "testing" +) + +// TestPongLatest tests the MsgPong API against the latest protocol version. +func TestPongLatest(t *testing.T) { + nonce := uint64(0x1a05b581a5182c) + msg := NewMsgPong(nonce) + if msg.Nonce != nonce { + t.Errorf("NewMsgPong: wrong nonce - got %v, want %v", + msg.Nonce, nonce) + } + + // Ensure the command is expected value. + wantCmd := MessageCommand(8) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgPong: wrong command - got %v want %v", + cmd, wantCmd) + } +} diff --git a/app/appmessage/p2p_msgpruningpointproof.go b/app/appmessage/p2p_msgpruningpointproof.go new file mode 100644 index 0000000..4b61e8d --- /dev/null +++ b/app/appmessage/p2p_msgpruningpointproof.go @@ -0,0 +1,20 @@ +package appmessage + +// MsgPruningPointProof represents a spectre PruningPointProof message +type MsgPruningPointProof struct { + baseMessage + + Headers [][]*MsgBlockHeader +} + +// Command returns the protocol command string for the message +func (msg *MsgPruningPointProof) Command() MessageCommand { + return CmdPruningPointProof +} + +// NewMsgPruningPointProof returns a new MsgPruningPointProof. 
+func NewMsgPruningPointProof(headers [][]*MsgBlockHeader) *MsgPruningPointProof { + return &MsgPruningPointProof{ + Headers: headers, + } +} diff --git a/app/appmessage/p2p_msgpruningpoints.go b/app/appmessage/p2p_msgpruningpoints.go new file mode 100644 index 0000000..5dd67b2 --- /dev/null +++ b/app/appmessage/p2p_msgpruningpoints.go @@ -0,0 +1,20 @@ +package appmessage + +// MsgPruningPoints represents a spectre PruningPoints message +type MsgPruningPoints struct { + baseMessage + + Headers []*MsgBlockHeader +} + +// Command returns the protocol command string for the message +func (msg *MsgPruningPoints) Command() MessageCommand { + return CmdPruningPoints +} + +// NewMsgPruningPoints returns a new MsgPruningPoints. +func NewMsgPruningPoints(headers []*MsgBlockHeader) *MsgPruningPoints { + return &MsgPruningPoints{ + Headers: headers, + } +} diff --git a/app/appmessage/p2p_msgpruningpointutxosetchunk.go b/app/appmessage/p2p_msgpruningpointutxosetchunk.go new file mode 100644 index 0000000..4a57755 --- /dev/null +++ b/app/appmessage/p2p_msgpruningpointutxosetchunk.go @@ -0,0 +1,36 @@ +package appmessage + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// MsgPruningPointUTXOSetChunk represents a spectre PruningPointUTXOSetChunk message +type MsgPruningPointUTXOSetChunk struct { + baseMessage + OutpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair +} + +// Command returns the protocol command string for the message +func (msg *MsgPruningPointUTXOSetChunk) Command() MessageCommand { + return CmdPruningPointUTXOSetChunk +} + +// NewMsgPruningPointUTXOSetChunk returns a new MsgPruningPointUTXOSetChunk. 
+func NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) *MsgPruningPointUTXOSetChunk { + return &MsgPruningPointUTXOSetChunk{ + OutpointAndUTXOEntryPairs: outpointAndUTXOEntryPairs, + } +} + +// OutpointAndUTXOEntryPair is an outpoint along with its +// respective UTXO entry +type OutpointAndUTXOEntryPair struct { + Outpoint *Outpoint + UTXOEntry *UTXOEntry +} + +// UTXOEntry houses details about an individual transaction output in a UTXO +type UTXOEntry struct { + Amount uint64 + ScriptPublicKey *externalapi.ScriptPublicKey + BlockDAAScore uint64 + IsCoinbase bool +} diff --git a/app/appmessage/p2p_msgreject.go b/app/appmessage/p2p_msgreject.go new file mode 100644 index 0000000..3341c37 --- /dev/null +++ b/app/appmessage/p2p_msgreject.go @@ -0,0 +1,22 @@ +package appmessage + +// MsgReject implements the Message interface and represents a spectre +// Reject message. It is used to notify peers why they are banned. +type MsgReject struct { + baseMessage + Reason string +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgReject) Command() MessageCommand { + return CmdReject +} + +// NewMsgReject returns a new spectre Reject message that conforms to the +// Message interface. +func NewMsgReject(reason string) *MsgReject { + return &MsgReject{ + Reason: reason, + } +} diff --git a/app/appmessage/p2p_msgrequestaddresses.go b/app/appmessage/p2p_msgrequestaddresses.go new file mode 100644 index 0000000..cf8e446 --- /dev/null +++ b/app/appmessage/p2p_msgrequestaddresses.go @@ -0,0 +1,36 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestAddresses implements the Message interface and represents a spectre +// RequestAddresses message. It is used to request a list of known active peers on the +// network from a peer to help identify potential nodes. The list is returned +// via one or more addr messages (MsgAddresses). +// +// This message has no payload. +type MsgRequestAddresses struct { + baseMessage + IncludeAllSubnetworks bool + SubnetworkID *externalapi.DomainSubnetworkID +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestAddresses) Command() MessageCommand { + return CmdRequestAddresses +} + +// NewMsgRequestAddresses returns a new spectre RequestAddresses message that conforms to the +// Message interface. See MsgRequestAddresses for details. +func NewMsgRequestAddresses(includeAllSubnetworks bool, subnetworkID *externalapi.DomainSubnetworkID) *MsgRequestAddresses { + return &MsgRequestAddresses{ + IncludeAllSubnetworks: includeAllSubnetworks, + SubnetworkID: subnetworkID, + } +} diff --git a/app/appmessage/p2p_msgrequestaddresses_test.go b/app/appmessage/p2p_msgrequestaddresses_test.go new file mode 100644 index 0000000..f9dee9f --- /dev/null +++ b/app/appmessage/p2p_msgrequestaddresses_test.go @@ -0,0 +1,20 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "testing" +) + +// TestRequestAddresses tests the MsgRequestAddresses API. +func TestRequestAddresses(t *testing.T) { + // Ensure the command is expected value. 
+ wantCmd := MessageCommand(2) + msg := NewMsgRequestAddresses(false, nil) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgRequestAddresses: wrong command - got %v want %v", + cmd, wantCmd) + } +} diff --git a/app/appmessage/p2p_msgrequestanticone.go b/app/appmessage/p2p_msgrequestanticone.go new file mode 100644 index 0000000..6513695 --- /dev/null +++ b/app/appmessage/p2p_msgrequestanticone.go @@ -0,0 +1,33 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestAnticone implements the Message interface and represents a spectre +// RequestHeaders message. It is used to request the set past(ContextHash) \cap anticone(BlockHash) +type MsgRequestAnticone struct { + baseMessage + BlockHash *externalapi.DomainHash + ContextHash *externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestAnticone) Command() MessageCommand { + return CmdRequestAnticone +} + +// NewMsgRequestAnticone returns a new spectre RequestPastDiff message that conforms to the +// Message interface using the passed parameters and defaults for the remaining +// fields. 
+func NewMsgRequestAnticone(blockHash, contextHash *externalapi.DomainHash) *MsgRequestAnticone { + return &MsgRequestAnticone{ + BlockHash: blockHash, + ContextHash: contextHash, + } +} diff --git a/app/appmessage/p2p_msgrequestblocklocator.go b/app/appmessage/p2p_msgrequestblocklocator.go new file mode 100644 index 0000000..3fbb272 --- /dev/null +++ b/app/appmessage/p2p_msgrequestblocklocator.go @@ -0,0 +1,31 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestBlockLocator implements the Message interface and represents a spectre +// RequestBlockLocator message. It is used to request a block locator between low +// and high hash. +// The locator is returned via a locator message (MsgBlockLocator). +type MsgRequestBlockLocator struct { + baseMessage + HighHash *externalapi.DomainHash + Limit uint32 +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestBlockLocator) Command() MessageCommand { + return CmdRequestBlockLocator +} + +// NewMsgRequestBlockLocator returns a new RequestBlockLocator message that conforms to the +// Message interface using the passed parameters and defaults for the remaining +// fields. +func NewMsgRequestBlockLocator(highHash *externalapi.DomainHash, limit uint32) *MsgRequestBlockLocator { + return &MsgRequestBlockLocator{ + HighHash: highHash, + Limit: limit, + } +} diff --git a/app/appmessage/p2p_msgrequestblocklocator_test.go b/app/appmessage/p2p_msgrequestblocklocator_test.go new file mode 100644 index 0000000..db7f1b6 --- /dev/null +++ b/app/appmessage/p2p_msgrequestblocklocator_test.go @@ -0,0 +1,24 @@ +package appmessage + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestRequestBlockLocator tests the MsgRequestBlockLocator API. 
+func TestRequestBlockLocator(t *testing.T) { + hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" + highHash, err := externalapi.NewDomainHashFromString(hashStr) + if err != nil { + t.Errorf("NewHashFromStr: %v", err) + } + + // Ensure the command is expected value. + wantCmd := MessageCommand(9) + msg := NewMsgRequestBlockLocator(highHash, 0) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgRequestBlockLocator: wrong command - got %v want %v", + cmd, wantCmd) + } +} diff --git a/app/appmessage/p2p_msgrequestheaders.go b/app/appmessage/p2p_msgrequestheaders.go new file mode 100644 index 0000000..24f752b --- /dev/null +++ b/app/appmessage/p2p_msgrequestheaders.go @@ -0,0 +1,34 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestHeaders implements the Message interface and represents a spectre +// RequestHeaders message. It is used to request a list of blocks starting after the +// low hash and until the high hash. +type MsgRequestHeaders struct { + baseMessage + LowHash *externalapi.DomainHash + HighHash *externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestHeaders) Command() MessageCommand { + return CmdRequestHeaders +} + +// NewMsgRequstHeaders returns a new spectre RequestHeaders message that conforms to the +// Message interface using the passed parameters and defaults for the remaining +// fields. 
+func NewMsgRequstHeaders(lowHash, highHash *externalapi.DomainHash) *MsgRequestHeaders { + return &MsgRequestHeaders{ + LowHash: lowHash, + HighHash: highHash, + } +} diff --git a/app/appmessage/p2p_msgrequestheaders_test.go b/app/appmessage/p2p_msgrequestheaders_test.go new file mode 100644 index 0000000..c71488e --- /dev/null +++ b/app/appmessage/p2p_msgrequestheaders_test.go @@ -0,0 +1,40 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestRequstIBDBlocks tests the MsgRequestIBDBlocks API. +func TestRequstIBDBlocks(t *testing.T) { + hashStr := "000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0" + lowHash, err := externalapi.NewDomainHashFromString(hashStr) + if err != nil { + t.Errorf("NewHashFromStr: %v", err) + } + + hashStr = "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" + highHash, err := externalapi.NewDomainHashFromString(hashStr) + if err != nil { + t.Errorf("NewHashFromStr: %v", err) + } + + // Ensure we get the same data back out. + msg := NewMsgRequstHeaders(lowHash, highHash) + if !msg.HighHash.Equal(highHash) { + t.Errorf("NewMsgRequstIBDBlocks: wrong high hash - got %v, want %v", + msg.HighHash, highHash) + } + + // Ensure the command is expected value. 
+ wantCmd := MessageCommand(4) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgRequstIBDBlocks: wrong command - got %v want %v", + cmd, wantCmd) + } +} diff --git a/app/appmessage/p2p_msgrequestibdblocks.go b/app/appmessage/p2p_msgrequestibdblocks.go new file mode 100644 index 0000000..a370d10 --- /dev/null +++ b/app/appmessage/p2p_msgrequestibdblocks.go @@ -0,0 +1,26 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestIBDBlocks implements the Message interface and represents a spectre +// RequestIBDBlocks message. It is used to request blocks as part of the IBD +// protocol. +type MsgRequestIBDBlocks struct { + baseMessage + Hashes []*externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestIBDBlocks) Command() MessageCommand { + return CmdRequestIBDBlocks +} + +// NewMsgRequestIBDBlocks returns a new MsgRequestIBDBlocks. +func NewMsgRequestIBDBlocks(hashes []*externalapi.DomainHash) *MsgRequestIBDBlocks { + return &MsgRequestIBDBlocks{ + Hashes: hashes, + } +} diff --git a/app/appmessage/p2p_msgrequestibdchainblocklocator.go b/app/appmessage/p2p_msgrequestibdchainblocklocator.go new file mode 100644 index 0000000..e364b57 --- /dev/null +++ b/app/appmessage/p2p_msgrequestibdchainblocklocator.go @@ -0,0 +1,31 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestIBDChainBlockLocator implements the Message interface and represents a spectre +// IBDRequestChainBlockLocator message. It is used to request a block locator between low +// and high hash. +// The locator is returned via a locator message (MsgIBDChainBlockLocator). 
+type MsgRequestIBDChainBlockLocator struct { + baseMessage + HighHash *externalapi.DomainHash + LowHash *externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestIBDChainBlockLocator) Command() MessageCommand { + return CmdRequestIBDChainBlockLocator +} + +// NewMsgIBDRequestChainBlockLocator returns a new IBDRequestChainBlockLocator message that conforms to the +// Message interface using the passed parameters and defaults for the remaining +// fields. +func NewMsgIBDRequestChainBlockLocator(highHash, lowHash *externalapi.DomainHash) *MsgRequestIBDChainBlockLocator { + return &MsgRequestIBDChainBlockLocator{ + HighHash: highHash, + LowHash: lowHash, + } +} diff --git a/app/appmessage/p2p_msgrequestnextheaders.go b/app/appmessage/p2p_msgrequestnextheaders.go new file mode 100644 index 0000000..5dfa4c9 --- /dev/null +++ b/app/appmessage/p2p_msgrequestnextheaders.go @@ -0,0 +1,22 @@ +package appmessage + +// MsgRequestNextHeaders implements the Message interface and represents a spectre +// RequestNextHeaders message. It is used to notify the IBD syncer peer to send +// more headers. +// +// This message has no payload. +type MsgRequestNextHeaders struct { + baseMessage +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestNextHeaders) Command() MessageCommand { + return CmdRequestNextHeaders +} + +// NewMsgRequestNextHeaders returns a new spectre RequestNextHeaders message that conforms to the +// Message interface. 
+func NewMsgRequestNextHeaders() *MsgRequestNextHeaders { + return &MsgRequestNextHeaders{} +} diff --git a/app/appmessage/p2p_msgrequestnextpruningpointanditsanticoneblocks.go b/app/appmessage/p2p_msgrequestnextpruningpointanditsanticoneblocks.go new file mode 100644 index 0000000..08a2afb --- /dev/null +++ b/app/appmessage/p2p_msgrequestnextpruningpointanditsanticoneblocks.go @@ -0,0 +1,22 @@ +package appmessage + +// MsgRequestNextPruningPointAndItsAnticoneBlocks implements the Message interface and represents a spectre +// RequestNextPruningPointAndItsAnticoneBlocks message. It is used to notify the IBD syncer peer to send +// more blocks from the pruning anticone. +// +// This message has no payload. +type MsgRequestNextPruningPointAndItsAnticoneBlocks struct { + baseMessage +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestNextPruningPointAndItsAnticoneBlocks) Command() MessageCommand { + return CmdRequestNextPruningPointAndItsAnticoneBlocks +} + +// NewMsgRequestNextPruningPointAndItsAnticoneBlocks returns a new spectre RequestNextPruningPointAndItsAnticoneBlocks message that conforms to the +// Message interface. 
+func NewMsgRequestNextPruningPointAndItsAnticoneBlocks() *MsgRequestNextPruningPointAndItsAnticoneBlocks { + return &MsgRequestNextPruningPointAndItsAnticoneBlocks{} +} diff --git a/app/appmessage/p2p_msgrequestnextpruningpointutxosetchunk.go b/app/appmessage/p2p_msgrequestnextpruningpointutxosetchunk.go new file mode 100644 index 0000000..ccd8a44 --- /dev/null +++ b/app/appmessage/p2p_msgrequestnextpruningpointutxosetchunk.go @@ -0,0 +1,16 @@ +package appmessage + +// MsgRequestNextPruningPointUTXOSetChunk represents a spectre RequestNextPruningPointUTXOSetChunk message +type MsgRequestNextPruningPointUTXOSetChunk struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *MsgRequestNextPruningPointUTXOSetChunk) Command() MessageCommand { + return CmdRequestNextPruningPointUTXOSetChunk +} + +// NewMsgRequestNextPruningPointUTXOSetChunk returns a new MsgRequestNextPruningPointUTXOSetChunk. +func NewMsgRequestNextPruningPointUTXOSetChunk() *MsgRequestNextPruningPointUTXOSetChunk { + return &MsgRequestNextPruningPointUTXOSetChunk{} +} diff --git a/app/appmessage/p2p_msgrequestpruningpointproof.go b/app/appmessage/p2p_msgrequestpruningpointproof.go new file mode 100644 index 0000000..47bd636 --- /dev/null +++ b/app/appmessage/p2p_msgrequestpruningpointproof.go @@ -0,0 +1,16 @@ +package appmessage + +// MsgRequestPruningPointProof represents a spectre RequestPruningPointProof message +type MsgRequestPruningPointProof struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *MsgRequestPruningPointProof) Command() MessageCommand { + return CmdRequestPruningPointProof +} + +// NewMsgRequestPruningPointProof returns a new MsgRequestPruningPointProof. 
+func NewMsgRequestPruningPointProof() *MsgRequestPruningPointProof { + return &MsgRequestPruningPointProof{} +} diff --git a/app/appmessage/p2p_msgrequestpruningpointutxosetandblock.go b/app/appmessage/p2p_msgrequestpruningpointutxosetandblock.go new file mode 100644 index 0000000..531b2f7 --- /dev/null +++ b/app/appmessage/p2p_msgrequestpruningpointutxosetandblock.go @@ -0,0 +1,23 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgRequestPruningPointUTXOSet represents a spectre RequestPruningPointUTXOSet message +type MsgRequestPruningPointUTXOSet struct { + baseMessage + PruningPointHash *externalapi.DomainHash +} + +// Command returns the protocol command string for the message +func (msg *MsgRequestPruningPointUTXOSet) Command() MessageCommand { + return CmdRequestPruningPointUTXOSet +} + +// NewMsgRequestPruningPointUTXOSet returns a new MsgRequestPruningPointUTXOSet +func NewMsgRequestPruningPointUTXOSet(pruningPointHash *externalapi.DomainHash) *MsgRequestPruningPointUTXOSet { + return &MsgRequestPruningPointUTXOSet{ + PruningPointHash: pruningPointHash, + } +} diff --git a/app/appmessage/p2p_msgrequestrelayblocks.go b/app/appmessage/p2p_msgrequestrelayblocks.go new file mode 100644 index 0000000..23d4c41 --- /dev/null +++ b/app/appmessage/p2p_msgrequestrelayblocks.go @@ -0,0 +1,31 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MaxRequestRelayBlocksHashes is the maximum number of hashes that can +// be in a single RequestRelayBlocks message. +const MaxRequestRelayBlocksHashes = MaxInvPerMsg + +// MsgRequestRelayBlocks implements the Message interface and represents a spectre +// RequestRelayBlocks message. It is used to request blocks as part of the block +// relay protocol. 
+type MsgRequestRelayBlocks struct { + baseMessage + Hashes []*externalapi.DomainHash +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestRelayBlocks) Command() MessageCommand { + return CmdRequestRelayBlocks +} + +// NewMsgRequestRelayBlocks returns a new spectre RequestRelayBlocks message that conforms to +// the Message interface. See MsgRequestRelayBlocks for details. +func NewMsgRequestRelayBlocks(hashes []*externalapi.DomainHash) *MsgRequestRelayBlocks { + return &MsgRequestRelayBlocks{ + Hashes: hashes, + } +} diff --git a/app/appmessage/p2p_msgrequesttransactions.go b/app/appmessage/p2p_msgrequesttransactions.go new file mode 100644 index 0000000..f167aa5 --- /dev/null +++ b/app/appmessage/p2p_msgrequesttransactions.go @@ -0,0 +1,31 @@ +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MaxInvPerRequestTransactionsMsg is the maximum number of hashes that can +// be in a single CmdInvTransaction message. +const MaxInvPerRequestTransactionsMsg = MaxInvPerMsg + +// MsgRequestTransactions implements the Message interface and represents a spectre +// RequestTransactions message. It is used to request transactions as part of the +// transactions relay protocol. +type MsgRequestTransactions struct { + baseMessage + IDs []*externalapi.DomainTransactionID +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgRequestTransactions) Command() MessageCommand { + return CmdRequestTransactions +} + +// NewMsgRequestTransactions returns a new spectre RequestTransactions message that conforms to +// the Message interface. See MsgRequestTransactions for details. 
+func NewMsgRequestTransactions(ids []*externalapi.DomainTransactionID) *MsgRequestTransactions { + return &MsgRequestTransactions{ + IDs: ids, + } +} diff --git a/app/appmessage/p2p_msgtransactionnotfound.go b/app/appmessage/p2p_msgtransactionnotfound.go new file mode 100644 index 0000000..14880e2 --- /dev/null +++ b/app/appmessage/p2p_msgtransactionnotfound.go @@ -0,0 +1,30 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MsgTransactionNotFound defines a spectre TransactionNotFound message which is sent in response to +// a RequestTransactions message if any of the requested data in not available on the peer. +type MsgTransactionNotFound struct { + baseMessage + ID *externalapi.DomainTransactionID +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgTransactionNotFound) Command() MessageCommand { + return CmdTransactionNotFound +} + +// NewMsgTransactionNotFound returns a new spectre transactionsnotfound message that conforms to the +// Message interface. See MsgTransactionNotFound for details. 
+func NewMsgTransactionNotFound(id *externalapi.DomainTransactionID) *MsgTransactionNotFound { + return &MsgTransactionNotFound{ + ID: id, + } +} diff --git a/app/appmessage/p2p_msgtrusteddata.go b/app/appmessage/p2p_msgtrusteddata.go new file mode 100644 index 0000000..f03df73 --- /dev/null +++ b/app/appmessage/p2p_msgtrusteddata.go @@ -0,0 +1,25 @@ +package appmessage + +// MsgTrustedData represents a spectre TrustedData message +type MsgTrustedData struct { + baseMessage + + DAAWindow []*TrustedDataDAAHeader + GHOSTDAGData []*BlockGHOSTDAGDataHashPair +} + +// Command returns the protocol command string for the message +func (msg *MsgTrustedData) Command() MessageCommand { + return CmdTrustedData +} + +// NewMsgTrustedData returns a new MsgTrustedData. +func NewMsgTrustedData() *MsgTrustedData { + return &MsgTrustedData{} +} + +// TrustedDataDAAHeader is an appmessage representation of externalapi.TrustedDataDataDAAHeader +type TrustedDataDAAHeader struct { + Header *MsgBlockHeader + GHOSTDAGData *BlockGHOSTDAGData +} diff --git a/app/appmessage/p2p_msgtx.go b/app/appmessage/p2p_msgtx.go new file mode 100644 index 0000000..7d0a8fe --- /dev/null +++ b/app/appmessage/p2p_msgtx.go @@ -0,0 +1,319 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "encoding/binary" + "strconv" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +const ( + // MaxPrevOutIndex is the maximum index the index field of a previous + // outpoint can be. + MaxPrevOutIndex uint32 = 0xffffffff + + // defaultTxInOutAlloc is the default size used for the backing array for + // transaction inputs and outputs. 
The array will dynamically grow as needed, + // but this figure is intended to provide enough space for the number of + // inputs and outputs in a typical transaction without needing to grow the + // backing array multiple times. + defaultTxInOutAlloc = 15 + + // minTxInPayload is the minimum payload size for a transaction input. + // PreviousOutpoint.TxID + PreviousOutpoint.Index 4 bytes + Varint for + // SignatureScript length 1 byte + Sequence 4 bytes. + minTxInPayload = 9 + externalapi.DomainHashSize + + // maxTxInPerMessage is the maximum number of transactions inputs that + // a transaction which fits into a message could possibly have. + maxTxInPerMessage = (MaxMessagePayload / minTxInPayload) + 1 + + // MinTxOutPayload is the minimum payload size for a transaction output. + // Value 8 bytes + version 2 bytes + Varint for ScriptPublicKey length 1 byte. + MinTxOutPayload = 11 + + // maxTxOutPerMessage is the maximum number of transactions outputs that + // a transaction which fits into a message could possibly have. + maxTxOutPerMessage = (MaxMessagePayload / MinTxOutPayload) + 1 + + // minTxPayload is the minimum payload size for a transaction. Note + // that any realistically usable transaction must have at least one + // input or output, but that is a rule enforced at a higher layer, so + // it is intentionally not included here. + // Version 4 bytes + Varint number of transaction inputs 1 byte + Varint + // number of transaction outputs 1 byte + LockTime 4 bytes + min input + // payload + min output payload. + minTxPayload = 10 +) + +// Outpoint defines a spectre data type that is used to track previous +// transaction outputs. +type Outpoint struct { + TxID externalapi.DomainTransactionID + Index uint32 +} + +// NewOutpoint returns a new spectre transaction outpoint point with the +// provided hash and index. 
+func NewOutpoint(txID *externalapi.DomainTransactionID, index uint32) *Outpoint { + return &Outpoint{ + TxID: *txID, + Index: index, + } +} + +// String returns the Outpoint in the human-readable form "txID:index". +func (o Outpoint) String() string { + // Allocate enough for ID string, colon, and 10 digits. Although + // at the time of writing, the number of digits can be no greater than + // the length of the decimal representation of maxTxOutPerMessage, the + // maximum message payload may increase in the future and this + // optimization may go unnoticed, so allocate space for 10 decimal + // digits, which will fit any uint32. + buf := make([]byte, 2*externalapi.DomainHashSize+1, 2*externalapi.DomainHashSize+1+10) + copy(buf, o.TxID.String()) + buf[2*externalapi.DomainHashSize] = ':' + buf = strconv.AppendUint(buf, uint64(o.Index), 10) + return string(buf) +} + +// TxIn defines a spectre transaction input. +type TxIn struct { + PreviousOutpoint Outpoint + SignatureScript []byte + Sequence uint64 + SigOpCount byte +} + +// NewTxIn returns a new spectre transaction input with the provided +// previous outpoint point and signature script with a default sequence of +// MaxTxInSequenceNum. +func NewTxIn(prevOut *Outpoint, signatureScript []byte, sequence uint64, sigOpCount byte) *TxIn { + return &TxIn{ + PreviousOutpoint: *prevOut, + SignatureScript: signatureScript, + Sequence: sequence, + SigOpCount: sigOpCount, + } +} + +// TxOut defines a spectre transaction output. +type TxOut struct { + Value uint64 + ScriptPubKey *externalapi.ScriptPublicKey +} + +// NewTxOut returns a new spectre transaction output with the provided +// transaction value and public key script. +func NewTxOut(value uint64, scriptPubKey *externalapi.ScriptPublicKey) *TxOut { + return &TxOut{ + Value: value, + ScriptPubKey: scriptPubKey, + } +} + +// MsgTx implements the Message interface and represents a spectre tx message. 
+// It is used to deliver transaction information in response to a getdata +// message (MsgGetData) for a given transaction. +// +// Use the AddTxIn and AddTxOut functions to build up the list of transaction +// inputs and outputs. +type MsgTx struct { + baseMessage + Version uint16 + TxIn []*TxIn + TxOut []*TxOut + LockTime uint64 + SubnetworkID externalapi.DomainSubnetworkID + Gas uint64 + Payload []byte +} + +// AddTxIn adds a transaction input to the message. +func (msg *MsgTx) AddTxIn(ti *TxIn) { + msg.TxIn = append(msg.TxIn, ti) +} + +// AddTxOut adds a transaction output to the message. +func (msg *MsgTx) AddTxOut(to *TxOut) { + msg.TxOut = append(msg.TxOut, to) +} + +// IsCoinBase determines whether or not a transaction is a coinbase transaction. A coinbase +// transaction is a special transaction created by miners that distributes fees and block subsidy +// to the previous blocks' miners, and to specify the scriptPubKey that will be used to pay the current +// miner in future blocks. Each input of the coinbase transaction should set index to maximum +// value and reference the relevant block id, instead of previous transaction id. +func (msg *MsgTx) IsCoinBase() bool { + // A coinbase transaction must have subnetwork id SubnetworkIDCoinbase + return msg.SubnetworkID == subnetworks.SubnetworkIDCoinbase +} + +// TxHash generates the Hash for the transaction. +func (msg *MsgTx) TxHash() *externalapi.DomainHash { + return consensushashing.TransactionHash(MsgTxToDomainTransaction(msg)) +} + +// TxID generates the Hash for the transaction without the signature script, gas and payload fields. +func (msg *MsgTx) TxID() *externalapi.DomainTransactionID { + return consensushashing.TransactionID(MsgTxToDomainTransaction(msg)) +} + +// Copy creates a deep copy of a transaction so that the original does not get +// modified when the copy is manipulated. 
+func (msg *MsgTx) Copy() *MsgTx { + // Create new tx and start by copying primitive values and making space + // for the transaction inputs and outputs. + newTx := MsgTx{ + Version: msg.Version, + TxIn: make([]*TxIn, 0, len(msg.TxIn)), + TxOut: make([]*TxOut, 0, len(msg.TxOut)), + LockTime: msg.LockTime, + SubnetworkID: msg.SubnetworkID, + Gas: msg.Gas, + } + + if msg.Payload != nil { + newTx.Payload = make([]byte, len(msg.Payload)) + copy(newTx.Payload, msg.Payload) + } + + // Deep copy the old TxIn data. + for _, oldTxIn := range msg.TxIn { + // Deep copy the old previous outpoint. + oldOutpoint := oldTxIn.PreviousOutpoint + newOutpoint := Outpoint{} + newOutpoint.TxID = oldOutpoint.TxID + newOutpoint.Index = oldOutpoint.Index + + // Deep copy the old signature script. + var newScript []byte + oldScript := oldTxIn.SignatureScript + oldScriptLen := len(oldScript) + if oldScriptLen > 0 { + newScript = make([]byte, oldScriptLen) + copy(newScript, oldScript[:oldScriptLen]) + } + + // Create new txIn with the deep copied data. + newTxIn := TxIn{ + PreviousOutpoint: newOutpoint, + SignatureScript: newScript, + Sequence: oldTxIn.Sequence, + SigOpCount: oldTxIn.SigOpCount, + } + + // Finally, append this fully copied txin. + newTx.TxIn = append(newTx.TxIn, &newTxIn) + } + + // Deep copy the old TxOut data. + for _, oldTxOut := range msg.TxOut { + // Deep copy the old ScriptPublicKey + var newScript externalapi.ScriptPublicKey + oldScript := oldTxOut.ScriptPubKey + oldScriptLen := len(oldScript.Script) + if oldScriptLen > 0 { + newScript = externalapi.ScriptPublicKey{Script: make([]byte, oldScriptLen), Version: oldScript.Version} + copy(newScript.Script, oldScript.Script[:oldScriptLen]) + } + + // Create new txOut with the deep copied data and append it to + // new Tx. 
+ newTxOut := TxOut{ + Value: oldTxOut.Value, + ScriptPubKey: &newScript, + } + newTx.TxOut = append(newTx.TxOut, &newTxOut) + } + + return &newTx +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. +func (msg *MsgTx) Command() MessageCommand { + return CmdTx +} + +// MaxPayloadLength returns the maximum length the payload can be for the +// receiver. This is part of the Message interface implementation. +func (msg *MsgTx) MaxPayloadLength(pver uint32) uint32 { + return MaxMessagePayload +} + +// IsSubnetworkCompatible return true iff subnetworkID is one or more of the following: +// 1. The SupportsAll subnetwork (full node) +// 2. The native subnetwork +// 3. The transaction's subnetwork +func (msg *MsgTx) IsSubnetworkCompatible(subnetworkID *externalapi.DomainSubnetworkID) bool { + return subnetworkID == nil || + subnetworkID.Equal(&subnetworks.SubnetworkIDNative) || + subnetworkID.Equal(&msg.SubnetworkID) +} + +// newMsgTx returns a new tx message that conforms to the Message interface. +// +// All fields except version and gas has default values if nil is passed: +// txIn, txOut - empty arrays +// payload - an empty payload +// +// The payload hash is calculated automatically according to provided payload. +// Also, the lock time is set to zero to indicate the transaction is valid +// immediately as opposed to some time in future. 
+func newMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *externalapi.DomainSubnetworkID, + gas uint64, payload []byte, lockTime uint64) *MsgTx { + + if txIn == nil { + txIn = make([]*TxIn, 0, defaultTxInOutAlloc) + } + + if txOut == nil { + txOut = make([]*TxOut, 0, defaultTxInOutAlloc) + } + + return &MsgTx{ + Version: version, + TxIn: txIn, + TxOut: txOut, + SubnetworkID: *subnetworkID, + Gas: gas, + Payload: payload, + LockTime: lockTime, + } +} + +// NewNativeMsgTx returns a new tx message in the native subnetwork +func NewNativeMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut) *MsgTx { + return newMsgTx(version, txIn, txOut, &subnetworks.SubnetworkIDNative, 0, nil, 0) +} + +// NewSubnetworkMsgTx returns a new tx message in the specified subnetwork with specified gas and payload +func NewSubnetworkMsgTx(version uint16, txIn []*TxIn, txOut []*TxOut, subnetworkID *externalapi.DomainSubnetworkID, + gas uint64, payload []byte) *MsgTx { + + return newMsgTx(version, txIn, txOut, subnetworkID, gas, payload, 0) +} + +// NewNativeMsgTxWithLocktime returns a new tx message in the native subnetwork with a locktime. 
See newMsgTx for further documentation of the parameters
+ wantPayload := uint32(1024 * 1024 * 32) + maxPayload := msg.MaxPayloadLength(pver) + if maxPayload != wantPayload { + t.Errorf("MaxPayloadLength: wrong max payload length for "+ + "protocol version %d - got %v, want %v", pver, + maxPayload, wantPayload) + } + + // Ensure we get the same transaction outpoint data back out. + // NOTE: This is a block hash and made up index, but we're only + // testing package functionality. + prevOutIndex := uint32(1) + prevOut := NewOutpoint(txID, prevOutIndex) + if !prevOut.TxID.Equal(txID) { + t.Errorf("NewOutpoint: wrong ID - got %v, want %v", + spew.Sprint(&prevOut.TxID), spew.Sprint(txID)) + } + if prevOut.Index != prevOutIndex { + t.Errorf("NewOutpoint: wrong index - got %v, want %v", + prevOut.Index, prevOutIndex) + } + prevOutStr := fmt.Sprintf("%s:%d", txID.String(), prevOutIndex) + if s := prevOut.String(); s != prevOutStr { + t.Errorf("Outpoint.String: unexpected result - got %v, "+ + "want %v", s, prevOutStr) + } + + // Ensure we get the same transaction input back out. + sigScript := []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62} + txIn := NewTxIn(prevOut, sigScript, constants.MaxTxInSequenceNum, 1) + if !reflect.DeepEqual(&txIn.PreviousOutpoint, prevOut) { + t.Errorf("NewTxIn: wrong prev outpoint - got %v, want %v", + spew.Sprint(&txIn.PreviousOutpoint), + spew.Sprint(prevOut)) + } + if !bytes.Equal(txIn.SignatureScript, sigScript) { + t.Errorf("NewTxIn: wrong signature script - got %v, want %v", + spew.Sdump(txIn.SignatureScript), + spew.Sdump(sigScript)) + } + + // Ensure we get the same transaction output back out. 
+ txValue := uint64(5000000000) + scriptPubKey := &externalapi.ScriptPublicKey{ + Script: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + Version: 0} + txOut := NewTxOut(txValue, scriptPubKey) + if txOut.Value != txValue { + t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v", + txOut.Value, txValue) + + } + if !bytes.Equal(txOut.ScriptPubKey.Script, scriptPubKey.Script) { + t.Errorf("NewTxOut: wrong scriptPubKey - got %v, want %v", + spew.Sdump(txOut.ScriptPubKey), + spew.Sdump(scriptPubKey)) + } + + // Ensure transaction inputs are added properly. + msg.AddTxIn(txIn) + if !reflect.DeepEqual(msg.TxIn[0], txIn) { + t.Errorf("AddTxIn: wrong transaction input added - got %v, want %v", + spew.Sprint(msg.TxIn[0]), spew.Sprint(txIn)) + } + + // Ensure transaction outputs are added properly. + msg.AddTxOut(txOut) + if !reflect.DeepEqual(msg.TxOut[0], txOut) { + t.Errorf("AddTxIn: wrong transaction output added - got %v, want %v", + spew.Sprint(msg.TxOut[0]), spew.Sprint(txOut)) + } + + // Ensure the copy produced an identical transaction message. + newMsg := msg.Copy() + if !reflect.DeepEqual(newMsg, msg) { + t.Errorf("Copy: mismatched tx messages - got %v, want %v", + spew.Sdump(newMsg), spew.Sdump(msg)) + } +} + +// TestTxHash tests the ability to generate the hash of a transaction accurately. 
+func TestTxHashAndID(t *testing.T) { + txHash1Str := "b06f8b650115b5cf4d59499e10764a9312742930cb43c9b4ff6495d76f332ed7" + txID1Str := "e20225c3d065ee41743607ee627db44d01ef396dc9779b05b2caf55bac50e12d" + wantTxID1, err := transactionid.FromString(txID1Str) + if err != nil { + t.Fatalf("NewTxIDFromStr: %v", err) + } + wantTxHash1, err := transactionid.FromString(txHash1Str) + if err != nil { + t.Fatalf("NewTxIDFromStr: %v", err) + } + + // A coinbase transaction + txIn := &TxIn{ + PreviousOutpoint: Outpoint{ + TxID: externalapi.DomainTransactionID{}, + Index: math.MaxUint32, + }, + SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, + Sequence: math.MaxUint64, + } + txOut := &TxOut{ + Value: 5000000000, + ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + }, Version: 0}, + } + tx1 := NewSubnetworkMsgTx(0, []*TxIn{txIn}, []*TxOut{txOut}, &subnetworks.SubnetworkIDCoinbase, 0, nil) + + // Ensure the hash produced is expected. + tx1Hash := tx1.TxHash() + if *tx1Hash != (externalapi.DomainHash)(*wantTxHash1) { + t.Errorf("TxHash: wrong hash - got %v, want %v", + spew.Sprint(tx1Hash), spew.Sprint(wantTxHash1)) + } + + // Ensure the TxID for coinbase transaction is the same as TxHash. 
+ tx1ID := tx1.TxID() + if !tx1ID.Equal(wantTxID1) { + t.Errorf("TxID: wrong ID - got %v, want %v", + spew.Sprint(tx1ID), spew.Sprint(wantTxID1)) + } + + hash2Str := "fa16a8ce88d52ca1ff45187bbba0d33044e9f5fe309e8d0b22d4812dcf1782b7" + wantHash2, err := externalapi.NewDomainHashFromString(hash2Str) + if err != nil { + t.Errorf("NewTxIDFromStr: %v", err) + return + } + + id2Str := "89ffb49474637502d9059af38b8a95fc2f0d3baef5c801d7a9b9c8830671b711" + wantID2, err := transactionid.FromString(id2Str) + if err != nil { + t.Errorf("NewTxIDFromStr: %v", err) + return + } + payload := []byte{1, 2, 3} + txIns := []*TxIn{{ + PreviousOutpoint: Outpoint{ + Index: 0, + TxID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{1, 2, 3}), + }, + SignatureScript: []byte{ + 0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xDA, 0x0D, 0xC6, 0xAE, 0xCE, 0xFE, 0x1E, 0x06, 0xEF, 0xDF, + 0x05, 0x77, 0x37, 0x57, 0xDE, 0xB1, 0x68, 0x82, 0x09, 0x30, 0xE3, 0xB0, 0xD0, 0x3F, 0x46, 0xF5, + 0xFC, 0xF1, 0x50, 0xBF, 0x99, 0x0C, 0x02, 0x21, 0x00, 0xD2, 0x5B, 0x5C, 0x87, 0x04, 0x00, 0x76, + 0xE4, 0xF2, 0x53, 0xF8, 0x26, 0x2E, 0x76, 0x3E, 0x2D, 0xD5, 0x1E, 0x7F, 0xF0, 0xBE, 0x15, 0x77, + 0x27, 0xC4, 0xBC, 0x42, 0x80, 0x7F, 0x17, 0xBD, 0x39, 0x01, 0x41, 0x04, 0xE6, 0xC2, 0x6E, 0xF6, + 0x7D, 0xC6, 0x10, 0xD2, 0xCD, 0x19, 0x24, 0x84, 0x78, 0x9A, 0x6C, 0xF9, 0xAE, 0xA9, 0x93, 0x0B, + 0x94, 0x4B, 0x7E, 0x2D, 0xB5, 0x34, 0x2B, 0x9D, 0x9E, 0x5B, 0x9F, 0xF7, 0x9A, 0xFF, 0x9A, 0x2E, + 0xE1, 0x97, 0x8D, 0xD7, 0xFD, 0x01, 0xDF, 0xC5, 0x22, 0xEE, 0x02, 0x28, 0x3D, 0x3B, 0x06, 0xA9, + 0xD0, 0x3A, 0xCF, 0x80, 0x96, 0x96, 0x8D, 0x7D, 0xBB, 0x0F, 0x91, 0x78, + }, + Sequence: math.MaxUint64, + }} + txOuts := []*TxOut{ + { + Value: 244623243, + ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, 0xA9, 0x14, 0xBA, 0xDE, 0xEC, 0xFD, 0xEF, 0x05, 0x07, 0x24, 0x7F, 0xC8, 0xF7, 0x42, 0x41, + 0xD7, 0x3B, 0xC0, 0x39, 0x97, 0x2D, 0x7B, 0x88, 0xAC, + }, Version: 0}, + }, + { + Value: 44602432, + 
ScriptPubKey: &externalapi.ScriptPublicKey{Script: []byte{
+ 0x76, 0xA9, 0x14, 0xC1, 0x09, 0x32, 0x48, 0x3F, 0xEC, 0x93, 0xED, 0x51, 0xF5, 0xFE, 0x95, 0xE7,
+ 0x25, 0x59, 0xF2, 0xCC, 0x70, 0x43, 0xF9, 0x88, 0xAC,
+ }, Version: 0},
+ },
+ }
+ tx2 := NewSubnetworkMsgTx(1, txIns, txOuts, &externalapi.DomainSubnetworkID{1, 2, 3}, 0, payload)
+
+ // Ensure the hash produced is expected.
+ tx2Hash := tx2.TxHash()
+ if !tx2Hash.Equal(wantHash2) {
+ t.Errorf("TxHash: wrong hash - got %v, want %v",
+ spew.Sprint(tx2Hash), spew.Sprint(wantHash2))
+ }
+
+ // Ensure the TxID produced for this non-coinbase transaction is expected.
+ tx2ID := tx2.TxID()
+ if !tx2ID.Equal(wantID2) {
+ t.Errorf("TxID: wrong ID - got %v, want %v",
+ spew.Sprint(tx2ID), spew.Sprint(wantID2))
+ }
+
+ if tx2ID.Equal((*externalapi.DomainTransactionID)(tx2Hash)) {
+ t.Errorf("tx2ID and tx2Hash shouldn't be the same for non-coinbase transaction with signature and/or payload")
+ }
+
+ tx2.TxIn[0].SignatureScript = []byte{}
+ newTx2Hash := tx2.TxHash()
+ if *tx2ID == (externalapi.DomainTransactionID)(*newTx2Hash) {
+ t.Errorf("tx2ID and newTx2Hash should not be the same even for transaction with an empty signature")
+ }
+} diff --git a/app/appmessage/p2p_msgverack.go b/app/appmessage/p2p_msgverack.go
new file mode 100644
index 0000000..d41e038
--- /dev/null
+++ b/app/appmessage/p2p_msgverack.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2013-2015 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package appmessage
+
+// MsgVerAck defines a spectre verack message which is used for a peer to
+// acknowledge a version message (MsgVersion) after it has used the information
+// to negotiate parameters. It implements the Message interface.
+//
+// This message has no payload.
+type MsgVerAck struct {
+ baseMessage
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation. 
+func (msg *MsgVerAck) Command() MessageCommand { + return CmdVerAck +} + +// NewMsgVerAck returns a new spectre verack message that conforms to the +// Message interface. +func NewMsgVerAck() *MsgVerAck { + return &MsgVerAck{} +} diff --git a/app/appmessage/p2p_msgverack_test.go b/app/appmessage/p2p_msgverack_test.go new file mode 100644 index 0000000..a567876 --- /dev/null +++ b/app/appmessage/p2p_msgverack_test.go @@ -0,0 +1,20 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "testing" +) + +// TestVerAck tests the MsgVerAck API. +func TestVerAck(t *testing.T) { + // Ensure the command is expected value. + wantCmd := MessageCommand(1) + msg := NewMsgVerAck() + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgVerAck: wrong command - got %v want %v", + cmd, wantCmd) + } +} diff --git a/app/appmessage/p2p_msgversion.go b/app/appmessage/p2p_msgversion.go new file mode 100644 index 0000000..8d39af8 --- /dev/null +++ b/app/appmessage/p2p_msgversion.go @@ -0,0 +1,125 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "fmt" + "strings" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/version" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + "github.com/spectre-project/spectred/util/mstime" +) + +// MaxUserAgentLen is the maximum allowed length for the user agent field in a +// version message (MsgVersion). +const MaxUserAgentLen = 256 + +// DefaultUserAgent for appmessage in the stack +var DefaultUserAgent = fmt.Sprintf("/spectred:%s/", version.Version()) + +// MsgVersion implements the Message interface and represents a spectre version +// message. 
It is used for a peer to advertise itself as soon as an outbound
+// connection is made. The remote peer then uses this information along with
+// its own to negotiate. The remote peer must then respond with a version
+// message of its own containing the negotiated values followed by a verack
+// message (MsgVerAck). This exchange must take place before any further
+// communication is allowed to proceed.
+type MsgVersion struct {
+ baseMessage
+ // Version of the protocol the node is using.
+ ProtocolVersion uint32
+
+ // The peer's network (mainnet, testnet, etc.)
+ Network string
+
+ // Bitfield which identifies the enabled services.
+ Services ServiceFlag
+
+ // Time the message was generated. This is encoded as an int64 on the appmessage.
+ Timestamp mstime.Time
+
+ // Address of the local peer.
+ Address *NetAddress
+
+ // The peer unique ID
+ ID *id.ID
+
+ // The user agent that generated the message. This is encoded as a varString
+ // on the appmessage. This has a max length of MaxUserAgentLen.
+ UserAgent string
+
+ // Don't announce transactions to peer.
+ DisableRelayTx bool
+
+ // The subnetwork of the generator of the version message. Should be nil in full nodes
+ SubnetworkID *externalapi.DomainSubnetworkID
+}
+
+// HasService returns whether the specified service is supported by the peer
+// that generated the message.
+func (msg *MsgVersion) HasService(service ServiceFlag) bool {
+ return msg.Services&service == service
+}
+
+// AddService adds service as a supported service by the peer generating the
+// message.
+func (msg *MsgVersion) AddService(service ServiceFlag) {
+ msg.Services |= service
+}
+
+// Command returns the protocol command string for the message. This is part
+// of the Message interface implementation. 
+func (msg *MsgVersion) Command() MessageCommand { + return CmdVersion +} + +// NewMsgVersion returns a new spectre version message that conforms to the +// Message interface using the passed parameters and defaults for the remaining +// fields. +func NewMsgVersion(addr *NetAddress, id *id.ID, network string, + subnetworkID *externalapi.DomainSubnetworkID, protocolVersion uint32) *MsgVersion { + + // Limit the timestamp to one millisecond precision since the protocol + // doesn't support better. + return &MsgVersion{ + ProtocolVersion: protocolVersion, + Network: network, + Services: 0, + Timestamp: mstime.Now(), + Address: addr, + ID: id, + UserAgent: DefaultUserAgent, + DisableRelayTx: false, + SubnetworkID: subnetworkID, + } +} + +// ValidateUserAgent checks userAgent length against MaxUserAgentLen +func ValidateUserAgent(userAgent string) error { + if len(userAgent) > MaxUserAgentLen { + str := fmt.Sprintf("user agent too long [len %d, max %d]", + len(userAgent), MaxUserAgentLen) + return messageError("MsgVersion", str) + } + return nil +} + +// AddUserAgent adds a user agent to the user agent string for the version +// message. The version string is not defined to any strict format, although +// it is recommended to use the form "major.minor.revision" e.g. "2.6.41". 
+func (msg *MsgVersion) AddUserAgent(name string, version string, + comments ...string) { + + newUserAgent := fmt.Sprintf("%s:%s", name, version) + if len(comments) != 0 { + newUserAgent = fmt.Sprintf("%s(%s)", newUserAgent, + strings.Join(comments, "; ")) + } + newUserAgent = fmt.Sprintf("%s%s/", msg.UserAgent, newUserAgent) + msg.UserAgent = newUserAgent +} diff --git a/app/appmessage/p2p_msgversion_test.go b/app/appmessage/p2p_msgversion_test.go new file mode 100644 index 0000000..49a95ad --- /dev/null +++ b/app/appmessage/p2p_msgversion_test.go @@ -0,0 +1,91 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "net" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" +) + +// TestVersion tests the MsgVersion API. +func TestVersion(t *testing.T) { + pver := uint32(4) + + // Create version message data. + tcpAddrMe := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 18111} + me := NewNetAddress(tcpAddrMe) + generatedID, err := id.GenerateID() + if err != nil { + t.Fatalf("id.GenerateID: %s", err) + } + + // Ensure we get the correct data back out. 
+ msg := NewMsgVersion(me, generatedID, "mainnet", nil, 4) + if msg.ProtocolVersion != pver { + t.Errorf("NewMsgVersion: wrong protocol version - got %v, want %v", + msg.ProtocolVersion, pver) + } + if !reflect.DeepEqual(msg.Address, me) { + t.Errorf("NewMsgVersion: wrong me address - got %v, want %v", + spew.Sdump(&msg.Address), spew.Sdump(me)) + } + if msg.ID.String() != generatedID.String() { + t.Errorf("NewMsgVersion: wrong nonce - got %s, want %s", + msg.ID, generatedID) + } + if msg.UserAgent != DefaultUserAgent { + t.Errorf("NewMsgVersion: wrong user agent - got %v, want %v", + msg.UserAgent, DefaultUserAgent) + } + if msg.DisableRelayTx { + t.Errorf("NewMsgVersion: disable relay tx is not false by "+ + "default - got %v, want %v", msg.DisableRelayTx, false) + } + + msg.AddUserAgent("myclient", "1.2.3", "optional", "comments") + customUserAgent := DefaultUserAgent + "myclient:1.2.3(optional; comments)/" + if msg.UserAgent != customUserAgent { + t.Errorf("AddUserAgent: wrong user agent - got %s, want %s", + msg.UserAgent, customUserAgent) + } + + msg.AddUserAgent("mygui", "3.4.5") + customUserAgent += "mygui:3.4.5/" + if msg.UserAgent != customUserAgent { + t.Errorf("AddUserAgent: wrong user agent - got %s, want %s", + msg.UserAgent, customUserAgent) + } + + // Version message should not have any services set by default. + if msg.Services != 0 { + t.Errorf("NewMsgVersion: wrong default services - got %v, want %v", + msg.Services, 0) + + } + if msg.HasService(SFNodeNetwork) { + t.Errorf("HasService: SFNodeNetwork service is set") + } + + // Ensure the command is expected value. + wantCmd := MessageCommand(0) + if cmd := msg.Command(); cmd != wantCmd { + t.Errorf("NewMsgVersion: wrong command - got %v want %v", + cmd, wantCmd) + } + + // Ensure adding the full service node flag works. 
+ msg.AddService(SFNodeNetwork) + if msg.Services != SFNodeNetwork { + t.Errorf("AddService: wrong services - got %v, want %v", + msg.Services, SFNodeNetwork) + } + if !msg.HasService(SFNodeNetwork) { + t.Errorf("HasService: SFNodeNetwork service not set") + } +} diff --git a/app/appmessage/p2p_netaddress.go b/app/appmessage/p2p_netaddress.go new file mode 100644 index 0000000..759ec46 --- /dev/null +++ b/app/appmessage/p2p_netaddress.go @@ -0,0 +1,64 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "net" + + "github.com/spectre-project/spectred/util/mstime" +) + +// NetAddress defines information about a peer on the network including the time +// it was last seen, the services it supports, its IP address, and port. +type NetAddress struct { + // Last time the address was seen. + Timestamp mstime.Time + + // IP address of the peer. + IP net.IP + + // Port the peer is using. This is encoded in big endian on the appmessage + // which differs from most everything else. + Port uint16 +} + +// TCPAddress converts the NetAddress to *net.TCPAddr +func (na *NetAddress) TCPAddress() *net.TCPAddr { + return &net.TCPAddr{ + IP: na.IP, + Port: int(na.Port), + } +} + +// NewNetAddressIPPort returns a new NetAddress using the provided IP, port, and +// supported services with defaults for the remaining fields. +func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { + return NewNetAddressTimestamp(mstime.Now(), ip, port) +} + +// NewNetAddressTimestamp returns a new NetAddress using the provided +// timestamp, IP, port, and supported services. The timestamp is rounded to +// single millisecond precision. +func NewNetAddressTimestamp( + timestamp mstime.Time, ip net.IP, port uint16) *NetAddress { + // Limit the timestamp to one millisecond precision since the protocol + // doesn't support better. 
+ na := NetAddress{ + Timestamp: timestamp, + IP: ip, + Port: port, + } + return &na +} + +// NewNetAddress returns a new NetAddress using the provided TCP address and +// supported services with defaults for the remaining fields. +func NewNetAddress(addr *net.TCPAddr) *NetAddress { + return NewNetAddressIPPort(addr.IP, uint16(addr.Port)) +} + +func (na NetAddress) String() string { + return na.TCPAddress().String() +} diff --git a/app/appmessage/p2p_netaddress_test.go b/app/appmessage/p2p_netaddress_test.go new file mode 100644 index 0000000..3b887f6 --- /dev/null +++ b/app/appmessage/p2p_netaddress_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "net" + "testing" +) + +// TestNetAddress tests the NetAddress API. +func TestNetAddress(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + port := 18111 + + // Test NewNetAddress. + na := NewNetAddress(&net.TCPAddr{IP: ip, Port: port}) + + // Ensure we get the same ip, port, and services back out. + if !na.IP.Equal(ip) { + t.Errorf("NetNetAddress: wrong ip - got %v, want %v", na.IP, ip) + } + if na.Port != uint16(port) { + t.Errorf("NetNetAddress: wrong port - got %v, want %v", na.Port, + port) + } +} diff --git a/app/appmessage/p2p_ready.go b/app/appmessage/p2p_ready.go new file mode 100644 index 0000000..e2fd385 --- /dev/null +++ b/app/appmessage/p2p_ready.go @@ -0,0 +1,22 @@ +package appmessage + +// MsgReady implements the Message interface and represents a spectre +// Ready message. It is used to notify that the peer is ready to receive +// messages. +// +// This message has no payload. +type MsgReady struct { + baseMessage +} + +// Command returns the protocol command string for the message. This is part +// of the Message interface implementation. 
+func (msg *MsgReady) Command() MessageCommand { + return CmdReady +} + +// NewMsgReady returns a new spectre Ready message that conforms to the +// Message interface. +func NewMsgReady() *MsgReady { + return &MsgReady{} +} diff --git a/app/appmessage/p2p_unexpectedpruningpoint.go b/app/appmessage/p2p_unexpectedpruningpoint.go new file mode 100644 index 0000000..179b215 --- /dev/null +++ b/app/appmessage/p2p_unexpectedpruningpoint.go @@ -0,0 +1,16 @@ +package appmessage + +// MsgUnexpectedPruningPoint represents a spectre UnexpectedPruningPoint message +type MsgUnexpectedPruningPoint struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *MsgUnexpectedPruningPoint) Command() MessageCommand { + return CmdUnexpectedPruningPoint +} + +// NewMsgUnexpectedPruningPoint returns a new spectre UnexpectedPruningPoint message +func NewMsgUnexpectedPruningPoint() *MsgUnexpectedPruningPoint { + return &MsgUnexpectedPruningPoint{} +} diff --git a/app/appmessage/protocol.go b/app/appmessage/protocol.go new file mode 100644 index 0000000..39bf44b --- /dev/null +++ b/app/appmessage/protocol.go @@ -0,0 +1,129 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import ( + "fmt" + "strconv" + "strings" +) + +const ( + // DefaultServices describes the default services that are supported by + // the server. + DefaultServices = SFNodeNetwork | SFNodeBloom | SFNodeCF +) + +// ServiceFlag identifies services supported by a spectre peer. +type ServiceFlag uint64 + +const ( + // SFNodeNetwork is a flag used to indicate a peer is a full node. + SFNodeNetwork ServiceFlag = 1 << iota + + // SFNodeGetUTXO is a flag used to indicate a peer supports the + // getutxos and utxos commands (BIP0064). + SFNodeGetUTXO + + // SFNodeBloom is a flag used to indicate a peer supports bloom + // filtering. 
+ SFNodeBloom + + // SFNodeXthin is a flag used to indicate a peer supports xthin blocks. + SFNodeXthin + + // SFNodeBit5 is a flag used to indicate a peer supports a service + // defined by bit 5. + SFNodeBit5 + + // SFNodeCF is a flag used to indicate a peer supports committed + // filters (CFs). + SFNodeCF +) + +// Map of service flags back to their constant names for pretty printing. +var sfStrings = map[ServiceFlag]string{ + SFNodeNetwork: "SFNodeNetwork", + SFNodeGetUTXO: "SFNodeGetUTXO", + SFNodeBloom: "SFNodeBloom", + SFNodeXthin: "SFNodeXthin", + SFNodeBit5: "SFNodeBit5", + SFNodeCF: "SFNodeCF", +} + +// orderedSFStrings is an ordered list of service flags from highest to +// lowest. +var orderedSFStrings = []ServiceFlag{ + SFNodeNetwork, + SFNodeGetUTXO, + SFNodeBloom, + SFNodeXthin, + SFNodeBit5, + SFNodeCF, +} + +// String returns the ServiceFlag in human-readable form. +func (f ServiceFlag) String() string { + // No flags are set. + if f == 0 { + return "0x0" + } + + // Add individual bit flags. + s := "" + for _, flag := range orderedSFStrings { + if f&flag == flag { + s += sfStrings[flag] + "|" + f -= flag + } + } + + // Add any remaining flags which aren't accounted for as hex. + s = strings.TrimRight(s, "|") + if f != 0 { + s += "|0x" + strconv.FormatUint(uint64(f), 16) + } + s = strings.TrimLeft(s, "|") + return s +} + +// SpectreNet represents which spectre network a message belongs to. +type SpectreNet uint32 + +// Constants used to indicate the message spectre network. They can also be +// used to seek to the next message when a stream's state is unknown, but +// this package does not provide that functionality since it's generally a +// better idea to simply disconnect clients that are misbehaving over TCP. +const ( + // Mainnet represents the main spectre network. + Mainnet SpectreNet = 0x3ddcf71d + + // Testnet represents the test network. + Testnet SpectreNet = 0xddb8af8f + + // Simnet represents the simulation test network. 
+ Simnet SpectreNet = 0x374dcf1c + + // Devnet represents the development test network. + Devnet SpectreNet = 0x732d87e1 +) + +// bnStrings is a map of spectre networks back to their constant names for +// pretty printing. +var bnStrings = map[SpectreNet]string{ + Mainnet: "Mainnet", + Testnet: "Testnet", + Simnet: "Simnet", + Devnet: "Devnet", +} + +// String returns the SpectreNet in human-readable form. +func (n SpectreNet) String() string { + if s, ok := bnStrings[n]; ok { + return s + } + + return fmt.Sprintf("Unknown SpectreNet (%d)", uint32(n)) +} diff --git a/app/appmessage/protocol_test.go b/app/appmessage/protocol_test.go new file mode 100644 index 0000000..95884f6 --- /dev/null +++ b/app/appmessage/protocol_test.go @@ -0,0 +1,57 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package appmessage + +import "testing" + +// TestServiceFlagStringer tests the stringized output for service flag types. +func TestServiceFlagStringer(t *testing.T) { + tests := []struct { + in ServiceFlag + want string + }{ + {0, "0x0"}, + {SFNodeNetwork, "SFNodeNetwork"}, + {SFNodeGetUTXO, "SFNodeGetUTXO"}, + {SFNodeBloom, "SFNodeBloom"}, + {SFNodeXthin, "SFNodeXthin"}, + {SFNodeBit5, "SFNodeBit5"}, + {SFNodeCF, "SFNodeCF"}, + {0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeXthin|SFNodeBit5|SFNodeCF|0xffffffc0"}, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.String() + if result != test.want { + t.Errorf("String #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} + +// TestSpectreNetStringer tests the stringized output for spectre net types. 
+func TestSpectreNetStringer(t *testing.T) { + tests := []struct { + in SpectreNet + want string + }{ + {Mainnet, "Mainnet"}, + {Testnet, "Testnet"}, + {Simnet, "Simnet"}, + {0xffffffff, "Unknown SpectreNet (4294967295)"}, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.String() + if result != test.want { + t.Errorf("String #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} diff --git a/app/appmessage/rpc_add_peer.go b/app/appmessage/rpc_add_peer.go new file mode 100644 index 0000000..7d6d733 --- /dev/null +++ b/app/appmessage/rpc_add_peer.go @@ -0,0 +1,39 @@ +package appmessage + +// AddPeerRequestMessage is an appmessage corresponding to +// its respective RPC message +type AddPeerRequestMessage struct { + baseMessage + Address string + IsPermanent bool +} + +// Command returns the protocol command string for the message +func (msg *AddPeerRequestMessage) Command() MessageCommand { + return CmdAddPeerRequestMessage +} + +// NewAddPeerRequestMessage returns a instance of the message +func NewAddPeerRequestMessage(address string, isPermanent bool) *AddPeerRequestMessage { + return &AddPeerRequestMessage{ + Address: address, + IsPermanent: isPermanent, + } +} + +// AddPeerResponseMessage is an appmessage corresponding to +// its respective RPC message +type AddPeerResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *AddPeerResponseMessage) Command() MessageCommand { + return CmdAddPeerResponseMessage +} + +// NewAddPeerResponseMessage returns a instance of the message +func NewAddPeerResponseMessage() *AddPeerResponseMessage { + return &AddPeerResponseMessage{} +} diff --git a/app/appmessage/rpc_ban.go b/app/appmessage/rpc_ban.go new file mode 100644 index 0000000..bf6d89a --- /dev/null +++ b/app/appmessage/rpc_ban.go @@ -0,0 +1,39 @@ +package appmessage + +// BanRequestMessage is an appmessage corresponding to 
+// its respective RPC message +type BanRequestMessage struct { + baseMessage + + IP string +} + +// Command returns the protocol command string for the message +func (msg *BanRequestMessage) Command() MessageCommand { + return CmdBanRequestMessage +} + +// NewBanRequestMessage returns an instance of the message +func NewBanRequestMessage(ip string) *BanRequestMessage { + return &BanRequestMessage{ + IP: ip, + } +} + +// BanResponseMessage is an appmessage corresponding to +// its respective RPC message +type BanResponseMessage struct { + baseMessage + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *BanResponseMessage) Command() MessageCommand { + return CmdBanResponseMessage +} + +// NewBanResponseMessage returns a instance of the message +func NewBanResponseMessage() *BanResponseMessage { + return &BanResponseMessage{} +} diff --git a/app/appmessage/rpc_estimate_network_hashes_per_second.go b/app/appmessage/rpc_estimate_network_hashes_per_second.go new file mode 100644 index 0000000..d298c91 --- /dev/null +++ b/app/appmessage/rpc_estimate_network_hashes_per_second.go @@ -0,0 +1,43 @@ +package appmessage + +// EstimateNetworkHashesPerSecondRequestMessage is an appmessage corresponding to +// its respective RPC message +type EstimateNetworkHashesPerSecondRequestMessage struct { + baseMessage + StartHash string + WindowSize uint32 +} + +// Command returns the protocol command string for the message +func (msg *EstimateNetworkHashesPerSecondRequestMessage) Command() MessageCommand { + return CmdEstimateNetworkHashesPerSecondRequestMessage +} + +// NewEstimateNetworkHashesPerSecondRequestMessage returns a instance of the message +func NewEstimateNetworkHashesPerSecondRequestMessage(startHash string, windowSize uint32) *EstimateNetworkHashesPerSecondRequestMessage { + return &EstimateNetworkHashesPerSecondRequestMessage{ + StartHash: startHash, + WindowSize: windowSize, + } +} + +// 
EstimateNetworkHashesPerSecondResponseMessage is an appmessage corresponding to +// its respective RPC message +type EstimateNetworkHashesPerSecondResponseMessage struct { + baseMessage + NetworkHashesPerSecond uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *EstimateNetworkHashesPerSecondResponseMessage) Command() MessageCommand { + return CmdEstimateNetworkHashesPerSecondResponseMessage +} + +// NewEstimateNetworkHashesPerSecondResponseMessage returns a instance of the message +func NewEstimateNetworkHashesPerSecondResponseMessage(networkHashesPerSecond uint64) *EstimateNetworkHashesPerSecondResponseMessage { + return &EstimateNetworkHashesPerSecondResponseMessage{ + NetworkHashesPerSecond: networkHashesPerSecond, + } +} diff --git a/app/appmessage/rpc_get_balance_by_address.go b/app/appmessage/rpc_get_balance_by_address.go new file mode 100644 index 0000000..3bd81b9 --- /dev/null +++ b/app/appmessage/rpc_get_balance_by_address.go @@ -0,0 +1,41 @@ +package appmessage + +// GetBalanceByAddressRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBalanceByAddressRequestMessage struct { + baseMessage + Address string +} + +// Command returns the protocol command string for the message +func (msg *GetBalanceByAddressRequestMessage) Command() MessageCommand { + return CmdGetBalanceByAddressRequestMessage +} + +// NewGetBalanceByAddressRequest returns a instance of the message +func NewGetBalanceByAddressRequest(address string) *GetBalanceByAddressRequestMessage { + return &GetBalanceByAddressRequestMessage{ + Address: address, + } +} + +// GetBalanceByAddressResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBalanceByAddressResponseMessage struct { + baseMessage + Balance uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBalanceByAddressResponseMessage) Command() MessageCommand 
{ + return CmdGetBalanceByAddressResponseMessage +} + +// NewGetBalanceByAddressResponse returns an instance of the message +func NewGetBalanceByAddressResponse(Balance uint64) *GetBalanceByAddressResponseMessage { + return &GetBalanceByAddressResponseMessage{ + Balance: Balance, + } +} diff --git a/app/appmessage/rpc_get_balances_by_addresses.go b/app/appmessage/rpc_get_balances_by_addresses.go new file mode 100644 index 0000000..7a58b56 --- /dev/null +++ b/app/appmessage/rpc_get_balances_by_addresses.go @@ -0,0 +1,47 @@ +package appmessage + +// GetBalancesByAddressesRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBalancesByAddressesRequestMessage struct { + baseMessage + Addresses []string +} + +// Command returns the protocol command string for the message +func (msg *GetBalancesByAddressesRequestMessage) Command() MessageCommand { + return CmdGetBalancesByAddressesRequestMessage +} + +// NewGetBalancesByAddressesRequest returns a instance of the message +func NewGetBalancesByAddressesRequest(addresses []string) *GetBalancesByAddressesRequestMessage { + return &GetBalancesByAddressesRequestMessage{ + Addresses: addresses, + } +} + +// BalancesByAddressesEntry represents the balance of some address +type BalancesByAddressesEntry struct { + Address string + Balance uint64 +} + +// GetBalancesByAddressesResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBalancesByAddressesResponseMessage struct { + baseMessage + Entries []*BalancesByAddressesEntry + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBalancesByAddressesResponseMessage) Command() MessageCommand { + return CmdGetBalancesByAddressesResponseMessage +} + +// NewGetBalancesByAddressesResponse returns an instance of the message +func NewGetBalancesByAddressesResponse(entries []*BalancesByAddressesEntry) *GetBalancesByAddressesResponseMessage { + return 
&GetBalancesByAddressesResponseMessage{ + Entries: entries, + } +} diff --git a/app/appmessage/rpc_get_block.go b/app/appmessage/rpc_get_block.go new file mode 100644 index 0000000..688edc6 --- /dev/null +++ b/app/appmessage/rpc_get_block.go @@ -0,0 +1,41 @@ +package appmessage + +// GetBlockRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockRequestMessage struct { + baseMessage + Hash string + IncludeTransactions bool +} + +// Command returns the protocol command string for the message +func (msg *GetBlockRequestMessage) Command() MessageCommand { + return CmdGetBlockRequestMessage +} + +// NewGetBlockRequestMessage returns a instance of the message +func NewGetBlockRequestMessage(hash string, includeTransactions bool) *GetBlockRequestMessage { + return &GetBlockRequestMessage{ + Hash: hash, + IncludeTransactions: includeTransactions, + } +} + +// GetBlockResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockResponseMessage struct { + baseMessage + Block *RPCBlock + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBlockResponseMessage) Command() MessageCommand { + return CmdGetBlockResponseMessage +} + +// NewGetBlockResponseMessage returns a instance of the message +func NewGetBlockResponseMessage() *GetBlockResponseMessage { + return &GetBlockResponseMessage{} +} diff --git a/app/appmessage/rpc_get_block_count.go b/app/appmessage/rpc_get_block_count.go new file mode 100644 index 0000000..e3a73f4 --- /dev/null +++ b/app/appmessage/rpc_get_block_count.go @@ -0,0 +1,42 @@ +package appmessage + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// GetBlockCountRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockCountRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg 
*GetBlockCountRequestMessage) Command() MessageCommand { + return CmdGetBlockCountRequestMessage +} + +// NewGetBlockCountRequestMessage returns a instance of the message +func NewGetBlockCountRequestMessage() *GetBlockCountRequestMessage { + return &GetBlockCountRequestMessage{} +} + +// GetBlockCountResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockCountResponseMessage struct { + baseMessage + BlockCount uint64 + HeaderCount uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBlockCountResponseMessage) Command() MessageCommand { + return CmdGetBlockCountResponseMessage +} + +// NewGetBlockCountResponseMessage returns a instance of the message +func NewGetBlockCountResponseMessage(syncInfo *externalapi.SyncInfo) *GetBlockCountResponseMessage { + return &GetBlockCountResponseMessage{ + BlockCount: syncInfo.BlockCount, + HeaderCount: syncInfo.HeaderCount, + } +} diff --git a/app/appmessage/rpc_get_block_dag_info.go b/app/appmessage/rpc_get_block_dag_info.go new file mode 100644 index 0000000..3368426 --- /dev/null +++ b/app/appmessage/rpc_get_block_dag_info.go @@ -0,0 +1,44 @@ +package appmessage + +// GetBlockDAGInfoRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockDAGInfoRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *GetBlockDAGInfoRequestMessage) Command() MessageCommand { + return CmdGetBlockDAGInfoRequestMessage +} + +// NewGetBlockDAGInfoRequestMessage returns a instance of the message +func NewGetBlockDAGInfoRequestMessage() *GetBlockDAGInfoRequestMessage { + return &GetBlockDAGInfoRequestMessage{} +} + +// GetBlockDAGInfoResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockDAGInfoResponseMessage struct { + baseMessage + NetworkName string + BlockCount uint64 + HeaderCount uint64 + TipHashes []string + 
VirtualParentHashes []string + Difficulty float64 + PastMedianTime int64 + PruningPointHash string + VirtualDAAScore uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBlockDAGInfoResponseMessage) Command() MessageCommand { + return CmdGetBlockDAGInfoResponseMessage +} + +// NewGetBlockDAGInfoResponseMessage returns a instance of the message +func NewGetBlockDAGInfoResponseMessage() *GetBlockDAGInfoResponseMessage { + return &GetBlockDAGInfoResponseMessage{} +} diff --git a/app/appmessage/rpc_get_block_template.go b/app/appmessage/rpc_get_block_template.go new file mode 100644 index 0000000..2f784e2 --- /dev/null +++ b/app/appmessage/rpc_get_block_template.go @@ -0,0 +1,45 @@ +package appmessage + +// GetBlockTemplateRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockTemplateRequestMessage struct { + baseMessage + PayAddress string + ExtraData string +} + +// Command returns the protocol command string for the message +func (msg *GetBlockTemplateRequestMessage) Command() MessageCommand { + return CmdGetBlockTemplateRequestMessage +} + +// NewGetBlockTemplateRequestMessage returns a instance of the message +func NewGetBlockTemplateRequestMessage(payAddress, extraData string) *GetBlockTemplateRequestMessage { + return &GetBlockTemplateRequestMessage{ + PayAddress: payAddress, + ExtraData: extraData, + } +} + +// GetBlockTemplateResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBlockTemplateResponseMessage struct { + baseMessage + Block *RPCBlock + IsSynced bool + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBlockTemplateResponseMessage) Command() MessageCommand { + return CmdGetBlockTemplateResponseMessage +} + +// NewGetBlockTemplateResponseMessage returns a instance of the message +func NewGetBlockTemplateResponseMessage(block *RPCBlock, isSynced bool) 
*GetBlockTemplateResponseMessage { + return &GetBlockTemplateResponseMessage{ + Block: block, + IsSynced: isSynced, + } +} diff --git a/app/appmessage/rpc_get_blocks.go b/app/appmessage/rpc_get_blocks.go new file mode 100644 index 0000000..4610dfe --- /dev/null +++ b/app/appmessage/rpc_get_blocks.go @@ -0,0 +1,45 @@ +package appmessage + +// GetBlocksRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetBlocksRequestMessage struct { + baseMessage + LowHash string + IncludeBlocks bool + IncludeTransactions bool +} + +// Command returns the protocol command string for the message +func (msg *GetBlocksRequestMessage) Command() MessageCommand { + return CmdGetBlocksRequestMessage +} + +// NewGetBlocksRequestMessage returns a instance of the message +func NewGetBlocksRequestMessage(lowHash string, includeBlocks bool, + includeTransactions bool) *GetBlocksRequestMessage { + return &GetBlocksRequestMessage{ + LowHash: lowHash, + IncludeBlocks: includeBlocks, + IncludeTransactions: includeTransactions, + } +} + +// GetBlocksResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetBlocksResponseMessage struct { + baseMessage + BlockHashes []string + Blocks []*RPCBlock + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetBlocksResponseMessage) Command() MessageCommand { + return CmdGetBlocksResponseMessage +} + +// NewGetBlocksResponseMessage returns a instance of the message +func NewGetBlocksResponseMessage() *GetBlocksResponseMessage { + return &GetBlocksResponseMessage{} +} diff --git a/app/appmessage/rpc_get_coin_supply.go b/app/appmessage/rpc_get_coin_supply.go new file mode 100644 index 0000000..848977d --- /dev/null +++ b/app/appmessage/rpc_get_coin_supply.go @@ -0,0 +1,40 @@ +package appmessage + +// GetCoinSupplyRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetCoinSupplyRequestMessage struct { + baseMessage 
+} + +// Command returns the protocol command string for the message +func (msg *GetCoinSupplyRequestMessage) Command() MessageCommand { + return CmdGetCoinSupplyRequestMessage +} + +// NewGetCoinSupplyRequestMessage returns an instance of the message +func NewGetCoinSupplyRequestMessage() *GetCoinSupplyRequestMessage { + return &GetCoinSupplyRequestMessage{} +} + +// GetCoinSupplyResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetCoinSupplyResponseMessage struct { + baseMessage + MaxSompi uint64 + CirculatingSompi uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetCoinSupplyResponseMessage) Command() MessageCommand { + return CmdGetCoinSupplyResponseMessage +} + +// NewGetCoinSupplyResponseMessage returns an instance of the message +func NewGetCoinSupplyResponseMessage(maxSompi uint64, circulatingSompi uint64) *GetCoinSupplyResponseMessage { + return &GetCoinSupplyResponseMessage{ + MaxSompi: maxSompi, + CirculatingSompi: circulatingSompi, + } +} diff --git a/app/appmessage/rpc_get_connected_peer_info.go b/app/appmessage/rpc_get_connected_peer_info.go new file mode 100644 index 0000000..84b55d6 --- /dev/null +++ b/app/appmessage/rpc_get_connected_peer_info.go @@ -0,0 +1,50 @@ +package appmessage + +// GetConnectedPeerInfoRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetConnectedPeerInfoRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *GetConnectedPeerInfoRequestMessage) Command() MessageCommand { + return CmdGetConnectedPeerInfoRequestMessage +} + +// NewGetConnectedPeerInfoRequestMessage returns an instance of the message +func NewGetConnectedPeerInfoRequestMessage() *GetConnectedPeerInfoRequestMessage { + return &GetConnectedPeerInfoRequestMessage{} +} + +// GetConnectedPeerInfoResponseMessage is an appmessage corresponding to +// its respective RPC message
+type GetConnectedPeerInfoResponseMessage struct { + baseMessage + Infos []*GetConnectedPeerInfoMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetConnectedPeerInfoResponseMessage) Command() MessageCommand { + return CmdGetConnectedPeerInfoResponseMessage +} + +// NewGetConnectedPeerInfoResponseMessage returns a instance of the message +func NewGetConnectedPeerInfoResponseMessage(infos []*GetConnectedPeerInfoMessage) *GetConnectedPeerInfoResponseMessage { + return &GetConnectedPeerInfoResponseMessage{ + Infos: infos, + } +} + +// GetConnectedPeerInfoMessage holds information about a connected peer +type GetConnectedPeerInfoMessage struct { + ID string + Address string + LastPingDuration int64 + IsOutbound bool + TimeOffset int64 + UserAgent string + AdvertisedProtocolVersion uint32 + TimeConnected int64 + IsIBDPeer bool +} diff --git a/app/appmessage/rpc_get_current_network.go b/app/appmessage/rpc_get_current_network.go new file mode 100644 index 0000000..f787a4b --- /dev/null +++ b/app/appmessage/rpc_get_current_network.go @@ -0,0 +1,38 @@ +package appmessage + +// GetCurrentNetworkRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetCurrentNetworkRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *GetCurrentNetworkRequestMessage) Command() MessageCommand { + return CmdGetCurrentNetworkRequestMessage +} + +// NewGetCurrentNetworkRequestMessage returns a instance of the message +func NewGetCurrentNetworkRequestMessage() *GetCurrentNetworkRequestMessage { + return &GetCurrentNetworkRequestMessage{} +} + +// GetCurrentNetworkResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetCurrentNetworkResponseMessage struct { + baseMessage + CurrentNetwork string + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg 
*GetCurrentNetworkResponseMessage) Command() MessageCommand { + return CmdGetCurrentNetworkResponseMessage +} + +// NewGetCurrentNetworkResponseMessage returns a instance of the message +func NewGetCurrentNetworkResponseMessage(currentNetwork string) *GetCurrentNetworkResponseMessage { + return &GetCurrentNetworkResponseMessage{ + CurrentNetwork: currentNetwork, + } +} diff --git a/app/appmessage/rpc_get_headers.go b/app/appmessage/rpc_get_headers.go new file mode 100644 index 0000000..ff75f73 --- /dev/null +++ b/app/appmessage/rpc_get_headers.go @@ -0,0 +1,45 @@ +package appmessage + +// GetHeadersRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetHeadersRequestMessage struct { + baseMessage + StartHash string + Limit uint64 + IsAscending bool +} + +// Command returns the protocol command string for the message +func (msg *GetHeadersRequestMessage) Command() MessageCommand { + return CmdGetHeadersRequestMessage +} + +// NewGetHeadersRequestMessage returns a instance of the message +func NewGetHeadersRequestMessage(startHash string, limit uint64, isAscending bool) *GetHeadersRequestMessage { + return &GetHeadersRequestMessage{ + StartHash: startHash, + Limit: limit, + IsAscending: isAscending, + } +} + +// GetHeadersResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetHeadersResponseMessage struct { + baseMessage + Headers []string + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetHeadersResponseMessage) Command() MessageCommand { + return CmdGetHeadersResponseMessage +} + +// NewGetHeadersResponseMessage returns a instance of the message +func NewGetHeadersResponseMessage(headers []string) *GetHeadersResponseMessage { + return &GetHeadersResponseMessage{ + Headers: headers, + } +} diff --git a/app/appmessage/rpc_get_info.go b/app/appmessage/rpc_get_info.go new file mode 100644 index 0000000..00c9882 --- /dev/null +++ 
b/app/appmessage/rpc_get_info.go @@ -0,0 +1,46 @@ +package appmessage + +// GetInfoRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetInfoRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *GetInfoRequestMessage) Command() MessageCommand { + return CmdGetInfoRequestMessage +} + +// NewGetInfoRequestMessage returns a instance of the message +func NewGetInfoRequestMessage() *GetInfoRequestMessage { + return &GetInfoRequestMessage{} +} + +// GetInfoResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetInfoResponseMessage struct { + baseMessage + P2PID string + MempoolSize uint64 + ServerVersion string + IsUtxoIndexed bool + IsSynced bool + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetInfoResponseMessage) Command() MessageCommand { + return CmdGetInfoResponseMessage +} + +// NewGetInfoResponseMessage returns a instance of the message +func NewGetInfoResponseMessage(p2pID string, mempoolSize uint64, serverVersion string, isUtxoIndexed bool, isSynced bool) *GetInfoResponseMessage { + return &GetInfoResponseMessage{ + P2PID: p2pID, + MempoolSize: mempoolSize, + ServerVersion: serverVersion, + IsUtxoIndexed: isUtxoIndexed, + IsSynced: isSynced, + } +} diff --git a/app/appmessage/rpc_get_mempool_entries.go b/app/appmessage/rpc_get_mempool_entries.go new file mode 100644 index 0000000..3b1f5b7 --- /dev/null +++ b/app/appmessage/rpc_get_mempool_entries.go @@ -0,0 +1,43 @@ +package appmessage + +// GetMempoolEntriesRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetMempoolEntriesRequestMessage struct { + baseMessage + IncludeOrphanPool bool + FilterTransactionPool bool +} + +// Command returns the protocol command string for the message +func (msg *GetMempoolEntriesRequestMessage) Command() MessageCommand { + return 
CmdGetMempoolEntriesRequestMessage +} + +// NewGetMempoolEntriesRequestMessage returns a instance of the message +func NewGetMempoolEntriesRequestMessage(includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesRequestMessage { + return &GetMempoolEntriesRequestMessage{ + IncludeOrphanPool: includeOrphanPool, + FilterTransactionPool: filterTransactionPool, + } +} + +// GetMempoolEntriesResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetMempoolEntriesResponseMessage struct { + baseMessage + Entries []*MempoolEntry + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetMempoolEntriesResponseMessage) Command() MessageCommand { + return CmdGetMempoolEntriesResponseMessage +} + +// NewGetMempoolEntriesResponseMessage returns a instance of the message +func NewGetMempoolEntriesResponseMessage(entries []*MempoolEntry) *GetMempoolEntriesResponseMessage { + return &GetMempoolEntriesResponseMessage{ + Entries: entries, + } +} diff --git a/app/appmessage/rpc_get_mempool_entries_by_addresses.go b/app/appmessage/rpc_get_mempool_entries_by_addresses.go new file mode 100644 index 0000000..234dcd2 --- /dev/null +++ b/app/appmessage/rpc_get_mempool_entries_by_addresses.go @@ -0,0 +1,52 @@ +package appmessage + +// MempoolEntryByAddress represents MempoolEntries associated with some address +type MempoolEntryByAddress struct { + Address string + Receiving []*MempoolEntry + Sending []*MempoolEntry +} + +// GetMempoolEntriesByAddressesRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetMempoolEntriesByAddressesRequestMessage struct { + baseMessage + Addresses []string + IncludeOrphanPool bool + FilterTransactionPool bool +} + +// Command returns the protocol command string for the message +func (msg *GetMempoolEntriesByAddressesRequestMessage) Command() MessageCommand { + return CmdGetMempoolEntriesByAddressesRequestMessage +} + +// 
NewGetMempoolEntriesByAddressesRequestMessage returns a instance of the message +func NewGetMempoolEntriesByAddressesRequestMessage(addresses []string, includeOrphanPool bool, filterTransactionPool bool) *GetMempoolEntriesByAddressesRequestMessage { + return &GetMempoolEntriesByAddressesRequestMessage{ + Addresses: addresses, + IncludeOrphanPool: includeOrphanPool, + FilterTransactionPool: filterTransactionPool, + } +} + +// GetMempoolEntriesByAddressesResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetMempoolEntriesByAddressesResponseMessage struct { + baseMessage + Entries []*MempoolEntryByAddress + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetMempoolEntriesByAddressesResponseMessage) Command() MessageCommand { + return CmdGetMempoolEntriesByAddressesResponseMessage +} + +// NewGetMempoolEntriesByAddressesResponseMessage returns a instance of the message +func NewGetMempoolEntriesByAddressesResponseMessage(entries []*MempoolEntryByAddress) *GetMempoolEntriesByAddressesResponseMessage { + return &GetMempoolEntriesByAddressesResponseMessage{ + Entries: entries, + } +} diff --git a/app/appmessage/rpc_get_mempool_entry.go b/app/appmessage/rpc_get_mempool_entry.go new file mode 100644 index 0000000..5ac37d6 --- /dev/null +++ b/app/appmessage/rpc_get_mempool_entry.go @@ -0,0 +1,56 @@ +package appmessage + +// GetMempoolEntryRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetMempoolEntryRequestMessage struct { + baseMessage + TxID string + IncludeOrphanPool bool + FilterTransactionPool bool +} + +// Command returns the protocol command string for the message +func (msg *GetMempoolEntryRequestMessage) Command() MessageCommand { + return CmdGetMempoolEntryRequestMessage +} + +// NewGetMempoolEntryRequestMessage returns a instance of the message +func NewGetMempoolEntryRequestMessage(txID string, includeOrphanPool bool, filterTransactionPool 
bool) *GetMempoolEntryRequestMessage { + return &GetMempoolEntryRequestMessage{ + TxID: txID, + IncludeOrphanPool: includeOrphanPool, + FilterTransactionPool: filterTransactionPool, + } +} + +// GetMempoolEntryResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetMempoolEntryResponseMessage struct { + baseMessage + Entry *MempoolEntry + + Error *RPCError +} + +// MempoolEntry represents a transaction in the mempool. +type MempoolEntry struct { + Fee uint64 + Transaction *RPCTransaction + IsOrphan bool +} + +// Command returns the protocol command string for the message +func (msg *GetMempoolEntryResponseMessage) Command() MessageCommand { + return CmdGetMempoolEntryResponseMessage +} + +// NewGetMempoolEntryResponseMessage returns a instance of the message +func NewGetMempoolEntryResponseMessage(fee uint64, transaction *RPCTransaction, isOrphan bool) *GetMempoolEntryResponseMessage { + return &GetMempoolEntryResponseMessage{ + Entry: &MempoolEntry{ + Fee: fee, + Transaction: transaction, + IsOrphan: isOrphan, + }, + } +} diff --git a/app/appmessage/rpc_get_peer_addresses.go b/app/appmessage/rpc_get_peer_addresses.go new file mode 100644 index 0000000..a026e53 --- /dev/null +++ b/app/appmessage/rpc_get_peer_addresses.go @@ -0,0 +1,46 @@ +package appmessage + +// GetPeerAddressesRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetPeerAddressesRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *GetPeerAddressesRequestMessage) Command() MessageCommand { + return CmdGetPeerAddressesRequestMessage +} + +// NewGetPeerAddressesRequestMessage returns a instance of the message +func NewGetPeerAddressesRequestMessage() *GetPeerAddressesRequestMessage { + return &GetPeerAddressesRequestMessage{} +} + +// GetPeerAddressesResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetPeerAddressesResponseMessage 
struct { + baseMessage + Addresses []*GetPeerAddressesKnownAddressMessage + BannedAddresses []*GetPeerAddressesKnownAddressMessage + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetPeerAddressesResponseMessage) Command() MessageCommand { + return CmdGetPeerAddressesResponseMessage +} + +// NewGetPeerAddressesResponseMessage returns a instance of the message +func NewGetPeerAddressesResponseMessage(addresses []*GetPeerAddressesKnownAddressMessage, bannedAddresses []*GetPeerAddressesKnownAddressMessage) *GetPeerAddressesResponseMessage { + return &GetPeerAddressesResponseMessage{ + Addresses: addresses, + BannedAddresses: bannedAddresses, + } +} + +// GetPeerAddressesKnownAddressMessage is an appmessage corresponding to +// its respective RPC message +type GetPeerAddressesKnownAddressMessage struct { + Addr string +} diff --git a/app/appmessage/rpc_get_selected_tip_hash.go b/app/appmessage/rpc_get_selected_tip_hash.go new file mode 100644 index 0000000..fbb18c5 --- /dev/null +++ b/app/appmessage/rpc_get_selected_tip_hash.go @@ -0,0 +1,38 @@ +package appmessage + +// GetSelectedTipHashRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetSelectedTipHashRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *GetSelectedTipHashRequestMessage) Command() MessageCommand { + return CmdGetSelectedTipHashRequestMessage +} + +// NewGetSelectedTipHashRequestMessage returns a instance of the message +func NewGetSelectedTipHashRequestMessage() *GetSelectedTipHashRequestMessage { + return &GetSelectedTipHashRequestMessage{} +} + +// GetSelectedTipHashResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetSelectedTipHashResponseMessage struct { + baseMessage + SelectedTipHash string + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg 
*GetSelectedTipHashResponseMessage) Command() MessageCommand { + return CmdGetSelectedTipHashResponseMessage +} + +// NewGetSelectedTipHashResponseMessage returns a instance of the message +func NewGetSelectedTipHashResponseMessage(selectedTipHash string) *GetSelectedTipHashResponseMessage { + return &GetSelectedTipHashResponseMessage{ + SelectedTipHash: selectedTipHash, + } +} diff --git a/app/appmessage/rpc_get_subnetwork.go b/app/appmessage/rpc_get_subnetwork.go new file mode 100644 index 0000000..bb21bdb --- /dev/null +++ b/app/appmessage/rpc_get_subnetwork.go @@ -0,0 +1,41 @@ +package appmessage + +// GetSubnetworkRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetSubnetworkRequestMessage struct { + baseMessage + SubnetworkID string +} + +// Command returns the protocol command string for the message +func (msg *GetSubnetworkRequestMessage) Command() MessageCommand { + return CmdGetSubnetworkRequestMessage +} + +// NewGetSubnetworkRequestMessage returns a instance of the message +func NewGetSubnetworkRequestMessage(subnetworkID string) *GetSubnetworkRequestMessage { + return &GetSubnetworkRequestMessage{ + SubnetworkID: subnetworkID, + } +} + +// GetSubnetworkResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetSubnetworkResponseMessage struct { + baseMessage + GasLimit uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetSubnetworkResponseMessage) Command() MessageCommand { + return CmdGetSubnetworkResponseMessage +} + +// NewGetSubnetworkResponseMessage returns a instance of the message +func NewGetSubnetworkResponseMessage(gasLimit uint64) *GetSubnetworkResponseMessage { + return &GetSubnetworkResponseMessage{ + GasLimit: gasLimit, + } +} diff --git a/app/appmessage/rpc_get_utxos_by_addresses.go b/app/appmessage/rpc_get_utxos_by_addresses.go new file mode 100644 index 0000000..0fff8b8 --- /dev/null +++ 
b/app/appmessage/rpc_get_utxos_by_addresses.go @@ -0,0 +1,41 @@ +package appmessage + +// GetUTXOsByAddressesRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetUTXOsByAddressesRequestMessage struct { + baseMessage + Addresses []string +} + +// Command returns the protocol command string for the message +func (msg *GetUTXOsByAddressesRequestMessage) Command() MessageCommand { + return CmdGetUTXOsByAddressesRequestMessage +} + +// NewGetUTXOsByAddressesRequestMessage returns a instance of the message +func NewGetUTXOsByAddressesRequestMessage(addresses []string) *GetUTXOsByAddressesRequestMessage { + return &GetUTXOsByAddressesRequestMessage{ + Addresses: addresses, + } +} + +// GetUTXOsByAddressesResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetUTXOsByAddressesResponseMessage struct { + baseMessage + Entries []*UTXOsByAddressesEntry + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetUTXOsByAddressesResponseMessage) Command() MessageCommand { + return CmdGetUTXOsByAddressesResponseMessage +} + +// NewGetUTXOsByAddressesResponseMessage returns a instance of the message +func NewGetUTXOsByAddressesResponseMessage(entries []*UTXOsByAddressesEntry) *GetUTXOsByAddressesResponseMessage { + return &GetUTXOsByAddressesResponseMessage{ + Entries: entries, + } +} diff --git a/app/appmessage/rpc_get_virtual_selected_parent_blue_score.go b/app/appmessage/rpc_get_virtual_selected_parent_blue_score.go new file mode 100644 index 0000000..67fb6b4 --- /dev/null +++ b/app/appmessage/rpc_get_virtual_selected_parent_blue_score.go @@ -0,0 +1,38 @@ +package appmessage + +// GetVirtualSelectedParentBlueScoreRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetVirtualSelectedParentBlueScoreRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg 
*GetVirtualSelectedParentBlueScoreRequestMessage) Command() MessageCommand { + return CmdGetVirtualSelectedParentBlueScoreRequestMessage +} + +// NewGetVirtualSelectedParentBlueScoreRequestMessage returns a instance of the message +func NewGetVirtualSelectedParentBlueScoreRequestMessage() *GetVirtualSelectedParentBlueScoreRequestMessage { + return &GetVirtualSelectedParentBlueScoreRequestMessage{} +} + +// GetVirtualSelectedParentBlueScoreResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetVirtualSelectedParentBlueScoreResponseMessage struct { + baseMessage + BlueScore uint64 + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetVirtualSelectedParentBlueScoreResponseMessage) Command() MessageCommand { + return CmdGetVirtualSelectedParentBlueScoreResponseMessage +} + +// NewGetVirtualSelectedParentBlueScoreResponseMessage returns a instance of the message +func NewGetVirtualSelectedParentBlueScoreResponseMessage(blueScore uint64) *GetVirtualSelectedParentBlueScoreResponseMessage { + return &GetVirtualSelectedParentBlueScoreResponseMessage{ + BlueScore: blueScore, + } +} diff --git a/app/appmessage/rpc_get_virtual_selected_parent_chain_from_block.go b/app/appmessage/rpc_get_virtual_selected_parent_chain_from_block.go new file mode 100644 index 0000000..2d04061 --- /dev/null +++ b/app/appmessage/rpc_get_virtual_selected_parent_chain_from_block.go @@ -0,0 +1,58 @@ +package appmessage + +// GetVirtualSelectedParentChainFromBlockRequestMessage is an appmessage corresponding to +// its respective RPC message +type GetVirtualSelectedParentChainFromBlockRequestMessage struct { + baseMessage + StartHash string + IncludeAcceptedTransactionIDs bool +} + +// Command returns the protocol command string for the message +func (msg *GetVirtualSelectedParentChainFromBlockRequestMessage) Command() MessageCommand { + return CmdGetVirtualSelectedParentChainFromBlockRequestMessage +} + +// 
NewGetVirtualSelectedParentChainFromBlockRequestMessage returns a instance of the message +func NewGetVirtualSelectedParentChainFromBlockRequestMessage( + startHash string, includeAcceptedTransactionIDs bool) *GetVirtualSelectedParentChainFromBlockRequestMessage { + + return &GetVirtualSelectedParentChainFromBlockRequestMessage{ + StartHash: startHash, + IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs, + } +} + +// AcceptedTransactionIDs is a part of the GetVirtualSelectedParentChainFromBlockResponseMessage and +// VirtualSelectedParentChainChangedNotificationMessage appmessages +type AcceptedTransactionIDs struct { + AcceptingBlockHash string + AcceptedTransactionIDs []string +} + +// GetVirtualSelectedParentChainFromBlockResponseMessage is an appmessage corresponding to +// its respective RPC message +type GetVirtualSelectedParentChainFromBlockResponseMessage struct { + baseMessage + RemovedChainBlockHashes []string + AddedChainBlockHashes []string + AcceptedTransactionIDs []*AcceptedTransactionIDs + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *GetVirtualSelectedParentChainFromBlockResponseMessage) Command() MessageCommand { + return CmdGetVirtualSelectedParentChainFromBlockResponseMessage +} + +// NewGetVirtualSelectedParentChainFromBlockResponseMessage returns a instance of the message +func NewGetVirtualSelectedParentChainFromBlockResponseMessage(removedChainBlockHashes, + addedChainBlockHashes []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *GetVirtualSelectedParentChainFromBlockResponseMessage { + + return &GetVirtualSelectedParentChainFromBlockResponseMessage{ + RemovedChainBlockHashes: removedChainBlockHashes, + AddedChainBlockHashes: addedChainBlockHashes, + AcceptedTransactionIDs: acceptedTransactionIDs, + } +} diff --git a/app/appmessage/rpc_notify_block_added.go b/app/appmessage/rpc_notify_block_added.go new file mode 100644 index 0000000..5ec626b --- /dev/null +++ 
b/app/appmessage/rpc_notify_block_added.go @@ -0,0 +1,53 @@ +package appmessage + +// NotifyBlockAddedRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyBlockAddedRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NotifyBlockAddedRequestMessage) Command() MessageCommand { + return CmdNotifyBlockAddedRequestMessage +} + +// NewNotifyBlockAddedRequestMessage returns a instance of the message +func NewNotifyBlockAddedRequestMessage() *NotifyBlockAddedRequestMessage { + return &NotifyBlockAddedRequestMessage{} +} + +// NotifyBlockAddedResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyBlockAddedResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyBlockAddedResponseMessage) Command() MessageCommand { + return CmdNotifyBlockAddedResponseMessage +} + +// NewNotifyBlockAddedResponseMessage returns a instance of the message +func NewNotifyBlockAddedResponseMessage() *NotifyBlockAddedResponseMessage { + return &NotifyBlockAddedResponseMessage{} +} + +// BlockAddedNotificationMessage is an appmessage corresponding to +// its respective RPC message +type BlockAddedNotificationMessage struct { + baseMessage + Block *RPCBlock +} + +// Command returns the protocol command string for the message +func (msg *BlockAddedNotificationMessage) Command() MessageCommand { + return CmdBlockAddedNotificationMessage +} + +// NewBlockAddedNotificationMessage returns a instance of the message +func NewBlockAddedNotificationMessage(block *RPCBlock) *BlockAddedNotificationMessage { + return &BlockAddedNotificationMessage{ + Block: block, + } +} diff --git a/app/appmessage/rpc_notify_finality_conflicts.go b/app/appmessage/rpc_notify_finality_conflicts.go new file mode 100644 index 0000000..2ac321a --- /dev/null +++ 
b/app/appmessage/rpc_notify_finality_conflicts.go @@ -0,0 +1,72 @@ +package appmessage + +// NotifyFinalityConflictsRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyFinalityConflictsRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NotifyFinalityConflictsRequestMessage) Command() MessageCommand { + return CmdNotifyFinalityConflictsRequestMessage +} + +// NewNotifyFinalityConflictsRequestMessage returns a instance of the message +func NewNotifyFinalityConflictsRequestMessage() *NotifyFinalityConflictsRequestMessage { + return &NotifyFinalityConflictsRequestMessage{} +} + +// NotifyFinalityConflictsResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyFinalityConflictsResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyFinalityConflictsResponseMessage) Command() MessageCommand { + return CmdNotifyFinalityConflictsResponseMessage +} + +// NewNotifyFinalityConflictsResponseMessage returns a instance of the message +func NewNotifyFinalityConflictsResponseMessage() *NotifyFinalityConflictsResponseMessage { + return &NotifyFinalityConflictsResponseMessage{} +} + +// FinalityConflictNotificationMessage is an appmessage corresponding to +// its respective RPC message +type FinalityConflictNotificationMessage struct { + baseMessage + ViolatingBlockHash string +} + +// Command returns the protocol command string for the message +func (msg *FinalityConflictNotificationMessage) Command() MessageCommand { + return CmdFinalityConflictNotificationMessage +} + +// NewFinalityConflictNotificationMessage returns a instance of the message +func NewFinalityConflictNotificationMessage(violatingBlockHash string) *FinalityConflictNotificationMessage { + return &FinalityConflictNotificationMessage{ + ViolatingBlockHash: violatingBlockHash, + } +} + 
+// FinalityConflictResolvedNotificationMessage is an appmessage corresponding to +// its respective RPC message +type FinalityConflictResolvedNotificationMessage struct { + baseMessage + FinalityBlockHash string +} + +// Command returns the protocol command string for the message +func (msg *FinalityConflictResolvedNotificationMessage) Command() MessageCommand { + return CmdFinalityConflictResolvedNotificationMessage +} + +// NewFinalityConflictResolvedNotificationMessage returns a instance of the message +func NewFinalityConflictResolvedNotificationMessage(finalityBlockHash string) *FinalityConflictResolvedNotificationMessage { + return &FinalityConflictResolvedNotificationMessage{ + FinalityBlockHash: finalityBlockHash, + } +} diff --git a/app/appmessage/rpc_notify_new_block_template.go b/app/appmessage/rpc_notify_new_block_template.go new file mode 100644 index 0000000..185fef5 --- /dev/null +++ b/app/appmessage/rpc_notify_new_block_template.go @@ -0,0 +1,50 @@ +package appmessage + +// NotifyNewBlockTemplateRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyNewBlockTemplateRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NotifyNewBlockTemplateRequestMessage) Command() MessageCommand { + return CmdNotifyNewBlockTemplateRequestMessage +} + +// NewNotifyNewBlockTemplateRequestMessage returns an instance of the message +func NewNotifyNewBlockTemplateRequestMessage() *NotifyNewBlockTemplateRequestMessage { + return &NotifyNewBlockTemplateRequestMessage{} +} + +// NotifyNewBlockTemplateResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyNewBlockTemplateResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyNewBlockTemplateResponseMessage) Command() MessageCommand { + return CmdNotifyNewBlockTemplateResponseMessage +} + +// 
NewNotifyNewBlockTemplateResponseMessage returns an instance of the message +func NewNotifyNewBlockTemplateResponseMessage() *NotifyNewBlockTemplateResponseMessage { + return &NotifyNewBlockTemplateResponseMessage{} +} + +// NewBlockTemplateNotificationMessage is an appmessage corresponding to +// its respective RPC message +type NewBlockTemplateNotificationMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NewBlockTemplateNotificationMessage) Command() MessageCommand { + return CmdNewBlockTemplateNotificationMessage +} + +// NewNewBlockTemplateNotificationMessage returns an instance of the message +func NewNewBlockTemplateNotificationMessage() *NewBlockTemplateNotificationMessage { + return &NewBlockTemplateNotificationMessage{} +} diff --git a/app/appmessage/rpc_notify_pruning_point_utxo_set_override.go b/app/appmessage/rpc_notify_pruning_point_utxo_set_override.go new file mode 100644 index 0000000..b16d48a --- /dev/null +++ b/app/appmessage/rpc_notify_pruning_point_utxo_set_override.go @@ -0,0 +1,83 @@ +package appmessage + +// NotifyPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyPruningPointUTXOSetOverrideRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NotifyPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand { + return CmdNotifyPruningPointUTXOSetOverrideRequestMessage +} + +// NewNotifyPruningPointUTXOSetOverrideRequestMessage returns a instance of the message +func NewNotifyPruningPointUTXOSetOverrideRequestMessage() *NotifyPruningPointUTXOSetOverrideRequestMessage { + return &NotifyPruningPointUTXOSetOverrideRequestMessage{} +} + +// NotifyPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyPruningPointUTXOSetOverrideResponseMessage struct { + baseMessage + Error *RPCError 
+} + +// Command returns the protocol command string for the message +func (msg *NotifyPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand { + return CmdNotifyPruningPointUTXOSetOverrideResponseMessage +} + +// NewNotifyPruningPointUTXOSetOverrideResponseMessage returns a instance of the message +func NewNotifyPruningPointUTXOSetOverrideResponseMessage() *NotifyPruningPointUTXOSetOverrideResponseMessage { + return &NotifyPruningPointUTXOSetOverrideResponseMessage{} +} + +// PruningPointUTXOSetOverrideNotificationMessage is an appmessage corresponding to +// its respective RPC message +type PruningPointUTXOSetOverrideNotificationMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *PruningPointUTXOSetOverrideNotificationMessage) Command() MessageCommand { + return CmdPruningPointUTXOSetOverrideNotificationMessage +} + +// NewPruningPointUTXOSetOverrideNotificationMessage returns a instance of the message +func NewPruningPointUTXOSetOverrideNotificationMessage() *PruningPointUTXOSetOverrideNotificationMessage { + return &PruningPointUTXOSetOverrideNotificationMessage{} +} + +// StopNotifyingPruningPointUTXOSetOverrideRequestMessage is an appmessage corresponding to +// its respective RPC message +type StopNotifyingPruningPointUTXOSetOverrideRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Command() MessageCommand { + return CmdNotifyPruningPointUTXOSetOverrideRequestMessage +} + +// NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage returns a instance of the message +func NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage() *StopNotifyingPruningPointUTXOSetOverrideRequestMessage { + return &StopNotifyingPruningPointUTXOSetOverrideRequestMessage{} +} + +// StopNotifyingPruningPointUTXOSetOverrideResponseMessage is an appmessage corresponding to +// its 
respective RPC message +type StopNotifyingPruningPointUTXOSetOverrideResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Command() MessageCommand { + return CmdNotifyPruningPointUTXOSetOverrideResponseMessage +} + +// NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage returns a instance of the message +func NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage() *StopNotifyingPruningPointUTXOSetOverrideResponseMessage { + return &StopNotifyingPruningPointUTXOSetOverrideResponseMessage{} +} diff --git a/app/appmessage/rpc_notify_utxos_changed.go b/app/appmessage/rpc_notify_utxos_changed.go new file mode 100644 index 0000000..e8f0e6d --- /dev/null +++ b/app/appmessage/rpc_notify_utxos_changed.go @@ -0,0 +1,62 @@ +package appmessage + +// NotifyUTXOsChangedRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyUTXOsChangedRequestMessage struct { + baseMessage + Addresses []string +} + +// Command returns the protocol command string for the message +func (msg *NotifyUTXOsChangedRequestMessage) Command() MessageCommand { + return CmdNotifyUTXOsChangedRequestMessage +} + +// NewNotifyUTXOsChangedRequestMessage returns a instance of the message +func NewNotifyUTXOsChangedRequestMessage(addresses []string) *NotifyUTXOsChangedRequestMessage { + return &NotifyUTXOsChangedRequestMessage{ + Addresses: addresses, + } +} + +// NotifyUTXOsChangedResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyUTXOsChangedResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyUTXOsChangedResponseMessage) Command() MessageCommand { + return CmdNotifyUTXOsChangedResponseMessage +} + +// NewNotifyUTXOsChangedResponseMessage returns a instance of the message +func 
NewNotifyUTXOsChangedResponseMessage() *NotifyUTXOsChangedResponseMessage { + return &NotifyUTXOsChangedResponseMessage{} +} + +// UTXOsChangedNotificationMessage is an appmessage corresponding to +// its respective RPC message +type UTXOsChangedNotificationMessage struct { + baseMessage + Added []*UTXOsByAddressesEntry + Removed []*UTXOsByAddressesEntry +} + +// UTXOsByAddressesEntry represents a UTXO of some address +type UTXOsByAddressesEntry struct { + Address string + Outpoint *RPCOutpoint + UTXOEntry *RPCUTXOEntry +} + +// Command returns the protocol command string for the message +func (msg *UTXOsChangedNotificationMessage) Command() MessageCommand { + return CmdUTXOsChangedNotificationMessage +} + +// NewUTXOsChangedNotificationMessage returns a instance of the message +func NewUTXOsChangedNotificationMessage() *UTXOsChangedNotificationMessage { + return &UTXOsChangedNotificationMessage{} +} diff --git a/app/appmessage/rpc_notify_virtual_daa_score_changed.go b/app/appmessage/rpc_notify_virtual_daa_score_changed.go new file mode 100644 index 0000000..5831552 --- /dev/null +++ b/app/appmessage/rpc_notify_virtual_daa_score_changed.go @@ -0,0 +1,55 @@ +package appmessage + +// NotifyVirtualDaaScoreChangedRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyVirtualDaaScoreChangedRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NotifyVirtualDaaScoreChangedRequestMessage) Command() MessageCommand { + return CmdNotifyVirtualDaaScoreChangedRequestMessage +} + +// NewNotifyVirtualDaaScoreChangedRequestMessage returns a instance of the message +func NewNotifyVirtualDaaScoreChangedRequestMessage() *NotifyVirtualDaaScoreChangedRequestMessage { + return &NotifyVirtualDaaScoreChangedRequestMessage{} +} + +// NotifyVirtualDaaScoreChangedResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyVirtualDaaScoreChangedResponseMessage 
struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyVirtualDaaScoreChangedResponseMessage) Command() MessageCommand { + return CmdNotifyVirtualDaaScoreChangedResponseMessage +} + +// NewNotifyVirtualDaaScoreChangedResponseMessage returns a instance of the message +func NewNotifyVirtualDaaScoreChangedResponseMessage() *NotifyVirtualDaaScoreChangedResponseMessage { + return &NotifyVirtualDaaScoreChangedResponseMessage{} +} + +// VirtualDaaScoreChangedNotificationMessage is an appmessage corresponding to +// its respective RPC message +type VirtualDaaScoreChangedNotificationMessage struct { + baseMessage + VirtualDaaScore uint64 +} + +// Command returns the protocol command string for the message +func (msg *VirtualDaaScoreChangedNotificationMessage) Command() MessageCommand { + return CmdVirtualDaaScoreChangedNotificationMessage +} + +// NewVirtualDaaScoreChangedNotificationMessage returns a instance of the message +func NewVirtualDaaScoreChangedNotificationMessage( + virtualDaaScore uint64) *VirtualDaaScoreChangedNotificationMessage { + + return &VirtualDaaScoreChangedNotificationMessage{ + VirtualDaaScore: virtualDaaScore, + } +} diff --git a/app/appmessage/rpc_notify_virtual_selected_parent_chain_blue_score_changed.go b/app/appmessage/rpc_notify_virtual_selected_parent_chain_blue_score_changed.go new file mode 100644 index 0000000..c4c2b35 --- /dev/null +++ b/app/appmessage/rpc_notify_virtual_selected_parent_chain_blue_score_changed.go @@ -0,0 +1,55 @@ +package appmessage + +// NotifyVirtualSelectedParentBlueScoreChangedRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyVirtualSelectedParentBlueScoreChangedRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) Command() MessageCommand { + return 
CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage +} + +// NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage returns a instance of the message +func NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage() *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage { + return &NotifyVirtualSelectedParentBlueScoreChangedRequestMessage{} +} + +// NotifyVirtualSelectedParentBlueScoreChangedResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyVirtualSelectedParentBlueScoreChangedResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) Command() MessageCommand { + return CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage +} + +// NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage returns a instance of the message +func NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage() *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage { + return &NotifyVirtualSelectedParentBlueScoreChangedResponseMessage{} +} + +// VirtualSelectedParentBlueScoreChangedNotificationMessage is an appmessage corresponding to +// its respective RPC message +type VirtualSelectedParentBlueScoreChangedNotificationMessage struct { + baseMessage + VirtualSelectedParentBlueScore uint64 +} + +// Command returns the protocol command string for the message +func (msg *VirtualSelectedParentBlueScoreChangedNotificationMessage) Command() MessageCommand { + return CmdVirtualSelectedParentBlueScoreChangedNotificationMessage +} + +// NewVirtualSelectedParentBlueScoreChangedNotificationMessage returns a instance of the message +func NewVirtualSelectedParentBlueScoreChangedNotificationMessage( + virtualSelectedParentBlueScore uint64) *VirtualSelectedParentBlueScoreChangedNotificationMessage { + + return &VirtualSelectedParentBlueScoreChangedNotificationMessage{ + 
VirtualSelectedParentBlueScore: virtualSelectedParentBlueScore, + } +} diff --git a/app/appmessage/rpc_notify_virtual_selected_parent_chain_changed.go b/app/appmessage/rpc_notify_virtual_selected_parent_chain_changed.go new file mode 100644 index 0000000..1c72b8f --- /dev/null +++ b/app/appmessage/rpc_notify_virtual_selected_parent_chain_changed.go @@ -0,0 +1,64 @@ +package appmessage + +// NotifyVirtualSelectedParentChainChangedRequestMessage is an appmessage corresponding to +// its respective RPC message +type NotifyVirtualSelectedParentChainChangedRequestMessage struct { + baseMessage + IncludeAcceptedTransactionIDs bool +} + +// Command returns the protocol command string for the message +func (msg *NotifyVirtualSelectedParentChainChangedRequestMessage) Command() MessageCommand { + return CmdNotifyVirtualSelectedParentChainChangedRequestMessage +} + +// NewNotifyVirtualSelectedParentChainChangedRequestMessage returns an instance of the message +func NewNotifyVirtualSelectedParentChainChangedRequestMessage( + includeAcceptedTransactionIDs bool) *NotifyVirtualSelectedParentChainChangedRequestMessage { + + return &NotifyVirtualSelectedParentChainChangedRequestMessage{ + IncludeAcceptedTransactionIDs: includeAcceptedTransactionIDs, + } +} + +// NotifyVirtualSelectedParentChainChangedResponseMessage is an appmessage corresponding to +// its respective RPC message +type NotifyVirtualSelectedParentChainChangedResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *NotifyVirtualSelectedParentChainChangedResponseMessage) Command() MessageCommand { + return CmdNotifyVirtualSelectedParentChainChangedResponseMessage +} + +// NewNotifyVirtualSelectedParentChainChangedResponseMessage returns a instance of the message +func NewNotifyVirtualSelectedParentChainChangedResponseMessage() *NotifyVirtualSelectedParentChainChangedResponseMessage { + return 
&NotifyVirtualSelectedParentChainChangedResponseMessage{} +} + +// VirtualSelectedParentChainChangedNotificationMessage is an appmessage corresponding to +// its respective RPC message +type VirtualSelectedParentChainChangedNotificationMessage struct { + baseMessage + RemovedChainBlockHashes []string + AddedChainBlockHashes []string + AcceptedTransactionIDs []*AcceptedTransactionIDs +} + +// Command returns the protocol command string for the message +func (msg *VirtualSelectedParentChainChangedNotificationMessage) Command() MessageCommand { + return CmdVirtualSelectedParentChainChangedNotificationMessage +} + +// NewVirtualSelectedParentChainChangedNotificationMessage returns a instance of the message +func NewVirtualSelectedParentChainChangedNotificationMessage(removedChainBlockHashes, + addedChainBlocks []string, acceptedTransactionIDs []*AcceptedTransactionIDs) *VirtualSelectedParentChainChangedNotificationMessage { + + return &VirtualSelectedParentChainChangedNotificationMessage{ + RemovedChainBlockHashes: removedChainBlockHashes, + AddedChainBlockHashes: addedChainBlocks, + AcceptedTransactionIDs: acceptedTransactionIDs, + } +} diff --git a/app/appmessage/rpc_resolve_finality_conflict.go b/app/appmessage/rpc_resolve_finality_conflict.go new file mode 100644 index 0000000..16ce546 --- /dev/null +++ b/app/appmessage/rpc_resolve_finality_conflict.go @@ -0,0 +1,37 @@ +package appmessage + +// ResolveFinalityConflictRequestMessage is an appmessage corresponding to +// its respective RPC message +type ResolveFinalityConflictRequestMessage struct { + baseMessage + FinalityBlockHash string +} + +// Command returns the protocol command string for the message +func (msg *ResolveFinalityConflictRequestMessage) Command() MessageCommand { + return CmdResolveFinalityConflictRequestMessage +} + +// NewResolveFinalityConflictRequestMessage returns a instance of the message +func NewResolveFinalityConflictRequestMessage(finalityBlockHash string) 
*ResolveFinalityConflictRequestMessage { + return &ResolveFinalityConflictRequestMessage{ + FinalityBlockHash: finalityBlockHash, + } +} + +// ResolveFinalityConflictResponseMessage is an appmessage corresponding to +// its respective RPC message +type ResolveFinalityConflictResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *ResolveFinalityConflictResponseMessage) Command() MessageCommand { + return CmdResolveFinalityConflictResponseMessage +} + +// NewResolveFinalityConflictResponseMessage returns a instance of the message +func NewResolveFinalityConflictResponseMessage() *ResolveFinalityConflictResponseMessage { + return &ResolveFinalityConflictResponseMessage{} +} diff --git a/app/appmessage/rpc_shut_down.go b/app/appmessage/rpc_shut_down.go new file mode 100644 index 0000000..876897d --- /dev/null +++ b/app/appmessage/rpc_shut_down.go @@ -0,0 +1,34 @@ +package appmessage + +// ShutDownRequestMessage is an appmessage corresponding to +// its respective RPC message +type ShutDownRequestMessage struct { + baseMessage +} + +// Command returns the protocol command string for the message +func (msg *ShutDownRequestMessage) Command() MessageCommand { + return CmdShutDownRequestMessage +} + +// NewShutDownRequestMessage returns a instance of the message +func NewShutDownRequestMessage() *ShutDownRequestMessage { + return &ShutDownRequestMessage{} +} + +// ShutDownResponseMessage is an appmessage corresponding to +// its respective RPC message +type ShutDownResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *ShutDownResponseMessage) Command() MessageCommand { + return CmdShutDownResponseMessage +} + +// NewShutDownResponseMessage returns a instance of the message +func NewShutDownResponseMessage() *ShutDownResponseMessage { + return &ShutDownResponseMessage{} +} diff --git 
a/app/appmessage/rpc_stop_notifying_utxos_changed.go b/app/appmessage/rpc_stop_notifying_utxos_changed.go new file mode 100644 index 0000000..16e0e34 --- /dev/null +++ b/app/appmessage/rpc_stop_notifying_utxos_changed.go @@ -0,0 +1,37 @@ +package appmessage + +// StopNotifyingUTXOsChangedRequestMessage is an appmessage corresponding to +// its respective RPC message +type StopNotifyingUTXOsChangedRequestMessage struct { + baseMessage + Addresses []string +} + +// Command returns the protocol command string for the message +func (msg *StopNotifyingUTXOsChangedRequestMessage) Command() MessageCommand { + return CmdStopNotifyingUTXOsChangedRequestMessage +} + +// NewStopNotifyingUTXOsChangedRequestMessage returns a instance of the message +func NewStopNotifyingUTXOsChangedRequestMessage(addresses []string) *StopNotifyingUTXOsChangedRequestMessage { + return &StopNotifyingUTXOsChangedRequestMessage{ + Addresses: addresses, + } +} + +// StopNotifyingUTXOsChangedResponseMessage is an appmessage corresponding to +// its respective RPC message +type StopNotifyingUTXOsChangedResponseMessage struct { + baseMessage + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *StopNotifyingUTXOsChangedResponseMessage) Command() MessageCommand { + return CmdStopNotifyingUTXOsChangedResponseMessage +} + +// NewStopNotifyingUTXOsChangedResponseMessage returns a instance of the message +func NewStopNotifyingUTXOsChangedResponseMessage() *StopNotifyingUTXOsChangedResponseMessage { + return &StopNotifyingUTXOsChangedResponseMessage{} +} diff --git a/app/appmessage/rpc_submit_block.go b/app/appmessage/rpc_submit_block.go new file mode 100644 index 0000000..3717f5b --- /dev/null +++ b/app/appmessage/rpc_submit_block.go @@ -0,0 +1,105 @@ +package appmessage + +// SubmitBlockRequestMessage is an appmessage corresponding to +// its respective RPC message +type SubmitBlockRequestMessage struct { + baseMessage + Block *RPCBlock + AllowNonDAABlocks bool 
+} + +// Command returns the protocol command string for the message +func (msg *SubmitBlockRequestMessage) Command() MessageCommand { + return CmdSubmitBlockRequestMessage +} + +// NewSubmitBlockRequestMessage returns a instance of the message +func NewSubmitBlockRequestMessage(block *RPCBlock, allowNonDAABlocks bool) *SubmitBlockRequestMessage { + return &SubmitBlockRequestMessage{ + Block: block, + AllowNonDAABlocks: allowNonDAABlocks, + } +} + +// RejectReason describes the reason why a block sent by SubmitBlock was rejected +type RejectReason byte + +// RejectReason constants +// Not using iota, since in the .proto file those are hardcoded +const ( + RejectReasonNone RejectReason = 0 + RejectReasonBlockInvalid RejectReason = 1 + RejectReasonIsInIBD RejectReason = 2 +) + +var rejectReasonToString = map[RejectReason]string{ + RejectReasonNone: "None", + RejectReasonBlockInvalid: "Block is invalid", + RejectReasonIsInIBD: "Node is in IBD", +} + +func (rr RejectReason) String() string { + return rejectReasonToString[rr] +} + +// SubmitBlockResponseMessage is an appmessage corresponding to +// its respective RPC message +type SubmitBlockResponseMessage struct { + baseMessage + RejectReason RejectReason + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *SubmitBlockResponseMessage) Command() MessageCommand { + return CmdSubmitBlockResponseMessage +} + +// NewSubmitBlockResponseMessage returns an instance of the message +func NewSubmitBlockResponseMessage() *SubmitBlockResponseMessage { + return &SubmitBlockResponseMessage{} +} + +// RPCBlock is a spectred block representation meant to be +// used over RPC +type RPCBlock struct { + Header *RPCBlockHeader + Transactions []*RPCTransaction + VerboseData *RPCBlockVerboseData +} + +// RPCBlockHeader is a spectred block header representation meant to be +// used over RPC +type RPCBlockHeader struct { + Version uint32 + Parents []*RPCBlockLevelParents + HashMerkleRoot string + 
AcceptedIDMerkleRoot string + UTXOCommitment string + Timestamp int64 + Bits uint32 + Nonce uint64 + DAAScore uint64 + BlueScore uint64 + BlueWork string + PruningPoint string +} + +// RPCBlockLevelParents holds parent hashes for one block level +type RPCBlockLevelParents struct { + ParentHashes []string +} + +// RPCBlockVerboseData holds verbose data about a block +type RPCBlockVerboseData struct { + Hash string + Difficulty float64 + SelectedParentHash string + TransactionIDs []string + IsHeaderOnly bool + BlueScore uint64 + ChildrenHashes []string + MergeSetBluesHashes []string + MergeSetRedsHashes []string + IsChainBlock bool +} diff --git a/app/appmessage/rpc_submit_transaction.go b/app/appmessage/rpc_submit_transaction.go new file mode 100644 index 0000000..77fcf74 --- /dev/null +++ b/app/appmessage/rpc_submit_transaction.go @@ -0,0 +1,115 @@ +package appmessage + +// SubmitTransactionRequestMessage is an appmessage corresponding to +// its respective RPC message +type SubmitTransactionRequestMessage struct { + baseMessage + Transaction *RPCTransaction + AllowOrphan bool +} + +// Command returns the protocol command string for the message +func (msg *SubmitTransactionRequestMessage) Command() MessageCommand { + return CmdSubmitTransactionRequestMessage +} + +// NewSubmitTransactionRequestMessage returns a instance of the message +func NewSubmitTransactionRequestMessage(transaction *RPCTransaction, allowOrphan bool) *SubmitTransactionRequestMessage { + return &SubmitTransactionRequestMessage{ + Transaction: transaction, + AllowOrphan: allowOrphan, + } +} + +// SubmitTransactionResponseMessage is an appmessage corresponding to +// its respective RPC message +type SubmitTransactionResponseMessage struct { + baseMessage + TransactionID string + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *SubmitTransactionResponseMessage) Command() MessageCommand { + return CmdSubmitTransactionResponseMessage +} + +// 
NewSubmitTransactionResponseMessage returns a instance of the message +func NewSubmitTransactionResponseMessage(transactionID string) *SubmitTransactionResponseMessage { + return &SubmitTransactionResponseMessage{ + TransactionID: transactionID, + } +} + +// RPCTransaction is a spectred transaction representation meant to be +// used over RPC +type RPCTransaction struct { + Version uint16 + Inputs []*RPCTransactionInput + Outputs []*RPCTransactionOutput + LockTime uint64 + SubnetworkID string + Gas uint64 + Payload string + VerboseData *RPCTransactionVerboseData +} + +// RPCTransactionInput is a spectred transaction input representation +// meant to be used over RPC +type RPCTransactionInput struct { + PreviousOutpoint *RPCOutpoint + SignatureScript string + Sequence uint64 + SigOpCount byte + VerboseData *RPCTransactionInputVerboseData +} + +// RPCScriptPublicKey is a spectred ScriptPublicKey representation +type RPCScriptPublicKey struct { + Version uint16 + Script string +} + +// RPCTransactionOutput is a spectred transaction output representation +// meant to be used over RPC +type RPCTransactionOutput struct { + Amount uint64 + ScriptPublicKey *RPCScriptPublicKey + VerboseData *RPCTransactionOutputVerboseData +} + +// RPCOutpoint is a spectred outpoint representation meant to be used +// over RPC +type RPCOutpoint struct { + TransactionID string + Index uint32 +} + +// RPCUTXOEntry is a spectred utxo entry representation meant to be used +// over RPC +type RPCUTXOEntry struct { + Amount uint64 + ScriptPublicKey *RPCScriptPublicKey + BlockDAAScore uint64 + IsCoinbase bool +} + +// RPCTransactionVerboseData holds verbose data about a transaction +type RPCTransactionVerboseData struct { + TransactionID string + Hash string + Mass uint64 + BlockHash string + BlockTime uint64 +} + +// RPCTransactionInputVerboseData holds data about a transaction input +type RPCTransactionInputVerboseData struct { +} + +// RPCTransactionOutputVerboseData holds data about a 
transaction output +type RPCTransactionOutputVerboseData struct { + ScriptPublicKeyType string + ScriptPublicKeyAddress string +} diff --git a/app/appmessage/rpc_unban.go b/app/appmessage/rpc_unban.go new file mode 100644 index 0000000..f2e5583 --- /dev/null +++ b/app/appmessage/rpc_unban.go @@ -0,0 +1,39 @@ +package appmessage + +// UnbanRequestMessage is an appmessage corresponding to +// its respective RPC message +type UnbanRequestMessage struct { + baseMessage + + IP string +} + +// Command returns the protocol command string for the message +func (msg *UnbanRequestMessage) Command() MessageCommand { + return CmdUnbanRequestMessage +} + +// NewUnbanRequestMessage returns an instance of the message +func NewUnbanRequestMessage(ip string) *UnbanRequestMessage { + return &UnbanRequestMessage{ + IP: ip, + } +} + +// UnbanResponseMessage is an appmessage corresponding to +// its respective RPC message +type UnbanResponseMessage struct { + baseMessage + + Error *RPCError +} + +// Command returns the protocol command string for the message +func (msg *UnbanResponseMessage) Command() MessageCommand { + return CmdUnbanResponseMessage +} + +// NewUnbanResponseMessage returns a instance of the message +func NewUnbanResponseMessage() *UnbanResponseMessage { + return &UnbanResponseMessage{} +} diff --git a/app/appmessage/testdata/megatx.bin.bz2 b/app/appmessage/testdata/megatx.bin.bz2 new file mode 100644 index 0000000..0e71e5b Binary files /dev/null and b/app/appmessage/testdata/megatx.bin.bz2 differ diff --git a/app/component_manager.go b/app/component_manager.go new file mode 100644 index 0000000..045da9b --- /dev/null +++ b/app/component_manager.go @@ -0,0 +1,174 @@ +package app + +import ( + "fmt" + "sync/atomic" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + + "github.com/spectre-project/spectred/app/protocol" + "github.com/spectre-project/spectred/app/rpc" + 
"github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/utxoindex" + "github.com/spectre-project/spectred/infrastructure/config" + infrastructuredatabase "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + "github.com/spectre-project/spectred/util/panics" +) + +// ComponentManager is a wrapper for all the spectred services +type ComponentManager struct { + cfg *config.Config + addressManager *addressmanager.AddressManager + protocolManager *protocol.Manager + rpcManager *rpc.Manager + connectionManager *connmanager.ConnectionManager + netAdapter *netadapter.NetAdapter + + started, shutdown int32 +} + +// Start launches all the spectred services. +func (a *ComponentManager) Start() { + // Already started? + if atomic.AddInt32(&a.started, 1) != 1 { + return + } + + log.Trace("Starting spectred") + + err := a.netAdapter.Start() + if err != nil { + panics.Exit(log, fmt.Sprintf("Error starting the net adapter: %+v", err)) + } + + a.connectionManager.Start() +} + +// Stop gracefully shuts down all the spectred services. +func (a *ComponentManager) Stop() { + // Make sure this only happens once. 
+ if atomic.AddInt32(&a.shutdown, 1) != 1 { + log.Infof("Spectred is already in the process of shutting down") + return + } + + log.Warnf("Spectred shutting down") + + a.connectionManager.Stop() + + err := a.netAdapter.Stop() + if err != nil { + log.Errorf("Error stopping the net adapter: %+v", err) + } + + a.protocolManager.Close() + close(a.protocolManager.Context().Domain().ConsensusEventsChannel()) + + return +} + +// NewComponentManager returns a new ComponentManager instance. +// Use Start() to begin all services within this ComponentManager +func NewComponentManager(cfg *config.Config, db infrastructuredatabase.Database, interrupt chan<- struct{}) ( + *ComponentManager, error) { + + consensusConfig := consensus.Config{ + Params: *cfg.ActiveNetParams, + IsArchival: cfg.IsArchivalNode, + EnableSanityCheckPruningUTXOSet: cfg.EnableSanityCheckPruningUTXOSet, + } + mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params) + mempoolConfig.MaximumOrphanTransactionCount = cfg.MaxOrphanTxs + mempoolConfig.MinimumRelayTransactionFee = cfg.MinRelayTxFee + + domain, err := domain.New(&consensusConfig, mempoolConfig, db) + if err != nil { + return nil, err + } + + netAdapter, err := netadapter.NewNetAdapter(cfg) + if err != nil { + return nil, err + } + + addressManager, err := addressmanager.New(addressmanager.NewConfig(cfg), db) + if err != nil { + return nil, err + } + + var utxoIndex *utxoindex.UTXOIndex + if cfg.UTXOIndex { + utxoIndex, err = utxoindex.New(domain, db) + if err != nil { + return nil, err + } + + log.Infof("UTXO index started") + } + + connectionManager, err := connmanager.New(cfg, netAdapter, addressManager) + if err != nil { + return nil, err + } + protocolManager, err := protocol.NewManager(cfg, domain, netAdapter, addressManager, connectionManager) + if err != nil { + return nil, err + } + rpcManager := setupRPC(cfg, domain, netAdapter, protocolManager, connectionManager, addressManager, utxoIndex, domain.ConsensusEventsChannel(), interrupt) 
+ + return &ComponentManager{ + cfg: cfg, + protocolManager: protocolManager, + rpcManager: rpcManager, + connectionManager: connectionManager, + netAdapter: netAdapter, + addressManager: addressManager, + }, nil + +} + +func setupRPC( + cfg *config.Config, + domain domain.Domain, + netAdapter *netadapter.NetAdapter, + protocolManager *protocol.Manager, + connectionManager *connmanager.ConnectionManager, + addressManager *addressmanager.AddressManager, + utxoIndex *utxoindex.UTXOIndex, + consensusEventsChan chan externalapi.ConsensusEvent, + shutDownChan chan<- struct{}, +) *rpc.Manager { + + rpcManager := rpc.NewManager( + cfg, + domain, + netAdapter, + protocolManager, + connectionManager, + addressManager, + utxoIndex, + consensusEventsChan, + shutDownChan, + ) + protocolManager.SetOnNewBlockTemplateHandler(rpcManager.NotifyNewBlockTemplate) + protocolManager.SetOnPruningPointUTXOSetOverrideHandler(rpcManager.NotifyPruningPointUTXOSetOverride) + + return rpcManager +} + +// P2PNodeID returns the network ID associated with this ComponentManager +func (a *ComponentManager) P2PNodeID() *id.ID { + return a.netAdapter.ID() +} + +// AddressManager returns the AddressManager associated with this ComponentManager +func (a *ComponentManager) AddressManager() *addressmanager.AddressManager { + return a.addressManager +} diff --git a/app/db_version.go b/app/db_version.go new file mode 100644 index 0000000..a7bd715 --- /dev/null +++ b/app/db_version.go @@ -0,0 +1,57 @@ +package app + +import ( + "os" + "path" + "strconv" + + "github.com/pkg/errors" +) + +const currentDatabaseVersion = 1 + +func checkDatabaseVersion(dbPath string) (err error) { + versionFileName := versionFilePath(dbPath) + + versionBytes, err := os.ReadFile(versionFileName) + if err != nil { + if os.IsNotExist(err) { // If version file doesn't exist, we assume that the database is new + return createDatabaseVersionFile(dbPath, versionFileName) + } + return err + } + + databaseVersion, err := 
strconv.Atoi(string(versionBytes)) + if err != nil { + return err + } + + if databaseVersion != currentDatabaseVersion { + // TODO: Once there's more than one database version, it might make sense to add upgrade logic at this point + return errors.Errorf("Invalid database version %d. Expected version: %d", databaseVersion, currentDatabaseVersion) + } + + return nil +} + +func createDatabaseVersionFile(dbPath string, versionFileName string) error { + err := os.MkdirAll(dbPath, 0700) + if err != nil { + return err + } + + versionFile, err := os.Create(versionFileName) + if err != nil { + return err + } + defer versionFile.Close() + + versionString := strconv.Itoa(currentDatabaseVersion) + _, err = versionFile.Write([]byte(versionString)) + return err +} + +func versionFilePath(dbPath string) string { + dbVersionFileName := path.Join(dbPath, "version") + return dbVersionFileName +} diff --git a/app/log.go b/app/log.go new file mode 100644 index 0000000..a2850b4 --- /dev/null +++ b/app/log.go @@ -0,0 +1,12 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Copyright (c) 2017 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package app + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("SPRD") diff --git a/app/protocol/common/common.go b/app/protocol/common/common.go new file mode 100644 index 0000000..43c3fd8 --- /dev/null +++ b/app/protocol/common/common.go @@ -0,0 +1,28 @@ +package common + +import ( + "time" + + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + + "github.com/pkg/errors" +) + +// DefaultTimeout is the default duration to wait for enqueuing/dequeuing +// to/from routes. +const DefaultTimeout = 120 * time.Second + +// ErrPeerWithSameIDExists signifies that a peer with the same ID already exists. 
+var ErrPeerWithSameIDExists = errors.New("ready peer with the same ID already exists") + +type flowExecuteFunc func(peer *peerpkg.Peer) + +// Flow is a data structure that is used in order to associate a p2p flow to some route in a router. +type Flow struct { + Name string + ExecuteFunc flowExecuteFunc +} + +// FlowInitializeFunc is a function that is used in order to initialize a flow +type FlowInitializeFunc func(route *routerpkg.Route, peer *peerpkg.Peer) error diff --git a/app/protocol/flowcontext/addresses.go b/app/protocol/flowcontext/addresses.go new file mode 100644 index 0000000..f027b04 --- /dev/null +++ b/app/protocol/flowcontext/addresses.go @@ -0,0 +1,10 @@ +package flowcontext + +import ( + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" +) + +// AddressManager returns the address manager associated to the flow context. +func (f *FlowContext) AddressManager() *addressmanager.AddressManager { + return f.addressManager +} diff --git a/app/protocol/flowcontext/blocks.go b/app/protocol/flowcontext/blocks.go new file mode 100644 index 0000000..27e4b6a --- /dev/null +++ b/app/protocol/flowcontext/blocks.go @@ -0,0 +1,170 @@ +package flowcontext + +import ( + "time" + + "github.com/pkg/errors" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/app/appmessage" +) + +// OnNewBlock updates the mempool after a new block arrival, and +// relays newly unorphaned transactions and possibly rebroadcasts +// manually added transactions when not in IBD. 
+func (f *FlowContext) OnNewBlock(block *externalapi.DomainBlock) error { + + hash := consensushashing.BlockHash(block) + log.Tracef("OnNewBlock start for block %s", hash) + defer log.Tracef("OnNewBlock end for block %s", hash) + + unorphanedBlocks, err := f.UnorphanBlocks(block) + if err != nil { + return err + } + + log.Debugf("OnNewBlock: block %s unorphaned %d blocks", hash, len(unorphanedBlocks)) + + newBlocks := []*externalapi.DomainBlock{block} + newBlocks = append(newBlocks, unorphanedBlocks...) + + allAcceptedTransactions := make([]*externalapi.DomainTransaction, 0) + for _, newBlock := range newBlocks { + log.Debugf("OnNewBlock: passing block %s transactions to mining manager", hash) + acceptedTransactions, err := f.Domain().MiningManager().HandleNewBlockTransactions(newBlock.Transactions) + if err != nil { + return err + } + allAcceptedTransactions = append(allAcceptedTransactions, acceptedTransactions...) + } + + return f.broadcastTransactionsAfterBlockAdded(newBlocks, allAcceptedTransactions) +} + +// OnNewBlockTemplate calls the handler function whenever a new block template is available for miners. +func (f *FlowContext) OnNewBlockTemplate() error { + // Clear current template cache. Note we call this even if the handler is nil, in order to keep the + // state consistent without dependency on external event registration + f.Domain().MiningManager().ClearBlockTemplate() + if f.onNewBlockTemplateHandler != nil { + return f.onNewBlockTemplateHandler() + } + + return nil +} + +// OnPruningPointUTXOSetOverride calls the handler function whenever the UTXO set +// resets due to pruning point change via IBD. 
+func (f *FlowContext) OnPruningPointUTXOSetOverride() error { + if f.onPruningPointUTXOSetOverrideHandler != nil { + return f.onPruningPointUTXOSetOverrideHandler() + } + return nil +} + +func (f *FlowContext) broadcastTransactionsAfterBlockAdded( + addedBlocks []*externalapi.DomainBlock, transactionsAcceptedToMempool []*externalapi.DomainTransaction) error { + + // Don't relay transactions when in IBD. + if f.IsIBDRunning() { + return nil + } + + var txIDsToRebroadcast []*externalapi.DomainTransactionID + if f.shouldRebroadcastTransactions() { + txsToRebroadcast, err := f.Domain().MiningManager().RevalidateHighPriorityTransactions() + if err != nil { + return err + } + txIDsToRebroadcast = consensushashing.TransactionIDs(txsToRebroadcast) + f.lastRebroadcastTime = time.Now() + } + + txIDsToBroadcast := make([]*externalapi.DomainTransactionID, len(transactionsAcceptedToMempool)+len(txIDsToRebroadcast)) + for i, tx := range transactionsAcceptedToMempool { + txIDsToBroadcast[i] = consensushashing.TransactionID(tx) + } + offset := len(transactionsAcceptedToMempool) + for i, txID := range txIDsToRebroadcast { + txIDsToBroadcast[offset+i] = txID + } + return f.EnqueueTransactionIDsForPropagation(txIDsToBroadcast) +} + +// SharedRequestedBlocks returns a *blockrelay.SharedRequestedBlocks for sharing +// data about requested blocks between different peers. +func (f *FlowContext) SharedRequestedBlocks() *SharedRequestedBlocks { + return f.sharedRequestedBlocks +} + +// AddBlock adds the given block to the DAG and propagates it. 
+func (f *FlowContext) AddBlock(block *externalapi.DomainBlock) error { + if len(block.Transactions) == 0 { + return protocolerrors.Errorf(false, "cannot add header only block") + } + + err := f.Domain().Consensus().ValidateAndInsertBlock(block, true) + if err != nil { + if errors.As(err, &ruleerrors.RuleError{}) { + log.Warnf("Validation failed for block %s: %s", consensushashing.BlockHash(block), err) + } + return err + } + err = f.OnNewBlockTemplate() + if err != nil { + return err + } + err = f.OnNewBlock(block) + if err != nil { + return err + } + return f.Broadcast(appmessage.NewMsgInvBlock(consensushashing.BlockHash(block))) +} + +// IsIBDRunning returns true if IBD is currently marked as running +func (f *FlowContext) IsIBDRunning() bool { + f.ibdPeerMutex.RLock() + defer f.ibdPeerMutex.RUnlock() + + return f.ibdPeer != nil +} + +// TrySetIBDRunning attempts to set `isInIBD`. Returns false +// if it is already set +func (f *FlowContext) TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool { + f.ibdPeerMutex.Lock() + defer f.ibdPeerMutex.Unlock() + + if f.ibdPeer != nil { + return false + } + f.ibdPeer = ibdPeer + log.Infof("IBD started with peer %s", ibdPeer) + + return true +} + +// UnsetIBDRunning unsets isInIBD +func (f *FlowContext) UnsetIBDRunning() { + f.ibdPeerMutex.Lock() + defer f.ibdPeerMutex.Unlock() + + if f.ibdPeer == nil { + panic("attempted to unset isInIBD when it was not set to begin with") + } + + f.ibdPeer = nil +} + +// IBDPeer returns the current IBD peer or null if the node is not +// in IBD +func (f *FlowContext) IBDPeer() *peerpkg.Peer { + f.ibdPeerMutex.RLock() + defer f.ibdPeerMutex.RUnlock() + + return f.ibdPeer +} diff --git a/app/protocol/flowcontext/config.go b/app/protocol/flowcontext/config.go new file mode 100644 index 0000000..a122210 --- /dev/null +++ b/app/protocol/flowcontext/config.go @@ -0,0 +1,8 @@ +package flowcontext + +import "github.com/spectre-project/spectred/infrastructure/config" + +// Config returns an instance of 
*config.Config associated to the flow context. +func (f *FlowContext) Config() *config.Config { + return f.cfg +} diff --git a/app/protocol/flowcontext/domain.go b/app/protocol/flowcontext/domain.go new file mode 100644 index 0000000..72c0586 --- /dev/null +++ b/app/protocol/flowcontext/domain.go @@ -0,0 +1,10 @@ +package flowcontext + +import ( + "github.com/spectre-project/spectred/domain" +) + +// Domain returns the Domain object associated to the flow context. +func (f *FlowContext) Domain() domain.Domain { + return f.domain +} diff --git a/app/protocol/flowcontext/errors.go b/app/protocol/flowcontext/errors.go new file mode 100644 index 0000000..583c4b2 --- /dev/null +++ b/app/protocol/flowcontext/errors.go @@ -0,0 +1,49 @@ +package flowcontext + +import ( + "errors" + "strings" + "sync/atomic" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + + "github.com/spectre-project/spectred/app/protocol/protocolerrors" +) + +var ( + // ErrPingTimeout signifies that a ping operation timed out. 
+ ErrPingTimeout = protocolerrors.New(false, "timeout expired on ping") +) + +// HandleError handles an error from a flow, +// It sends the error to errChan if isStopping == 0 and increments isStopping +// +// If this is ErrRouteClosed - forward it to errChan +// If this is ProtocolError - logs the error, and forward it to errChan +// Otherwise - panics +func (*FlowContext) HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error) { + isErrRouteClosed := errors.Is(err, router.ErrRouteClosed) + if !isErrRouteClosed { + if protocolErr := (protocolerrors.ProtocolError{}); !errors.As(err, &protocolErr) { + panic(err) + } + if errors.Is(err, ErrPingTimeout) { + // Avoid printing the call stack on ping timeouts, since users get panicked and this case is not interesting + log.Errorf("error from %s: %s", flowName, err) + } else { + // Explain to the user that this is not a panic, but only a protocol error with a specific peer + logFrame := strings.Repeat("=", 52) + log.Errorf("Non-critical peer protocol error from %s, printing the full stack for debug purposes: \n%s\n%+v \n%s", + flowName, logFrame, err, logFrame) + } + } + + if atomic.AddUint32(isStopping, 1) == 1 { + errChan <- err + } +} + +// IsRecoverableError returns whether the error is recoverable +func (*FlowContext) IsRecoverableError(err error) bool { + return err == nil || errors.Is(err, router.ErrRouteClosed) || errors.As(err, &protocolerrors.ProtocolError{}) +} diff --git a/app/protocol/flowcontext/flow_context.go b/app/protocol/flowcontext/flow_context.go new file mode 100644 index 0000000..69eec66 --- /dev/null +++ b/app/protocol/flowcontext/flow_context.go @@ -0,0 +1,118 @@ +package flowcontext + +import ( + "sync" + "time" + + "github.com/spectre-project/spectred/util/mstime" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain" + + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + 
"github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" +) + +// OnNewBlockTemplateHandler is a handler function that's triggered when a new block template is available +type OnNewBlockTemplateHandler func() error + +// OnPruningPointUTXOSetOverrideHandler is a handler function that's triggered whenever the UTXO set +// resets due to pruning point change via IBD. +type OnPruningPointUTXOSetOverrideHandler func() error + +// OnTransactionAddedToMempoolHandler is a handler function that's triggered +// when a transaction is added to the mempool +type OnTransactionAddedToMempoolHandler func() + +// FlowContext holds state that is relevant to more than one flow or one peer, and allows communication between +// different flows that can be associated to different peers. 
+type FlowContext struct { + cfg *config.Config + netAdapter *netadapter.NetAdapter + domain domain.Domain + addressManager *addressmanager.AddressManager + connectionManager *connmanager.ConnectionManager + + timeStarted int64 + + onNewBlockTemplateHandler OnNewBlockTemplateHandler + onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler + onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler + + lastRebroadcastTime time.Time + sharedRequestedTransactions *SharedRequestedTransactions + + sharedRequestedBlocks *SharedRequestedBlocks + + ibdPeer *peerpkg.Peer + ibdPeerMutex sync.RWMutex + + peers map[id.ID]*peerpkg.Peer + peersMutex sync.RWMutex + + orphans map[externalapi.DomainHash]*externalapi.DomainBlock + orphansMutex sync.RWMutex + + transactionIDsToPropagate []*externalapi.DomainTransactionID + lastTransactionIDPropagationTime time.Time + transactionIDPropagationLock sync.Mutex + + shutdownChan chan struct{} +} + +// New returns a new instance of FlowContext. +func New(cfg *config.Config, domain domain.Domain, addressManager *addressmanager.AddressManager, + netAdapter *netadapter.NetAdapter, connectionManager *connmanager.ConnectionManager) *FlowContext { + + return &FlowContext{ + cfg: cfg, + netAdapter: netAdapter, + domain: domain, + addressManager: addressManager, + connectionManager: connectionManager, + sharedRequestedTransactions: NewSharedRequestedTransactions(), + sharedRequestedBlocks: NewSharedRequestedBlocks(), + peers: make(map[id.ID]*peerpkg.Peer), + orphans: make(map[externalapi.DomainHash]*externalapi.DomainBlock), + timeStarted: mstime.Now().UnixMilliseconds(), + transactionIDsToPropagate: []*externalapi.DomainTransactionID{}, + lastTransactionIDPropagationTime: time.Now(), + shutdownChan: make(chan struct{}), + } +} + +// Close signals to all flows that the protocol manager is closed. 
+func (f *FlowContext) Close() { + close(f.shutdownChan) +} + +// ShutdownChan is a chan where flows can subscribe to shutdown +// event. +func (f *FlowContext) ShutdownChan() <-chan struct{} { + return f.shutdownChan +} + +// IsNearlySynced returns whether current consensus is considered synced or close to being synced. +func (f *FlowContext) IsNearlySynced() (bool, error) { + return f.Domain().Consensus().IsNearlySynced() +} + +// SetOnNewBlockTemplateHandler sets the onNewBlockTemplateHandler handler +func (f *FlowContext) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler OnNewBlockTemplateHandler) { + f.onNewBlockTemplateHandler = onNewBlockTemplateHandler +} + +// SetOnPruningPointUTXOSetOverrideHandler sets the onPruningPointUTXOSetOverrideHandler handler +func (f *FlowContext) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler OnPruningPointUTXOSetOverrideHandler) { + f.onPruningPointUTXOSetOverrideHandler = onPruningPointUTXOSetOverrideHandler +} + +// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler +func (f *FlowContext) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler OnTransactionAddedToMempoolHandler) { + f.onTransactionAddedToMempoolHandler = onTransactionAddedToMempoolHandler +} diff --git a/app/protocol/flowcontext/log.go b/app/protocol/flowcontext/log.go new file mode 100644 index 0000000..1f5a0aa --- /dev/null +++ b/app/protocol/flowcontext/log.go @@ -0,0 +1,7 @@ +package flowcontext + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("PROT") diff --git a/app/protocol/flowcontext/network.go b/app/protocol/flowcontext/network.go new file mode 100644 index 0000000..72c3234 --- /dev/null +++ b/app/protocol/flowcontext/network.go @@ -0,0 +1,81 @@ +package flowcontext + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + 
"github.com/spectre-project/spectred/app/protocol/common" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" +) + +// NetAdapter returns the net adapter that is associated to the flow context. +func (f *FlowContext) NetAdapter() *netadapter.NetAdapter { + return f.netAdapter +} + +// ConnectionManager returns the connection manager that is associated to the flow context. +func (f *FlowContext) ConnectionManager() *connmanager.ConnectionManager { + return f.connectionManager +} + +// AddToPeers marks this peer as ready and adds it to the ready peers list. +func (f *FlowContext) AddToPeers(peer *peerpkg.Peer) error { + f.peersMutex.Lock() + defer f.peersMutex.Unlock() + + if _, ok := f.peers[*peer.ID()]; ok { + return errors.Wrapf(common.ErrPeerWithSameIDExists, "peer with ID %s already exists", peer.ID()) + } + + f.peers[*peer.ID()] = peer + + return nil +} + +// RemoveFromPeers remove this peer from the peers list. +func (f *FlowContext) RemoveFromPeers(peer *peerpkg.Peer) { + f.peersMutex.Lock() + defer f.peersMutex.Unlock() + + delete(f.peers, *peer.ID()) +} + +// readyPeerConnections returns the NetConnections of all the ready peers. +func (f *FlowContext) readyPeerConnections() []*netadapter.NetConnection { + f.peersMutex.RLock() + defer f.peersMutex.RUnlock() + peerConnections := make([]*netadapter.NetConnection, len(f.peers)) + i := 0 + for _, peer := range f.peers { + peerConnections[i] = peer.Connection() + i++ + } + return peerConnections +} + +// Broadcast broadcast the given message to all the ready peers. 
+func (f *FlowContext) Broadcast(message appmessage.Message) error { + return f.netAdapter.P2PBroadcast(f.readyPeerConnections(), message) +} + +// Peers returns the currently active peers +func (f *FlowContext) Peers() []*peerpkg.Peer { + f.peersMutex.RLock() + defer f.peersMutex.RUnlock() + + peers := make([]*peerpkg.Peer, len(f.peers)) + i := 0 + for _, peer := range f.peers { + peers[i] = peer + i++ + } + return peers +} + +// HasPeers returns whether there are currently active peers +func (f *FlowContext) HasPeers() bool { + f.peersMutex.RLock() + defer f.peersMutex.RUnlock() + return len(f.peers) > 0 +} diff --git a/app/protocol/flowcontext/orphans.go b/app/protocol/flowcontext/orphans.go new file mode 100644 index 0000000..319b729 --- /dev/null +++ b/app/protocol/flowcontext/orphans.go @@ -0,0 +1,204 @@ +package flowcontext + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// maxOrphans is the maximum amount of orphans allowed in the +// orphans collection. This number is an approximation of how +// many orphans there can possibly be on average. It is based +// on: 2^orphanResolutionRange * PHANTOM K. +const maxOrphans = 600 + +// AddOrphan adds the block to the orphan set +func (f *FlowContext) AddOrphan(orphanBlock *externalapi.DomainBlock) { + f.orphansMutex.Lock() + defer f.orphansMutex.Unlock() + + orphanHash := consensushashing.BlockHash(orphanBlock) + f.orphans[*orphanHash] = orphanBlock + + if len(f.orphans) > maxOrphans { + log.Debugf("Orphan collection size exceeded. 
Evicting a random orphan") + f.evictRandomOrphan() + } + + log.Infof("Received a block with missing parents, adding to orphan pool: %s", orphanHash) +} + +func (f *FlowContext) evictRandomOrphan() { + var toEvict externalapi.DomainHash + for hash := range f.orphans { + toEvict = hash + break + } + delete(f.orphans, toEvict) + log.Debugf("Evicted %s from the orphan collection", toEvict) +} + +// IsOrphan returns whether the given blockHash belongs to an orphan block +func (f *FlowContext) IsOrphan(blockHash *externalapi.DomainHash) bool { + f.orphansMutex.RLock() + defer f.orphansMutex.RUnlock() + + _, ok := f.orphans[*blockHash] + return ok +} + +// UnorphanBlocks removes the block from the orphan set, and remove all of the blocks that are not orphans anymore. +func (f *FlowContext) UnorphanBlocks(rootBlock *externalapi.DomainBlock) ([]*externalapi.DomainBlock, error) { + f.orphansMutex.Lock() + defer f.orphansMutex.Unlock() + + // Find all the children of rootBlock among the orphans + // and add them to the process queue + rootBlockHash := consensushashing.BlockHash(rootBlock) + processQueue := f.addChildOrphansToProcessQueue(rootBlockHash, []externalapi.DomainHash{}) + + var unorphanedBlocks []*externalapi.DomainBlock + for len(processQueue) > 0 { + var orphanHash externalapi.DomainHash + orphanHash, processQueue = processQueue[0], processQueue[1:] + orphanBlock := f.orphans[orphanHash] + + log.Debugf("Considering to unorphan block %s with parents %s", + orphanHash, orphanBlock.Header.DirectParents()) + + canBeUnorphaned := true + for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() { + orphanBlockParentInfo, err := f.domain.Consensus().GetBlockInfo(orphanBlockParentHash) + if err != nil { + return nil, err + } + if !orphanBlockParentInfo.Exists || orphanBlockParentInfo.BlockStatus == externalapi.StatusHeaderOnly { + log.Debugf("Cannot unorphan block %s. 
It's missing at "+ + "least the following parent: %s", orphanHash, orphanBlockParentHash) + + canBeUnorphaned = false + break + } + } + if canBeUnorphaned { + unorphaningSucceeded, err := f.unorphanBlock(orphanHash) + if err != nil { + return nil, err + } + if unorphaningSucceeded { + unorphanedBlocks = append(unorphanedBlocks, orphanBlock) + processQueue = f.addChildOrphansToProcessQueue(&orphanHash, processQueue) + } + } + } + + return unorphanedBlocks, nil +} + +// addChildOrphansToProcessQueue finds all child orphans of `blockHash` +// and adds them to the given `processQueue` if they don't already exist +// inside of it +// Note that this method does not modify the given `processQueue` +func (f *FlowContext) addChildOrphansToProcessQueue(blockHash *externalapi.DomainHash, + processQueue []externalapi.DomainHash) []externalapi.DomainHash { + + blockChildren := f.findChildOrphansOfBlock(blockHash) + for _, blockChild := range blockChildren { + exists := false + for _, queueOrphan := range processQueue { + if queueOrphan == blockChild { + exists = true + break + } + } + if !exists { + processQueue = append(processQueue, blockChild) + } + } + return processQueue +} + +func (f *FlowContext) findChildOrphansOfBlock(blockHash *externalapi.DomainHash) []externalapi.DomainHash { + var childOrphans []externalapi.DomainHash + for orphanHash, orphanBlock := range f.orphans { + for _, orphanBlockParentHash := range orphanBlock.Header.DirectParents() { + if orphanBlockParentHash.Equal(blockHash) { + childOrphans = append(childOrphans, orphanHash) + break + } + } + } + return childOrphans +} + +func (f *FlowContext) unorphanBlock(orphanHash externalapi.DomainHash) (bool, error) { + orphanBlock, ok := f.orphans[orphanHash] + if !ok { + return false, errors.Errorf("attempted to unorphan a non-orphan block %s", orphanHash) + } + delete(f.orphans, orphanHash) + + err := f.domain.Consensus().ValidateAndInsertBlock(orphanBlock, true) + if err != nil { + if errors.As(err, 
&ruleerrors.RuleError{}) { + log.Warnf("Validation failed for orphan block %s: %s", orphanHash, err) + return false, nil + } + return false, err + } + + log.Infof("Unorphaned block %s", orphanHash) + return true, nil +} + +// GetOrphanRoots returns the roots of the missing ancestors DAG of the given orphan +func (f *FlowContext) GetOrphanRoots(orphan *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "GetOrphanRoots") + defer onEnd() + + f.orphansMutex.RLock() + defer f.orphansMutex.RUnlock() + + _, ok := f.orphans[*orphan] + if !ok { + return nil, false, nil + } + + queue := []*externalapi.DomainHash{orphan} + addedToQueueSet := hashset.New() + addedToQueueSet.Add(orphan) + + roots := []*externalapi.DomainHash{} + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + + block, ok := f.orphans[*current] + if !ok { + blockInfo, err := f.domain.Consensus().GetBlockInfo(current) + if err != nil { + return nil, false, err + } + + if !blockInfo.Exists || blockInfo.BlockStatus == externalapi.StatusHeaderOnly { + roots = append(roots, current) + } else { + log.Debugf("Block %s was skipped when checking for orphan roots: "+ + "exists: %t, status: %s", current, blockInfo.Exists, blockInfo.BlockStatus) + } + continue + } + + for _, parent := range block.Header.DirectParents() { + if !addedToQueueSet.Contains(parent) { + queue = append(queue, parent) + addedToQueueSet.Add(parent) + } + } + } + + return roots, true, nil +} diff --git a/app/protocol/flowcontext/shared_requested_blocks.go b/app/protocol/flowcontext/shared_requested_blocks.go new file mode 100644 index 0000000..8f56f4b --- /dev/null +++ b/app/protocol/flowcontext/shared_requested_blocks.go @@ -0,0 +1,49 @@ +package flowcontext + +import ( + "sync" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// SharedRequestedBlocks is a data structure that is shared between peers 
that +// holds the hashes of all the requested blocks to prevent redundant requests. +type SharedRequestedBlocks struct { + blocks map[externalapi.DomainHash]struct{} + sync.Mutex +} + +// Remove removes a block from the set. +func (s *SharedRequestedBlocks) Remove(hash *externalapi.DomainHash) { + s.Lock() + defer s.Unlock() + delete(s.blocks, *hash) +} + +// RemoveSet removes a set of blocks from the set. +func (s *SharedRequestedBlocks) RemoveSet(blockHashes map[externalapi.DomainHash]struct{}) { + s.Lock() + defer s.Unlock() + for hash := range blockHashes { + delete(s.blocks, hash) + } +} + +// AddIfNotExists adds a block to the set if it doesn't exist yet. +func (s *SharedRequestedBlocks) AddIfNotExists(hash *externalapi.DomainHash) (exists bool) { + s.Lock() + defer s.Unlock() + _, ok := s.blocks[*hash] + if ok { + return true + } + s.blocks[*hash] = struct{}{} + return false +} + +// NewSharedRequestedBlocks returns a new instance of SharedRequestedBlocks. +func NewSharedRequestedBlocks() *SharedRequestedBlocks { + return &SharedRequestedBlocks{ + blocks: make(map[externalapi.DomainHash]struct{}), + } +} diff --git a/app/protocol/flowcontext/shared_requested_transactions.go b/app/protocol/flowcontext/shared_requested_transactions.go new file mode 100644 index 0000000..e9a212d --- /dev/null +++ b/app/protocol/flowcontext/shared_requested_transactions.go @@ -0,0 +1,49 @@ +package flowcontext + +import ( + "sync" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// SharedRequestedTransactions is a data structure that is shared between peers that +// holds the IDs of all the requested transactions to prevent redundant requests. +type SharedRequestedTransactions struct { + transactions map[externalapi.DomainTransactionID]struct{} + sync.Mutex +} + +// Remove removes a transaction from the set. 
+func (s *SharedRequestedTransactions) Remove(txID *externalapi.DomainTransactionID) { + s.Lock() + defer s.Unlock() + delete(s.transactions, *txID) +} + +// RemoveMany removes a set of transactions from the set. +func (s *SharedRequestedTransactions) RemoveMany(txIDs []*externalapi.DomainTransactionID) { + s.Lock() + defer s.Unlock() + for _, txID := range txIDs { + delete(s.transactions, *txID) + } +} + +// AddIfNotExists adds a transaction to the set if it doesn't exist yet. +func (s *SharedRequestedTransactions) AddIfNotExists(txID *externalapi.DomainTransactionID) (exists bool) { + s.Lock() + defer s.Unlock() + _, ok := s.transactions[*txID] + if ok { + return true + } + s.transactions[*txID] = struct{}{} + return false +} + +// NewSharedRequestedTransactions returns a new instance of SharedRequestedTransactions. +func NewSharedRequestedTransactions() *SharedRequestedTransactions { + return &SharedRequestedTransactions{ + transactions: make(map[externalapi.DomainTransactionID]struct{}), + } +} diff --git a/app/protocol/flowcontext/transactions.go b/app/protocol/flowcontext/transactions.go new file mode 100644 index 0000000..861c955 --- /dev/null +++ b/app/protocol/flowcontext/transactions.go @@ -0,0 +1,81 @@ +package flowcontext + +import ( + "time" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// TransactionIDPropagationInterval is the interval between transaction IDs propagations +const TransactionIDPropagationInterval = 500 * time.Millisecond + +// AddTransaction adds transaction to the mempool and propagates it. 
+func (f *FlowContext) AddTransaction(tx *externalapi.DomainTransaction, allowOrphan bool) error { + acceptedTransactions, err := f.Domain().MiningManager().ValidateAndInsertTransaction(tx, true, allowOrphan) + if err != nil { + return err + } + + acceptedTransactionIDs := consensushashing.TransactionIDs(acceptedTransactions) + return f.EnqueueTransactionIDsForPropagation(acceptedTransactionIDs) +} + +func (f *FlowContext) shouldRebroadcastTransactions() bool { + const rebroadcastInterval = 30 * time.Second + return time.Since(f.lastRebroadcastTime) > rebroadcastInterval +} + +// SharedRequestedTransactions returns a *transactionrelay.SharedRequestedTransactions for sharing +// data about requested transactions between different peers. +func (f *FlowContext) SharedRequestedTransactions() *SharedRequestedTransactions { + return f.sharedRequestedTransactions +} + +// OnTransactionAddedToMempool notifies the handler function that a transaction +// has been added to the mempool +func (f *FlowContext) OnTransactionAddedToMempool() { + if f.onTransactionAddedToMempoolHandler != nil { + f.onTransactionAddedToMempoolHandler() + } +} + +// EnqueueTransactionIDsForPropagation add the given transactions IDs to a set of IDs to +// propagate. The IDs will be broadcast to all peers within a single transaction Inv message. +// The broadcast itself may happen only during a subsequent call to this method +func (f *FlowContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error { + f.transactionIDPropagationLock.Lock() + defer f.transactionIDPropagationLock.Unlock() + + f.transactionIDsToPropagate = append(f.transactionIDsToPropagate, transactionIDs...) 
+
+	return f.maybePropagateTransactions()
+}
+
+func (f *FlowContext) maybePropagateTransactions() error {
+	if time.Since(f.lastTransactionIDPropagationTime) < TransactionIDPropagationInterval &&
+		len(f.transactionIDsToPropagate) < appmessage.MaxInvPerTxInvMsg {
+		return nil
+	}
+
+	for len(f.transactionIDsToPropagate) > 0 {
+		transactionIDsToBroadcast := f.transactionIDsToPropagate
+		if len(transactionIDsToBroadcast) > appmessage.MaxInvPerTxInvMsg {
+			transactionIDsToBroadcast = f.transactionIDsToPropagate[:appmessage.MaxInvPerTxInvMsg]
+		}
+		log.Debugf("Transaction propagation: broadcasting %d transactions", len(transactionIDsToBroadcast))
+
+		inv := appmessage.NewMsgInvTransaction(transactionIDsToBroadcast)
+		err := f.Broadcast(inv)
+		if err != nil {
+			return err
+		}
+
+		f.transactionIDsToPropagate = f.transactionIDsToPropagate[len(transactionIDsToBroadcast):]
+	}
+
+	f.lastTransactionIDPropagationTime = time.Now()
+
+	return nil
+}
diff --git a/app/protocol/flows/handshake/handshake.go b/app/protocol/flows/handshake/handshake.go
new file mode 100644
index 0000000..8dc27de
--- /dev/null
+++ b/app/protocol/flows/handshake/handshake.go
@@ -0,0 +1,118 @@
+package handshake
+
+import (
+	"sync/atomic"
+
+	"github.com/spectre-project/spectred/domain"
+
+	"github.com/spectre-project/spectred/app/protocol/common"
+	"github.com/spectre-project/spectred/app/protocol/protocolerrors"
+	"github.com/spectre-project/spectred/infrastructure/network/addressmanager"
+
+	"github.com/spectre-project/spectred/infrastructure/config"
+	"github.com/spectre-project/spectred/infrastructure/network/netadapter"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+	peerpkg "github.com/spectre-project/spectred/app/protocol/peer"
+	routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router"
+)
+
+// HandleHandshakeContext is the interface for the context needed for the HandleHandshake flow.
+type HandleHandshakeContext interface {
+	Config() *config.Config
+	NetAdapter() *netadapter.NetAdapter
+	Domain() domain.Domain
+	AddressManager() *addressmanager.AddressManager
+	AddToPeers(peer *peerpkg.Peer) error
+	HandleError(err error, flowName string, isStopping *uint32, errChan chan<- error)
+}
+
+// HandleHandshake sets up the handshake protocol - it sends a version message and waits for an incoming
+// version message, as well as a verack for the sent version
+func HandleHandshake(context HandleHandshakeContext, netConnection *netadapter.NetConnection,
+	receiveVersionRoute *routerpkg.Route, sendVersionRoute *routerpkg.Route, outgoingRoute *routerpkg.Route,
+) (*peerpkg.Peer, error) {
+
+	// For HandleHandshake to finish, we need to get from the other node
+	// a version and verack messages, so we set doneCount to 2, decrease it
+	// when sending and receiving the version, and close the doneChan when
+	// it's 0. Then we wait in a select for a signal from doneChan or from
+	// errChan.
+ doneCount := int32(2) + doneChan := make(chan struct{}) + + isStopping := uint32(0) + errChan := make(chan error) + + peer := peerpkg.New(netConnection) + + var peerAddress *appmessage.NetAddress + spawn("HandleHandshake-ReceiveVersion", func() { + address, err := ReceiveVersion(context, receiveVersionRoute, outgoingRoute, peer) + if err != nil { + handleError(err, "ReceiveVersion", &isStopping, errChan) + return + } + peerAddress = address + if atomic.AddInt32(&doneCount, -1) == 0 { + close(doneChan) + } + }) + + spawn("HandleHandshake-SendVersion", func() { + err := SendVersion(context, sendVersionRoute, outgoingRoute, peer) + if err != nil { + handleError(err, "SendVersion", &isStopping, errChan) + return + } + if atomic.AddInt32(&doneCount, -1) == 0 { + close(doneChan) + } + }) + + select { + case err := <-errChan: + if err != nil { + return nil, err + } + return nil, nil + case <-doneChan: + } + + err := context.AddToPeers(peer) + if err != nil { + if errors.Is(err, common.ErrPeerWithSameIDExists) { + return nil, protocolerrors.Wrap(false, err, "peer already exists") + } + return nil, err + } + + if peerAddress != nil { + err := context.AddressManager().AddAddresses(peerAddress) + if err != nil { + return nil, err + } + } + return peer, nil +} + +// Handshake is different from other flows, since in it should forward router.ErrRouteClosed to errChan +// Therefore we implement a separate handleError for new_handshake +func handleError(err error, flowName string, isStopping *uint32, errChan chan error) { + if errors.Is(err, routerpkg.ErrRouteClosed) { + if atomic.AddUint32(isStopping, 1) == 1 { + errChan <- err + } + return + } + + if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) { + log.Errorf("Handshake protocol error from %s: %s", flowName, err) + if atomic.AddUint32(isStopping, 1) == 1 { + errChan <- err + } + return + } + panic(err) +} diff --git a/app/protocol/flows/handshake/log.go b/app/protocol/flows/handshake/log.go new 
file mode 100644 index 0000000..ac9cec5 --- /dev/null +++ b/app/protocol/flows/handshake/log.go @@ -0,0 +1,9 @@ +package handshake + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("PROT") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/app/protocol/flows/handshake/receiveversion.go b/app/protocol/flows/handshake/receiveversion.go new file mode 100644 index 0000000..05c41b5 --- /dev/null +++ b/app/protocol/flows/handshake/receiveversion.go @@ -0,0 +1,117 @@ +package handshake + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +var ( + // allowSelfConnections is only used to allow the tests to bypass the self + // connection detecting and disconnect logic since they intentionally + // do so for testing purposes. + allowSelfConnections bool + + // minAcceptableProtocolVersion is the lowest protocol version that a + // connected peer may support. + minAcceptableProtocolVersion = uint32(5) + + maxAcceptableProtocolVersion = uint32(5) +) + +type receiveVersionFlow struct { + HandleHandshakeContext + incomingRoute, outgoingRoute *router.Route + peer *peerpkg.Peer +} + +// ReceiveVersion waits for the peer to send a version message, sends a +// verack in response, and updates its info accordingly. 
+func ReceiveVersion(context HandleHandshakeContext, incomingRoute *router.Route, outgoingRoute *router.Route, + peer *peerpkg.Peer) (*appmessage.NetAddress, error) { + + flow := &receiveVersionFlow{ + HandleHandshakeContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + } + + return flow.start() +} + +func (flow *receiveVersionFlow) start() (*appmessage.NetAddress, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "receiveVersionFlow.start") + defer onEnd() + + log.Debugf("Starting receiveVersionFlow with %s", flow.peer.Address()) + + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, err + } + + log.Debugf("Got version message") + + msgVersion, ok := message.(*appmessage.MsgVersion) + if !ok { + return nil, protocolerrors.New(true, "a version message must precede all others") + } + + if !allowSelfConnections && flow.NetAdapter().ID().IsEqual(msgVersion.ID) { + return nil, protocolerrors.New(false, "connected to self") + } + + // Disconnect and ban peers from a different network + if msgVersion.Network != flow.Config().ActiveNetParams.Name { + return nil, protocolerrors.Errorf(true, "wrong network") + } + + // Notify and disconnect clients that have a protocol version that is + // too old. + // + // NOTE: If minAcceptableProtocolVersion is raised to be higher than + // appmessage.RejectVersion, this should send a reject packet before + // disconnecting. 
+ if msgVersion.ProtocolVersion < minAcceptableProtocolVersion { + return nil, protocolerrors.Errorf(false, "protocol version must be %d or greater", + minAcceptableProtocolVersion) + } + + // Disconnect from partial nodes in networks that don't allow them + if !flow.Config().ActiveNetParams.EnableNonNativeSubnetworks && msgVersion.SubnetworkID != nil { + return nil, protocolerrors.New(true, "partial nodes are not allowed") + } + + // Disconnect if: + // - we are a full node and the outbound connection we've initiated is a partial node + // - the remote node is partial and our subnetwork doesn't match their subnetwork + localSubnetworkID := flow.Config().SubnetworkID + isLocalNodeFull := localSubnetworkID == nil + isRemoteNodeFull := msgVersion.SubnetworkID == nil + isOutbound := flow.peer.Connection().IsOutbound() + if (isLocalNodeFull && !isRemoteNodeFull && isOutbound) || + (!isLocalNodeFull && !isRemoteNodeFull && !msgVersion.SubnetworkID.Equal(localSubnetworkID)) { + + return nil, protocolerrors.New(false, "incompatible subnetworks") + } + + if flow.Config().ProtocolVersion > maxAcceptableProtocolVersion { + return nil, errors.Errorf("%d is a non existing protocol version", flow.Config().ProtocolVersion) + } + + maxProtocolVersion := flow.Config().ProtocolVersion + flow.peer.UpdateFieldsFromMsgVersion(msgVersion, maxProtocolVersion) + err = flow.outgoingRoute.Enqueue(appmessage.NewMsgVerAck()) + if err != nil { + return nil, err + } + + flow.peer.Connection().SetID(msgVersion.ID) + + return msgVersion.Address, nil +} diff --git a/app/protocol/flows/handshake/sendversion.go b/app/protocol/flows/handshake/sendversion.go new file mode 100644 index 0000000..50e77f5 --- /dev/null +++ b/app/protocol/flows/handshake/sendversion.go @@ -0,0 +1,89 @@ +package handshake + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + peerpkg 
"github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/version" +) + +var ( + // userAgentName is the user agent name and is used to help identify + // ourselves to other spectre peers. + userAgentName = "spectred" + + // userAgentVersion is the user agent version and is used to help + // identify ourselves to other spectre peers. + userAgentVersion = version.Version() + + // defaultServices describes the default services that are supported by + // the server. + defaultServices = appmessage.DefaultServices + + // defaultRequiredServices describes the default services that are + // required to be supported by outbound peers. + defaultRequiredServices = appmessage.SFNodeNetwork +) + +type sendVersionFlow struct { + HandleHandshakeContext + + incomingRoute, outgoingRoute *router.Route + peer *peerpkg.Peer +} + +// SendVersion sends a version to a peer and waits for verack. +func SendVersion(context HandleHandshakeContext, incomingRoute *router.Route, + outgoingRoute *router.Route, peer *peerpkg.Peer) error { + + flow := &sendVersionFlow{ + HandleHandshakeContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + } + return flow.start() +} + +func (flow *sendVersionFlow) start() error { + onEnd := logger.LogAndMeasureExecutionTime(log, "sendVersionFlow.start") + defer onEnd() + + log.Debugf("Starting sendVersionFlow with %s", flow.peer.Address()) + + // Version message. 
+ localAddress := flow.AddressManager().BestLocalAddress(flow.peer.Connection().NetAddress()) + subnetworkID := flow.Config().SubnetworkID + if flow.Config().ProtocolVersion < minAcceptableProtocolVersion { + return errors.Errorf("configured protocol version %d is obsolete", flow.Config().ProtocolVersion) + } + msg := appmessage.NewMsgVersion(localAddress, flow.NetAdapter().ID(), + flow.Config().ActiveNetParams.Name, subnetworkID, flow.Config().ProtocolVersion) + msg.AddUserAgent(userAgentName, userAgentVersion, flow.Config().UserAgentComments...) + + // Advertise the services flag + msg.Services = defaultServices + + // Advertise our max supported protocol version. + msg.ProtocolVersion = flow.Config().ProtocolVersion + + // Advertise if inv messages for transactions are desired. + msg.DisableRelayTx = flow.Config().BlocksOnly + + err := flow.outgoingRoute.Enqueue(msg) + if err != nil { + return err + } + + // Wait for verack + log.Debugf("Waiting for verack") + _, err = flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return err + } + log.Debugf("Got verack") + return nil +} diff --git a/app/protocol/flows/ready/log.go b/app/protocol/flows/ready/log.go new file mode 100644 index 0000000..da6d41a --- /dev/null +++ b/app/protocol/flows/ready/log.go @@ -0,0 +1,9 @@ +package ready + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("PROT") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/app/protocol/flows/ready/ready.go b/app/protocol/flows/ready/ready.go new file mode 100644 index 0000000..82b400e --- /dev/null +++ b/app/protocol/flows/ready/ready.go @@ -0,0 +1,57 @@ +package ready + +import ( + "sync/atomic" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + + "github.com/pkg/errors" + peerpkg 
"github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleReady notify the other peer that peer is ready for messages, and wait for the other peer +// to send a ready message before start running the flows. +func HandleReady(incomingRoute *routerpkg.Route, outgoingRoute *routerpkg.Route, + peer *peerpkg.Peer, +) error { + + log.Debugf("Sending ready message to %s", peer) + + isStopping := uint32(0) + err := outgoingRoute.Enqueue(appmessage.NewMsgReady()) + if err != nil { + return handleError(err, "HandleReady", &isStopping) + } + + _, err = incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return handleError(err, "HandleReady", &isStopping) + } + + log.Debugf("Got ready message from %s", peer) + + return nil +} + +// Ready is different from other flows, since in it should forward router.ErrRouteClosed to errChan +// Therefore we implement a separate handleError for 'ready' +func handleError(err error, flowName string, isStopping *uint32) error { + if errors.Is(err, routerpkg.ErrRouteClosed) { + if atomic.AddUint32(isStopping, 1) == 1 { + return err + } + return nil + } + + if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) { + log.Errorf("Ready protocol error from %s: %s", flowName, err) + if atomic.AddUint32(isStopping, 1) == 1 { + return err + } + return nil + } + panic(err) +} diff --git a/app/protocol/flows/v5/addressexchange/receiveaddresses.go b/app/protocol/flows/v5/addressexchange/receiveaddresses.go new file mode 100644 index 0000000..c1fbb9b --- /dev/null +++ b/app/protocol/flows/v5/addressexchange/receiveaddresses.go @@ -0,0 +1,39 @@ +package addressexchange + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + peerpkg 
"github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// ReceiveAddressesContext is the interface for the context needed for the ReceiveAddresses flow. +type ReceiveAddressesContext interface { + AddressManager() *addressmanager.AddressManager +} + +// ReceiveAddresses asks a peer for more addresses if needed. +func ReceiveAddresses(context ReceiveAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route, + peer *peerpkg.Peer) error { + + subnetworkID := peer.SubnetworkID() + msgGetAddresses := appmessage.NewMsgRequestAddresses(false, subnetworkID) + err := outgoingRoute.Enqueue(msgGetAddresses) + if err != nil { + return err + } + + message, err := incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return err + } + + msgAddresses := message.(*appmessage.MsgAddresses) + if len(msgAddresses.AddressList) > addressmanager.GetAddressesMax { + return protocolerrors.Errorf(true, "address count exceeded %d", addressmanager.GetAddressesMax) + } + + return context.AddressManager().AddAddresses(msgAddresses.AddressList...) +} diff --git a/app/protocol/flows/v5/addressexchange/sendaddresses.go b/app/protocol/flows/v5/addressexchange/sendaddresses.go new file mode 100644 index 0000000..4112bfc --- /dev/null +++ b/app/protocol/flows/v5/addressexchange/sendaddresses.go @@ -0,0 +1,52 @@ +package addressexchange + +import ( + "math/rand" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// SendAddressesContext is the interface for the context needed for the SendAddresses flow. 
+type SendAddressesContext interface { + AddressManager() *addressmanager.AddressManager +} + +// SendAddresses sends addresses to a peer that requests it. +func SendAddresses(context SendAddressesContext, incomingRoute *router.Route, outgoingRoute *router.Route) error { + for { + _, err := incomingRoute.Dequeue() + if err != nil { + return err + } + + addresses := context.AddressManager().Addresses() + msgAddresses := appmessage.NewMsgAddresses(shuffleAddresses(addresses)) + + err = outgoingRoute.Enqueue(msgAddresses) + if err != nil { + return err + } + } +} + +// shuffleAddresses randomizes the given addresses sent if there are more than the maximum allowed in one message. +func shuffleAddresses(addresses []*appmessage.NetAddress) []*appmessage.NetAddress { + addressCount := len(addresses) + + if addressCount < appmessage.MaxAddressesPerMsg { + return addresses + } + + shuffleAddresses := make([]*appmessage.NetAddress, addressCount) + copy(shuffleAddresses, addresses) + + rand.Shuffle(addressCount, func(i, j int) { + shuffleAddresses[i], shuffleAddresses[j] = shuffleAddresses[j], shuffleAddresses[i] + }) + + // Truncate it to the maximum size. + shuffleAddresses = shuffleAddresses[:appmessage.MaxAddressesPerMsg] + return shuffleAddresses +} diff --git a/app/protocol/flows/v5/blockrelay/batch_size_test.go b/app/protocol/flows/v5/blockrelay/batch_size_test.go new file mode 100644 index 0000000..9d9460c --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/batch_size_test.go @@ -0,0 +1,17 @@ +package blockrelay + +import ( + "testing" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +func TestIBDBatchSizeLessThanRouteCapacity(t *testing.T) { + // The `ibdBatchSize` constant must be equal at both syncer and syncee. Therefore, we do not want + // to set it to `router.DefaultMaxMessages` to avoid confusion and human errors. 
+ // However, nonetheless we must enforce that it does not exceed `router.DefaultMaxMessages` + if ibdBatchSize >= router.DefaultMaxMessages { + t.Fatalf("IBD batch size (%d) must be smaller than router.DefaultMaxMessages (%d)", + ibdBatchSize, router.DefaultMaxMessages) + } +} diff --git a/app/protocol/flows/v5/blockrelay/block_locator.go b/app/protocol/flows/v5/blockrelay/block_locator.go new file mode 100644 index 0000000..3046561 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/block_locator.go @@ -0,0 +1,33 @@ +package blockrelay + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (flow *handleRelayInvsFlow) sendGetBlockLocator(highHash *externalapi.DomainHash, limit uint32) error { + msgGetBlockLocator := appmessage.NewMsgRequestBlockLocator(highHash, limit) + return flow.outgoingRoute.Enqueue(msgGetBlockLocator) +} + +func (flow *handleRelayInvsFlow) receiveBlockLocator() (blockLocatorHashes []*externalapi.DomainHash, err error) { + for { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, err + } + + switch message := message.(type) { + case *appmessage.MsgInvRelayBlock: + flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false}) + case *appmessage.MsgBlockLocator: + return message.BlockLocatorHashes, nil + default: + return nil, + protocolerrors.Errorf(true, "received unexpected message type. 
"+ + "expected: %s, got: %s", appmessage.CmdBlockLocator, message.Command()) + } + } +} diff --git a/app/protocol/flows/v5/blockrelay/handle_ibd_block_locator.go b/app/protocol/flows/v5/blockrelay/handle_ibd_block_locator.go new file mode 100644 index 0000000..c8f4779 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_ibd_block_locator.go @@ -0,0 +1,85 @@ +package blockrelay + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleIBDBlockLocatorContext is the interface for the context needed for the HandleIBDBlockLocator flow. +type HandleIBDBlockLocatorContext interface { + Domain() domain.Domain +} + +// HandleIBDBlockLocator listens to appmessage.MsgIBDBlockLocator messages and sends +// the highest known block that's in the selected parent chain of `targetHash` to the +// requesting peer. 
+func HandleIBDBlockLocator(context HandleIBDBlockLocatorContext, incomingRoute *router.Route, + outgoingRoute *router.Route, peer *peer.Peer) error { + + for { + message, err := incomingRoute.Dequeue() + if err != nil { + return err + } + ibdBlockLocatorMessage := message.(*appmessage.MsgIBDBlockLocator) + + targetHash := ibdBlockLocatorMessage.TargetHash + log.Debugf("Received IBDBlockLocator from %s with targetHash %s", peer, targetHash) + + blockInfo, err := context.Domain().Consensus().GetBlockInfo(targetHash) + if err != nil { + return err + } + if !blockInfo.HasHeader() { + return protocolerrors.Errorf(true, "received IBDBlockLocator "+ + "with an unknown targetHash %s", targetHash) + } + + foundHighestHashInTheSelectedParentChainOfTargetHash := false + for _, blockLocatorHash := range ibdBlockLocatorMessage.BlockLocatorHashes { + blockInfo, err := context.Domain().Consensus().GetBlockInfo(blockLocatorHash) + if err != nil { + return err + } + + // The IBD block locator is checking only existing blocks with bodies. 
+ if !blockInfo.HasBody() { + continue + } + + isBlockLocatorHashInSelectedParentChainOfHighHash, err := + context.Domain().Consensus().IsInSelectedParentChainOf(blockLocatorHash, targetHash) + if err != nil { + return err + } + if !isBlockLocatorHashInSelectedParentChainOfHighHash { + continue + } + + foundHighestHashInTheSelectedParentChainOfTargetHash = true + log.Debugf("Found a known hash %s amongst peer %s's "+ + "blockLocator that's in the selected parent chain of targetHash %s", blockLocatorHash, peer, targetHash) + + ibdBlockLocatorHighestHashMessage := appmessage.NewMsgIBDBlockLocatorHighestHash(blockLocatorHash) + err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashMessage) + if err != nil { + return err + } + break + } + + if !foundHighestHashInTheSelectedParentChainOfTargetHash { + log.Warnf("no hash was found in the blockLocator "+ + "that was in the selected parent chain of targetHash %s", targetHash) + + ibdBlockLocatorHighestHashNotFoundMessage := appmessage.NewMsgIBDBlockLocatorHighestHashNotFound() + err = outgoingRoute.Enqueue(ibdBlockLocatorHighestHashNotFoundMessage) + if err != nil { + return err + } + } + } +} diff --git a/app/protocol/flows/v5/blockrelay/handle_ibd_block_requests.go b/app/protocol/flows/v5/blockrelay/handle_ibd_block_requests.go new file mode 100644 index 0000000..45b27b2 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_ibd_block_requests.go @@ -0,0 +1,50 @@ +package blockrelay + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleIBDBlockRequestsContext is the interface for the context needed for the HandleIBDBlockRequests flow. 
+type HandleIBDBlockRequestsContext interface { + Domain() domain.Domain +} + +// HandleIBDBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends +// their corresponding blocks to the requesting peer. +func HandleIBDBlockRequests(context HandleIBDBlockRequestsContext, incomingRoute *router.Route, + outgoingRoute *router.Route) error { + + for { + message, err := incomingRoute.Dequeue() + if err != nil { + return err + } + msgRequestIBDBlocks := message.(*appmessage.MsgRequestIBDBlocks) + log.Debugf("Got request for %d ibd blocks", len(msgRequestIBDBlocks.Hashes)) + for i, hash := range msgRequestIBDBlocks.Hashes { + // Fetch the block from the database. + block, found, err := context.Domain().Consensus().GetBlock(hash) + if err != nil { + return errors.Wrapf(err, "unable to fetch requested block hash %s", hash) + } + + if !found { + return protocolerrors.Errorf(false, "IBD block %s not found", hash) + } + + // TODO (Partial nodes): Convert block to partial block if needed + + blockMessage := appmessage.DomainBlockToMsgBlock(block) + ibdBlockMessage := appmessage.NewMsgIBDBlock(blockMessage) + err = outgoingRoute.Enqueue(ibdBlockMessage) + if err != nil { + return err + } + log.Debugf("sent %d out of %d", i+1, len(msgRequestIBDBlocks.Hashes)) + } + } +} diff --git a/app/protocol/flows/v5/blockrelay/handle_ibd_request_chain_block_locator.go b/app/protocol/flows/v5/blockrelay/handle_ibd_request_chain_block_locator.go new file mode 100644 index 0000000..773d015 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_ibd_request_chain_block_locator.go @@ -0,0 +1,85 @@ +package blockrelay + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + 
"github.com/spectre-project/spectred/infrastructure/network/netadapter/router"
+)
+
+// RequestIBDChainBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
+type RequestIBDChainBlockLocatorContext interface {
+	Domain() domain.Domain
+}
+
+type handleRequestIBDChainBlockLocatorFlow struct {
+	RequestIBDChainBlockLocatorContext
+	incomingRoute, outgoingRoute *router.Route
+}
+
+// HandleRequestIBDChainBlockLocator handles getBlockLocator messages
+func HandleRequestIBDChainBlockLocator(context RequestIBDChainBlockLocatorContext, incomingRoute *router.Route,
+	outgoingRoute *router.Route) error {

+	flow := &handleRequestIBDChainBlockLocatorFlow{
+		RequestIBDChainBlockLocatorContext: context,
+		incomingRoute:                      incomingRoute,
+		outgoingRoute:                      outgoingRoute,
+	}
+	return flow.start()
+}
+
+func (flow *handleRequestIBDChainBlockLocatorFlow) start() error {
+	for {
+		highHash, lowHash, err := flow.receiveRequestIBDChainBlockLocator()
+		if err != nil {
+			return err
+		}
+		log.Debugf("Received getIBDChainBlockLocator with highHash: %s, lowHash: %s", highHash, lowHash)
+
+		var locator externalapi.BlockLocator
+		if highHash == nil || lowHash == nil {
+			locator, err = flow.Domain().Consensus().CreateFullHeadersSelectedChainBlockLocator()
+		} else {
+			locator, err = flow.Domain().Consensus().CreateHeadersSelectedChainBlockLocator(lowHash, highHash)
+			if errors.Is(err, model.ErrBlockNotInSelectedParentChain) {
+				// The chain has been modified, signal it by sending an empty locator
+				locator, err = externalapi.BlockLocator{}, nil
+			}
+		}
+
+		if err != nil {
+			log.Debugf("Received error from CreateHeadersSelectedChainBlockLocator: %s", err)
+			return protocolerrors.Errorf(true, "couldn't build a block "+
+				"locator between %s and %s", lowHash, highHash)
+		}
+
+		err = flow.sendIBDChainBlockLocator(locator)
+		if err != nil {
+			return err
+		}
+	}
+}
+
+func (flow *handleRequestIBDChainBlockLocatorFlow) receiveRequestIBDChainBlockLocator() 
(highHash, lowHash *externalapi.DomainHash, err error) { + + message, err := flow.incomingRoute.Dequeue() + if err != nil { + return nil, nil, err + } + msgGetBlockLocator := message.(*appmessage.MsgRequestIBDChainBlockLocator) + + return msgGetBlockLocator.HighHash, msgGetBlockLocator.LowHash, nil +} + +func (flow *handleRequestIBDChainBlockLocatorFlow) sendIBDChainBlockLocator(locator externalapi.BlockLocator) error { + msgIBDChainBlockLocator := appmessage.NewMsgIBDChainBlockLocator(locator) + err := flow.outgoingRoute.Enqueue(msgIBDChainBlockLocator) + if err != nil { + return err + } + return nil +} diff --git a/app/protocol/flows/v5/blockrelay/handle_pruning_point_and_its_anticone_requests.go b/app/protocol/flows/v5/blockrelay/handle_pruning_point_and_its_anticone_requests.go new file mode 100644 index 0000000..583e0b0 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_pruning_point_and_its_anticone_requests.go @@ -0,0 +1,163 @@ +package blockrelay + +import ( + "sync/atomic" + + "github.com/spectre-project/spectred/app/appmessage" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// PruningPointAndItsAnticoneRequestsContext is the interface for the context needed for the HandlePruningPointAndItsAnticoneRequests flow. +type PruningPointAndItsAnticoneRequestsContext interface { + Domain() domain.Domain + Config() *config.Config +} + +var isBusy uint32 + +// HandlePruningPointAndItsAnticoneRequests listens to appmessage.MsgRequestPruningPointAndItsAnticone messages and sends +// the pruning point and its anticone to the requesting peer. 
func HandlePruningPointAndItsAnticoneRequests(context PruningPointAndItsAnticoneRequestsContext, incomingRoute *router.Route,
	outgoingRoute *router.Route, peer *peerpkg.Peer) error {

	for {
		// Each request is handled in a closure so `defer` releases the busy
		// flag at the end of the request rather than at flow shutdown.
		err := func() error {
			_, err := incomingRoute.Dequeue()
			if err != nil {
				return err
			}

			// Only one pruning point anticone request is served at a time,
			// node-wide (isBusy is a package-level flag shared by all peers).
			if !atomic.CompareAndSwapUint32(&isBusy, 0, 1) {
				return protocolerrors.Errorf(false, "node is busy with other pruning point anticone requests")
			}
			defer atomic.StoreUint32(&isBusy, 0)

			log.Debugf("Got request for pruning point and its anticone from %s", peer)

			pruningPointHeaders, err := context.Domain().Consensus().PruningPointHeaders()
			if err != nil {
				return err
			}

			msgPruningPointHeaders := make([]*appmessage.MsgBlockHeader, len(pruningPointHeaders))
			for i, header := range pruningPointHeaders {
				msgPruningPointHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(header)
			}

			err = outgoingRoute.Enqueue(appmessage.NewMsgPruningPoints(msgPruningPointHeaders))
			if err != nil {
				return err
			}

			pointAndItsAnticone, err := context.Domain().Consensus().PruningPointAndItsAnticone()
			if err != nil {
				return err
			}

			// DAA window headers and GHOSTDAG data are deduplicated across
			// blocks: each unique entry is sent once and per-block index lists
			// reference positions in the shared slices.
			windowSize := context.Config().NetParams().DifficultyAdjustmentWindowSize
			daaWindowBlocks := make([]*externalapi.TrustedDataDataDAAHeader, 0, windowSize)
			daaWindowHashesToIndex := make(map[externalapi.DomainHash]int, windowSize)
			trustedDataDAABlockIndexes := make(map[externalapi.DomainHash][]uint64)

			ghostdagData := make([]*externalapi.BlockGHOSTDAGDataHashPair, 0)
			ghostdagDataHashToIndex := make(map[externalapi.DomainHash]int)
			trustedDataGHOSTDAGDataIndexes := make(map[externalapi.DomainHash][]uint64)
			for _, blockHash := range pointAndItsAnticone {
				blockDAAWindowHashes, err := context.Domain().Consensus().BlockDAAWindowHashes(blockHash)
				if err != nil {
					return err
				}

				trustedDataDAABlockIndexes[*blockHash] = make([]uint64, 0, windowSize)
				for i, daaBlockHash := range blockDAAWindowHashes {
					index, exists := daaWindowHashesToIndex[*daaBlockHash]
					if !exists {
						// First time this DAA window header is seen: fetch it
						// and append it to the shared slice.
						trustedDataDataDAAHeader, err := context.Domain().Consensus().TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i))
						if err != nil {
							return err
						}
						daaWindowBlocks = append(daaWindowBlocks, trustedDataDataDAAHeader)
						index = len(daaWindowBlocks) - 1
						daaWindowHashesToIndex[*daaBlockHash] = index
					}

					trustedDataDAABlockIndexes[*blockHash] = append(trustedDataDAABlockIndexes[*blockHash], uint64(index))
				}

				ghostdagDataBlockHashes, err := context.Domain().Consensus().TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash)
				if err != nil {
					return err
				}

				trustedDataGHOSTDAGDataIndexes[*blockHash] = make([]uint64, 0, context.Config().NetParams().K)
				for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes {
					index, exists := ghostdagDataHashToIndex[*ghostdagDataBlockHash]
					if !exists {
						// Same dedup scheme as the DAA window above, but for
						// GHOSTDAG data entries.
						data, err := context.Domain().Consensus().TrustedGHOSTDAGData(ghostdagDataBlockHash)
						if err != nil {
							return err
						}
						ghostdagData = append(ghostdagData, &externalapi.BlockGHOSTDAGDataHashPair{
							Hash:         ghostdagDataBlockHash,
							GHOSTDAGData: data,
						})
						index = len(ghostdagData) - 1
						ghostdagDataHashToIndex[*ghostdagDataBlockHash] = index
					}

					trustedDataGHOSTDAGDataIndexes[*blockHash] = append(trustedDataGHOSTDAGDataIndexes[*blockHash], uint64(index))
				}
			}

			// Send the shared trusted-data tables first, then the blocks that
			// reference them by index.
			err = outgoingRoute.Enqueue(appmessage.DomainTrustedDataToTrustedData(daaWindowBlocks, ghostdagData))
			if err != nil {
				return err
			}

			for i, blockHash := range pointAndItsAnticone {
				block, found, err := context.Domain().Consensus().GetBlock(blockHash)
				if err != nil {
					return err
				}

				if !found {
					return protocolerrors.Errorf(false, "pruning point anticone block %s not found", blockHash)
				}

				err = outgoingRoute.Enqueue(appmessage.DomainBlockWithTrustedDataToBlockWithTrustedDataV4(block, trustedDataDAABlockIndexes[*blockHash], trustedDataGHOSTDAGDataIndexes[*blockHash]))
				if err != nil {
					return err
				}

				// After every ibdBatchSize blocks, wait for the syncee to
				// acknowledge before sending more (simple flow control).
				if (i+1)%ibdBatchSize == 0 {
					// No timeout here, as we don't care if the syncee takes its time computing,
					// since it only blocks this dedicated flow
					message, err := incomingRoute.Dequeue()
					if err != nil {
						return err
					}
					if _, ok := message.(*appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks); !ok {
						return protocolerrors.Errorf(true, "received unexpected message type. "+
							"expected: %s, got: %s", appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks, message.Command())
					}
				}
			}

			err = outgoingRoute.Enqueue(appmessage.NewMsgDoneBlocksWithTrustedData())
			if err != nil {
				return err
			}

			log.Debugf("Sent pruning point and its anticone to %s", peer)
			return nil
		}()
		if err != nil {
			return err
		}
	}
}
diff --git a/app/protocol/flows/v5/blockrelay/handle_pruning_point_proof_requests.go b/app/protocol/flows/v5/blockrelay/handle_pruning_point_proof_requests.go
new file mode 100644
index 0000000..fe41c20
--- /dev/null
+++ b/app/protocol/flows/v5/blockrelay/handle_pruning_point_proof_requests.go
package blockrelay

import (
	"github.com/spectre-project/spectred/app/appmessage"
	peerpkg "github.com/spectre-project/spectred/app/protocol/peer"
	"github.com/spectre-project/spectred/domain"
	"github.com/spectre-project/spectred/infrastructure/network/netadapter/router"
)

// PruningPointProofRequestsContext is the interface for the context needed for the HandlePruningPointProofRequests flow.
type PruningPointProofRequestsContext interface {
	Domain() domain.Domain
}

// HandlePruningPointProofRequests listens to appmessage.MsgRequestPruningPointProof messages and sends
// the pruning point proof to the requesting peer.
+func HandlePruningPointProofRequests(context PruningPointProofRequestsContext, incomingRoute *router.Route, + outgoingRoute *router.Route, peer *peerpkg.Peer) error { + + for { + _, err := incomingRoute.Dequeue() + if err != nil { + return err + } + + log.Debugf("Got request for pruning point proof from %s", peer) + + pruningPointProof, err := context.Domain().Consensus().BuildPruningPointProof() + if err != nil { + return err + } + pruningPointProofMessage := appmessage.DomainPruningPointProofToMsgPruningPointProof(pruningPointProof) + err = outgoingRoute.Enqueue(pruningPointProofMessage) + if err != nil { + return err + } + + log.Debugf("Sent pruning point proof to %s", peer) + } +} diff --git a/app/protocol/flows/v5/blockrelay/handle_relay_block_requests.go b/app/protocol/flows/v5/blockrelay/handle_relay_block_requests.go new file mode 100644 index 0000000..71615fe --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_relay_block_requests.go @@ -0,0 +1,49 @@ +package blockrelay + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RelayBlockRequestsContext is the interface for the context needed for the HandleRelayBlockRequests flow. +type RelayBlockRequestsContext interface { + Domain() domain.Domain +} + +// HandleRelayBlockRequests listens to appmessage.MsgRequestRelayBlocks messages and sends +// their corresponding blocks to the requesting peer. 
+func HandleRelayBlockRequests(context RelayBlockRequestsContext, incomingRoute *router.Route, + outgoingRoute *router.Route, peer *peerpkg.Peer) error { + + for { + message, err := incomingRoute.Dequeue() + if err != nil { + return err + } + getRelayBlocksMessage := message.(*appmessage.MsgRequestRelayBlocks) + log.Debugf("Got request for relay blocks with hashes %s", getRelayBlocksMessage.Hashes) + for _, hash := range getRelayBlocksMessage.Hashes { + // Fetch the block from the database. + block, found, err := context.Domain().Consensus().GetBlock(hash) + if err != nil { + return errors.Wrapf(err, "unable to fetch requested block hash %s", hash) + } + + if !found { + return protocolerrors.Errorf(false, "Relay block %s not found", hash) + } + + // TODO (Partial nodes): Convert block to partial block if needed + + err = outgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block)) + if err != nil { + return err + } + log.Debugf("Relayed block with hash %s", hash) + } + } +} diff --git a/app/protocol/flows/v5/blockrelay/handle_relay_invs.go b/app/protocol/flows/v5/blockrelay/handle_relay_invs.go new file mode 100644 index 0000000..0429762 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_relay_invs.go @@ -0,0 +1,466 @@ +package blockrelay + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/flowcontext" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + 
"github.com/spectre-project/spectred/domain/consensus/utils/hashset" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// orphanResolutionRange is the maximum amount of blockLocator hashes +// to search for known blocks. See isBlockInOrphanResolutionRange for +// further details +var orphanResolutionRange uint32 = 5 + +// RelayInvsContext is the interface for the context needed for the HandleRelayInvs flow. +type RelayInvsContext interface { + Domain() domain.Domain + Config() *config.Config + OnNewBlock(block *externalapi.DomainBlock) error + OnNewBlockTemplate() error + OnPruningPointUTXOSetOverride() error + SharedRequestedBlocks() *flowcontext.SharedRequestedBlocks + Broadcast(message appmessage.Message) error + AddOrphan(orphanBlock *externalapi.DomainBlock) + GetOrphanRoots(orphanHash *externalapi.DomainHash) ([]*externalapi.DomainHash, bool, error) + IsOrphan(blockHash *externalapi.DomainHash) bool + IsIBDRunning() bool + IsRecoverableError(err error) bool + IsNearlySynced() (bool, error) +} + +type invRelayBlock struct { + Hash *externalapi.DomainHash + IsOrphanRoot bool +} + +type handleRelayInvsFlow struct { + RelayInvsContext + incomingRoute, outgoingRoute *router.Route + peer *peerpkg.Peer + invsQueue []invRelayBlock +} + +// HandleRelayInvs listens to appmessage.MsgInvRelayBlock messages, requests their corresponding blocks if they +// are missing, adds them to the DAG and propagates them to the rest of the network. 
+func HandleRelayInvs(context RelayInvsContext, incomingRoute *router.Route, outgoingRoute *router.Route, + peer *peerpkg.Peer) error { + + flow := &handleRelayInvsFlow{ + RelayInvsContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + invsQueue: make([]invRelayBlock, 0), + } + err := flow.start() + // Currently, HandleRelayInvs flow is the only place where IBD is triggered, so the channel can be closed now + close(peer.IBDRequestChannel()) + return err +} + +func (flow *handleRelayInvsFlow) start() error { + for { + log.Debugf("Waiting for inv") + inv, err := flow.readInv() + if err != nil { + return err + } + + log.Debugf("Got relay inv for block %s", inv.Hash) + + blockInfo, err := flow.Domain().Consensus().GetBlockInfo(inv.Hash) + if err != nil { + return err + } + if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly { + if blockInfo.BlockStatus == externalapi.StatusInvalid { + return protocolerrors.Errorf(true, "sent inv of an invalid block %s", + inv.Hash) + } + log.Debugf("Block %s already exists. continuing...", inv.Hash) + continue + } + + isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent() + if err != nil { + return err + } + + if flow.IsOrphan(inv.Hash) { + if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && isGenesisVirtualSelectedParent { + log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+ + "to the recent pruning point before normal operation can resume.", inv.Hash) + continue + } + + log.Debugf("Block %s is a known orphan. 
Requesting its missing ancestors", inv.Hash) + err := flow.AddOrphanRootsToQueue(inv.Hash) + if err != nil { + return err + } + continue + } + + // Block relay is disabled if the node is already during IBD AND considered out of sync + if flow.IsIBDRunning() { + isNearlySynced, err := flow.IsNearlySynced() + if err != nil { + return err + } + if !isNearlySynced { + log.Debugf("Got block %s while in IBD and the node is out of sync. Continuing...", inv.Hash) + continue + } + } + + log.Debugf("Requesting block %s", inv.Hash) + block, exists, err := flow.requestBlock(inv.Hash) + if err != nil { + return err + } + if exists { + log.Debugf("Aborting requesting block %s because it already exists", inv.Hash) + continue + } + + err = flow.banIfBlockIsHeaderOnly(block) + if err != nil { + return err + } + + if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced && !flow.Config().Devnet && flow.isChildOfGenesis(block) { + log.Infof("Cannot process %s because it's a direct child of genesis.", consensushashing.BlockHash(block)) + continue + } + + // Note we do not apply the heuristic below if inv was queued as an orphan root, since + // that means the process started by a proper and relevant relay block + if !inv.IsOrphanRoot { + // Check bounded merge depth to avoid requesting irrelevant data which cannot be merged under virtual + virtualMergeDepthRoot, err := flow.Domain().Consensus().VirtualMergeDepthRoot() + if err != nil { + return err + } + if !virtualMergeDepthRoot.Equal(model.VirtualGenesisBlockHash) { + mergeDepthRootHeader, err := flow.Domain().Consensus().GetBlockHeader(virtualMergeDepthRoot) + if err != nil { + return err + } + // Since `BlueWork` respects topology, this condition means that the relay + // block is not in the future of virtual's merge depth root, and thus cannot be merged unless + // other valid blocks Kosherize it, in which case it will be obtained once the merger is relayed + if 
block.Header.BlueWork().Cmp(mergeDepthRootHeader.BlueWork()) <= 0 { + log.Debugf("Block %s has lower blue work than virtual's merge root %s (%d <= %d), hence we are skipping it", + inv.Hash, virtualMergeDepthRoot, block.Header.BlueWork(), mergeDepthRootHeader.BlueWork()) + continue + } + } + } + + log.Debugf("Processing block %s", inv.Hash) + oldVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo() + if err != nil { + return err + } + missingParents, err := flow.processBlock(block) + if err != nil { + if errors.Is(err, ruleerrors.ErrPrunedBlock) { + log.Infof("Ignoring pruned block %s", inv.Hash) + continue + } + + if errors.Is(err, ruleerrors.ErrDuplicateBlock) { + log.Infof("Ignoring duplicate block %s", inv.Hash) + continue + } + return err + } + if len(missingParents) > 0 { + log.Debugf("Block %s is orphan and has missing parents: %s", inv.Hash, missingParents) + err := flow.processOrphan(block) + if err != nil { + return err + } + continue + } + + oldVirtualParents := hashset.New() + for _, parent := range oldVirtualInfo.ParentHashes { + oldVirtualParents.Add(parent) + } + + newVirtualInfo, err := flow.Domain().Consensus().GetVirtualInfo() + if err != nil { + return err + } + + virtualHasNewParents := false + for _, parent := range newVirtualInfo.ParentHashes { + if oldVirtualParents.Contains(parent) { + continue + } + virtualHasNewParents = true + block, found, err := flow.Domain().Consensus().GetBlock(parent) + if err != nil { + return err + } + + if !found { + return protocolerrors.Errorf(false, "Virtual parent %s not found", parent) + } + blockHash := consensushashing.BlockHash(block) + log.Debugf("Relaying block %s", blockHash) + err = flow.relayBlock(block) + if err != nil { + return err + } + } + + if virtualHasNewParents { + log.Debugf("Virtual %d has new parents, raising new block template event", newVirtualInfo.DAAScore) + err = flow.OnNewBlockTemplate() + if err != nil { + return err + } + } + + log.Infof("Accepted block %s via relay", 
inv.Hash) + err = flow.OnNewBlock(block) + if err != nil { + return err + } + } +} + +func (flow *handleRelayInvsFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error { + if len(block.Transactions) == 0 { + return protocolerrors.Errorf(true, "sent header of %s block where expected block with body", + consensushashing.BlockHash(block)) + } + + return nil +} + +func (flow *handleRelayInvsFlow) readInv() (invRelayBlock, error) { + if len(flow.invsQueue) > 0 { + var inv invRelayBlock + inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:] + return inv, nil + } + + msg, err := flow.incomingRoute.Dequeue() + if err != nil { + return invRelayBlock{}, err + } + + msgInv, ok := msg.(*appmessage.MsgInvRelayBlock) + if !ok { + return invRelayBlock{}, protocolerrors.Errorf(true, "unexpected %s message in the block relay handleRelayInvsFlow while "+ + "expecting an inv message", msg.Command()) + } + return invRelayBlock{Hash: msgInv.Hash, IsOrphanRoot: false}, nil +} + +func (flow *handleRelayInvsFlow) requestBlock(requestHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) { + exists := flow.SharedRequestedBlocks().AddIfNotExists(requestHash) + if exists { + return nil, true, nil + } + + // In case the function returns earlier than expected, we want to make sure flow.SharedRequestedBlocks() is + // clean from any pending blocks. 
+ defer flow.SharedRequestedBlocks().Remove(requestHash) + + getRelayBlocksMsg := appmessage.NewMsgRequestRelayBlocks([]*externalapi.DomainHash{requestHash}) + err := flow.outgoingRoute.Enqueue(getRelayBlocksMsg) + if err != nil { + return nil, false, err + } + + msgBlock, err := flow.readMsgBlock() + if err != nil { + return nil, false, err + } + + block := appmessage.MsgBlockToDomainBlock(msgBlock) + blockHash := consensushashing.BlockHash(block) + if !blockHash.Equal(requestHash) { + return nil, false, protocolerrors.Errorf(true, "got unrequested block %s", blockHash) + } + + return block, false, nil +} + +// readMsgBlock returns the next msgBlock in msgChan, and populates invsQueue with any inv messages that meanwhile arrive. +// +// Note: this function assumes msgChan can contain only appmessage.MsgInvRelayBlock and appmessage.MsgBlock messages. +func (flow *handleRelayInvsFlow) readMsgBlock() (msgBlock *appmessage.MsgBlock, err error) { + for { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, err + } + + switch message := message.(type) { + case *appmessage.MsgInvRelayBlock: + flow.invsQueue = append(flow.invsQueue, invRelayBlock{Hash: message.Hash, IsOrphanRoot: false}) + case *appmessage.MsgBlock: + return message, nil + default: + return nil, errors.Errorf("unexpected message %s", message.Command()) + } + } +} + +func (flow *handleRelayInvsFlow) processBlock(block *externalapi.DomainBlock) ([]*externalapi.DomainHash, error) { + blockHash := consensushashing.BlockHash(block) + err := flow.Domain().Consensus().ValidateAndInsertBlock(block, true) + if err != nil { + if !errors.As(err, &ruleerrors.RuleError{}) { + return nil, errors.Wrapf(err, "failed to process block %s", blockHash) + } + + missingParentsError := &ruleerrors.ErrMissingParents{} + if errors.As(err, missingParentsError) { + return missingParentsError.MissingParentHashes, nil + } + // A duplicate block should not appear to the user as 
a warning and is already reported in the calling function + if !errors.Is(err, ruleerrors.ErrDuplicateBlock) { + log.Warnf("Rejected block %s from %s: %s", blockHash, flow.peer, err) + } + return nil, protocolerrors.Wrapf(true, err, "got invalid block %s from relay", blockHash) + } + return nil, nil +} + +func (flow *handleRelayInvsFlow) relayBlock(block *externalapi.DomainBlock) error { + blockHash := consensushashing.BlockHash(block) + return flow.Broadcast(appmessage.NewMsgInvBlock(blockHash)) +} + +func (flow *handleRelayInvsFlow) processOrphan(block *externalapi.DomainBlock) error { + blockHash := consensushashing.BlockHash(block) + + // Return if the block has been orphaned from elsewhere already + if flow.IsOrphan(blockHash) { + log.Debugf("Skipping orphan processing for block %s because it is already an orphan", blockHash) + return nil + } + + // Add the block to the orphan set if it's within orphan resolution range + isBlockInOrphanResolutionRange, err := flow.isBlockInOrphanResolutionRange(blockHash) + if err != nil { + return err + } + if isBlockInOrphanResolutionRange { + if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced { + isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent() + if err != nil { + return err + } + + if isGenesisVirtualSelectedParent { + log.Infof("Cannot process orphan %s for a node with only the genesis block. The node needs to IBD "+ + "to the recent pruning point before normal operation can resume.", blockHash) + return nil + } + } + + log.Debugf("Block %s is within orphan resolution range. "+ + "Adding it to the orphan set", blockHash) + flow.AddOrphan(block) + log.Debugf("Requesting block %s missing ancestors", blockHash) + return flow.AddOrphanRootsToQueue(blockHash) + } + + // Start IBD unless we already are in IBD + log.Debugf("Block %s is out of orphan resolution range. 
"+ + "Attempting to start IBD against it.", blockHash) + + // Send the block to IBD flow via the IBDRequestChannel. + // Note that this is a non-blocking send, since if IBD is already running, there is no need to trigger it + select { + case flow.peer.IBDRequestChannel() <- block: + default: + } + return nil +} + +func (flow *handleRelayInvsFlow) isGenesisVirtualSelectedParent() (bool, error) { + virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent() + if err != nil { + return false, err + } + + return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil +} + +func (flow *handleRelayInvsFlow) isChildOfGenesis(block *externalapi.DomainBlock) bool { + parents := block.Header.DirectParents() + return len(parents) == 1 && parents[0].Equal(flow.Config().NetParams().GenesisHash) +} + +// isBlockInOrphanResolutionRange finds out whether the given blockHash should be +// retrieved via the unorphaning mechanism or via IBD. This method sends a +// getBlockLocator request to the peer with a limit of orphanResolutionRange. +// In the response, if we know none of the hashes, we should retrieve the given +// blockHash via IBD. Otherwise, via unorphaning. 
+func (flow *handleRelayInvsFlow) isBlockInOrphanResolutionRange(blockHash *externalapi.DomainHash) (bool, error) { + err := flow.sendGetBlockLocator(blockHash, orphanResolutionRange) + if err != nil { + return false, err + } + + blockLocatorHashes, err := flow.receiveBlockLocator() + if err != nil { + return false, err + } + for _, blockLocatorHash := range blockLocatorHashes { + blockInfo, err := flow.Domain().Consensus().GetBlockInfo(blockLocatorHash) + if err != nil { + return false, err + } + if blockInfo.Exists && blockInfo.BlockStatus != externalapi.StatusHeaderOnly { + return true, nil + } + } + return false, nil +} + +func (flow *handleRelayInvsFlow) AddOrphanRootsToQueue(orphan *externalapi.DomainHash) error { + orphanRoots, orphanExists, err := flow.GetOrphanRoots(orphan) + if err != nil { + return err + } + + if !orphanExists { + log.Infof("Orphan block %s was missing from the orphan pool while requesting for its roots. This "+ + "probably happened because it was randomly evicted immediately after it was added.", orphan) + } + + if len(orphanRoots) == 0 { + // In some rare cases we get here when there are no orphan roots already + return nil + } + log.Infof("Block %s has %d missing ancestors. Adding them to the invs queue...", orphan, len(orphanRoots)) + + invMessages := make([]invRelayBlock, len(orphanRoots)) + for i, root := range orphanRoots { + log.Debugf("Adding block %s missing ancestor %s to the invs queue", orphan, root) + invMessages[i] = invRelayBlock{Hash: root, IsOrphanRoot: true} + } + + flow.invsQueue = append(invMessages, flow.invsQueue...) 
+ return nil +} diff --git a/app/protocol/flows/v5/blockrelay/handle_request_anticone.go b/app/protocol/flows/v5/blockrelay/handle_request_anticone.go new file mode 100644 index 0000000..1c29cb2 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_request_anticone.go @@ -0,0 +1,96 @@ +package blockrelay + +import ( + "sort" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RequestAnticoneContext is the interface for the context needed for the HandleRequestHeaders flow. +type RequestAnticoneContext interface { + Domain() domain.Domain + Config() *config.Config +} + +type handleRequestAnticoneFlow struct { + RequestAnticoneContext + incomingRoute, outgoingRoute *router.Route + peer *peer.Peer +} + +// HandleRequestAnticone handles RequestAnticone messages +func HandleRequestAnticone(context RequestAnticoneContext, incomingRoute *router.Route, + outgoingRoute *router.Route, peer *peer.Peer) error { + + flow := &handleRequestAnticoneFlow{ + RequestAnticoneContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + } + return flow.start() +} + +func (flow *handleRequestAnticoneFlow) start() error { + for { + blockHash, contextHash, err := receiveRequestAnticone(flow.incomingRoute) + if err != nil { + return err + } + log.Debugf("Received requestAnticone with blockHash: %s, contextHash: %s", blockHash, contextHash) + log.Debugf("Getting past(%s) cap anticone(%s) for peer %s", contextHash, blockHash, flow.peer) + + // GetAnticone is expected to be called by the syncee for getting the anticone of the header selected tip + // 
intersected by past of relayed block, and is thus expected to be bounded by mergeset limit since + // we relay blocks only if they enter virtual's mergeset. We add a 2 factor for possible sync gaps. + blockHashes, err := flow.Domain().Consensus().GetAnticone(blockHash, contextHash, + flow.Config().ActiveNetParams.MergeSetSizeLimit*2) + if err != nil { + return protocolerrors.Wrap(true, err, "Failed querying anticone") + } + log.Debugf("Got %d header hashes in past(%s) cap anticone(%s)", len(blockHashes), contextHash, blockHash) + + blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes)) + for i, blockHash := range blockHashes { + blockHeader, err := flow.Domain().Consensus().GetBlockHeader(blockHash) + if err != nil { + return err + } + blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader) + } + + // We sort the headers in bottom-up topological order before sending + sort.Slice(blockHeaders, func(i, j int) bool { + return blockHeaders[i].BlueWork.Cmp(blockHeaders[j].BlueWork) < 0 + }) + + blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders) + err = flow.outgoingRoute.Enqueue(blockHeadersMessage) + if err != nil { + return err + } + + err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders()) + if err != nil { + return err + } + } +} + +func receiveRequestAnticone(incomingRoute *router.Route) (blockHash *externalapi.DomainHash, + contextHash *externalapi.DomainHash, err error) { + + message, err := incomingRoute.Dequeue() + if err != nil { + return nil, nil, err + } + msgRequestAnticone := message.(*appmessage.MsgRequestAnticone) + + return msgRequestAnticone.BlockHash, msgRequestAnticone.ContextHash, nil +} diff --git a/app/protocol/flows/v5/blockrelay/handle_request_block_locator.go b/app/protocol/flows/v5/blockrelay/handle_request_block_locator.go new file mode 100644 index 0000000..74861fa --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_request_block_locator.go @@ -0,0 +1,75 @@ +package 
blockrelay

import (
	"github.com/spectre-project/spectred/app/appmessage"
	"github.com/spectre-project/spectred/app/protocol/protocolerrors"
	"github.com/spectre-project/spectred/domain"
	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
	"github.com/spectre-project/spectred/infrastructure/network/netadapter/router"
)

// RequestBlockLocatorContext is the interface for the context needed for the HandleRequestBlockLocator flow.
type RequestBlockLocatorContext interface {
	Domain() domain.Domain
}

type handleRequestBlockLocatorFlow struct {
	RequestBlockLocatorContext
	incomingRoute, outgoingRoute *router.Route
}

// HandleRequestBlockLocator handles getBlockLocator messages
func HandleRequestBlockLocator(context RequestBlockLocatorContext, incomingRoute *router.Route,
	outgoingRoute *router.Route) error {

	flow := &handleRequestBlockLocatorFlow{
		RequestBlockLocatorContext: context,
		incomingRoute:              incomingRoute,
		outgoingRoute:              outgoingRoute,
	}
	return flow.start()
}

// start serves getBlockLocator requests in a loop: it builds a locator from the
// pruning point up to the requested highHash and sends it back to the peer.
func (flow *handleRequestBlockLocatorFlow) start() error {
	for {
		highHash, limit, err := flow.receiveGetBlockLocator()
		if err != nil {
			return err
		}
		log.Debugf("Received getBlockLocator with highHash: %s, limit: %d", highHash, limit)

		locator, err := flow.Domain().Consensus().CreateBlockLocatorFromPruningPoint(highHash, limit)
		// An empty locator is treated the same as a consensus error: both make
		// this a misbehaving-peer protocol error (the `true` flag bans the peer).
		if err != nil || len(locator) == 0 {
			if err != nil {
				log.Debugf("Received error from CreateBlockLocatorFromPruningPoint: %s", err)
			}
			return protocolerrors.Errorf(true, "couldn't build a block "+
				"locator between the pruning point and %s", highHash)
		}

		err = flow.sendBlockLocator(locator)
		if err != nil {
			return err
		}
	}
}

// receiveGetBlockLocator dequeues the next message and extracts the requested
// highHash and locator size limit from it.
func (flow *handleRequestBlockLocatorFlow) receiveGetBlockLocator() (highHash *externalapi.DomainHash, limit uint32, err error) {

	message, err := flow.incomingRoute.Dequeue()
	if err != nil {
		return nil, 0, err
	}
	msgGetBlockLocator :=
message.(*appmessage.MsgRequestBlockLocator) + + return msgGetBlockLocator.HighHash, msgGetBlockLocator.Limit, nil +} + +func (flow *handleRequestBlockLocatorFlow) sendBlockLocator(locator externalapi.BlockLocator) error { + msgBlockLocator := appmessage.NewMsgBlockLocator(locator) + err := flow.outgoingRoute.Enqueue(msgBlockLocator) + if err != nil { + return err + } + return nil +} diff --git a/app/protocol/flows/v5/blockrelay/handle_request_headers.go b/app/protocol/flows/v5/blockrelay/handle_request_headers.go new file mode 100644 index 0000000..87c220e --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_request_headers.go @@ -0,0 +1,134 @@ +package blockrelay + +import ( + "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// This constant must be equal at both syncer and syncee. Therefore, never (!!) change this constant unless a new p2p +// version is introduced. See `TestIBDBatchSizeLessThanRouteCapacity` as well. +const ibdBatchSize = 99 + +// RequestHeadersContext is the interface for the context needed for the HandleRequestHeaders flow. 
+type RequestHeadersContext interface { + Domain() domain.Domain +} + +type handleRequestHeadersFlow struct { + RequestHeadersContext + incomingRoute, outgoingRoute *router.Route + peer *peer.Peer +} + +// HandleRequestHeaders handles RequestHeaders messages +func HandleRequestHeaders(context RequestHeadersContext, incomingRoute *router.Route, + outgoingRoute *router.Route, peer *peer.Peer) error { + + flow := &handleRequestHeadersFlow{ + RequestHeadersContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + } + return flow.start() +} + +func (flow *handleRequestHeadersFlow) start() error { + for { + lowHash, highHash, err := receiveRequestHeaders(flow.incomingRoute) + if err != nil { + return err + } + log.Debugf("Received requestHeaders with lowHash: %s, highHash: %s", lowHash, highHash) + + consensus := flow.Domain().Consensus() + + lowHashInfo, err := consensus.GetBlockInfo(lowHash) + if err != nil { + return err + } + if !lowHashInfo.HasHeader() { + return protocolerrors.Errorf(true, "Block %s does not exist", lowHash) + } + + highHashInfo, err := consensus.GetBlockInfo(highHash) + if err != nil { + return err + } + if !highHashInfo.HasHeader() { + return protocolerrors.Errorf(true, "Block %s does not exist", highHash) + } + + isLowSelectedAncestorOfHigh, err := consensus.IsInSelectedParentChainOf(lowHash, highHash) + if err != nil { + return err + } + if !isLowSelectedAncestorOfHigh { + return protocolerrors.Errorf(true, "Expected %s to be on the selected chain of %s", + lowHash, highHash) + } + + for !lowHash.Equal(highHash) { + log.Debugf("Getting block headers between %s and %s to %s", lowHash, highHash, flow.peer) + + // GetHashesBetween is a relatively heavy operation so we limit it + // in order to avoid locking the consensus for too long + // maxBlocks MUST be >= MergeSetSizeLimit + 1 + const maxBlocks = 1 << 10 + blockHashes, _, err := consensus.GetHashesBetween(lowHash, highHash, maxBlocks) + if err != nil { + 
return err + } + log.Debugf("Got %d header hashes above lowHash %s", len(blockHashes), lowHash) + + blockHeaders := make([]*appmessage.MsgBlockHeader, len(blockHashes)) + for i, blockHash := range blockHashes { + blockHeader, err := consensus.GetBlockHeader(blockHash) + if err != nil { + return err + } + blockHeaders[i] = appmessage.DomainBlockHeaderToBlockHeader(blockHeader) + } + + blockHeadersMessage := appmessage.NewBlockHeadersMessage(blockHeaders) + err = flow.outgoingRoute.Enqueue(blockHeadersMessage) + if err != nil { + return err + } + + message, err := flow.incomingRoute.Dequeue() + if err != nil { + return err + } + if _, ok := message.(*appmessage.MsgRequestNextHeaders); !ok { + return protocolerrors.Errorf(true, "received unexpected message type. "+ + "expected: %s, got: %s", appmessage.CmdRequestNextHeaders, message.Command()) + } + + // The next lowHash is the last element in blockHashes + lowHash = blockHashes[len(blockHashes)-1] + } + + err = flow.outgoingRoute.Enqueue(appmessage.NewMsgDoneHeaders()) + if err != nil { + return err + } + } +} + +func receiveRequestHeaders(incomingRoute *router.Route) (lowHash *externalapi.DomainHash, + highHash *externalapi.DomainHash, err error) { + + message, err := incomingRoute.Dequeue() + if err != nil { + return nil, nil, err + } + msgRequestIBDBlocks := message.(*appmessage.MsgRequestHeaders) + + return msgRequestIBDBlocks.LowHash, msgRequestIBDBlocks.HighHash, nil +} diff --git a/app/protocol/flows/v5/blockrelay/handle_request_pruning_point_utxo_set.go b/app/protocol/flows/v5/blockrelay/handle_request_pruning_point_utxo_set.go new file mode 100644 index 0000000..993a980 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/handle_request_pruning_point_utxo_set.go @@ -0,0 +1,141 @@ +package blockrelay + +import ( + "errors" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + 
"github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleRequestPruningPointUTXOSetContext is the interface for the context needed for the HandleRequestPruningPointUTXOSet flow. +type HandleRequestPruningPointUTXOSetContext interface { + Domain() domain.Domain +} + +type handleRequestPruningPointUTXOSetFlow struct { + HandleRequestPruningPointUTXOSetContext + incomingRoute, outgoingRoute *router.Route +} + +// HandleRequestPruningPointUTXOSet listens to appmessage.MsgRequestPruningPointUTXOSet messages and sends +// the pruning point UTXO set and block body. +func HandleRequestPruningPointUTXOSet(context HandleRequestPruningPointUTXOSetContext, incomingRoute, + outgoingRoute *router.Route) error { + + flow := &handleRequestPruningPointUTXOSetFlow{ + HandleRequestPruningPointUTXOSetContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + } + + return flow.start() +} + +func (flow *handleRequestPruningPointUTXOSetFlow) start() error { + for { + msgRequestPruningPointUTXOSet, err := flow.waitForRequestPruningPointUTXOSetMessages() + if err != nil { + return err + } + + err = flow.handleRequestPruningPointUTXOSetMessage(msgRequestPruningPointUTXOSet) + if err != nil { + return err + } + } +} + +func (flow *handleRequestPruningPointUTXOSetFlow) handleRequestPruningPointUTXOSetMessage( + msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error { + + onEnd := logger.LogAndMeasureExecutionTime(log, "handleRequestPruningPointUTXOSetFlow") + defer onEnd() + + log.Debugf("Got request for pruning point UTXO set") + + return flow.sendPruningPointUTXOSet(msgRequestPruningPointUTXOSet) +} + +func (flow *handleRequestPruningPointUTXOSetFlow) 
waitForRequestPruningPointUTXOSetMessages() ( + *appmessage.MsgRequestPruningPointUTXOSet, error) { + + message, err := flow.incomingRoute.Dequeue() + if err != nil { + return nil, err + } + msgRequestPruningPointUTXOSet, ok := message.(*appmessage.MsgRequestPruningPointUTXOSet) + if !ok { + // TODO: Change to shouldBan: true once we fix the bug of getting redundant messages + return nil, protocolerrors.Errorf(false, "received unexpected message type. "+ + "expected: %s, got: %s", appmessage.CmdRequestPruningPointUTXOSet, message.Command()) + } + return msgRequestPruningPointUTXOSet, nil +} + +func (flow *handleRequestPruningPointUTXOSetFlow) sendPruningPointUTXOSet( + msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error { + + // Send the UTXO set in `step`-sized chunks + const step = 1000 + var fromOutpoint *externalapi.DomainOutpoint + chunksSent := 0 + for { + pruningPointUTXOs, err := flow.Domain().Consensus().GetPruningPointUTXOs( + msgRequestPruningPointUTXOSet.PruningPointHash, fromOutpoint, step) + if err != nil { + if errors.Is(err, ruleerrors.ErrWrongPruningPointHash) { + return flow.outgoingRoute.Enqueue(appmessage.NewMsgUnexpectedPruningPoint()) + } + } + + log.Debugf("Retrieved %d UTXOs for pruning block %s", + len(pruningPointUTXOs), msgRequestPruningPointUTXOSet.PruningPointHash) + + outpointAndUTXOEntryPairs := + appmessage.DomainOutpointAndUTXOEntryPairsToOutpointAndUTXOEntryPairs(pruningPointUTXOs) + err = flow.outgoingRoute.Enqueue(appmessage.NewMsgPruningPointUTXOSetChunk(outpointAndUTXOEntryPairs)) + if err != nil { + return err + } + + finished := len(pruningPointUTXOs) < step + if finished && chunksSent%ibdBatchSize != 0 { + log.Debugf("Finished sending UTXOs for pruning block %s", + msgRequestPruningPointUTXOSet.PruningPointHash) + + return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks()) + } + + if len(pruningPointUTXOs) > 0 { + fromOutpoint = 
pruningPointUTXOs[len(pruningPointUTXOs)-1].Outpoint + } + chunksSent++ + + // Wait for the peer to request more chunks every `ibdBatchSize` chunks + if chunksSent%ibdBatchSize == 0 { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return err + } + _, ok := message.(*appmessage.MsgRequestNextPruningPointUTXOSetChunk) + if !ok { + // TODO: Change to shouldBan: true once we fix the bug of getting redundant messages + return protocolerrors.Errorf(false, "received unexpected message type. "+ + "expected: %s, got: %s", appmessage.CmdRequestNextPruningPointUTXOSetChunk, message.Command()) + } + + if finished { + log.Debugf("Finished sending UTXOs for pruning block %s", + msgRequestPruningPointUTXOSet.PruningPointHash) + + return flow.outgoingRoute.Enqueue(appmessage.NewMsgDonePruningPointUTXOSetChunks()) + } + } + } +} diff --git a/app/protocol/flows/v5/blockrelay/ibd.go b/app/protocol/flows/v5/blockrelay/ibd.go new file mode 100644 index 0000000..c1655bf --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/ibd.go @@ -0,0 +1,752 @@ +package blockrelay + +import ( + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// IBDContext is the interface for the context needed for the HandleIBD flow. 
+type IBDContext interface { + Domain() domain.Domain + Config() *config.Config + OnNewBlock(block *externalapi.DomainBlock) error + OnNewBlockTemplate() error + OnPruningPointUTXOSetOverride() error + IsIBDRunning() bool + TrySetIBDRunning(ibdPeer *peerpkg.Peer) bool + UnsetIBDRunning() + IsRecoverableError(err error) bool +} + +type handleIBDFlow struct { + IBDContext + incomingRoute, outgoingRoute *router.Route + peer *peerpkg.Peer +} + +// HandleIBD handles IBD +func HandleIBD(context IBDContext, incomingRoute *router.Route, outgoingRoute *router.Route, + peer *peerpkg.Peer) error { + + flow := &handleIBDFlow{ + IBDContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + } + return flow.start() +} + +func (flow *handleIBDFlow) start() error { + for { + // Wait for IBD requests triggered by other flows + block, ok := <-flow.peer.IBDRequestChannel() + if !ok { + return nil + } + err := flow.runIBDIfNotRunning(block) + if err != nil { + return err + } + } +} + +func (flow *handleIBDFlow) runIBDIfNotRunning(block *externalapi.DomainBlock) error { + wasIBDNotRunning := flow.TrySetIBDRunning(flow.peer) + if !wasIBDNotRunning { + log.Debugf("IBD is already running") + return nil + } + + isFinishedSuccessfully := false + var err error + defer func() { + flow.UnsetIBDRunning() + flow.logIBDFinished(isFinishedSuccessfully, err) + }() + + relayBlockHash := consensushashing.BlockHash(block) + + log.Infof("IBD started with peer %s and relayBlockHash %s", flow.peer, relayBlockHash) + log.Infof("Syncing blocks up to %s", relayBlockHash) + log.Infof("Trying to find highest known syncer chain block from peer %s with relay hash %s", flow.peer, relayBlockHash) + + syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, err := flow.negotiateMissingSyncerChainSegment() + if err != nil { + return err + } + + shouldDownloadHeadersProof, shouldSync, err := flow.shouldSyncAndShouldDownloadHeadersProof( + block, highestKnownSyncerChainHash) + 
if err != nil { + return err + } + + if !shouldSync { + return nil + } + + if shouldDownloadHeadersProof { + log.Infof("Starting IBD with headers proof") + err = flow.ibdWithHeadersProof(syncerHeaderSelectedTipHash, relayBlockHash, block.Header.DAAScore()) + if err != nil { + return err + } + } else { + if flow.Config().NetParams().DisallowDirectBlocksOnTopOfGenesis && !flow.Config().AllowSubmitBlockWhenNotSynced { + isGenesisVirtualSelectedParent, err := flow.isGenesisVirtualSelectedParent() + if err != nil { + return err + } + + if isGenesisVirtualSelectedParent { + log.Infof("Cannot IBD to %s because it won't change the pruning point. The node needs to IBD "+ + "to the recent pruning point before normal operation can resume.", relayBlockHash) + return nil + } + } + + err = flow.syncPruningPointFutureHeaders( + flow.Domain().Consensus(), + syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash, block.Header.DAAScore()) + if err != nil { + return err + } + } + + // We start by syncing missing bodies over the syncer selected chain + err = flow.syncMissingBlockBodies(syncerHeaderSelectedTipHash) + if err != nil { + return err + } + relayBlockInfo, err := flow.Domain().Consensus().GetBlockInfo(relayBlockHash) + if err != nil { + return err + } + // Relay block might be in the anticone of syncer selected tip, thus + // check his chain for missing bodies as well. + // Note: this operation can be slightly optimized to avoid the full chain search since relay block + // is in syncer virtual mergeset which has bounded size. 
+ if relayBlockInfo.BlockStatus == externalapi.StatusHeaderOnly { + err = flow.syncMissingBlockBodies(relayBlockHash) + if err != nil { + return err + } + } + + log.Debugf("Finished syncing blocks up to %s", relayBlockHash) + isFinishedSuccessfully = true + return nil +} + +func (flow *handleIBDFlow) negotiateMissingSyncerChainSegment() (*externalapi.DomainHash, *externalapi.DomainHash, error) { + /* + Algorithm: + Request full selected chain block locator from syncer + Find the highest block which we know + Repeat the locator step over the new range until finding max(past(syncee) \cap chain(syncer)) + */ + + // Empty hashes indicate that the full chain is queried + locatorHashes, err := flow.getSyncerChainBlockLocator(nil, nil, common.DefaultTimeout) + if err != nil { + return nil, nil, err + } + if len(locatorHashes) == 0 { + return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+ + "to contain at least one element") + } + log.Debugf("IBD chain negotiation with peer %s started and received %d hashes (%s, %s)", flow.peer, + len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1]) + syncerHeaderSelectedTipHash := locatorHashes[0] + var highestKnownSyncerChainHash *externalapi.DomainHash + chainNegotiationRestartCounter := 0 + chainNegotiationZoomCounts := 0 + initialLocatorLen := len(locatorHashes) + pruningPoint, err := flow.Domain().Consensus().PruningPoint() + if err != nil { + return nil, nil, err + } + + for { + var lowestUnknownSyncerChainHash, currentHighestKnownSyncerChainHash *externalapi.DomainHash + for _, syncerChainHash := range locatorHashes { + info, err := flow.Domain().Consensus().GetBlockInfo(syncerChainHash) + if err != nil { + return nil, nil, err + } + if info.Exists { + if info.BlockStatus == externalapi.StatusInvalid { + return nil, nil, protocolerrors.Errorf(true, "Sent invalid chain block %s", syncerChainHash) + } + + isPruningPointOnSyncerChain, err := 
flow.Domain().Consensus().IsInSelectedParentChainOf(pruningPoint, syncerChainHash) + if err != nil { + log.Errorf("Error checking isPruningPointOnSyncerChain: %s", err) + } + + // We're only interested in syncer chain blocks that have our pruning + // point in their selected chain. Otherwise, it means one of the following: + // 1) We will not switch the virtual selected chain to the syncers chain since it will violate finality + // (hence we can ignore it unless merged by others). + // 2) syncerChainHash is actually in the past of our pruning point so there's no + // point in syncing from it. + if err == nil && isPruningPointOnSyncerChain { + currentHighestKnownSyncerChainHash = syncerChainHash + break + } + } + lowestUnknownSyncerChainHash = syncerChainHash + } + // No unknown blocks, break. Note this can only happen in the first iteration + if lowestUnknownSyncerChainHash == nil { + highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash + break + } + // No shared block, break + if currentHighestKnownSyncerChainHash == nil { + highestKnownSyncerChainHash = nil + break + } + // No point in zooming further + if len(locatorHashes) == 1 { + highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash + break + } + // Zoom in + locatorHashes, err = flow.getSyncerChainBlockLocator( + lowestUnknownSyncerChainHash, + currentHighestKnownSyncerChainHash, time.Second*10) + if err != nil { + return nil, nil, err + } + if len(locatorHashes) > 0 { + if !locatorHashes[0].Equal(lowestUnknownSyncerChainHash) || + !locatorHashes[len(locatorHashes)-1].Equal(currentHighestKnownSyncerChainHash) { + return nil, nil, protocolerrors.Errorf(true, "Expecting the high and low "+ + "hashes to match the locator bounds") + } + + chainNegotiationZoomCounts++ + log.Debugf("IBD chain negotiation with peer %s zoomed in (%d) and received %d hashes (%s, %s)", flow.peer, + chainNegotiationZoomCounts, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1]) + + if 
len(locatorHashes) == 2 { + // We found our search target + highestKnownSyncerChainHash = currentHighestKnownSyncerChainHash + break + } + + if chainNegotiationZoomCounts > initialLocatorLen*2 { + // Since the zoom-in always queries two consecutive entries in the previous locator, it is + // expected to decrease in size at least every two iterations + return nil, nil, protocolerrors.Errorf(true, + "IBD chain negotiation: Number of zoom-in steps %d exceeded the upper bound of 2*%d", + chainNegotiationZoomCounts, initialLocatorLen) + } + + } else { // Empty locator signals a restart due to chain changes + chainNegotiationZoomCounts = 0 + chainNegotiationRestartCounter++ + if chainNegotiationRestartCounter > 32 { + return nil, nil, protocolerrors.Errorf(false, + "IBD chain negotiation with syncer %s exceeded restart limit %d", flow.peer, chainNegotiationRestartCounter) + } + log.Warnf("IBD chain negotiation with syncer %s restarted %d times", flow.peer, chainNegotiationRestartCounter) + + // An empty locator signals that the syncer chain was modified and no longer contains one of + // the queried hashes, so we restart the search. 
We use a shorter timeout here to avoid a timeout attack + locatorHashes, err = flow.getSyncerChainBlockLocator(nil, nil, time.Second*10) + if err != nil { + return nil, nil, err + } + if len(locatorHashes) == 0 { + return nil, nil, protocolerrors.Errorf(true, "Expecting initial syncer chain block locator "+ + "to contain at least one element") + } + log.Infof("IBD chain negotiation with peer %s restarted (%d) and received %d hashes (%s, %s)", flow.peer, + chainNegotiationRestartCounter, len(locatorHashes), locatorHashes[0], locatorHashes[len(locatorHashes)-1]) + + initialLocatorLen = len(locatorHashes) + // Reset syncer's header selected tip + syncerHeaderSelectedTipHash = locatorHashes[0] + } + } + + log.Infof("Found highest known syncer chain block %s from peer %s", + highestKnownSyncerChainHash, flow.peer) + + return syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, nil +} + +func (flow *handleIBDFlow) isGenesisVirtualSelectedParent() (bool, error) { + virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent() + if err != nil { + return false, err + } + + return virtualSelectedParent.Equal(flow.Config().NetParams().GenesisHash), nil +} + +func (flow *handleIBDFlow) logIBDFinished(isFinishedSuccessfully bool, err error) { + successString := "successfully" + if !isFinishedSuccessfully { + if err != nil { + successString = fmt.Sprintf("(interrupted: %s)", err) + } else { + successString = fmt.Sprintf("(interrupted)") + } + } + log.Infof("IBD with peer %s finished %s", flow.peer, successString) +} + +func (flow *handleIBDFlow) getSyncerChainBlockLocator( + highHash, lowHash *externalapi.DomainHash, timeout time.Duration) ([]*externalapi.DomainHash, error) { + + requestIbdChainBlockLocatorMessage := appmessage.NewMsgIBDRequestChainBlockLocator(highHash, lowHash) + err := flow.outgoingRoute.Enqueue(requestIbdChainBlockLocatorMessage) + if err != nil { + return nil, err + } + message, err := flow.incomingRoute.DequeueWithTimeout(timeout) 
+ if err != nil { + return nil, err + } + switch message := message.(type) { + case *appmessage.MsgIBDChainBlockLocator: + if len(message.BlockLocatorHashes) > 64 { + return nil, protocolerrors.Errorf(true, + "Got block locator of size %d>64 while expecting locator to have size "+ + "which is logarithmic in DAG size (which should never exceed 2^64)", + len(message.BlockLocatorHashes)) + } + return message.BlockLocatorHashes, nil + default: + return nil, protocolerrors.Errorf(true, "received unexpected message type. "+ + "expected: %s, got: %s", appmessage.CmdIBDChainBlockLocator, message.Command()) + } +} + +func (flow *handleIBDFlow) syncPruningPointFutureHeaders(consensus externalapi.Consensus, + syncerHeaderSelectedTipHash, highestKnownSyncerChainHash, relayBlockHash *externalapi.DomainHash, + highBlockDAAScoreHint uint64) error { + + log.Infof("Downloading headers from %s", flow.peer) + + if highestKnownSyncerChainHash.Equal(syncerHeaderSelectedTipHash) { + // No need to get syncer selected tip headers, so sync relay past and return + return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash) + } + + err := flow.sendRequestHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash) + if err != nil { + return err + } + + highestSharedBlockHeader, err := consensus.GetBlockHeader(highestKnownSyncerChainHash) + if err != nil { + return err + } + progressReporter := newIBDProgressReporter(highestSharedBlockHeader.DAAScore(), highBlockDAAScoreHint, "block headers") + + // Keep a short queue of BlockHeadersMessages so that there's + // never a moment when the node is not validating and inserting + // headers + blockHeadersMessageChan := make(chan *appmessage.BlockHeadersMessage, 2) + errChan := make(chan error) + spawn("handleRelayInvsFlow-syncPruningPointFutureHeaders", func() { + for { + blockHeadersMessage, doneIBD, err := flow.receiveHeaders() + if err != nil { + errChan <- err + return + } + if doneIBD { + 
close(blockHeadersMessageChan) + return + } + if len(blockHeadersMessage.BlockHeaders) == 0 { + // The syncer should have sent a done message if the search completed, and not an empty list + errChan <- protocolerrors.Errorf(true, "Received an empty headers message from peer %s", flow.peer) + return + } + + blockHeadersMessageChan <- blockHeadersMessage + + err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextHeaders()) + if err != nil { + errChan <- err + return + } + } + }) + + for { + select { + case ibdBlocksMessage, ok := <-blockHeadersMessageChan: + if !ok { + return flow.syncMissingRelayPast(consensus, syncerHeaderSelectedTipHash, relayBlockHash) + } + for _, header := range ibdBlocksMessage.BlockHeaders { + err = flow.processHeader(consensus, header) + if err != nil { + return err + } + } + + lastReceivedHeader := ibdBlocksMessage.BlockHeaders[len(ibdBlocksMessage.BlockHeaders)-1] + progressReporter.reportProgress(len(ibdBlocksMessage.BlockHeaders), lastReceivedHeader.DAAScore) + case err := <-errChan: + return err + } + } +} + +func (flow *handleIBDFlow) syncMissingRelayPast(consensus externalapi.Consensus, syncerHeaderSelectedTipHash *externalapi.DomainHash, relayBlockHash *externalapi.DomainHash) error { + // Finished downloading syncer selected tip blocks, + // check if we already have the triggering relayBlockHash + relayBlockInfo, err := consensus.GetBlockInfo(relayBlockHash) + if err != nil { + return err + } + if !relayBlockInfo.Exists { + // Send a special header request for the selected tip anticone. This is expected to + // be a small set, as it is bounded to the size of virtual's mergeset. 
+ err = flow.sendRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash) + if err != nil { + return err + } + anticoneHeadersMessage, anticoneDone, err := flow.receiveHeaders() + if err != nil { + return err + } + if anticoneDone { + return protocolerrors.Errorf(true, + "Expected one anticone header chunk for past(%s) cap anticone(%s) but got zero", + relayBlockHash, syncerHeaderSelectedTipHash) + } + _, anticoneDone, err = flow.receiveHeaders() + if err != nil { + return err + } + if !anticoneDone { + return protocolerrors.Errorf(true, + "Expected only one anticone header chunk for past(%s) cap anticone(%s)", + relayBlockHash, syncerHeaderSelectedTipHash) + } + for _, header := range anticoneHeadersMessage.BlockHeaders { + err = flow.processHeader(consensus, header) + if err != nil { + return err + } + } + } + + // If the relayBlockHash has still not been received, the peer is misbehaving + relayBlockInfo, err = consensus.GetBlockInfo(relayBlockHash) + if err != nil { + return err + } + if !relayBlockInfo.Exists { + return protocolerrors.Errorf(true, "did not receive "+ + "relayBlockHash block %s from peer %s during block download", relayBlockHash, flow.peer) + } + return nil +} + +func (flow *handleIBDFlow) sendRequestAnticone( + syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash) error { + + msgRequestAnticone := appmessage.NewMsgRequestAnticone(syncerHeaderSelectedTipHash, relayBlockHash) + return flow.outgoingRoute.Enqueue(msgRequestAnticone) +} + +func (flow *handleIBDFlow) sendRequestHeaders( + highestKnownSyncerChainHash, syncerHeaderSelectedTipHash *externalapi.DomainHash) error { + + msgRequestHeaders := appmessage.NewMsgRequstHeaders(highestKnownSyncerChainHash, syncerHeaderSelectedTipHash) + return flow.outgoingRoute.Enqueue(msgRequestHeaders) +} + +func (flow *handleIBDFlow) receiveHeaders() (msgIBDBlock *appmessage.BlockHeadersMessage, doneHeaders bool, err error) { + message, err := 
flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, false, err + } + switch message := message.(type) { + case *appmessage.BlockHeadersMessage: + return message, false, nil + case *appmessage.MsgDoneHeaders: + return nil, true, nil + default: + return nil, false, + protocolerrors.Errorf(true, "received unexpected message type. "+ + "expected: %s or %s, got: %s", + appmessage.CmdBlockHeaders, + appmessage.CmdDoneHeaders, + message.Command()) + } +} + +func (flow *handleIBDFlow) processHeader(consensus externalapi.Consensus, msgBlockHeader *appmessage.MsgBlockHeader) error { + header := appmessage.BlockHeaderToDomainBlockHeader(msgBlockHeader) + block := &externalapi.DomainBlock{ + Header: header, + Transactions: nil, + } + + blockHash := consensushashing.BlockHash(block) + blockInfo, err := consensus.GetBlockInfo(blockHash) + if err != nil { + return err + } + if blockInfo.Exists { + log.Debugf("Block header %s is already in the DAG. Skipping...", blockHash) + return nil + } + err = consensus.ValidateAndInsertBlock(block, false) + if err != nil { + if !errors.As(err, &ruleerrors.RuleError{}) { + return errors.Wrapf(err, "failed to process header %s during IBD", blockHash) + } + + if errors.Is(err, ruleerrors.ErrDuplicateBlock) { + log.Debugf("Skipping block header %s as it is a duplicate", blockHash) + } else { + log.Infof("Rejected block header %s from %s during IBD: %s", blockHash, flow.peer, err) + return protocolerrors.Wrapf(true, err, "got invalid block header %s during IBD", blockHash) + } + } + + return nil +} + +func (flow *handleIBDFlow) validatePruningPointFutureHeaderTimestamps() error { + headerSelectedTipHash, err := flow.Domain().StagingConsensus().GetHeadersSelectedTip() + if err != nil { + return err + } + headerSelectedTipHeader, err := flow.Domain().StagingConsensus().GetBlockHeader(headerSelectedTipHash) + if err != nil { + return err + } + headerSelectedTipTimestamp := 
headerSelectedTipHeader.TimeInMilliseconds() + + currentSelectedTipHash, err := flow.Domain().Consensus().GetHeadersSelectedTip() + if err != nil { + return err + } + currentSelectedTipHeader, err := flow.Domain().Consensus().GetBlockHeader(currentSelectedTipHash) + if err != nil { + return err + } + currentSelectedTipTimestamp := currentSelectedTipHeader.TimeInMilliseconds() + + if headerSelectedTipTimestamp < currentSelectedTipTimestamp { + return protocolerrors.Errorf(false, "the timestamp of the candidate selected "+ + "tip is smaller than the current selected tip") + } + + minTimestampDifferenceInMilliseconds := (10 * time.Minute).Milliseconds() + if headerSelectedTipTimestamp-currentSelectedTipTimestamp < minTimestampDifferenceInMilliseconds { + return protocolerrors.Errorf(false, "difference between the timestamps of "+ + "the current pruning point and the candidate pruning point is too small. Aborting IBD...") + } + return nil +} + +func (flow *handleIBDFlow) receiveAndInsertPruningPointUTXOSet( + consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (bool, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "receiveAndInsertPruningPointUTXOSet") + defer onEnd() + + receivedChunkCount := 0 + receivedUTXOCount := 0 + for { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return false, err + } + + switch message := message.(type) { + case *appmessage.MsgPruningPointUTXOSetChunk: + receivedUTXOCount += len(message.OutpointAndUTXOEntryPairs) + domainOutpointAndUTXOEntryPairs := + appmessage.OutpointAndUTXOEntryPairsToDomainOutpointAndUTXOEntryPairs(message.OutpointAndUTXOEntryPairs) + + err := consensus.AppendImportedPruningPointUTXOs(domainOutpointAndUTXOEntryPairs) + if err != nil { + return false, err + } + + receivedChunkCount++ + if receivedChunkCount%ibdBatchSize == 0 { + log.Infof("Received %d UTXO set chunks so far, totaling in %d UTXOs", + receivedChunkCount, 
receivedUTXOCount) + + requestNextPruningPointUTXOSetChunkMessage := appmessage.NewMsgRequestNextPruningPointUTXOSetChunk() + err := flow.outgoingRoute.Enqueue(requestNextPruningPointUTXOSetChunkMessage) + if err != nil { + return false, err + } + } + + case *appmessage.MsgDonePruningPointUTXOSetChunks: + log.Infof("Finished receiving the UTXO set. Total UTXOs: %d", receivedUTXOCount) + return true, nil + + case *appmessage.MsgUnexpectedPruningPoint: + log.Infof("Could not receive the next UTXO chunk because the pruning point %s "+ + "is no longer the pruning point of peer %s", pruningPointHash, flow.peer) + return false, nil + + default: + return false, protocolerrors.Errorf(true, "received unexpected message type. "+ + "expected: %s or %s or %s, got: %s", appmessage.CmdPruningPointUTXOSetChunk, + appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdUnexpectedPruningPoint, message.Command(), + ) + } + } +} + +func (flow *handleIBDFlow) syncMissingBlockBodies(highHash *externalapi.DomainHash) error { + hashes, err := flow.Domain().Consensus().GetMissingBlockBodyHashes(highHash) + if err != nil { + return err + } + if len(hashes) == 0 { + // Blocks can be inserted inside the DAG during IBD if those were requested before IBD started. + // In rare cases, all the IBD blocks might be already inserted by the time we reach this point. + // In these cases - GetMissingBlockBodyHashes would return an empty array. 
+ log.Debugf("No missing block body hashes found.") + return nil + } + + lowBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[0]) + if err != nil { + return err + } + highBlockHeader, err := flow.Domain().Consensus().GetBlockHeader(hashes[len(hashes)-1]) + if err != nil { + return err + } + progressReporter := newIBDProgressReporter(lowBlockHeader.DAAScore(), highBlockHeader.DAAScore(), "blocks") + highestProcessedDAAScore := lowBlockHeader.DAAScore() + + // If the IBD is small, we want to update the virtual after each block in order to avoid complications and possible bugs. + updateVirtual, err := flow.Domain().Consensus().IsNearlySynced() + if err != nil { + return err + } + + for offset := 0; offset < len(hashes); offset += ibdBatchSize { + var hashesToRequest []*externalapi.DomainHash + if offset+ibdBatchSize < len(hashes) { + hashesToRequest = hashes[offset : offset+ibdBatchSize] + } else { + hashesToRequest = hashes[offset:] + } + + err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestIBDBlocks(hashesToRequest)) + if err != nil { + return err + } + + for _, expectedHash := range hashesToRequest { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return err + } + + msgIBDBlock, ok := message.(*appmessage.MsgIBDBlock) + if !ok { + return protocolerrors.Errorf(true, "received unexpected message type. 
"+ + "expected: %s, got: %s", appmessage.CmdIBDBlock, message.Command()) + } + + block := appmessage.MsgBlockToDomainBlock(msgIBDBlock.MsgBlock) + blockHash := consensushashing.BlockHash(block) + if !expectedHash.Equal(blockHash) { + return protocolerrors.Errorf(true, "expected block %s but got %s", expectedHash, blockHash) + } + + err = flow.banIfBlockIsHeaderOnly(block) + if err != nil { + return err + } + + err = flow.Domain().Consensus().ValidateAndInsertBlock(block, updateVirtual) + if err != nil { + if errors.Is(err, ruleerrors.ErrDuplicateBlock) { + log.Debugf("Skipping IBD Block %s as it has already been added to the DAG", blockHash) + continue + } + return protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "invalid block %s", blockHash) + } + err = flow.OnNewBlock(block) + if err != nil { + return err + } + + highestProcessedDAAScore = block.Header.DAAScore() + } + + progressReporter.reportProgress(len(hashesToRequest), highestProcessedDAAScore) + } + + // We need to resolve virtual only if it wasn't updated while syncing block bodies + if !updateVirtual { + err := flow.resolveVirtual(highestProcessedDAAScore) + if err != nil { + return err + } + } + + return flow.OnNewBlockTemplate() +} + +func (flow *handleIBDFlow) banIfBlockIsHeaderOnly(block *externalapi.DomainBlock) error { + if len(block.Transactions) == 0 { + return protocolerrors.Errorf(true, "sent header of %s block where expected block with body", + consensushashing.BlockHash(block)) + } + + return nil +} + +func (flow *handleIBDFlow) resolveVirtual(estimatedVirtualDAAScoreTarget uint64) error { + err := flow.Domain().Consensus().ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) { + var percents int + if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 { + percents = 100 + } else { + percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100) + } + if percents < 0 { + percents = 0 + } 
else if percents > 100 { + percents = 100 + } + log.Infof("Resolving virtual. Estimated progress: %d%%", percents) + }) + if err != nil { + return err + } + + log.Infof("Resolved virtual") + return nil +} diff --git a/app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go b/app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go new file mode 100644 index 0000000..bd6d1d9 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/ibd_progress_reporter.go @@ -0,0 +1,45 @@ +package blockrelay + +type ibdProgressReporter struct { + lowDAAScore uint64 + highDAAScore uint64 + objectName string + totalDAAScoreDifference uint64 + lastReportedProgressPercent int + processed int +} + +func newIBDProgressReporter(lowDAAScore uint64, highDAAScore uint64, objectName string) *ibdProgressReporter { + if highDAAScore <= lowDAAScore { + // Avoid a zero or negative diff + highDAAScore = lowDAAScore + 1 + } + return &ibdProgressReporter{ + lowDAAScore: lowDAAScore, + highDAAScore: highDAAScore, + objectName: objectName, + totalDAAScoreDifference: highDAAScore - lowDAAScore, + lastReportedProgressPercent: 0, + processed: 0, + } +} + +func (ipr *ibdProgressReporter) reportProgress(processedDelta int, highestProcessedDAAScore uint64) { + ipr.processed += processedDelta + + // Avoid exploding numbers in the percentage report, since the original `highDAAScore` might have been only a hint + if highestProcessedDAAScore > ipr.highDAAScore { + ipr.highDAAScore = highestProcessedDAAScore + 1 // + 1 for keeping it at 99% + ipr.totalDAAScoreDifference = ipr.highDAAScore - ipr.lowDAAScore + } + relativeDAAScore := uint64(0) + if highestProcessedDAAScore > ipr.lowDAAScore { + // Avoid a negative diff + relativeDAAScore = highestProcessedDAAScore - ipr.lowDAAScore + } + progressPercent := int((float64(relativeDAAScore) / float64(ipr.totalDAAScoreDifference)) * 100) + if progressPercent > ipr.lastReportedProgressPercent { + log.Infof("IBD: Processed %d %s (%d%%)", ipr.processed, ipr.objectName, 
progressPercent) + ipr.lastReportedProgressPercent = progressPercent + } +} diff --git a/app/protocol/flows/v5/blockrelay/ibd_with_headers_proof.go b/app/protocol/flows/v5/blockrelay/ibd_with_headers_proof.go new file mode 100644 index 0000000..0ee2b64 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/ibd_with_headers_proof.go @@ -0,0 +1,445 @@ +package blockrelay + +import ( + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +func (flow *handleIBDFlow) ibdWithHeadersProof( + syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, highBlockDAAScore uint64) error { + err := flow.Domain().InitStagingConsensusWithoutGenesis() + if err != nil { + return err + } + + err = flow.downloadHeadersAndPruningUTXOSet(syncerHeaderSelectedTipHash, relayBlockHash, highBlockDAAScore) + if err != nil { + if !flow.IsRecoverableError(err) { + return err + } + + log.Infof("IBD with pruning proof from %s was unsuccessful. Deleting the staging consensus. (%s)", flow.peer, err) + deleteStagingConsensusErr := flow.Domain().DeleteStagingConsensus() + if deleteStagingConsensusErr != nil { + return deleteStagingConsensusErr + } + + return err + } + + log.Infof("Header download stage of IBD with pruning proof completed successfully from %s. 
"+ + "Committing the staging consensus and deleting the previous obsolete one if such exists.", flow.peer) + err = flow.Domain().CommitStagingConsensus() + if err != nil { + return err + } + + err = flow.OnPruningPointUTXOSetOverride() + if err != nil { + return err + } + + return nil +} + +func (flow *handleIBDFlow) shouldSyncAndShouldDownloadHeadersProof( + relayBlock *externalapi.DomainBlock, + highestKnownSyncerChainHash *externalapi.DomainHash) (shouldDownload, shouldSync bool, err error) { + + var highestSharedBlockFound, isPruningPointInSharedBlockChain bool + if highestKnownSyncerChainHash != nil { + blockInfo, err := flow.Domain().Consensus().GetBlockInfo(highestKnownSyncerChainHash) + if err != nil { + return false, false, err + } + + highestSharedBlockFound = blockInfo.HasBody() + pruningPoint, err := flow.Domain().Consensus().PruningPoint() + if err != nil { + return false, false, err + } + + isPruningPointInSharedBlockChain, err = flow.Domain().Consensus().IsInSelectedParentChainOf( + pruningPoint, highestKnownSyncerChainHash) + if err != nil { + return false, false, err + } + } + // Note: in the case where `highestSharedBlockFound == true && isPruningPointInSharedBlockChain == false` + // we might have here info which is relevant to finality conflict decisions. This should be taken into + // account when we improve this aspect. 
+ if !highestSharedBlockFound || !isPruningPointInSharedBlockChain { + hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore, err := flow.checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock) + if err != nil { + return false, false, err + } + + if hasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore { + return true, true, nil + } + + if highestKnownSyncerChainHash == nil { + log.Infof("Stopping IBD since IBD from this node will cause a finality conflict") + return false, false, nil + } + + return false, true, nil + } + + return false, true, nil +} + +func (flow *handleIBDFlow) checkIfHighHashHasMoreBlueWorkThanSelectedTipAndPruningDepthMoreBlueScore(relayBlock *externalapi.DomainBlock) (bool, error) { + virtualSelectedParent, err := flow.Domain().Consensus().GetVirtualSelectedParent() + if err != nil { + return false, err + } + + virtualSelectedTipInfo, err := flow.Domain().Consensus().GetBlockInfo(virtualSelectedParent) + if err != nil { + return false, err + } + + if relayBlock.Header.BlueScore() < virtualSelectedTipInfo.BlueScore+flow.Config().NetParams().PruningDepth() { + return false, nil + } + + return relayBlock.Header.BlueWork().Cmp(virtualSelectedTipInfo.BlueWork) > 0, nil +} + +func (flow *handleIBDFlow) syncAndValidatePruningPointProof() (*externalapi.DomainHash, error) { + log.Infof("Downloading the pruning point proof from %s", flow.peer) + err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointProof()) + if err != nil { + return nil, err + } + message, err := flow.incomingRoute.DequeueWithTimeout(10 * time.Minute) + if err != nil { + return nil, err + } + pruningPointProofMessage, ok := message.(*appmessage.MsgPruningPointProof) + if !ok { + return nil, protocolerrors.Errorf(true, "received unexpected message type. 
"+ + "expected: %s, got: %s", appmessage.CmdPruningPointProof, message.Command()) + } + pruningPointProof := appmessage.MsgPruningPointProofToDomainPruningPointProof(pruningPointProofMessage) + err = flow.Domain().Consensus().ValidatePruningPointProof(pruningPointProof) + if err != nil { + if errors.As(err, &ruleerrors.RuleError{}) { + return nil, protocolerrors.Wrapf(true, err, "pruning point proof validation failed") + } + return nil, err + } + + err = flow.Domain().StagingConsensus().ApplyPruningPointProof(pruningPointProof) + if err != nil { + return nil, err + } + + return consensushashing.HeaderHash(pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1]), nil +} + +func (flow *handleIBDFlow) downloadHeadersAndPruningUTXOSet( + syncerHeaderSelectedTipHash, relayBlockHash *externalapi.DomainHash, + highBlockDAAScore uint64) error { + + proofPruningPoint, err := flow.syncAndValidatePruningPointProof() + if err != nil { + return err + } + + err = flow.syncPruningPointsAndPruningPointAnticone(proofPruningPoint) + if err != nil { + return err + } + + // TODO: Remove this condition once there's more proper way to check finality violation + // in the headers proof. 
+ if proofPruningPoint.Equal(flow.Config().NetParams().GenesisHash) { + return protocolerrors.Errorf(true, "the genesis pruning point violates finality") + } + + err = flow.syncPruningPointFutureHeaders(flow.Domain().StagingConsensus(), + syncerHeaderSelectedTipHash, proofPruningPoint, relayBlockHash, highBlockDAAScore) + if err != nil { + return err + } + + log.Infof("Headers downloaded from peer %s", flow.peer) + + relayBlockInfo, err := flow.Domain().StagingConsensus().GetBlockInfo(relayBlockHash) + if err != nil { + return err + } + + if !relayBlockInfo.Exists { + return protocolerrors.Errorf(true, "the triggering IBD block was not sent") + } + + err = flow.validatePruningPointFutureHeaderTimestamps() + if err != nil { + return err + } + + log.Debugf("Syncing the current pruning point UTXO set") + syncedPruningPointUTXOSetSuccessfully, err := flow.syncPruningPointUTXOSet(flow.Domain().StagingConsensus(), proofPruningPoint) + if err != nil { + return err + } + if !syncedPruningPointUTXOSetSuccessfully { + log.Debugf("Aborting IBD because the pruning point UTXO set failed to sync") + return nil + } + log.Debugf("Finished syncing the current pruning point UTXO set") + return nil +} + +func (flow *handleIBDFlow) syncPruningPointsAndPruningPointAnticone(proofPruningPoint *externalapi.DomainHash) error { + log.Infof("Downloading the past pruning points and the pruning point anticone from %s", flow.peer) + err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointAndItsAnticone()) + if err != nil { + return err + } + + err = flow.validateAndInsertPruningPoints(proofPruningPoint) + if err != nil { + return err + } + + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return err + } + + msgTrustedData, ok := message.(*appmessage.MsgTrustedData) + if !ok { + return protocolerrors.Errorf(true, "received unexpected message type. 
"+ + "expected: %s, got: %s", appmessage.CmdTrustedData, message.Command()) + } + + pruningPointWithMetaData, done, err := flow.receiveBlockWithTrustedData() + if err != nil { + return err + } + + if done { + return protocolerrors.Errorf(true, "got `done` message before receiving the pruning point") + } + + if !pruningPointWithMetaData.Block.Header.BlockHash().Equal(proofPruningPoint) { + return protocolerrors.Errorf(true, "first block with trusted data is not the pruning point") + } + + err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), pruningPointWithMetaData, msgTrustedData) + if err != nil { + return err + } + + i := 0 + for ; ; i++ { + blockWithTrustedData, done, err := flow.receiveBlockWithTrustedData() + if err != nil { + return err + } + + if done { + break + } + + err = flow.processBlockWithTrustedData(flow.Domain().StagingConsensus(), blockWithTrustedData, msgTrustedData) + if err != nil { + return err + } + + // We're using i+2 because we want to check if the next block will belong to the next batch, but we already downloaded + // the pruning point outside the loop so we use i+2 instead of i+1. + if (i+2)%ibdBatchSize == 0 { + log.Infof("Downloaded %d blocks from the pruning point anticone", i+1) + err := flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestNextPruningPointAndItsAnticoneBlocks()) + if err != nil { + return err + } + } + } + + log.Infof("Finished downloading pruning point and its anticone from %s. 
Total blocks downloaded: %d", flow.peer, i+1) + return nil +} + +func (flow *handleIBDFlow) processBlockWithTrustedData( + consensus externalapi.Consensus, block *appmessage.MsgBlockWithTrustedDataV4, data *appmessage.MsgTrustedData) error { + + blockWithTrustedData := &externalapi.BlockWithTrustedData{ + Block: appmessage.MsgBlockToDomainBlock(block.Block), + DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(block.DAAWindowIndices)), + GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(block.GHOSTDAGDataIndices)), + } + + for _, index := range block.DAAWindowIndices { + blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, appmessage.TrustedDataDataDAABlockV4ToTrustedDataDataDAAHeader(data.DAAWindow[index])) + } + + for _, index := range block.GHOSTDAGDataIndices { + blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, appmessage.GHOSTDAGHashPairToDomainGHOSTDAGHashPair(data.GHOSTDAGData[index])) + } + + err := consensus.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false) + if err != nil { + if errors.As(err, &ruleerrors.RuleError{}) { + return protocolerrors.Wrapf(true, err, "failed validating block with trusted data") + } + return err + } + return nil +} + +func (flow *handleIBDFlow) receiveBlockWithTrustedData() (*appmessage.MsgBlockWithTrustedDataV4, bool, error) { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, false, err + } + + switch downCastedMessage := message.(type) { + case *appmessage.MsgBlockWithTrustedDataV4: + return downCastedMessage, false, nil + case *appmessage.MsgDoneBlocksWithTrustedData: + return nil, true, nil + default: + return nil, false, + protocolerrors.Errorf(true, "received unexpected message type. 
"+ + "expected: %s or %s, got: %s", + (&appmessage.MsgBlockWithTrustedData{}).Command(), + (&appmessage.MsgDoneBlocksWithTrustedData{}).Command(), + downCastedMessage.Command()) + } +} + +func (flow *handleIBDFlow) receivePruningPoints() (*appmessage.MsgPruningPoints, error) { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, err + } + + msgPruningPoints, ok := message.(*appmessage.MsgPruningPoints) + if !ok { + return nil, + protocolerrors.Errorf(true, "received unexpected message type. "+ + "expected: %s, got: %s", appmessage.CmdPruningPoints, message.Command()) + } + + return msgPruningPoints, nil +} + +func (flow *handleIBDFlow) validateAndInsertPruningPoints(proofPruningPoint *externalapi.DomainHash) error { + currentPruningPoint, err := flow.Domain().Consensus().PruningPoint() + if err != nil { + return err + } + + if currentPruningPoint.Equal(proofPruningPoint) { + return protocolerrors.Errorf(true, "the proposed pruning point is the same as the current pruning point") + } + + pruningPoints, err := flow.receivePruningPoints() + if err != nil { + return err + } + + headers := make([]externalapi.BlockHeader, len(pruningPoints.Headers)) + for i, header := range pruningPoints.Headers { + headers[i] = appmessage.BlockHeaderToDomainBlockHeader(header) + } + + arePruningPointsViolatingFinality, err := flow.Domain().Consensus().ArePruningPointsViolatingFinality(headers) + if err != nil { + return err + } + + if arePruningPointsViolatingFinality { + // TODO: Find a better way to deal with finality conflicts. 
+ return protocolerrors.Errorf(false, "pruning points are violating finality") + } + + lastPruningPoint := consensushashing.HeaderHash(headers[len(headers)-1]) + if !lastPruningPoint.Equal(proofPruningPoint) { + return protocolerrors.Errorf(true, "the proof pruning point is not equal to the last pruning "+ + "point in the list") + } + + err = flow.Domain().StagingConsensus().ImportPruningPoints(headers) + if err != nil { + return err + } + + return nil +} + +func (flow *handleIBDFlow) syncPruningPointUTXOSet(consensus externalapi.Consensus, + pruningPoint *externalapi.DomainHash) (bool, error) { + + log.Infof("Checking if the suggested pruning point %s is compatible to the node DAG", pruningPoint) + isValid, err := flow.Domain().StagingConsensus().IsValidPruningPoint(pruningPoint) + if err != nil { + return false, err + } + + if !isValid { + return false, protocolerrors.Errorf(true, "invalid pruning point %s", pruningPoint) + } + + log.Info("Fetching the pruning point UTXO set") + isSuccessful, err := flow.fetchMissingUTXOSet(consensus, pruningPoint) + if err != nil { + log.Infof("An error occurred while fetching the pruning point UTXO set. Stopping IBD. (%s)", err) + return false, err + } + + if !isSuccessful { + log.Infof("Couldn't successfully fetch the pruning point UTXO set. 
Stopping IBD.") + return false, nil + } + + log.Info("Fetched the new pruning point UTXO set") + return true, nil +} + +func (flow *handleIBDFlow) fetchMissingUTXOSet(consensus externalapi.Consensus, pruningPointHash *externalapi.DomainHash) (succeed bool, err error) { + defer func() { + err := flow.Domain().StagingConsensus().ClearImportedPruningPointData() + if err != nil { + panic(fmt.Sprintf("failed to clear imported pruning point data: %s", err)) + } + }() + + err = flow.outgoingRoute.Enqueue(appmessage.NewMsgRequestPruningPointUTXOSet(pruningPointHash)) + if err != nil { + return false, err + } + + receivedAll, err := flow.receiveAndInsertPruningPointUTXOSet(consensus, pruningPointHash) + if err != nil { + return false, err + } + if !receivedAll { + return false, nil + } + + err = flow.Domain().StagingConsensus().ValidateAndInsertImportedPruningPoint(pruningPointHash) + if err != nil { + // TODO: Find a better way to deal with finality conflicts. + if errors.Is(err, ruleerrors.ErrSuggestedPruningViolatesFinality) { + return false, nil + } + return false, protocolerrors.ConvertToBanningProtocolErrorIfRuleError(err, "error with pruning point UTXO set") + } + + return true, nil +} diff --git a/app/protocol/flows/v5/blockrelay/log.go b/app/protocol/flows/v5/blockrelay/log.go new file mode 100644 index 0000000..c67d8b4 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/log.go @@ -0,0 +1,9 @@ +package blockrelay + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("PROT") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/app/protocol/flows/v5/blockrelay/send_virtual_selected_parent_inv.go b/app/protocol/flows/v5/blockrelay/send_virtual_selected_parent_inv.go new file mode 100644 index 0000000..df05084 --- /dev/null +++ b/app/protocol/flows/v5/blockrelay/send_virtual_selected_parent_inv.go @@ -0,0 +1,35 @@ +package blockrelay + +import ( + 
"github.com/spectre-project/spectred/app/appmessage" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// SendVirtualSelectedParentInvContext is the interface for the context needed for the SendVirtualSelectedParentInv flow. +type SendVirtualSelectedParentInvContext interface { + Domain() domain.Domain + Config() *config.Config +} + +// SendVirtualSelectedParentInv sends a peer the selected parent hash of the virtual +func SendVirtualSelectedParentInv(context SendVirtualSelectedParentInvContext, + outgoingRoute *router.Route, peer *peerpkg.Peer) error { + + virtualSelectedParent, err := context.Domain().Consensus().GetVirtualSelectedParent() + if err != nil { + return err + } + + if virtualSelectedParent.Equal(context.Config().NetParams().GenesisHash) { + log.Debugf("Skipping sending the virtual selected parent hash to peer %s because it's the genesis", peer) + return nil + } + + log.Debugf("Sending virtual selected parent hash %s to peer %s", virtualSelectedParent, peer) + + virtualSelectedParentInv := appmessage.NewMsgInvBlock(virtualSelectedParent) + return outgoingRoute.Enqueue(virtualSelectedParentInv) +} diff --git a/app/protocol/flows/v5/ping/receive.go b/app/protocol/flows/v5/ping/receive.go new file mode 100644 index 0000000..2cf3b29 --- /dev/null +++ b/app/protocol/flows/v5/ping/receive.go @@ -0,0 +1,42 @@ +package ping + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// ReceivePingsContext is the interface for the context needed for the ReceivePings flow. 
+type ReceivePingsContext interface { +} + +type receivePingsFlow struct { + ReceivePingsContext + incomingRoute, outgoingRoute *router.Route +} + +// ReceivePings handles all ping messages coming through incomingRoute. +// This function assumes that incomingRoute will only return MsgPing. +func ReceivePings(context ReceivePingsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error { + flow := &receivePingsFlow{ + ReceivePingsContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + } + return flow.start() +} + +func (flow *receivePingsFlow) start() error { + for { + message, err := flow.incomingRoute.Dequeue() + if err != nil { + return err + } + pingMessage := message.(*appmessage.MsgPing) + + pongMessage := appmessage.NewMsgPong(pingMessage.Nonce) + err = flow.outgoingRoute.Enqueue(pongMessage) + if err != nil { + return err + } + } +} diff --git a/app/protocol/flows/v5/ping/send.go b/app/protocol/flows/v5/ping/send.go new file mode 100644 index 0000000..82405c8 --- /dev/null +++ b/app/protocol/flows/v5/ping/send.go @@ -0,0 +1,78 @@ +package ping + +import ( + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/flowcontext" + + "github.com/spectre-project/spectred/app/appmessage" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util/random" +) + +// SendPingsContext is the interface for the context needed for the SendPings flow. +type SendPingsContext interface { + ShutdownChan() <-chan struct{} +} + +type sendPingsFlow struct { + SendPingsContext + incomingRoute, outgoingRoute *router.Route + peer *peerpkg.Peer +} + +// SendPings starts sending MsgPings every pingInterval seconds to the +// given peer. 
+// This function assumes that incomingRoute will only return MsgPong. +func SendPings(context SendPingsContext, incomingRoute *router.Route, outgoingRoute *router.Route, peer *peerpkg.Peer) error { + flow := &sendPingsFlow{ + SendPingsContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + peer: peer, + } + return flow.start() +} + +func (flow *sendPingsFlow) start() error { + const pingInterval = 2 * time.Minute + ticker := time.NewTicker(pingInterval) + defer ticker.Stop() + + for { + select { + case <-flow.ShutdownChan(): + return nil + case <-ticker.C: + } + + nonce, err := random.Uint64() + if err != nil { + return err + } + flow.peer.SetPingPending(nonce) + + pingMessage := appmessage.NewMsgPing(nonce) + err = flow.outgoingRoute.Enqueue(pingMessage) + if err != nil { + return err + } + + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + if errors.Is(err, router.ErrTimeout) { + return errors.Wrapf(flowcontext.ErrPingTimeout, err.Error()) + } + return err + } + pongMessage := message.(*appmessage.MsgPong) + if pongMessage.Nonce != pingMessage.Nonce { + return protocolerrors.New(true, "nonce mismatch between ping and pong") + } + flow.peer.SetPingIdle() + } +} diff --git a/app/protocol/flows/v5/register.go b/app/protocol/flows/v5/register.go new file mode 100644 index 0000000..b293d4e --- /dev/null +++ b/app/protocol/flows/v5/register.go @@ -0,0 +1,209 @@ +package v5 + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/flowcontext" + "github.com/spectre-project/spectred/app/protocol/flows/v5/addressexchange" + "github.com/spectre-project/spectred/app/protocol/flows/v5/blockrelay" + "github.com/spectre-project/spectred/app/protocol/flows/v5/ping" + "github.com/spectre-project/spectred/app/protocol/flows/v5/rejects" + 
"github.com/spectre-project/spectred/app/protocol/flows/v5/transactionrelay" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +type protocolManager interface { + RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32, + errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow + RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, + isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow + RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router, + messageTypes []appmessage.MessageCommand, isStopping *uint32, + errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow + Context() *flowcontext.FlowContext +} + +// Register is used in order to register all the protocol flows to the given router. +func Register(m protocolManager, router *routerpkg.Router, errChan chan error, isStopping *uint32) (flows []*common.Flow) { + flows = registerAddressFlows(m, router, isStopping, errChan) + flows = append(flows, registerBlockRelayFlows(m, router, isStopping, errChan)...) + flows = append(flows, registerPingFlows(m, router, isStopping, errChan)...) + flows = append(flows, registerTransactionRelayFlow(m, router, isStopping, errChan)...) + flows = append(flows, registerRejectsFlow(m, router, isStopping, errChan)...) 
+ + return flows +} + +func registerAddressFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow { + outgoingRoute := router.OutgoingRoute() + + return []*common.Flow{ + m.RegisterFlow("SendAddresses", router, []appmessage.MessageCommand{appmessage.CmdRequestAddresses}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return addressexchange.SendAddresses(m.Context(), incomingRoute, outgoingRoute) + }, + ), + + m.RegisterOneTimeFlow("ReceiveAddresses", router, []appmessage.MessageCommand{appmessage.CmdAddresses}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return addressexchange.ReceiveAddresses(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + } +} + +func registerBlockRelayFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow { + outgoingRoute := router.OutgoingRoute() + + return []*common.Flow{ + m.RegisterOneTimeFlow("SendVirtualSelectedParentInv", router, []appmessage.MessageCommand{}, + isStopping, errChan, func(route *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.SendVirtualSelectedParentInv(m.Context(), outgoingRoute, peer) + }), + + m.RegisterFlow("HandleRelayInvs", router, []appmessage.MessageCommand{ + appmessage.CmdInvRelayBlock, appmessage.CmdBlock, appmessage.CmdBlockLocator, + }, + isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRelayInvs(m.Context(), incomingRoute, + outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandleIBD", router, []appmessage.MessageCommand{ + appmessage.CmdDoneHeaders, appmessage.CmdUnexpectedPruningPoint, appmessage.CmdPruningPointUTXOSetChunk, + appmessage.CmdBlockHeaders, appmessage.CmdIBDBlockLocatorHighestHash, appmessage.CmdBlockWithTrustedDataV4, + appmessage.CmdDoneBlocksWithTrustedData, appmessage.CmdIBDBlockLocatorHighestHashNotFound, + 
appmessage.CmdDonePruningPointUTXOSetChunks, appmessage.CmdIBDBlock, appmessage.CmdPruningPoints, + appmessage.CmdPruningPointProof, + appmessage.CmdTrustedData, + appmessage.CmdIBDChainBlockLocator, + }, + isStopping, errChan, func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleIBD(m.Context(), incomingRoute, + outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandleRelayBlockRequests", router, []appmessage.MessageCommand{appmessage.CmdRequestRelayBlocks}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRelayBlockRequests(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandleRequestBlockLocator", router, + []appmessage.MessageCommand{appmessage.CmdRequestBlockLocator}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRequestBlockLocator(m.Context(), incomingRoute, outgoingRoute) + }, + ), + + m.RegisterFlow("HandleRequestHeaders", router, + []appmessage.MessageCommand{appmessage.CmdRequestHeaders, appmessage.CmdRequestNextHeaders}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRequestHeaders(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandleIBDBlockRequests", router, + []appmessage.MessageCommand{appmessage.CmdRequestIBDBlocks}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleIBDBlockRequests(m.Context(), incomingRoute, outgoingRoute) + }, + ), + + m.RegisterFlow("HandleRequestPruningPointUTXOSet", router, + []appmessage.MessageCommand{appmessage.CmdRequestPruningPointUTXOSet, + appmessage.CmdRequestNextPruningPointUTXOSetChunk}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRequestPruningPointUTXOSet(m.Context(), incomingRoute, 
outgoingRoute) + }, + ), + + m.RegisterFlow("HandlePruningPointAndItsAnticoneRequests", router, + []appmessage.MessageCommand{appmessage.CmdRequestPruningPointAndItsAnticone, appmessage.CmdRequestNextPruningPointAndItsAnticoneBlocks}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandlePruningPointAndItsAnticoneRequests(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandleIBDBlockLocator", router, + []appmessage.MessageCommand{appmessage.CmdIBDBlockLocator}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleIBDBlockLocator(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandleRequestIBDChainBlockLocator", router, + []appmessage.MessageCommand{appmessage.CmdRequestIBDChainBlockLocator}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRequestIBDChainBlockLocator(m.Context(), incomingRoute, outgoingRoute) + }, + ), + + m.RegisterFlow("HandleRequestAnticone", router, + []appmessage.MessageCommand{appmessage.CmdRequestAnticone}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandleRequestAnticone(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + + m.RegisterFlow("HandlePruningPointProofRequests", router, + []appmessage.MessageCommand{appmessage.CmdRequestPruningPointProof}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return blockrelay.HandlePruningPointProofRequests(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + } +} + +func registerPingFlows(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow { + outgoingRoute := router.OutgoingRoute() + + return []*common.Flow{ + m.RegisterFlow("ReceivePings", router, 
[]appmessage.MessageCommand{appmessage.CmdPing}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return ping.ReceivePings(m.Context(), incomingRoute, outgoingRoute) + }, + ), + + m.RegisterFlow("SendPings", router, []appmessage.MessageCommand{appmessage.CmdPong}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return ping.SendPings(m.Context(), incomingRoute, outgoingRoute, peer) + }, + ), + } +} + +func registerTransactionRelayFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow { + outgoingRoute := router.OutgoingRoute() + + return []*common.Flow{ + m.RegisterFlowWithCapacity("HandleRelayedTransactions", 10_000, router, + []appmessage.MessageCommand{appmessage.CmdInvTransaction, appmessage.CmdTx, appmessage.CmdTransactionNotFound}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return transactionrelay.HandleRelayedTransactions(m.Context(), incomingRoute, outgoingRoute) + }, + ), + m.RegisterFlow("HandleRequestTransactions", router, + []appmessage.MessageCommand{appmessage.CmdRequestTransactions}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return transactionrelay.HandleRequestedTransactions(m.Context(), incomingRoute, outgoingRoute) + }, + ), + } +} + +func registerRejectsFlow(m protocolManager, router *routerpkg.Router, isStopping *uint32, errChan chan error) []*common.Flow { + outgoingRoute := router.OutgoingRoute() + + return []*common.Flow{ + m.RegisterFlow("HandleRejects", router, + []appmessage.MessageCommand{appmessage.CmdReject}, isStopping, errChan, + func(incomingRoute *routerpkg.Route, peer *peerpkg.Peer) error { + return rejects.HandleRejects(m.Context(), incomingRoute, outgoingRoute) + }, + ), + } +} diff --git a/app/protocol/flows/v5/rejects/handle_rejects.go b/app/protocol/flows/v5/rejects/handle_rejects.go new file mode 100644 
index 0000000..8054a90 --- /dev/null +++ b/app/protocol/flows/v5/rejects/handle_rejects.go @@ -0,0 +1,37 @@ +package rejects + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleRejectsContext is the interface for the context needed for the HandleRejects flow. +type HandleRejectsContext interface { +} + +type handleRejectsFlow struct { + HandleRejectsContext + incomingRoute, outgoingRoute *router.Route +} + +// HandleRejects handles all reject messages coming through incomingRoute. +// This function assumes that incomingRoute will only return MsgReject. +func HandleRejects(context HandleRejectsContext, incomingRoute *router.Route, outgoingRoute *router.Route) error { + flow := &handleRejectsFlow{ + HandleRejectsContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + } + return flow.start() +} + +func (flow *handleRejectsFlow) start() error { + message, err := flow.incomingRoute.Dequeue() + if err != nil { + return err + } + rejectMessage := message.(*appmessage.MsgReject) + + return protocolerrors.Errorf(false, "got reject message: `%s`", rejectMessage.Reason) +} diff --git a/app/protocol/flows/v5/testing/common_test.go b/app/protocol/flows/v5/testing/common_test.go new file mode 100644 index 0000000..498cf28 --- /dev/null +++ b/app/protocol/flows/v5/testing/common_test.go @@ -0,0 +1,24 @@ +package testing + +import ( + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" +) + +func checkFlowError(t *testing.T, err error, isProtocolError bool, shouldBan bool, contains string) { + pErr := protocolerrors.ProtocolError{} + if errors.As(err, &pErr) != isProtocolError { + t.Fatalf("Unexepcted error %+v", err) + } + + if pErr.ShouldBan != shouldBan { + t.Fatalf("Exepcted shouldBan %t but got %t", 
shouldBan, pErr.ShouldBan) + } + + if !strings.Contains(err.Error(), contains) { + t.Fatalf("Unexpected error. Expected error to contain '%s' but got: %+v", contains, err) + } +} diff --git a/app/protocol/flows/v5/testing/receiveaddresses_test.go b/app/protocol/flows/v5/testing/receiveaddresses_test.go new file mode 100644 index 0000000..560ee3c --- /dev/null +++ b/app/protocol/flows/v5/testing/receiveaddresses_test.go @@ -0,0 +1,52 @@ +package testing + +import ( + "testing" + "time" + + "github.com/spectre-project/spectred/app/protocol/flows/v5/addressexchange" + + "github.com/spectre-project/spectred/app/appmessage" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +type fakeReceiveAddressesContext struct{} + +func (f fakeReceiveAddressesContext) AddressManager() *addressmanager.AddressManager { + return nil +} + +func TestReceiveAddressesErrors(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + incomingRoute := router.NewRoute("incoming") + outgoingRoute := router.NewRoute("outgoing") + peer := peerpkg.New(nil) + errChan := make(chan error) + go func() { + errChan <- addressexchange.ReceiveAddresses(fakeReceiveAddressesContext{}, incomingRoute, outgoingRoute, peer) + }() + + _, err := outgoingRoute.DequeueWithTimeout(time.Second) + if err != nil { + t.Fatalf("DequeueWithTimeout: %+v", err) + } + + // Sending addressmanager.GetAddressesMax+1 addresses should trigger a ban + err = incomingRoute.Enqueue(appmessage.NewMsgAddresses(make([]*appmessage.NetAddress, + addressmanager.GetAddressesMax+1))) + if err != nil { + t.Fatalf("Enqueue: %+v", err) + } + + select { + case err := <-errChan: + 
checkFlowError(t, err, true, true, "address count exceeded") + case <-time.After(time.Second): + t.Fatalf("timed out after %s", time.Second) + } + }) +} diff --git a/app/protocol/flows/v5/testing/testing.go b/app/protocol/flows/v5/testing/testing.go new file mode 100644 index 0000000..a31e8c8 --- /dev/null +++ b/app/protocol/flows/v5/testing/testing.go @@ -0,0 +1,4 @@ +package testing + +// Because of a bug in Go coverage fails if you have packages with test files only. See https://github.com/golang/go/issues/27333 +// So this is a dummy non-test go file in the package. diff --git a/app/protocol/flows/v5/transactionrelay/handle_relayed_transactions.go b/app/protocol/flows/v5/transactionrelay/handle_relayed_transactions.go new file mode 100644 index 0000000..2b5ac19 --- /dev/null +++ b/app/protocol/flows/v5/transactionrelay/handle_relayed_transactions.go @@ -0,0 +1,214 @@ +package transactionrelay + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/flowcontext" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// TransactionsRelayContext is the interface for the context needed for the +// HandleRelayedTransactions and HandleRequestedTransactions flows. 
+type TransactionsRelayContext interface { + NetAdapter() *netadapter.NetAdapter + Domain() domain.Domain + SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions + OnTransactionAddedToMempool() + EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error + IsNearlySynced() (bool, error) +} + +type handleRelayedTransactionsFlow struct { + TransactionsRelayContext + incomingRoute, outgoingRoute *router.Route + invsQueue []*appmessage.MsgInvTransaction +} + +// HandleRelayedTransactions listens to appmessage.MsgInvTransaction messages, requests their corresponding transactions if they +// are missing, adds them to the mempool and propagates them to the rest of the network. +func HandleRelayedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error { + flow := &handleRelayedTransactionsFlow{ + TransactionsRelayContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + invsQueue: make([]*appmessage.MsgInvTransaction, 0), + } + return flow.start() +} + +func (flow *handleRelayedTransactionsFlow) start() error { + for { + inv, err := flow.readInv() + if err != nil { + return err + } + + isNearlySynced, err := flow.IsNearlySynced() + if err != nil { + return err + } + // Transaction relay is disabled if the node is out of sync and thus not mining + if !isNearlySynced { + continue + } + + requestedIDs, err := flow.requestInvTransactions(inv) + if err != nil { + return err + } + + err = flow.receiveTransactions(requestedIDs) + if err != nil { + return err + } + } +} + +func (flow *handleRelayedTransactionsFlow) requestInvTransactions( + inv *appmessage.MsgInvTransaction) (requestedIDs []*externalapi.DomainTransactionID, err error) { + + idsToRequest := make([]*externalapi.DomainTransactionID, 0, len(inv.TxIDs)) + for _, txID := range inv.TxIDs { + if flow.isKnownTransaction(txID) { + continue + } + exists := 
flow.SharedRequestedTransactions().AddIfNotExists(txID) + if exists { + continue + } + idsToRequest = append(idsToRequest, txID) + } + + if len(idsToRequest) == 0 { + return idsToRequest, nil + } + + msgGetTransactions := appmessage.NewMsgRequestTransactions(idsToRequest) + err = flow.outgoingRoute.Enqueue(msgGetTransactions) + if err != nil { + flow.SharedRequestedTransactions().RemoveMany(idsToRequest) + return nil, err + } + return idsToRequest, nil +} + +func (flow *handleRelayedTransactionsFlow) isKnownTransaction(txID *externalapi.DomainTransactionID) bool { + // Ask the transaction memory pool if the transaction is known + // to it in any form (main pool or orphan). + if _, _, ok := flow.Domain().MiningManager().GetTransaction(txID, true, true); ok { + return true + } + + return false +} + +func (flow *handleRelayedTransactionsFlow) readInv() (*appmessage.MsgInvTransaction, error) { + if len(flow.invsQueue) > 0 { + var inv *appmessage.MsgInvTransaction + inv, flow.invsQueue = flow.invsQueue[0], flow.invsQueue[1:] + return inv, nil + } + + msg, err := flow.incomingRoute.Dequeue() + if err != nil { + return nil, err + } + + inv, ok := msg.(*appmessage.MsgInvTransaction) + if !ok { + return nil, protocolerrors.Errorf(true, "unexpected %s message in the block relay flow while "+ + "expecting an inv message", msg.Command()) + } + return inv, nil +} + +func (flow *handleRelayedTransactionsFlow) broadcastAcceptedTransactions(acceptedTxIDs []*externalapi.DomainTransactionID) error { + return flow.EnqueueTransactionIDsForPropagation(acceptedTxIDs) +} + +// readMsgTxOrNotFound returns the next msgTx or msgTransactionNotFound in incomingRoute, +// returning only one of the message types at a time. +// +// and populates invsQueue with any inv messages that meanwhile arrive. 
+func (flow *handleRelayedTransactionsFlow) readMsgTxOrNotFound() ( + msgTx *appmessage.MsgTx, msgNotFound *appmessage.MsgTransactionNotFound, err error) { + + for { + message, err := flow.incomingRoute.DequeueWithTimeout(common.DefaultTimeout) + if err != nil { + return nil, nil, err + } + + switch message := message.(type) { + case *appmessage.MsgInvTransaction: + flow.invsQueue = append(flow.invsQueue, message) + case *appmessage.MsgTx: + return message, nil, nil + case *appmessage.MsgTransactionNotFound: + return nil, message, nil + default: + return nil, nil, errors.Errorf("unexpected message %s", message.Command()) + } + } +} + +func (flow *handleRelayedTransactionsFlow) receiveTransactions(requestedTransactions []*externalapi.DomainTransactionID) error { + // In case the function returns earlier than expected, we want to make sure sharedRequestedTransactions is + // clean from any pending transactions. + defer flow.SharedRequestedTransactions().RemoveMany(requestedTransactions) + for _, expectedID := range requestedTransactions { + msgTx, msgTxNotFound, err := flow.readMsgTxOrNotFound() + if err != nil { + return err + } + if msgTxNotFound != nil { + if !msgTxNotFound.ID.Equal(expectedID) { + return protocolerrors.Errorf(true, "expected transaction %s, but got %s", + expectedID, msgTxNotFound.ID) + } + + continue + } + tx := appmessage.MsgTxToDomainTransaction(msgTx) + txID := consensushashing.TransactionID(tx) + if !txID.Equal(expectedID) { + return protocolerrors.Errorf(true, "expected transaction %s, but got %s", + expectedID, txID) + } + + acceptedTransactions, err := + flow.Domain().MiningManager().ValidateAndInsertTransaction(tx, false, true) + if err != nil { + ruleErr := &mempool.RuleError{} + if !errors.As(err, ruleErr) { + return errors.Wrapf(err, "failed to process transaction %s", txID) + } + + shouldBan := false + if txRuleErr := (&mempool.TxRuleError{}); errors.As(ruleErr.Err, txRuleErr) { + if txRuleErr.RejectCode == mempool.RejectInvalid { + 
shouldBan = true + } + } + + if !shouldBan { + continue + } + + return protocolerrors.Errorf(true, "rejected transaction %s: %s", txID, ruleErr) + } + err = flow.broadcastAcceptedTransactions(consensushashing.TransactionIDs(acceptedTransactions)) + if err != nil { + return err + } + flow.OnTransactionAddedToMempool() + } + return nil +} diff --git a/app/protocol/flows/v5/transactionrelay/handle_relayed_transactions_test.go b/app/protocol/flows/v5/transactionrelay/handle_relayed_transactions_test.go new file mode 100644 index 0000000..a5c7cb2 --- /dev/null +++ b/app/protocol/flows/v5/transactionrelay/handle_relayed_transactions_test.go @@ -0,0 +1,197 @@ +package transactionrelay_test + +import ( + "errors" + "strings" + "testing" + + "github.com/spectre-project/spectred/app/protocol/flowcontext" + "github.com/spectre-project/spectred/app/protocol/flows/v5/transactionrelay" + + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +type mocTransactionsRelayContext struct { + netAdapter *netadapter.NetAdapter + domain domain.Domain + sharedRequestedTransactions *flowcontext.SharedRequestedTransactions +} + +func (m *mocTransactionsRelayContext) NetAdapter() *netadapter.NetAdapter { + return m.netAdapter +} + +func (m *mocTransactionsRelayContext) Domain() 
domain.Domain { + return m.domain +} + +func (m *mocTransactionsRelayContext) SharedRequestedTransactions() *flowcontext.SharedRequestedTransactions { + return m.sharedRequestedTransactions +} + +func (m *mocTransactionsRelayContext) EnqueueTransactionIDsForPropagation(transactionIDs []*externalapi.DomainTransactionID) error { + return nil +} + +func (m *mocTransactionsRelayContext) OnTransactionAddedToMempool() { +} + +func (m *mocTransactionsRelayContext) IsNearlySynced() (bool, error) { + return true, nil +} + +// TestHandleRelayedTransactionsNotFound tests the flow of HandleRelayedTransactions when the peer doesn't +// have the requested transactions in the mempool. +func TestHandleRelayedTransactionsNotFound(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + + var log = logger.RegisterSubSystem("PROT") + var spawn = panics.GoroutineWrapperFunc(log) + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRelayedTransactionsNotFound") + if err != nil { + t.Fatalf("Error setting up test consensus: %+v", err) + } + defer teardown(false) + + sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions() + adapter, err := netadapter.NewNetAdapter(config.DefaultConfig()) + if err != nil { + t.Fatalf("Failed to create a NetAdapter: %v", err) + } + domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database()) + if err != nil { + t.Fatalf("Failed to set up a domain instance: %v", err) + } + context := &mocTransactionsRelayContext{ + netAdapter: adapter, + domain: domainInstance, + sharedRequestedTransactions: sharedRequestedTransactions, + } + incomingRoute := router.NewRoute("incoming") + defer incomingRoute.Close() + peerIncomingRoute := router.NewRoute("outgoing") + defer peerIncomingRoute.Close() + + txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) + txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}) + txIDs := []*externalapi.DomainTransactionID{txID1, txID2} + invMessage := appmessage.NewMsgInvTransaction(txIDs) + err = incomingRoute.Enqueue(invMessage) + if err != nil { + t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err) + } + // The goroutine is representing the peer's actions. + spawn("peerResponseToTheTransactionsRequest", func() { + msg, err := peerIncomingRoute.Dequeue() + if err != nil { + t.Fatalf("Dequeue: %v", err) + } + inv := msg.(*appmessage.MsgRequestTransactions) + + if len(txIDs) != len(inv.IDs) { + t.Fatalf("TestHandleRelayedTransactions: expected %d transactions ID, but got %d", len(txIDs), len(inv.IDs)) + } + + for i, id := range inv.IDs { + if txIDs[i].String() != id.String() { + t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String()) + } + err = incomingRoute.Enqueue(appmessage.NewMsgTransactionNotFound(txIDs[i])) + if err != nil { + t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err) + } + } + // Insert an unexpected message type to stop the infinity loop. 
+ err = incomingRoute.Enqueue(&appmessage.MsgAddresses{}) + if err != nil { + t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err) + } + }) + + err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, peerIncomingRoute) + // Since we inserted an unexpected message type to stop the infinite loop, + // we expect the error to originate from this specific message, and also to + // count as a protocol error. + if protocolErr := (protocolerrors.ProtocolError{}); err == nil || !errors.As(err, &protocolErr) { + t.Fatalf("Expected to protocol error") + } else { + if !protocolErr.ShouldBan { + t.Fatalf("Exepcted shouldBan true, but got false.") + } + if !strings.Contains(err.Error(), "unexpected Addresses [code 3] message in the block relay flow while expecting an inv message") { + t.Fatalf("Unexpected error: expected: an error due to existence of an Addresses message "+ + "in the block relay flow, but got: %v", protocolErr.Cause) + } + } + }) +} + +// TestOnClosedIncomingRoute verifies that an appropriate error message will be returned when +// trying to dequeue a message from a closed route.
+func TestOnClosedIncomingRoute(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOnClosedIncomingRoute") + if err != nil { + t.Fatalf("Error setting up test consensus: %+v", err) + } + defer teardown(false) + + sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions() + adapter, err := netadapter.NewNetAdapter(config.DefaultConfig()) + if err != nil { + t.Fatalf("Failed to creat a NetAdapter : %v", err) + } + domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database()) + if err != nil { + t.Fatalf("Failed to set up a domain instance: %v", err) + } + context := &mocTransactionsRelayContext{ + netAdapter: adapter, + domain: domainInstance, + sharedRequestedTransactions: sharedRequestedTransactions, + } + incomingRoute := router.NewRoute("incoming") + outgoingRoute := router.NewRoute("outgoing") + defer outgoingRoute.Close() + + txID := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) + txIDs := []*externalapi.DomainTransactionID{txID} + + err = incomingRoute.Enqueue(&appmessage.MsgInvTransaction{TxIDs: txIDs}) + if err != nil { + t.Fatalf("Unexpected error from incomingRoute.Enqueue: %v", err) + } + incomingRoute.Close() + err = transactionrelay.HandleRelayedTransactions(context, incomingRoute, outgoingRoute) + if err == nil || !errors.Is(err, router.ErrRouteClosed) { + t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err) + } + }) +} diff --git a/app/protocol/flows/v5/transactionrelay/handle_requested_transactions.go 
b/app/protocol/flows/v5/transactionrelay/handle_requested_transactions.go new file mode 100644 index 0000000..33d3028 --- /dev/null +++ b/app/protocol/flows/v5/transactionrelay/handle_requested_transactions.go @@ -0,0 +1,58 @@ +package transactionrelay + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +type handleRequestedTransactionsFlow struct { + TransactionsRelayContext + incomingRoute, outgoingRoute *router.Route +} + +// HandleRequestedTransactions listens to appmessage.MsgRequestTransactions messages, responding with the requested +// transactions if those are in the mempool. +// Missing transactions would be ignored +func HandleRequestedTransactions(context TransactionsRelayContext, incomingRoute *router.Route, outgoingRoute *router.Route) error { + flow := &handleRequestedTransactionsFlow{ + TransactionsRelayContext: context, + incomingRoute: incomingRoute, + outgoingRoute: outgoingRoute, + } + return flow.start() +} + +func (flow *handleRequestedTransactionsFlow) start() error { + for { + msgRequestTransactions, err := flow.readRequestTransactions() + if err != nil { + return err + } + + for _, transactionID := range msgRequestTransactions.IDs { + tx, _, ok := flow.Domain().MiningManager().GetTransaction(transactionID, true, false) + + if !ok { + msgTransactionNotFound := appmessage.NewMsgTransactionNotFound(transactionID) + err := flow.outgoingRoute.Enqueue(msgTransactionNotFound) + if err != nil { + return err + } + continue + } + err := flow.outgoingRoute.Enqueue(appmessage.DomainTransactionToMsgTx(tx)) + if err != nil { + return err + } + } + } +} + +func (flow *handleRequestedTransactionsFlow) readRequestTransactions() (*appmessage.MsgRequestTransactions, error) { + msg, err := flow.incomingRoute.Dequeue() + if err != nil { + return nil, err + } + + return msg.(*appmessage.MsgRequestTransactions), nil +} diff --git 
a/app/protocol/flows/v5/transactionrelay/handle_requested_transactions_test.go b/app/protocol/flows/v5/transactionrelay/handle_requested_transactions_test.go new file mode 100644 index 0000000..b4b32d9 --- /dev/null +++ b/app/protocol/flows/v5/transactionrelay/handle_requested_transactions_test.go @@ -0,0 +1,92 @@ +package transactionrelay_test + +import ( + "testing" + + "github.com/spectre-project/spectred/app/protocol/flowcontext" + "github.com/spectre-project/spectred/app/protocol/flows/v5/transactionrelay" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util/panics" +) + +// TestHandleRequestedTransactionsNotFound tests the flow of HandleRequestedTransactions +// when the requested transactions aren't found in the mempool.
+func TestHandleRequestedTransactionsNotFound(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + var log = logger.RegisterSubSystem("PROT") + var spawn = panics.GoroutineWrapperFunc(log) + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleRequestedTransactionsNotFound") + if err != nil { + t.Fatalf("Error setting up test Consensus: %+v", err) + } + defer teardown(false) + + sharedRequestedTransactions := flowcontext.NewSharedRequestedTransactions() + adapter, err := netadapter.NewNetAdapter(config.DefaultConfig()) + if err != nil { + t.Fatalf("Failed to create a NetAdapter: %v", err) + } + domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), tc.Database()) + if err != nil { + t.Fatalf("Failed to set up a domain Instance: %v", err) + } + context := &mocTransactionsRelayContext{ + netAdapter: adapter, + domain: domainInstance, + sharedRequestedTransactions: sharedRequestedTransactions, + } + incomingRoute := router.NewRoute("incoming") + outgoingRoute := router.NewRoute("outgoing") + defer outgoingRoute.Close() + + txID1 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}) + txID2 := externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}) + txIDs := []*externalapi.DomainTransactionID{txID1, txID2} + msg := appmessage.NewMsgRequestTransactions(txIDs) + err = incomingRoute.Enqueue(msg) + if err != nil { + t.Fatalf("Unexpected error from 
incomingRoute.Enqueue: %v", err) + } + // The goroutine is representing the peer's actions. + spawn("peerResponseToTheTransactionsMessages", func() { + for i, id := range txIDs { + msg, err := outgoingRoute.Dequeue() + if err != nil { + t.Fatalf("Dequeue: %s", err) + } + outMsg := msg.(*appmessage.MsgTransactionNotFound) + if txIDs[i].String() != outMsg.ID.String() { + t.Fatalf("TestHandleRelayedTransactions: expected equal txID: expected %s, but got %s", txIDs[i].String(), id.String()) + } + } + // Closed the incomingRoute for stop the infinity loop. + incomingRoute.Close() + }) + + err = transactionrelay.HandleRequestedTransactions(context, incomingRoute, outgoingRoute) + // Make sure the error is due to the closed route. + if err == nil || !errors.Is(err, router.ErrRouteClosed) { + t.Fatalf("Unexpected error: expected: %v, got : %v", router.ErrRouteClosed, err) + } + }) +} diff --git a/app/protocol/log.go b/app/protocol/log.go new file mode 100644 index 0000000..21e53f3 --- /dev/null +++ b/app/protocol/log.go @@ -0,0 +1,9 @@ +package protocol + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("PROT") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/app/protocol/manager.go b/app/protocol/manager.go new file mode 100644 index 0000000..cf46a4f --- /dev/null +++ b/app/protocol/manager.go @@ -0,0 +1,112 @@ +package protocol + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/spectre-project/spectred/app/protocol/common" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/domain" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/app/protocol/flowcontext" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/infrastructure/config" + 
"github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" +) + +// Manager manages the p2p protocol +type Manager struct { + context *flowcontext.FlowContext + routersWaitGroup sync.WaitGroup + isClosed uint32 +} + +// NewManager creates a new instance of the p2p protocol manager +func NewManager(cfg *config.Config, domain domain.Domain, netAdapter *netadapter.NetAdapter, addressManager *addressmanager.AddressManager, + connectionManager *connmanager.ConnectionManager) (*Manager, error) { + + manager := Manager{ + context: flowcontext.New(cfg, domain, addressManager, netAdapter, connectionManager), + } + + netAdapter.SetP2PRouterInitializer(manager.routerInitializer) + return &manager, nil +} + +// Close closes the protocol manager and waits until all p2p flows +// finish. +func (m *Manager) Close() { + if !atomic.CompareAndSwapUint32(&m.isClosed, 0, 1) { + panic(errors.New("The protocol manager was already closed")) + } + + atomic.StoreUint32(&m.isClosed, 1) + m.context.Close() + m.routersWaitGroup.Wait() +} + +// Peers returns the currently active peers +func (m *Manager) Peers() []*peerpkg.Peer { + return m.context.Peers() +} + +// IBDPeer returns the current IBD peer or null if the node is not +// in IBD +func (m *Manager) IBDPeer() *peerpkg.Peer { + return m.context.IBDPeer() +} + +// AddTransaction adds transaction to the mempool and propagates it. +func (m *Manager) AddTransaction(tx *externalapi.DomainTransaction, allowOrphan bool) error { + return m.context.AddTransaction(tx, allowOrphan) +} + +// AddBlock adds the given block to the DAG and propagates it. 
+func (m *Manager) AddBlock(block *externalapi.DomainBlock) error { + return m.context.AddBlock(block) +} + +// Context returns the manager's flow context +func (m *Manager) Context() *flowcontext.FlowContext { + return m.context +} + +func (m *Manager) runFlows(flows []*common.Flow, peer *peerpkg.Peer, errChan <-chan error, flowsWaitGroup *sync.WaitGroup) error { + flowsWaitGroup.Add(len(flows)) + for _, flow := range flows { + executeFunc := flow.ExecuteFunc // extract to new variable so that it's not overwritten + spawn(fmt.Sprintf("flow-%s", flow.Name), func() { + executeFunc(peer) + flowsWaitGroup.Done() + }) + } + + return <-errChan +} + +// SetOnNewBlockTemplateHandler sets the onNewBlockTemplate handler +func (m *Manager) SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler flowcontext.OnNewBlockTemplateHandler) { + m.context.SetOnNewBlockTemplateHandler(onNewBlockTemplateHandler) +} + +// SetOnPruningPointUTXOSetOverrideHandler sets the OnPruningPointUTXOSetOverride handler +func (m *Manager) SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler flowcontext.OnPruningPointUTXOSetOverrideHandler) { + m.context.SetOnPruningPointUTXOSetOverrideHandler(onPruningPointUTXOSetOverrideHandler) +} + +// SetOnTransactionAddedToMempoolHandler sets the onTransactionAddedToMempool handler +func (m *Manager) SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler flowcontext.OnTransactionAddedToMempoolHandler) { + m.context.SetOnTransactionAddedToMempoolHandler(onTransactionAddedToMempoolHandler) +} + +// IsIBDRunning returns true if IBD is currently marked as running +func (m *Manager) IsIBDRunning() bool { + return m.context.IsIBDRunning() +} diff --git a/app/protocol/peer/log.go b/app/protocol/peer/log.go new file mode 100644 index 0000000..08eae57 --- /dev/null +++ b/app/protocol/peer/log.go @@ -0,0 +1,7 @@ +package peer + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = 
logger.RegisterSubSystem("PROT") diff --git a/app/protocol/peer/peer.go b/app/protocol/peer/peer.go new file mode 100644 index 0000000..11b275c --- /dev/null +++ b/app/protocol/peer/peer.go @@ -0,0 +1,153 @@ +package peer + +import ( + "sync" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + mathUtil "github.com/spectre-project/spectred/util/math" + "github.com/spectre-project/spectred/util/mstime" +) + +// Peer holds data about a peer. +type Peer struct { + connection *netadapter.NetConnection + + userAgent string + services appmessage.ServiceFlag + advertisedProtocolVerion uint32 // protocol version advertised by remote + protocolVersion uint32 // negotiated protocol version + disableRelayTx bool + subnetworkID *externalapi.DomainSubnetworkID + + timeOffset time.Duration + connectionStarted time.Time + + pingLock sync.RWMutex + lastPingNonce uint64 // The nonce of the last ping we sent + lastPingTime time.Time // Time we sent last ping + lastPingDuration time.Duration // Time for last ping to return + + ibdRequestChannel chan *externalapi.DomainBlock // A channel used to communicate IBD requests between flows +} + +// New returns a new Peer +func New(connection *netadapter.NetConnection) *Peer { + return &Peer{ + connection: connection, + connectionStarted: time.Now(), + ibdRequestChannel: make(chan *externalapi.DomainBlock), + } +} + +// Connection returns the NetConnection associated with this peer +func (p *Peer) Connection() *netadapter.NetConnection { + return p.connection +} + +// SubnetworkID returns the subnetwork the peer is associated with. +// It is nil in full nodes. +func (p *Peer) SubnetworkID() *externalapi.DomainSubnetworkID { + return p.subnetworkID +} + +// ID returns the peer ID. 
+func (p *Peer) ID() *id.ID { + return p.connection.ID() +} + +// TimeOffset returns the peer's time offset. +func (p *Peer) TimeOffset() time.Duration { + return p.timeOffset +} + +// UserAgent returns the peer's user agent. +func (p *Peer) UserAgent() string { + return p.userAgent +} + +// AdvertisedProtocolVersion returns the peer's advertised protocol version. +func (p *Peer) AdvertisedProtocolVersion() uint32 { + return p.advertisedProtocolVerion +} + +// ProtocolVersion returns the protocol version which is used when communicating with the peer. +func (p *Peer) ProtocolVersion() uint32 { + return p.protocolVersion +} + +// TimeConnected returns the time since the connection to this peer has been started. +func (p *Peer) TimeConnected() time.Duration { + return time.Since(p.connectionStarted) +} + +// IsOutbound returns whether the peer is an outbound connection. +func (p *Peer) IsOutbound() bool { + return p.connection.IsOutbound() +} + +// UpdateFieldsFromMsgVersion updates the peer with the data from the version message. +func (p *Peer) UpdateFieldsFromMsgVersion(msg *appmessage.MsgVersion, maxProtocolVersion uint32) { + // Negotiate the protocol version. + p.advertisedProtocolVerion = msg.ProtocolVersion + p.protocolVersion = mathUtil.MinUint32(maxProtocolVersion, p.advertisedProtocolVerion) + log.Debugf("Negotiated protocol version %d for peer %s", + p.protocolVersion, p) + + // Set the supported services for the peer to what the remote peer + // advertised. + p.services = msg.Services + + // Set the remote peer's user agent. 
+ p.userAgent = msg.UserAgent + + p.disableRelayTx = msg.DisableRelayTx + p.subnetworkID = msg.SubnetworkID + + p.timeOffset = mstime.Since(msg.Timestamp) +} + +// SetPingPending sets the ping state of the peer to 'pending' +func (p *Peer) SetPingPending(nonce uint64) { + p.pingLock.Lock() + defer p.pingLock.Unlock() + + p.lastPingNonce = nonce + p.lastPingTime = time.Now() +} + +// SetPingIdle sets the ping state of the peer to 'idle' +func (p *Peer) SetPingIdle() { + p.pingLock.Lock() + defer p.pingLock.Unlock() + + p.lastPingNonce = 0 + p.lastPingDuration = time.Since(p.lastPingTime) +} + +func (p *Peer) String() string { + return p.connection.String() +} + +// Address returns the address associated with this connection +func (p *Peer) Address() string { + return p.connection.Address() +} + +// LastPingDuration returns the duration of the last ping to +// this peer +func (p *Peer) LastPingDuration() time.Duration { + p.pingLock.Lock() + defer p.pingLock.Unlock() + + return p.lastPingDuration +} + +// IBDRequestChannel returns the channel used in order to communicate an IBD request between peer flows +func (p *Peer) IBDRequestChannel() chan *externalapi.DomainBlock { + return p.ibdRequestChannel +} diff --git a/app/protocol/protocol.go b/app/protocol/protocol.go new file mode 100644 index 0000000..10cdd1c --- /dev/null +++ b/app/protocol/protocol.go @@ -0,0 +1,231 @@ +package protocol + +import ( + "sync" + "sync/atomic" + + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/app/protocol/flows/ready" + v5 "github.com/spectre-project/spectred/app/protocol/flows/v5" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/flows/handshake" + peerpkg "github.com/spectre-project/spectred/app/protocol/peer" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + 
"github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +func (m *Manager) routerInitializer(router *routerpkg.Router, netConnection *netadapter.NetConnection) { + // isStopping flag is raised the moment that the connection associated with this router is disconnected + // errChan is used by the flow goroutines to return to runFlows when an error occurs. + // They are both initialized here and passed to register flows. + isStopping := uint32(0) + errChan := make(chan error, 1) + + receiveVersionRoute, sendVersionRoute, receiveReadyRoute := registerHandshakeRoutes(router) + + // After flows were registered - spawn a new thread that will wait for connection to finish initializing + // and start receiving messages + spawn("routerInitializer-runFlows", func() { + m.routersWaitGroup.Add(1) + defer m.routersWaitGroup.Done() + + if atomic.LoadUint32(&m.isClosed) == 1 { + panic(errors.Errorf("tried to initialize router when the protocol manager is closed")) + } + + isBanned, err := m.context.ConnectionManager().IsBanned(netConnection) + if err != nil && !errors.Is(err, addressmanager.ErrAddressNotFound) { + panic(err) + } + if isBanned { + log.Infof("Peer %s is banned. 
Disconnecting...", netConnection) + netConnection.Disconnect() + return + } + + netConnection.SetOnInvalidMessageHandler(func(err error) { + if atomic.AddUint32(&isStopping, 1) == 1 { + errChan <- protocolerrors.Wrap(true, err, "received bad message") + } + }) + + peer, err := handshake.HandleHandshake(m.context, netConnection, receiveVersionRoute, + sendVersionRoute, router.OutgoingRoute()) + + if err != nil { + // non-blocking read from channel + select { + case innerError := <-errChan: + if errors.Is(err, routerpkg.ErrRouteClosed) { + m.handleError(innerError, netConnection, router.OutgoingRoute()) + } else { + log.Errorf("Peer %s sent invalid message: %s", netConnection, innerError) + m.handleError(err, netConnection, router.OutgoingRoute()) + } + default: + m.handleError(err, netConnection, router.OutgoingRoute()) + } + return + } + defer m.context.RemoveFromPeers(peer) + + var flows []*common.Flow + log.Infof("Registering p2p flows for peer %s for protocol version %d", peer, peer.ProtocolVersion()) + switch peer.ProtocolVersion() { + case 5: + flows = v5.Register(m, router, errChan, &isStopping) + default: + panic(errors.Errorf("no way to handle protocol version %d", peer.ProtocolVersion())) + } + + err = ready.HandleReady(receiveReadyRoute, router.OutgoingRoute(), peer) + if err != nil { + m.handleError(err, netConnection, router.OutgoingRoute()) + return + } + + removeHandshakeRoutes(router) + + flowsWaitGroup := &sync.WaitGroup{} + err = m.runFlows(flows, peer, errChan, flowsWaitGroup) + if err != nil { + m.handleError(err, netConnection, router.OutgoingRoute()) + // We call `flowsWaitGroup.Wait()` in two places instead of deferring, because + // we already defer `m.routersWaitGroup.Done()`, so we try to avoid error prone + // and confusing use of multiple dependent defers. 
+ flowsWaitGroup.Wait() + return + } + flowsWaitGroup.Wait() + }) +} + +func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection, outgoingRoute *routerpkg.Route) { + if protocolErr := (protocolerrors.ProtocolError{}); errors.As(err, &protocolErr) { + if m.context.Config().EnableBanning && protocolErr.ShouldBan { + log.Warnf("Banning %s (reason: %s)", netConnection, protocolErr.Cause) + + err := m.context.ConnectionManager().Ban(netConnection) + if err != nil && !errors.Is(err, connmanager.ErrCannotBanPermanent) { + panic(err) + } + + err = outgoingRoute.Enqueue(appmessage.NewMsgReject(protocolErr.Error())) + if err != nil && !errors.Is(err, routerpkg.ErrRouteClosed) { + panic(err) + } + } + log.Infof("Disconnecting from %s (reason: %s)", netConnection, protocolErr.Cause) + netConnection.Disconnect() + return + } + if errors.Is(err, routerpkg.ErrTimeout) { + log.Warnf("Got timeout from %s. Disconnecting...", netConnection) + netConnection.Disconnect() + return + } + if errors.Is(err, routerpkg.ErrRouteClosed) { + return + } + panic(err) +} + +// RegisterFlow registers a flow to the given router. +func (m *Manager) RegisterFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, isStopping *uint32, + errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow { + + route, err := router.AddIncomingRoute(name, messageTypes) + if err != nil { + panic(err) + } + + return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc) +} + +// RegisterFlowWithCapacity registers a flow to the given router with a custom capacity. 
+func (m *Manager) RegisterFlowWithCapacity(name string, capacity int, router *routerpkg.Router, + messageTypes []appmessage.MessageCommand, isStopping *uint32, + errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow { + + route, err := router.AddIncomingRouteWithCapacity(name, capacity, messageTypes) + if err != nil { + panic(err) + } + + return m.registerFlowForRoute(route, name, isStopping, errChan, initializeFunc) +} + +func (m *Manager) registerFlowForRoute(route *routerpkg.Route, name string, isStopping *uint32, + errChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow { + + return &common.Flow{ + Name: name, + ExecuteFunc: func(peer *peerpkg.Peer) { + err := initializeFunc(route, peer) + if err != nil { + m.context.HandleError(err, name, isStopping, errChan) + return + } + }, + } +} + +// RegisterOneTimeFlow registers a one-time flow (that exits once some operations are done) to the given router. +func (m *Manager) RegisterOneTimeFlow(name string, router *routerpkg.Router, messageTypes []appmessage.MessageCommand, + isStopping *uint32, stopChan chan error, initializeFunc common.FlowInitializeFunc) *common.Flow { + + route, err := router.AddIncomingRoute(name, messageTypes) + if err != nil { + panic(err) + } + + return &common.Flow{ + Name: name, + ExecuteFunc: func(peer *peerpkg.Peer) { + defer func() { + err := router.RemoveRoute(messageTypes) + if err != nil { + panic(err) + } + }() + + err := initializeFunc(route, peer) + if err != nil { + m.context.HandleError(err, name, isStopping, stopChan) + return + } + }, + } +} + +func registerHandshakeRoutes(router *routerpkg.Router) ( + receiveVersionRoute, sendVersionRoute, receiveReadyRoute *routerpkg.Route) { + receiveVersionRoute, err := router.AddIncomingRoute("recieveVersion - incoming", []appmessage.MessageCommand{appmessage.CmdVersion}) + if err != nil { + panic(err) + } + + sendVersionRoute, err = router.AddIncomingRoute("sendVersion - incoming", 
[]appmessage.MessageCommand{appmessage.CmdVerAck}) + if err != nil { + panic(err) + } + + receiveReadyRoute, err = router.AddIncomingRoute("recieveReady - incoming", []appmessage.MessageCommand{appmessage.CmdReady}) + if err != nil { + panic(err) + } + + return receiveVersionRoute, sendVersionRoute, receiveReadyRoute +} + +func removeHandshakeRoutes(router *routerpkg.Router) { + err := router.RemoveRoute([]appmessage.MessageCommand{appmessage.CmdVersion, appmessage.CmdVerAck, appmessage.CmdReady}) + if err != nil { + panic(err) + } +} diff --git a/app/protocol/protocolerrors/protocolerrors.go b/app/protocol/protocolerrors/protocolerrors.go new file mode 100644 index 0000000..b8954ff --- /dev/null +++ b/app/protocol/protocolerrors/protocolerrors.go @@ -0,0 +1,67 @@ +package protocolerrors + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" +) + +// ProtocolError is an error that signifies a violation +// of the peer-to-peer protocol +type ProtocolError struct { + ShouldBan bool + Cause error +} + +func (e ProtocolError) Error() string { + return e.Cause.Error() +} + +// Unwrap returns the cause of ProtocolError, to be used with `errors.Unwrap()` +func (e ProtocolError) Unwrap() error { + return e.Cause +} + +// Errorf formats according to a format specifier and returns the string +// as a ProtocolError. +func Errorf(shouldBan bool, format string, args ...interface{}) error { + return ProtocolError{ + ShouldBan: shouldBan, + Cause: errors.Errorf(format, args...), + } +} + +// New returns a ProtocolError with the supplied message. +// New also records the stack trace at the point it was called. +func New(shouldBan bool, message string) error { + return ProtocolError{ + ShouldBan: shouldBan, + Cause: errors.New(message), + } +} + +// Wrap wraps the given error and returns it as a ProtocolError. 
+func Wrap(shouldBan bool, err error, message string) error { + return ProtocolError{ + ShouldBan: shouldBan, + Cause: errors.Wrap(err, message), + } +} + +// Wrapf wraps the given error with the given format and returns it as a ProtocolError. +func Wrapf(shouldBan bool, err error, format string, args ...interface{}) error { + return ProtocolError{ + ShouldBan: shouldBan, + Cause: errors.Wrapf(err, format, args...), + } +} + +// ConvertToBanningProtocolErrorIfRuleError converts the given error to +// a banning protocol error if it's a rule error, and otherwise keep it +// as is. +func ConvertToBanningProtocolErrorIfRuleError(err error, format string, args ...interface{}) error { + if !errors.As(err, &ruleerrors.RuleError{}) { + return err + } + + return Wrapf(true, err, format, args...) +} diff --git a/app/rpc/log.go b/app/rpc/log.go new file mode 100644 index 0000000..16df723 --- /dev/null +++ b/app/rpc/log.go @@ -0,0 +1,9 @@ +package rpc + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("RPCS") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/app/rpc/manager.go b/app/rpc/manager.go new file mode 100644 index 0000000..a274d83 --- /dev/null +++ b/app/rpc/manager.go @@ -0,0 +1,238 @@ +package rpc + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/utxoindex" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" + 
"github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" +) + +// Manager is an RPC manager +type Manager struct { + context *rpccontext.Context +} + +// NewManager creates a new RPC Manager +func NewManager( + cfg *config.Config, + domain domain.Domain, + netAdapter *netadapter.NetAdapter, + protocolManager *protocol.Manager, + connectionManager *connmanager.ConnectionManager, + addressManager *addressmanager.AddressManager, + utxoIndex *utxoindex.UTXOIndex, + consensusEventsChan chan externalapi.ConsensusEvent, + shutDownChan chan<- struct{}) *Manager { + + manager := Manager{ + context: rpccontext.NewContext( + cfg, + domain, + netAdapter, + protocolManager, + connectionManager, + addressManager, + utxoIndex, + shutDownChan, + ), + } + netAdapter.SetRPCRouterInitializer(manager.routerInitializer) + + manager.initConsensusEventsHandler(consensusEventsChan) + + return &manager +} + +func (m *Manager) initConsensusEventsHandler(consensusEventsChan chan externalapi.ConsensusEvent) { + spawn("consensusEventsHandler", func() { + for { + consensusEvent, ok := <-consensusEventsChan + if !ok { + return + } + switch event := consensusEvent.(type) { + case *externalapi.VirtualChangeSet: + err := m.notifyVirtualChange(event) + if err != nil { + panic(err) + } + case *externalapi.BlockAdded: + err := m.notifyBlockAddedToDAG(event.Block) + if err != nil { + panic(err) + } + default: + panic(errors.Errorf("Got event of unsupported type %T", consensusEvent)) + } + } + }) +} + +// notifyBlockAddedToDAG notifies the manager that a block has been added to the DAG +func (m *Manager) notifyBlockAddedToDAG(block *externalapi.DomainBlock) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyBlockAddedToDAG") + defer onEnd() + + // Before converting the block and populating it, we check if any listeners are interested. 
+ // This is done since most nodes do not use this event. + if !m.context.NotificationManager.HasBlockAddedListeners() { + return nil + } + + rpcBlock := appmessage.DomainBlockToRPCBlock(block) + err := m.context.PopulateBlockWithVerboseData(rpcBlock, block.Header, block, true) + if err != nil { + return err + } + blockAddedNotification := appmessage.NewBlockAddedNotificationMessage(rpcBlock) + err = m.context.NotificationManager.NotifyBlockAdded(blockAddedNotification) + if err != nil { + return err + } + + return nil +} + +// notifyVirtualChange notifies the manager that the virtual block has been changed. +func (m *Manager) notifyVirtualChange(virtualChangeSet *externalapi.VirtualChangeSet) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualChange") + defer onEnd() + + if m.context.Config.UTXOIndex && virtualChangeSet.VirtualUTXODiff != nil { + err := m.notifyUTXOsChanged(virtualChangeSet) + if err != nil { + return err + } + } + + err := m.notifyVirtualSelectedParentBlueScoreChanged(virtualChangeSet.VirtualSelectedParentBlueScore) + if err != nil { + return err + } + + err = m.notifyVirtualDaaScoreChanged(virtualChangeSet.VirtualDAAScore) + if err != nil { + return err + } + + if virtualChangeSet.VirtualSelectedParentChainChanges == nil || + (len(virtualChangeSet.VirtualSelectedParentChainChanges.Added) == 0 && + len(virtualChangeSet.VirtualSelectedParentChainChanges.Removed) == 0) { + + return nil + } + + err = m.notifyVirtualSelectedParentChainChanged(virtualChangeSet) + if err != nil { + return err + } + + return nil +} + +// NotifyNewBlockTemplate notifies the manager that a new +// block template is available for miners +func (m *Manager) NotifyNewBlockTemplate() error { + notification := appmessage.NewNewBlockTemplateNotificationMessage() + return m.context.NotificationManager.NotifyNewBlockTemplate(notification) +} + +// NotifyPruningPointUTXOSetOverride notifies the manager whenever the UTXO index +// resets due to 
pruning point change via IBD. +func (m *Manager) NotifyPruningPointUTXOSetOverride() error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyPruningPointUTXOSetOverride") + defer onEnd() + + if m.context.Config.UTXOIndex { + err := m.notifyPruningPointUTXOSetOverride() + if err != nil { + return err + } + } + + return nil +} + +// NotifyFinalityConflict notifies the manager that there's a finality conflict in the DAG +func (m *Manager) NotifyFinalityConflict(violatingBlockHash string) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyFinalityConflict") + defer onEnd() + + notification := appmessage.NewFinalityConflictNotificationMessage(violatingBlockHash) + return m.context.NotificationManager.NotifyFinalityConflict(notification) +} + +// NotifyFinalityConflictResolved notifies the manager that a finality conflict in the DAG has been resolved +func (m *Manager) NotifyFinalityConflictResolved(finalityBlockHash string) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyFinalityConflictResolved") + defer onEnd() + + notification := appmessage.NewFinalityConflictResolvedNotificationMessage(finalityBlockHash) + return m.context.NotificationManager.NotifyFinalityConflictResolved(notification) +} + +func (m *Manager) notifyUTXOsChanged(virtualChangeSet *externalapi.VirtualChangeSet) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyUTXOsChanged") + defer onEnd() + + utxoIndexChanges, err := m.context.UTXOIndex.Update(virtualChangeSet) + if err != nil { + return err + } + + return m.context.NotificationManager.NotifyUTXOsChanged(utxoIndexChanges) +} + +func (m *Manager) notifyPruningPointUTXOSetOverride() error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.notifyPruningPointUTXOSetOverride") + defer onEnd() + + err := m.context.UTXOIndex.Reset() + if err != nil { + return err + } + + return m.context.NotificationManager.NotifyPruningPointUTXOSetOverride() +} 
+ +func (m *Manager) notifyVirtualSelectedParentBlueScoreChanged(virtualSelectedParentBlueScore uint64) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentBlueScoreChanged") + defer onEnd() + + notification := appmessage.NewVirtualSelectedParentBlueScoreChangedNotificationMessage(virtualSelectedParentBlueScore) + return m.context.NotificationManager.NotifyVirtualSelectedParentBlueScoreChanged(notification) +} + +func (m *Manager) notifyVirtualDaaScoreChanged(virtualDAAScore uint64) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualDaaScoreChanged") + defer onEnd() + + notification := appmessage.NewVirtualDaaScoreChangedNotificationMessage(virtualDAAScore) + return m.context.NotificationManager.NotifyVirtualDaaScoreChanged(notification) +} + +func (m *Manager) notifyVirtualSelectedParentChainChanged(virtualChangeSet *externalapi.VirtualChangeSet) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "RPCManager.NotifyVirtualSelectedParentChainChanged") + defer onEnd() + + hasListeners, includeAcceptedTransactionIDs := m.context.NotificationManager.HasListenersThatPropagateVirtualSelectedParentChainChanged() + + if hasListeners { + notification, err := m.context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage( + virtualChangeSet.VirtualSelectedParentChainChanges, includeAcceptedTransactionIDs) + if err != nil { + return err + } + return m.context.NotificationManager.NotifyVirtualSelectedParentChainChanged(notification) + } + + return nil +} diff --git a/app/rpc/rpc.go b/app/rpc/rpc.go new file mode 100644 index 0000000..76afee0 --- /dev/null +++ b/app/rpc/rpc.go @@ -0,0 +1,107 @@ +package rpc + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/app/rpc/rpchandlers" + 
"github.com/spectre-project/spectred/infrastructure/network/netadapter" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +type handler func(context *rpccontext.Context, router *router.Router, request appmessage.Message) (appmessage.Message, error) + +var handlers = map[appmessage.MessageCommand]handler{ + appmessage.CmdGetCurrentNetworkRequestMessage: rpchandlers.HandleGetCurrentNetwork, + appmessage.CmdSubmitBlockRequestMessage: rpchandlers.HandleSubmitBlock, + appmessage.CmdGetBlockTemplateRequestMessage: rpchandlers.HandleGetBlockTemplate, + appmessage.CmdNotifyBlockAddedRequestMessage: rpchandlers.HandleNotifyBlockAdded, + appmessage.CmdGetPeerAddressesRequestMessage: rpchandlers.HandleGetPeerAddresses, + appmessage.CmdGetSelectedTipHashRequestMessage: rpchandlers.HandleGetSelectedTipHash, + appmessage.CmdGetMempoolEntryRequestMessage: rpchandlers.HandleGetMempoolEntry, + appmessage.CmdGetConnectedPeerInfoRequestMessage: rpchandlers.HandleGetConnectedPeerInfo, + appmessage.CmdAddPeerRequestMessage: rpchandlers.HandleAddPeer, + appmessage.CmdSubmitTransactionRequestMessage: rpchandlers.HandleSubmitTransaction, + appmessage.CmdNotifyVirtualSelectedParentChainChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentChainChanged, + appmessage.CmdGetBlockRequestMessage: rpchandlers.HandleGetBlock, + appmessage.CmdGetSubnetworkRequestMessage: rpchandlers.HandleGetSubnetwork, + appmessage.CmdGetVirtualSelectedParentChainFromBlockRequestMessage: rpchandlers.HandleGetVirtualSelectedParentChainFromBlock, + appmessage.CmdGetBlocksRequestMessage: rpchandlers.HandleGetBlocks, + appmessage.CmdGetBlockCountRequestMessage: rpchandlers.HandleGetBlockCount, + appmessage.CmdGetBalanceByAddressRequestMessage: rpchandlers.HandleGetBalanceByAddress, + appmessage.CmdGetBlockDAGInfoRequestMessage: rpchandlers.HandleGetBlockDAGInfo, + appmessage.CmdResolveFinalityConflictRequestMessage: rpchandlers.HandleResolveFinalityConflict, + 
appmessage.CmdNotifyFinalityConflictsRequestMessage: rpchandlers.HandleNotifyFinalityConflicts, + appmessage.CmdGetMempoolEntriesRequestMessage: rpchandlers.HandleGetMempoolEntries, + appmessage.CmdShutDownRequestMessage: rpchandlers.HandleShutDown, + appmessage.CmdGetHeadersRequestMessage: rpchandlers.HandleGetHeaders, + appmessage.CmdNotifyUTXOsChangedRequestMessage: rpchandlers.HandleNotifyUTXOsChanged, + appmessage.CmdStopNotifyingUTXOsChangedRequestMessage: rpchandlers.HandleStopNotifyingUTXOsChanged, + appmessage.CmdGetUTXOsByAddressesRequestMessage: rpchandlers.HandleGetUTXOsByAddresses, + appmessage.CmdGetBalancesByAddressesRequestMessage: rpchandlers.HandleGetBalancesByAddresses, + appmessage.CmdGetVirtualSelectedParentBlueScoreRequestMessage: rpchandlers.HandleGetVirtualSelectedParentBlueScore, + appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualSelectedParentBlueScoreChanged, + appmessage.CmdBanRequestMessage: rpchandlers.HandleBan, + appmessage.CmdUnbanRequestMessage: rpchandlers.HandleUnban, + appmessage.CmdGetInfoRequestMessage: rpchandlers.HandleGetInfo, + appmessage.CmdNotifyPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleNotifyPruningPointUTXOSetOverrideRequest, + appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideRequestMessage: rpchandlers.HandleStopNotifyingPruningPointUTXOSetOverrideRequest, + appmessage.CmdEstimateNetworkHashesPerSecondRequestMessage: rpchandlers.HandleEstimateNetworkHashesPerSecond, + appmessage.CmdNotifyVirtualDaaScoreChangedRequestMessage: rpchandlers.HandleNotifyVirtualDaaScoreChanged, + appmessage.CmdNotifyNewBlockTemplateRequestMessage: rpchandlers.HandleNotifyNewBlockTemplate, + appmessage.CmdGetCoinSupplyRequestMessage: rpchandlers.HandleGetCoinSupply, + appmessage.CmdGetMempoolEntriesByAddressesRequestMessage: rpchandlers.HandleGetMempoolEntriesByAddresses, +} + +func (m *Manager) routerInitializer(router *router.Router, netConnection 
*netadapter.NetConnection) { + messageTypes := make([]appmessage.MessageCommand, 0, len(handlers)) + for messageType := range handlers { + messageTypes = append(messageTypes, messageType) + } + incomingRoute, err := router.AddIncomingRoute("rpc router", messageTypes) + if err != nil { + panic(err) + } + m.context.NotificationManager.AddListener(router) + + spawn("routerInitializer-handleIncomingMessages", func() { + defer m.context.NotificationManager.RemoveListener(router) + + err := m.handleIncomingMessages(router, incomingRoute) + m.handleError(err, netConnection) + }) +} + +func (m *Manager) handleIncomingMessages(router *router.Router, incomingRoute *router.Route) error { + outgoingRoute := router.OutgoingRoute() + for { + request, err := incomingRoute.Dequeue() + if err != nil { + return err + } + handler, ok := handlers[request.Command()] + if !ok { + return err + } + response, err := handler(m.context, router, request) + if err != nil { + return err + } + err = outgoingRoute.Enqueue(response) + if err != nil { + return err + } + } +} + +func (m *Manager) handleError(err error, netConnection *netadapter.NetConnection) { + if errors.Is(err, router.ErrTimeout) { + log.Warnf("Got timeout from %s. 
Disconnecting...", netConnection) + netConnection.Disconnect() + return + } + if errors.Is(err, router.ErrRouteClosed) { + return + } + panic(err) +} diff --git a/app/rpc/rpccontext/chain_changed.go b/app/rpc/rpccontext/chain_changed.go new file mode 100644 index 0000000..6fb880d --- /dev/null +++ b/app/rpc/rpccontext/chain_changed.go @@ -0,0 +1,79 @@ +package rpccontext + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage converts +// VirtualSelectedParentChainChanges to VirtualSelectedParentChainChangedNotificationMessage +func (ctx *Context) ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage( + selectedParentChainChanges *externalapi.SelectedChainPath, includeAcceptedTransactionIDs bool) ( + *appmessage.VirtualSelectedParentChainChangedNotificationMessage, error) { + + removedChainBlockHashes := make([]string, len(selectedParentChainChanges.Removed)) + for i, removed := range selectedParentChainChanges.Removed { + removedChainBlockHashes[i] = removed.String() + } + + addedChainBlocks := make([]string, len(selectedParentChainChanges.Added)) + for i, added := range selectedParentChainChanges.Added { + addedChainBlocks[i] = added.String() + } + + var acceptedTransactionIDs []*appmessage.AcceptedTransactionIDs + if includeAcceptedTransactionIDs { + var err error + acceptedTransactionIDs, err = ctx.getAndConvertAcceptedTransactionIDs(selectedParentChainChanges) + if err != nil { + return nil, err + } + } + + return appmessage.NewVirtualSelectedParentChainChangedNotificationMessage( + removedChainBlockHashes, addedChainBlocks, acceptedTransactionIDs), nil +} + +func (ctx *Context) getAndConvertAcceptedTransactionIDs(selectedParentChainChanges *externalapi.SelectedChainPath) ( + 
[]*appmessage.AcceptedTransactionIDs, error) { + + acceptedTransactionIDs := make([]*appmessage.AcceptedTransactionIDs, len(selectedParentChainChanges.Added)) + + const chunk = 1000 + position := 0 + + for position < len(selectedParentChainChanges.Added) { + var chainBlocksChunk []*externalapi.DomainHash + if position+chunk > len(selectedParentChainChanges.Added) { + chainBlocksChunk = selectedParentChainChanges.Added[position:] + } else { + chainBlocksChunk = selectedParentChainChanges.Added[position : position+chunk] + } + // We use chunks in order to avoid blocking consensus for too long + chainBlocksAcceptanceData, err := ctx.Domain.Consensus().GetBlocksAcceptanceData(chainBlocksChunk) + if err != nil { + return nil, err + } + + for i, addedChainBlock := range chainBlocksChunk { + chainBlockAcceptanceData := chainBlocksAcceptanceData[i] + acceptedTransactionIDs[position+i] = &appmessage.AcceptedTransactionIDs{ + AcceptingBlockHash: addedChainBlock.String(), + AcceptedTransactionIDs: nil, + } + for _, blockAcceptanceData := range chainBlockAcceptanceData { + for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData { + if transactionAcceptanceData.IsAccepted { + acceptedTransactionIDs[position+i].AcceptedTransactionIDs = + append(acceptedTransactionIDs[position+i].AcceptedTransactionIDs, + consensushashing.TransactionID(transactionAcceptanceData.Transaction).String()) + } + } + } + } + position += chunk + } + + return acceptedTransactionIDs, nil +} diff --git a/app/rpc/rpccontext/context.go b/app/rpc/rpccontext/context.go new file mode 100644 index 0000000..ac4a9b7 --- /dev/null +++ b/app/rpc/rpccontext/context.go @@ -0,0 +1,50 @@ +package rpccontext + +import ( + "github.com/spectre-project/spectred/app/protocol" + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/utxoindex" + "github.com/spectre-project/spectred/infrastructure/config" + 
"github.com/spectre-project/spectred/infrastructure/network/addressmanager" + "github.com/spectre-project/spectred/infrastructure/network/connmanager" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" +) + +// Context represents the RPC context +type Context struct { + Config *config.Config + NetAdapter *netadapter.NetAdapter + Domain domain.Domain + ProtocolManager *protocol.Manager + ConnectionManager *connmanager.ConnectionManager + AddressManager *addressmanager.AddressManager + UTXOIndex *utxoindex.UTXOIndex + ShutDownChan chan<- struct{} + + NotificationManager *NotificationManager +} + +// NewContext creates a new RPC context +func NewContext(cfg *config.Config, + domain domain.Domain, + netAdapter *netadapter.NetAdapter, + protocolManager *protocol.Manager, + connectionManager *connmanager.ConnectionManager, + addressManager *addressmanager.AddressManager, + utxoIndex *utxoindex.UTXOIndex, + shutDownChan chan<- struct{}) *Context { + + context := &Context{ + Config: cfg, + NetAdapter: netAdapter, + Domain: domain, + ProtocolManager: protocolManager, + ConnectionManager: connectionManager, + AddressManager: addressManager, + UTXOIndex: utxoIndex, + ShutDownChan: shutDownChan, + } + context.NotificationManager = NewNotificationManager(cfg.ActiveNetParams) + + return context +} diff --git a/app/rpc/rpccontext/log.go b/app/rpc/rpccontext/log.go new file mode 100644 index 0000000..902ff18 --- /dev/null +++ b/app/rpc/rpccontext/log.go @@ -0,0 +1,7 @@ +package rpccontext + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("RPCS") diff --git a/app/rpc/rpccontext/notificationmanager.go b/app/rpc/rpccontext/notificationmanager.go new file mode 100644 index 0000000..2c17efc --- /dev/null +++ b/app/rpc/rpccontext/notificationmanager.go @@ -0,0 +1,491 @@ +package rpccontext + +import ( + "sync" + + "github.com/spectre-project/spectred/domain/dagconfig" + + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/utxoindex" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// NotificationManager manages notifications for the RPC +type NotificationManager struct { + sync.RWMutex + listeners map[*routerpkg.Router]*NotificationListener + params *dagconfig.Params +} + +// UTXOsChangedNotificationAddress represents a spectred address. +// This type is meant to be used in UTXOsChanged notifications +type UTXOsChangedNotificationAddress struct { + Address string + ScriptPublicKeyString utxoindex.ScriptPublicKeyString +} + +// NotificationListener represents a registered RPC notification listener +type NotificationListener struct { + params *dagconfig.Params + + propagateBlockAddedNotifications bool + propagateVirtualSelectedParentChainChangedNotifications bool + propagateFinalityConflictNotifications bool + propagateFinalityConflictResolvedNotifications bool + propagateUTXOsChangedNotifications bool + propagateVirtualSelectedParentBlueScoreChangedNotifications bool + propagateVirtualDaaScoreChangedNotifications bool + propagatePruningPointUTXOSetOverrideNotifications bool + propagateNewBlockTemplateNotifications bool + + propagateUTXOsChangedNotificationAddresses map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress + includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications bool +} + +// NewNotificationManager creates a new NotificationManager +func NewNotificationManager(params *dagconfig.Params) *NotificationManager { + return &NotificationManager{ + params: params, + listeners: make(map[*routerpkg.Router]*NotificationListener), + } +} + +// AddListener registers a listener with the given router +func (nm *NotificationManager) 
AddListener(router *routerpkg.Router) { + nm.Lock() + defer nm.Unlock() + + listener := newNotificationListener(nm.params) + nm.listeners[router] = listener +} + +// RemoveListener unregisters the given router +func (nm *NotificationManager) RemoveListener(router *routerpkg.Router) { + nm.Lock() + defer nm.Unlock() + + delete(nm.listeners, router) +} + +// Listener retrieves the listener registered with the given router +func (nm *NotificationManager) Listener(router *routerpkg.Router) (*NotificationListener, error) { + nm.RLock() + defer nm.RUnlock() + + listener, ok := nm.listeners[router] + if !ok { + return nil, errors.Errorf("listener not found") + } + return listener, nil +} + +// HasBlockAddedListeners indicates if the notification manager has any listeners for `BlockAdded` events +func (nm *NotificationManager) HasBlockAddedListeners() bool { + nm.RLock() + defer nm.RUnlock() + + for _, listener := range nm.listeners { + if listener.propagateBlockAddedNotifications { + return true + } + } + return false +} + +// NotifyBlockAdded notifies the notification manager that a block has been added to the DAG +func (nm *NotificationManager) NotifyBlockAdded(notification *appmessage.BlockAddedNotificationMessage) error { + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateBlockAddedNotifications { + err := router.OutgoingRoute().MaybeEnqueue(notification) + if err != nil { + return err + } + } + } + return nil +} + +// NotifyVirtualSelectedParentChainChanged notifies the notification manager that the DAG's selected parent chain has changed +func (nm *NotificationManager) NotifyVirtualSelectedParentChainChanged( + notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error { + + nm.RLock() + defer nm.RUnlock() + + notificationWithoutAcceptedTransactionIDs := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{ + RemovedChainBlockHashes: notification.RemovedChainBlockHashes, + 
AddedChainBlockHashes: notification.AddedChainBlockHashes, + } + + for router, listener := range nm.listeners { + if listener.propagateVirtualSelectedParentChainChangedNotifications { + var err error + + if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications { + err = router.OutgoingRoute().MaybeEnqueue(notification) + } else { + err = router.OutgoingRoute().MaybeEnqueue(notificationWithoutAcceptedTransactionIDs) + } + + if err != nil { + return err + } + } + } + return nil +} + +// HasListenersThatPropagateVirtualSelectedParentChainChanged returns whether there's any listener that is +// subscribed to VirtualSelectedParentChainChanged notifications as well as checks if any such listener requested +// to include AcceptedTransactionIDs. +func (nm *NotificationManager) HasListenersThatPropagateVirtualSelectedParentChainChanged() (hasListeners, hasListenersThatRequireAcceptedTransactionIDs bool) { + + nm.RLock() + defer nm.RUnlock() + + hasListeners = false + hasListenersThatRequireAcceptedTransactionIDs = false + + for _, listener := range nm.listeners { + if listener.propagateVirtualSelectedParentChainChangedNotifications { + hasListeners = true + // Generating acceptedTransactionIDs is a heavy operation, so we check if it's needed by any listener. 
+ if listener.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications { + hasListenersThatRequireAcceptedTransactionIDs = true + break + } + } + } + + return hasListeners, hasListenersThatRequireAcceptedTransactionIDs +} + +// NotifyFinalityConflict notifies the notification manager that there's a finality conflict in the DAG +func (nm *NotificationManager) NotifyFinalityConflict(notification *appmessage.FinalityConflictNotificationMessage) error { + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateFinalityConflictNotifications { + err := router.OutgoingRoute().Enqueue(notification) + if err != nil { + return err + } + } + } + return nil +} + +// NotifyFinalityConflictResolved notifies the notification manager that a finality conflict in the DAG has been resolved +func (nm *NotificationManager) NotifyFinalityConflictResolved(notification *appmessage.FinalityConflictResolvedNotificationMessage) error { + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateFinalityConflictResolvedNotifications { + err := router.OutgoingRoute().Enqueue(notification) + if err != nil { + return err + } + } + } + return nil +} + +// NotifyUTXOsChanged notifies the notification manager that UTXOs have been changed +func (nm *NotificationManager) NotifyUTXOsChanged(utxoChanges *utxoindex.UTXOChanges) error { + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateUTXOsChangedNotifications { + // Filter utxoChanges and create a notification + notification, err := listener.convertUTXOChangesToUTXOsChangedNotification(utxoChanges) + if err != nil { + return err + } + + // Don't send the notification if it's empty + if len(notification.Added) == 0 && len(notification.Removed) == 0 { + continue + } + + // Enqueue the notification + err = router.OutgoingRoute().MaybeEnqueue(notification) + if err != nil { + return err + 
} + } + } + return nil +} + +// NotifyVirtualSelectedParentBlueScoreChanged notifies the notification manager that the DAG's +// virtual selected parent blue score has changed +func (nm *NotificationManager) NotifyVirtualSelectedParentBlueScoreChanged( + notification *appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage) error { + + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateVirtualSelectedParentBlueScoreChangedNotifications { + err := router.OutgoingRoute().MaybeEnqueue(notification) + if err != nil { + return err + } + } + } + return nil +} + +// NotifyVirtualDaaScoreChanged notifies the notification manager that the DAG's +// virtual DAA score has changed +func (nm *NotificationManager) NotifyVirtualDaaScoreChanged( + notification *appmessage.VirtualDaaScoreChangedNotificationMessage) error { + + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateVirtualDaaScoreChangedNotifications { + err := router.OutgoingRoute().MaybeEnqueue(notification) + if err != nil { + return err + } + } + } + return nil +} + +// NotifyNewBlockTemplate notifies the notification manager that a new +// block template is available for miners +func (nm *NotificationManager) NotifyNewBlockTemplate( + notification *appmessage.NewBlockTemplateNotificationMessage) error { + + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagateNewBlockTemplateNotifications { + err := router.OutgoingRoute().Enqueue(notification) + if err != nil { + return err + } + } + } + return nil +} + +// NotifyPruningPointUTXOSetOverride notifies the notification manager that the UTXO index +// reset due to pruning point change via IBD. 
+func (nm *NotificationManager) NotifyPruningPointUTXOSetOverride() error { + nm.RLock() + defer nm.RUnlock() + + for router, listener := range nm.listeners { + if listener.propagatePruningPointUTXOSetOverrideNotifications { + err := router.OutgoingRoute().Enqueue(appmessage.NewPruningPointUTXOSetOverrideNotificationMessage()) + if err != nil { + return err + } + } + } + return nil +} + +func newNotificationListener(params *dagconfig.Params) *NotificationListener { + return &NotificationListener{ + params: params, + + propagateBlockAddedNotifications: false, + propagateVirtualSelectedParentChainChangedNotifications: false, + propagateFinalityConflictNotifications: false, + propagateFinalityConflictResolvedNotifications: false, + propagateUTXOsChangedNotifications: false, + propagateVirtualSelectedParentBlueScoreChangedNotifications: false, + propagateNewBlockTemplateNotifications: false, + propagatePruningPointUTXOSetOverrideNotifications: false, + } +} + +// IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications returns true if this listener +// includes accepted transaction IDs in it's virtual-selected-parent-chain-changed notifications +func (nl *NotificationListener) IncludeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications() bool { + return nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications +} + +// PropagateBlockAddedNotifications instructs the listener to send block added notifications +// to the remote listener +func (nl *NotificationListener) PropagateBlockAddedNotifications() { + nl.propagateBlockAddedNotifications = true +} + +// PropagateVirtualSelectedParentChainChangedNotifications instructs the listener to send chain changed notifications +// to the remote listener +func (nl *NotificationListener) PropagateVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool) { + nl.propagateVirtualSelectedParentChainChangedNotifications = true + 
nl.includeAcceptedTransactionIDsInVirtualSelectedParentChainChangedNotifications = includeAcceptedTransactionIDs +} + +// PropagateFinalityConflictNotifications instructs the listener to send finality conflict notifications +// to the remote listener +func (nl *NotificationListener) PropagateFinalityConflictNotifications() { + nl.propagateFinalityConflictNotifications = true +} + +// PropagateFinalityConflictResolvedNotifications instructs the listener to send finality conflict resolved notifications +// to the remote listener +func (nl *NotificationListener) PropagateFinalityConflictResolvedNotifications() { + nl.propagateFinalityConflictResolvedNotifications = true +} + +// PropagateUTXOsChangedNotifications instructs the listener to send UTXOs changed notifications +// to the remote listener for the given addresses. Subsequent calls instruct the listener to +// send UTXOs changed notifications for those addresses along with the old ones. Duplicate addresses +// are ignored. +func (nm *NotificationManager) PropagateUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) { + // Apply a write-lock since the internal listener address map is modified + nm.Lock() + defer nm.Unlock() + + if !nl.propagateUTXOsChangedNotifications { + nl.propagateUTXOsChangedNotifications = true + nl.propagateUTXOsChangedNotificationAddresses = + make(map[utxoindex.ScriptPublicKeyString]*UTXOsChangedNotificationAddress, len(addresses)) + } + + for _, address := range addresses { + nl.propagateUTXOsChangedNotificationAddresses[address.ScriptPublicKeyString] = address + } +} + +// StopPropagatingUTXOsChangedNotifications instructs the listener to stop sending UTXOs +// changed notifications to the remote listener for the given addresses. Addresses for which +// notifications are not currently sent are ignored. 
+func (nm *NotificationManager) StopPropagatingUTXOsChangedNotifications(nl *NotificationListener, addresses []*UTXOsChangedNotificationAddress) { + // Apply a write-lock since the internal listener address map is modified + nm.Lock() + defer nm.Unlock() + + if !nl.propagateUTXOsChangedNotifications { + return + } + + for _, address := range addresses { + delete(nl.propagateUTXOsChangedNotificationAddresses, address.ScriptPublicKeyString) + } +} + +func (nl *NotificationListener) convertUTXOChangesToUTXOsChangedNotification( + utxoChanges *utxoindex.UTXOChanges) (*appmessage.UTXOsChangedNotificationMessage, error) { + + // As an optimization, we iterate over the smaller set (O(n)) among the two below + // and check existence over the larger set (O(1)) + utxoChangesSize := len(utxoChanges.Added) + len(utxoChanges.Removed) + addressesSize := len(nl.propagateUTXOsChangedNotificationAddresses) + + notification := &appmessage.UTXOsChangedNotificationMessage{} + if utxoChangesSize < addressesSize { + for scriptPublicKeyString, addedPairs := range utxoChanges.Added { + if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok { + utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs) + notification.Added = append(notification.Added, utxosByAddressesEntries...) + } + } + for scriptPublicKeyString, removedPairs := range utxoChanges.Removed { + if listenerAddress, ok := nl.propagateUTXOsChangedNotificationAddresses[scriptPublicKeyString]; ok { + utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs) + notification.Removed = append(notification.Removed, utxosByAddressesEntries...) 
+ } + } + } else if addressesSize > 0 { + for _, listenerAddress := range nl.propagateUTXOsChangedNotificationAddresses { + listenerScriptPublicKeyString := listenerAddress.ScriptPublicKeyString + if addedPairs, ok := utxoChanges.Added[listenerScriptPublicKeyString]; ok { + utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, addedPairs) + notification.Added = append(notification.Added, utxosByAddressesEntries...) + } + if removedPairs, ok := utxoChanges.Removed[listenerScriptPublicKeyString]; ok { + utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(listenerAddress.Address, removedPairs) + notification.Removed = append(notification.Removed, utxosByAddressesEntries...) + } + } + } else { + for scriptPublicKeyString, addedPairs := range utxoChanges.Added { + addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString) + if err != nil { + return nil, err + } + + utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, addedPairs) + notification.Added = append(notification.Added, utxosByAddressesEntries...) + } + for scriptPublicKeyString, removedPAirs := range utxoChanges.Removed { + addressString, err := nl.scriptPubKeyStringToAddressString(scriptPublicKeyString) + if err != nil { + return nil, err + } + + utxosByAddressesEntries := ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, removedPAirs) + notification.Removed = append(notification.Removed, utxosByAddressesEntries...) 
+ } + } + + return notification, nil +} + +func (nl *NotificationListener) scriptPubKeyStringToAddressString(scriptPublicKeyString utxoindex.ScriptPublicKeyString) (string, error) { + scriptPubKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString)) + + // ignore error because it is often returned when the script is of unknown type + scriptType, address, err := txscript.ExtractScriptPubKeyAddress(scriptPubKey, nl.params) + if err != nil { + return "", err + } + + var addressString string + if scriptType == txscript.NonStandardTy { + addressString = "" + } else { + addressString = address.String() + } + return addressString, nil +} + +// PropagateVirtualSelectedParentBlueScoreChangedNotifications instructs the listener to send +// virtual selected parent blue score notifications to the remote listener +func (nl *NotificationListener) PropagateVirtualSelectedParentBlueScoreChangedNotifications() { + nl.propagateVirtualSelectedParentBlueScoreChangedNotifications = true +} + +// PropagateVirtualDaaScoreChangedNotifications instructs the listener to send +// virtual DAA score notifications to the remote listener +func (nl *NotificationListener) PropagateVirtualDaaScoreChangedNotifications() { + nl.propagateVirtualDaaScoreChangedNotifications = true +} + +// PropagateNewBlockTemplateNotifications instructs the listener to send +// new block template notifications to the remote listener +func (nl *NotificationListener) PropagateNewBlockTemplateNotifications() { + nl.propagateNewBlockTemplateNotifications = true +} + +// PropagatePruningPointUTXOSetOverrideNotifications instructs the listener to send pruning point UTXO set override notifications +// to the remote listener. 
+func (nl *NotificationListener) PropagatePruningPointUTXOSetOverrideNotifications() { + nl.propagatePruningPointUTXOSetOverrideNotifications = true +} + +// StopPropagatingPruningPointUTXOSetOverrideNotifications instructs the listener to stop sending pruning +// point UTXO set override notifications to the remote listener. +func (nl *NotificationListener) StopPropagatingPruningPointUTXOSetOverrideNotifications() { + nl.propagatePruningPointUTXOSetOverrideNotifications = false +} diff --git a/app/rpc/rpccontext/utxos_by_addresses.go b/app/rpc/rpccontext/utxos_by_addresses.go new file mode 100644 index 0000000..22e9081 --- /dev/null +++ b/app/rpc/rpccontext/utxos_by_addresses.go @@ -0,0 +1,58 @@ +package rpccontext + +import ( + "encoding/hex" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/util" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/utxoindex" +) + +// ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries converts +// UTXOOutpointEntryPairs to a slice of UTXOsByAddressesEntry +func ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(address string, pairs utxoindex.UTXOOutpointEntryPairs) []*appmessage.UTXOsByAddressesEntry { + utxosByAddressesEntries := make([]*appmessage.UTXOsByAddressesEntry, 0, len(pairs)) + for outpoint, utxoEntry := range pairs { + utxosByAddressesEntries = append(utxosByAddressesEntries, &appmessage.UTXOsByAddressesEntry{ + Address: address, + Outpoint: &appmessage.RPCOutpoint{ + TransactionID: outpoint.TransactionID.String(), + Index: outpoint.Index, + }, + UTXOEntry: &appmessage.RPCUTXOEntry{ + Amount: utxoEntry.Amount(), + ScriptPublicKey: &appmessage.RPCScriptPublicKey{Script: hex.EncodeToString(utxoEntry.ScriptPublicKey().Script), Version: utxoEntry.ScriptPublicKey().Version}, + BlockDAAScore: utxoEntry.BlockDAAScore(), + IsCoinbase: utxoEntry.IsCoinbase(), + }, + }) + } + return 
utxosByAddressesEntries +} + +// ConvertAddressStringsToUTXOsChangedNotificationAddresses converts address strings +// to UTXOsChangedNotificationAddresses +func (ctx *Context) ConvertAddressStringsToUTXOsChangedNotificationAddresses( + addressStrings []string) ([]*UTXOsChangedNotificationAddress, error) { + + addresses := make([]*UTXOsChangedNotificationAddress, len(addressStrings)) + for i, addressString := range addressStrings { + address, err := util.DecodeAddress(addressString, ctx.Config.ActiveNetParams.Prefix) + if err != nil { + return nil, errors.Errorf("Could not decode address '%s': %s", addressString, err) + } + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + return nil, errors.Errorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err) + } + scriptPublicKeyString := utxoindex.ScriptPublicKeyString(scriptPublicKey.String()) + addresses[i] = &UTXOsChangedNotificationAddress{ + Address: addressString, + ScriptPublicKeyString: scriptPublicKeyString, + } + } + return addresses, nil +} diff --git a/app/rpc/rpccontext/verbosedata.go b/app/rpc/rpccontext/verbosedata.go new file mode 100644 index 0000000..65a2e8d --- /dev/null +++ b/app/rpc/rpccontext/verbosedata.go @@ -0,0 +1,176 @@ +package rpccontext + +import ( + "encoding/hex" + "math" + "math/big" + + "github.com/pkg/errors" + difficultyPackage "github.com/spectre-project/spectred/util/difficulty" + + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +// ErrBuildBlockVerboseDataInvalidBlock indicates that a block that was given to BuildBlockVerboseData is invalid. 
+var ErrBuildBlockVerboseDataInvalidBlock = errors.New("ErrBuildBlockVerboseDataInvalidBlock") + +// GetDifficultyRatio returns the proof-of-work difficulty as a multiple of the +// minimum difficulty using the passed bits field from the header of a block. +func (ctx *Context) GetDifficultyRatio(bits uint32, params *dagconfig.Params) float64 { + // The minimum difficulty is the max possible proof-of-work limit bits + // converted back to a number. Note this is not the same as the proof of + // work limit directly because the block difficulty is encoded in a block + // with the compact form which loses precision. + target := difficultyPackage.CompactToBig(bits) + + difficulty := new(big.Rat).SetFrac(params.PowMax, target) + diff, _ := difficulty.Float64() + + roundingPrecision := float64(100) + diff = math.Round(diff*roundingPrecision) / roundingPrecision + + return diff +} + +// PopulateBlockWithVerboseData populates the given `block` with verbose +// data from `domainBlockHeader` and optionally from `domainBlock` +func (ctx *Context) PopulateBlockWithVerboseData(block *appmessage.RPCBlock, domainBlockHeader externalapi.BlockHeader, + domainBlock *externalapi.DomainBlock, includeTransactionVerboseData bool) error { + + blockHash := consensushashing.HeaderHash(domainBlockHeader) + + blockInfo, err := ctx.Domain.Consensus().GetBlockInfo(blockHash) + if err != nil { + return err + } + + if blockInfo.BlockStatus == externalapi.StatusInvalid { + return errors.Wrap(ErrBuildBlockVerboseDataInvalidBlock, "cannot build verbose data for "+ + "invalid block") + } + + _, childrenHashes, err := ctx.Domain.Consensus().GetBlockRelations(blockHash) + if err != nil { + return err + } + + isChainBlock, err := ctx.Domain.Consensus().IsChainBlock(blockHash) + if err != nil { + return err + } + + block.VerboseData = &appmessage.RPCBlockVerboseData{ + Hash: blockHash.String(), + Difficulty: ctx.GetDifficultyRatio(domainBlockHeader.Bits(), ctx.Config.ActiveNetParams), + ChildrenHashes: 
hashes.ToStrings(childrenHashes), + IsHeaderOnly: blockInfo.BlockStatus == externalapi.StatusHeaderOnly, + BlueScore: blockInfo.BlueScore, + MergeSetBluesHashes: hashes.ToStrings(blockInfo.MergeSetBlues), + MergeSetRedsHashes: hashes.ToStrings(blockInfo.MergeSetReds), + IsChainBlock: isChainBlock, + } + // selectedParentHash will be nil in the genesis block + if blockInfo.SelectedParent != nil { + block.VerboseData.SelectedParentHash = blockInfo.SelectedParent.String() + } + + if blockInfo.BlockStatus == externalapi.StatusHeaderOnly { + return nil + } + + // Get the block if we didn't receive it previously + if domainBlock == nil { + domainBlock, err = ctx.Domain.Consensus().GetBlockEvenIfHeaderOnly(blockHash) + if err != nil { + return err + } + } + + transactionIDs := make([]string, len(domainBlock.Transactions)) + for i, transaction := range domainBlock.Transactions { + transactionIDs[i] = consensushashing.TransactionID(transaction).String() + } + block.VerboseData.TransactionIDs = transactionIDs + + if includeTransactionVerboseData { + for _, transaction := range block.Transactions { + err := ctx.PopulateTransactionWithVerboseData(transaction, domainBlockHeader) + if err != nil { + return err + } + } + } + + return nil +} + +// PopulateTransactionWithVerboseData populates the given `transaction` with +// verbose data from `domainTransaction` +func (ctx *Context) PopulateTransactionWithVerboseData( + transaction *appmessage.RPCTransaction, domainBlockHeader externalapi.BlockHeader) error { + + domainTransaction, err := appmessage.RPCTransactionToDomainTransaction(transaction) + if err != nil { + return err + } + + ctx.Domain.Consensus().PopulateMass(domainTransaction) + + transaction.VerboseData = &appmessage.RPCTransactionVerboseData{ + TransactionID: consensushashing.TransactionID(domainTransaction).String(), + Hash: consensushashing.TransactionHash(domainTransaction).String(), + Mass: domainTransaction.Mass, + } + if domainBlockHeader != nil { + 
transaction.VerboseData.BlockHash = consensushashing.HeaderHash(domainBlockHeader).String() + transaction.VerboseData.BlockTime = uint64(domainBlockHeader.TimeInMilliseconds()) + } + for _, input := range transaction.Inputs { + ctx.populateTransactionInputWithVerboseData(input) + } + for _, output := range transaction.Outputs { + err := ctx.populateTransactionOutputWithVerboseData(output) + if err != nil { + return err + } + } + return nil +} + +func (ctx *Context) populateTransactionInputWithVerboseData(transactionInput *appmessage.RPCTransactionInput) { + transactionInput.VerboseData = &appmessage.RPCTransactionInputVerboseData{} +} + +func (ctx *Context) populateTransactionOutputWithVerboseData(transactionOutput *appmessage.RPCTransactionOutput) error { + scriptPublicKey, err := hex.DecodeString(transactionOutput.ScriptPublicKey.Script) + if err != nil { + return err + } + domainScriptPublicKey := &externalapi.ScriptPublicKey{ + Script: scriptPublicKey, + Version: transactionOutput.ScriptPublicKey.Version, + } + + // Ignore the error here since an error means the script + // couldn't be parsed and there's no additional information about + // it anyways + scriptPublicKeyType, scriptPublicKeyAddress, _ := txscript.ExtractScriptPubKeyAddress( + domainScriptPublicKey, ctx.Config.ActiveNetParams) + + var encodedScriptPublicKeyAddress string + if scriptPublicKeyAddress != nil { + encodedScriptPublicKeyAddress = scriptPublicKeyAddress.EncodeAddress() + } + transactionOutput.VerboseData = &appmessage.RPCTransactionOutputVerboseData{ + ScriptPublicKeyType: scriptPublicKeyType.String(), + ScriptPublicKeyAddress: encodedScriptPublicKeyAddress, + } + return nil +} diff --git a/app/rpc/rpchandlers/add_peer.go b/app/rpc/rpchandlers/add_peer.go new file mode 100644 index 0000000..092f4ab --- /dev/null +++ b/app/rpc/rpchandlers/add_peer.go @@ -0,0 +1,32 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + 
"github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util/network" +) + +// HandleAddPeer handles the respectively named RPC command +func HandleAddPeer(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + if context.Config.SafeRPC { + log.Warn("AddPeer RPC command called while node in safe RPC mode -- ignoring.") + response := appmessage.NewAddPeerResponseMessage() + response.Error = + appmessage.RPCErrorf("AddPeer RPC command called while node in safe RPC mode") + return response, nil + } + + AddPeerRequest := request.(*appmessage.AddPeerRequestMessage) + address, err := network.NormalizeAddress(AddPeerRequest.Address, context.Config.ActiveNetParams.DefaultPort) + if err != nil { + errorMessage := &appmessage.AddPeerResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not parse address: %s", err) + return errorMessage, nil + } + + context.ConnectionManager.AddConnectionRequest(address, AddPeerRequest.IsPermanent) + + response := appmessage.NewAddPeerResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/ban.go b/app/rpc/rpchandlers/ban.go new file mode 100644 index 0000000..27ba3d4 --- /dev/null +++ b/app/rpc/rpchandlers/ban.go @@ -0,0 +1,41 @@ +package rpchandlers + +import ( + "net" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleBan handles the respectively named RPC command +func HandleBan(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + if context.Config.SafeRPC { + log.Warn("Ban RPC command called while node in safe RPC mode -- ignoring.") + response := appmessage.NewBanResponseMessage() + response.Error = + 
appmessage.RPCErrorf("Ban RPC command called while node in safe RPC mode") + return response, nil + } + + banRequest := request.(*appmessage.BanRequestMessage) + ip := net.ParseIP(banRequest.IP) + if ip == nil { + hint := "" + if banRequest.IP[0] == '[' { + hint = " (try to remove “[” and “]” symbols)" + } + errorMessage := &appmessage.BanResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not parse IP%s: %s", hint, banRequest.IP) + return errorMessage, nil + } + + err := context.ConnectionManager.BanByIP(ip) + if err != nil { + errorMessage := &appmessage.BanResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not ban IP: %s", err) + return errorMessage, nil + } + response := appmessage.NewBanResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/estimate_network_hashes_per_second.go b/app/rpc/rpchandlers/estimate_network_hashes_per_second.go new file mode 100644 index 0000000..95fe47f --- /dev/null +++ b/app/rpc/rpchandlers/estimate_network_hashes_per_second.go @@ -0,0 +1,60 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleEstimateNetworkHashesPerSecond handles the respectively named RPC command +func HandleEstimateNetworkHashesPerSecond( + context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + + estimateNetworkHashesPerSecondRequest := request.(*appmessage.EstimateNetworkHashesPerSecondRequestMessage) + + windowSize := int(estimateNetworkHashesPerSecondRequest.WindowSize) + startHash := model.VirtualBlockHash + if estimateNetworkHashesPerSecondRequest.StartHash != "" { + var err error + startHash, err = 
externalapi.NewDomainHashFromString(estimateNetworkHashesPerSecondRequest.StartHash) + if err != nil { + response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{} + response.Error = appmessage.RPCErrorf("StartHash '%s' is not a valid block hash", + estimateNetworkHashesPerSecondRequest.StartHash) + return response, nil + } + } + + if context.Config.SafeRPC { + const windowSizeLimit = 10000 + if windowSize > windowSizeLimit { + response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{} + response.Error = + appmessage.RPCErrorf( + "Requested window size %d is larger than max allowed in RPC safe mode (%d)", + windowSize, windowSizeLimit) + return response, nil + } + } + + if uint64(windowSize) > context.Config.ActiveNetParams.PruningDepth() { + response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{} + response.Error = + appmessage.RPCErrorf( + "Requested window size %d is larger than pruning point depth %d", + windowSize, context.Config.ActiveNetParams.PruningDepth()) + return response, nil + } + + networkHashesPerSecond, err := context.Domain.Consensus().EstimateNetworkHashesPerSecond(startHash, windowSize) + if err != nil { + response := &appmessage.EstimateNetworkHashesPerSecondResponseMessage{} + response.Error = appmessage.RPCErrorf("could not resolve network hashes per "+ + "second for startHash %s and window size %d: %s", startHash, windowSize, err) + return response, nil + } + + return appmessage.NewEstimateNetworkHashesPerSecondResponseMessage(networkHashesPerSecond), nil +} diff --git a/app/rpc/rpchandlers/get_balance_by_address.go b/app/rpc/rpchandlers/get_balance_by_address.go new file mode 100644 index 0000000..9b6c3c7 --- /dev/null +++ b/app/rpc/rpchandlers/get_balance_by_address.go @@ -0,0 +1,57 @@ +package rpchandlers + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + 
"github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util" +) + +// HandleGetBalanceByAddress handles the respectively named RPC command +func HandleGetBalanceByAddress(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + if !context.Config.UTXOIndex { + errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Method unavailable when spectred is run without --utxoindex") + return errorMessage, nil + } + + getBalanceByAddressRequest := request.(*appmessage.GetBalanceByAddressRequestMessage) + + balance, err := getBalanceByAddress(context, getBalanceByAddressRequest.Address) + if err != nil { + rpcError := &appmessage.RPCError{} + if !errors.As(err, &rpcError) { + return nil, err + } + errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} + errorMessage.Error = rpcError + return errorMessage, nil + } + + response := appmessage.NewGetBalanceByAddressResponse(balance) + return response, nil +} + +func getBalanceByAddress(context *rpccontext.Context, addressString string) (uint64, error) { + address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix) + if err != nil { + return 0, appmessage.RPCErrorf("Couldn't decode address '%s': %s", addressString, err) + } + + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + return 0, appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err) + } + utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey) + if err != nil { + return 0, err + } + + balance := uint64(0) + for _, utxoOutpointEntryPair := range utxoOutpointEntryPairs { + balance += utxoOutpointEntryPair.Amount() + } + return balance, nil +} diff --git a/app/rpc/rpchandlers/get_balances_by_addresses.go 
b/app/rpc/rpchandlers/get_balances_by_addresses.go new file mode 100644 index 0000000..e9c7a16 --- /dev/null +++ b/app/rpc/rpchandlers/get_balances_by_addresses.go @@ -0,0 +1,41 @@ +package rpchandlers + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetBalancesByAddresses handles the respectively named RPC command +func HandleGetBalancesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + if !context.Config.UTXOIndex { + errorMessage := &appmessage.GetBalancesByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Method unavailable when spectred is run without --utxoindex") + return errorMessage, nil + } + + getBalancesByAddressesRequest := request.(*appmessage.GetBalancesByAddressesRequestMessage) + + allEntries := make([]*appmessage.BalancesByAddressesEntry, len(getBalancesByAddressesRequest.Addresses)) + for i, address := range getBalancesByAddressesRequest.Addresses { + balance, err := getBalanceByAddress(context, address) + + if err != nil { + rpcError := &appmessage.RPCError{} + if !errors.As(err, &rpcError) { + return nil, err + } + errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} + errorMessage.Error = rpcError + return errorMessage, nil + } + allEntries[i] = &appmessage.BalancesByAddressesEntry{ + Address: address, + Balance: balance, + } + } + + response := appmessage.NewGetBalancesByAddressesResponse(allEntries) + return response, nil +} diff --git a/app/rpc/rpchandlers/get_block.go b/app/rpc/rpchandlers/get_block.go new file mode 100644 index 0000000..edf0d36 --- /dev/null +++ b/app/rpc/rpchandlers/get_block.go @@ -0,0 +1,49 @@ +package rpchandlers + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + 
"github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetBlock handles the respectively named RPC command +func HandleGetBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + getBlockRequest := request.(*appmessage.GetBlockRequestMessage) + + // Load the raw block bytes from the database. + hash, err := externalapi.NewDomainHashFromString(getBlockRequest.Hash) + if err != nil { + errorMessage := &appmessage.GetBlockResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Hash could not be parsed: %s", err) + return errorMessage, nil + } + + block, err := context.Domain.Consensus().GetBlockEvenIfHeaderOnly(hash) + if err != nil { + errorMessage := &appmessage.GetBlockResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Block %s not found", hash) + return errorMessage, nil + } + + response := appmessage.NewGetBlockResponseMessage() + + if getBlockRequest.IncludeTransactions { + response.Block = appmessage.DomainBlockToRPCBlock(block) + } else { + response.Block = appmessage.DomainBlockToRPCBlock(&externalapi.DomainBlock{Header: block.Header}) + } + + err = context.PopulateBlockWithVerboseData(response.Block, block.Header, block, getBlockRequest.IncludeTransactions) + if err != nil { + if errors.Is(err, rpccontext.ErrBuildBlockVerboseDataInvalidBlock) { + errorMessage := &appmessage.GetBlockResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Block %s is invalid", hash) + return errorMessage, nil + } + return nil, err + } + + return response, nil +} diff --git a/app/rpc/rpchandlers/get_block_count.go b/app/rpc/rpchandlers/get_block_count.go new file mode 100644 index 0000000..7392685 --- /dev/null +++ b/app/rpc/rpchandlers/get_block_count.go @@ -0,0 +1,17 @@ +package rpchandlers + +import ( + 
"github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetBlockCount handles the respectively named RPC command +func HandleGetBlockCount(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + syncInfo, err := context.Domain.Consensus().GetSyncInfo() + if err != nil { + return nil, err + } + response := appmessage.NewGetBlockCountResponseMessage(syncInfo) + return response, nil +} diff --git a/app/rpc/rpchandlers/get_block_dag_info.go b/app/rpc/rpchandlers/get_block_dag_info.go new file mode 100644 index 0000000..e21bc5e --- /dev/null +++ b/app/rpc/rpchandlers/get_block_dag_info.go @@ -0,0 +1,47 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetBlockDAGInfo handles the respectively named RPC command +func HandleGetBlockDAGInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + params := context.Config.ActiveNetParams + consensus := context.Domain.Consensus() + + response := appmessage.NewGetBlockDAGInfoResponseMessage() + response.NetworkName = params.Name + + syncInfo, err := consensus.GetSyncInfo() + if err != nil { + return nil, err + } + response.BlockCount = syncInfo.BlockCount + response.HeaderCount = syncInfo.HeaderCount + + tipHashes, err := consensus.Tips() + if err != nil { + return nil, err + } + response.TipHashes = hashes.ToStrings(tipHashes) + + virtualInfo, err := consensus.GetVirtualInfo() + if err != nil { + return nil, err + } + response.VirtualParentHashes = hashes.ToStrings(virtualInfo.ParentHashes) + response.Difficulty = 
context.GetDifficultyRatio(virtualInfo.Bits, context.Config.ActiveNetParams) + response.PastMedianTime = virtualInfo.PastMedianTime + response.VirtualDAAScore = virtualInfo.DAAScore + + pruningPoint, err := context.Domain.Consensus().PruningPoint() + if err != nil { + return nil, err + } + response.PruningPointHash = pruningPoint.String() + + return response, nil +} diff --git a/app/rpc/rpchandlers/get_block_template.go b/app/rpc/rpchandlers/get_block_template.go new file mode 100644 index 0000000..7cd0e44 --- /dev/null +++ b/app/rpc/rpchandlers/get_block_template.go @@ -0,0 +1,46 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/version" +) + +// HandleGetBlockTemplate handles the respectively named RPC command +func HandleGetBlockTemplate(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + getBlockTemplateRequest := request.(*appmessage.GetBlockTemplateRequestMessage) + + payAddress, err := util.DecodeAddress(getBlockTemplateRequest.PayAddress, context.Config.ActiveNetParams.Prefix) + if err != nil { + errorMessage := &appmessage.GetBlockTemplateResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not decode address: %s", err) + return errorMessage, nil + } + + scriptPublicKey, err := txscript.PayToAddrScript(payAddress) + if err != nil { + return nil, err + } + + coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version() + "/" + 
getBlockTemplateRequest.ExtraData)} + + templateBlock, isNearlySynced, err := context.Domain.MiningManager().GetBlockTemplate(coinbaseData) + if err != nil { + return nil, err + } + + if uint64(len(templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex].Payload)) > context.Config.NetParams().MaxCoinbasePayloadLength { + errorMessage := &appmessage.GetBlockTemplateResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Coinbase payload is above max length (%d). Try to shorten the extra data.", context.Config.NetParams().MaxCoinbasePayloadLength) + return errorMessage, nil + } + + rpcBlock := appmessage.DomainBlockToRPCBlock(templateBlock) + + return appmessage.NewGetBlockTemplateResponseMessage(rpcBlock, context.ProtocolManager.Context().HasPeers() && isNearlySynced), nil +} diff --git a/app/rpc/rpchandlers/get_blocks.go b/app/rpc/rpchandlers/get_blocks.go new file mode 100644 index 0000000..8ada914 --- /dev/null +++ b/app/rpc/rpchandlers/get_blocks.go @@ -0,0 +1,100 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetBlocks handles the respectively named RPC command +func HandleGetBlocks(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + getBlocksRequest := request.(*appmessage.GetBlocksRequestMessage) + + // Validate that user didn't set IncludeTransactions without setting IncludeBlocks + if !getBlocksRequest.IncludeBlocks && getBlocksRequest.IncludeTransactions { + return &appmessage.GetBlocksResponseMessage{ + Error: appmessage.RPCErrorf( + "If includeTransactions is set, then includeBlockVerboseData must be set as well"), + }, nil + } + + // Decode 
lowHash + // If lowHash is empty - use genesis instead. + lowHash := context.Config.ActiveNetParams.GenesisHash + if getBlocksRequest.LowHash != "" { + var err error + lowHash, err = externalapi.NewDomainHashFromString(getBlocksRequest.LowHash) + if err != nil { + return &appmessage.GetBlocksResponseMessage{ + Error: appmessage.RPCErrorf("Could not decode lowHash %s: %s", getBlocksRequest.LowHash, err), + }, nil + } + + blockInfo, err := context.Domain.Consensus().GetBlockInfo(lowHash) + if err != nil { + return nil, err + } + + if !blockInfo.HasHeader() { + return &appmessage.GetBlocksResponseMessage{ + Error: appmessage.RPCErrorf("Could not find lowHash %s", getBlocksRequest.LowHash), + }, nil + } + } + + // Get hashes between lowHash and virtualSelectedParent + virtualSelectedParent, err := context.Domain.Consensus().GetVirtualSelectedParent() + if err != nil { + return nil, err + } + + // We use +1 because lowHash is also returned + // maxBlocks MUST be >= MergeSetSizeLimit + 1 + maxBlocks := context.Config.NetParams().MergeSetSizeLimit + 1 + blockHashes, highHash, err := context.Domain.Consensus().GetHashesBetween(lowHash, virtualSelectedParent, maxBlocks) + if err != nil { + return nil, err + } + + // prepend low hash to make it inclusive + blockHashes = append([]*externalapi.DomainHash{lowHash}, blockHashes...) + + // If the high hash is equal to virtualSelectedParent it means GetHashesBetween didn't skip any hashes, and + // there's space to add the virtualSelectedParent's anticone, otherwise you can't add the anticone because + // there's no guarantee that all of the anticone root ancestors will be present. + if highHash.Equal(virtualSelectedParent) { + virtualSelectedParentAnticone, err := context.Domain.Consensus().Anticone(virtualSelectedParent) + if err != nil { + return nil, err + } + blockHashes = append(blockHashes, virtualSelectedParentAnticone...) 
+ } + + // Prepare the response + response := appmessage.NewGetBlocksResponseMessage() + response.BlockHashes = hashes.ToStrings(blockHashes) + if getBlocksRequest.IncludeBlocks { + rpcBlocks := make([]*appmessage.RPCBlock, len(blockHashes)) + for i, blockHash := range blockHashes { + block, err := context.Domain.Consensus().GetBlockEvenIfHeaderOnly(blockHash) + if err != nil { + return nil, err + } + + if getBlocksRequest.IncludeTransactions { + rpcBlocks[i] = appmessage.DomainBlockToRPCBlock(block) + } else { + rpcBlocks[i] = appmessage.DomainBlockToRPCBlock(&externalapi.DomainBlock{Header: block.Header}) + } + err = context.PopulateBlockWithVerboseData(rpcBlocks[i], block.Header, nil, getBlocksRequest.IncludeTransactions) + if err != nil { + return nil, err + } + } + response.Blocks = rpcBlocks + } + + return response, nil +} diff --git a/app/rpc/rpchandlers/get_blocks_test.go b/app/rpc/rpchandlers/get_blocks_test.go new file mode 100644 index 0000000..bc51845 --- /dev/null +++ b/app/rpc/rpchandlers/get_blocks_test.go @@ -0,0 +1,167 @@ +package rpchandlers_test + +import ( + "reflect" + "sort" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/app/rpc/rpchandlers" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/miningmanager" + "github.com/spectre-project/spectred/infrastructure/config" +) + +type fakeDomain struct { + testapi.TestConsensus +} + +func (d fakeDomain) ConsensusEventsChannel() chan externalapi.ConsensusEvent { + panic("implement me") 
}

// The staging-consensus portion of the Domain interface is unused by
// HandleGetBlocks, so these methods simply panic if reached.
func (d fakeDomain) DeleteStagingConsensus() error {
	panic("implement me")
}

func (d fakeDomain) StagingConsensus() externalapi.Consensus {
	panic("implement me")
}

func (d fakeDomain) InitStagingConsensusWithoutGenesis() error {
	panic("implement me")
}

func (d fakeDomain) CommitStagingConsensus() error {
	panic("implement me")
}

// Consensus returns the embedded test consensus; MiningManager is unused by
// the handler under test and may therefore be nil.
func (d fakeDomain) Consensus() externalapi.Consensus           { return d }
func (d fakeDomain) MiningManager() miningmanager.MiningManager { return nil }

// TestHandleGetBlocks builds a layered DAG and verifies that HandleGetBlocks
// returns, for every possible lowHash, exactly the blocks outside that
// block's past (plus the block itself), in the expected topological order.
func TestHandleGetBlocks(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		stagingArea := model.NewStagingArea()

		factory := consensus.NewFactory()
		tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleGetBlocks")
		if err != nil {
			t.Fatalf("Error setting up consensus: %+v", err)
		}
		defer teardown(false)

		// A minimal rpccontext wired to the test consensus via fakeDomain.
		fakeContext := rpccontext.Context{
			Config: &config.Config{Flags: &config.Flags{NetworkFlags: config.NetworkFlags{ActiveNetParams: &consensusConfig.Params}}},
			Domain: fakeDomain{tc},
		}

		// getBlocks invokes the handler under test; a nil lowHash sends an
		// empty request (handler falls back to genesis).
		getBlocks := func(lowHash *externalapi.DomainHash) *appmessage.GetBlocksResponseMessage {
			request := appmessage.GetBlocksRequestMessage{}
			if lowHash != nil {
				request.LowHash = lowHash.String()
			}
			response, err := rpchandlers.HandleGetBlocks(&fakeContext, nil, &request)
			if err != nil {
				t.Fatalf("Expected empty request to not fail, instead: '%v'", err)
			}
			return response.(*appmessage.GetBlocksResponseMessage)
		}

		// filterAntiPast keeps only the hashes that are NOT in the past of
		// povBlock, i.e. the subset the handler is expected to return.
		filterAntiPast := func(povBlock *externalapi.DomainHash, slice []*externalapi.DomainHash) []*externalapi.DomainHash {
			antipast := make([]*externalapi.DomainHash, 0, len(slice))

			for _, blockHash := range slice {
				isInPastOfPovBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, povBlock)
				if err != nil {
					t.Fatalf("Failed doing reachability check: '%v'", err)
				}
				if !isInPastOfPovBlock {
					antipast = append(antipast, blockHash)
				}
			}
			return antipast
		}

		// Create a DAG with the following structure:
		//         merging block
		//       /      |      \
		//  split1    split2   split3
		//       \      |      /
		//         merging block
		//       /      |      \
		//  split1    split2   split3
		//       \      |      /
		//            etc.
		expectedOrder := make([]*externalapi.DomainHash, 0, 40)
		mergingBlock := consensusConfig.GenesisHash
		for i := 0; i < 10; i++ {
			splitBlocks := make([]*externalapi.DomainHash, 0, 3)
			for j := 0; j < 3; j++ {
				blockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{mergingBlock}, nil, nil)
				if err != nil {
					t.Fatalf("Failed adding block: %v", err)
				}
				splitBlocks = append(splitBlocks, blockHash)
			}
			// GhostDAG ordering: the selected parent comes first among the
			// split blocks, followed by the rest.
			sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(stagingArea, splitBlocks, tc, t)))
			restOfSplitBlocks, selectedParent := splitBlocks[:len(splitBlocks)-1], splitBlocks[len(splitBlocks)-1]
			expectedOrder = append(expectedOrder, selectedParent)
			expectedOrder = append(expectedOrder, restOfSplitBlocks...)

			mergingBlock, _, err = tc.AddBlock(splitBlocks, nil, nil)
			if err != nil {
				t.Fatalf("Failed adding block: %v", err)
			}
			expectedOrder = append(expectedOrder, mergingBlock)
		}

		virtualSelectedParent, err := tc.GetVirtualSelectedParent()
		if err != nil {
			t.Fatalf("Failed getting SelectedParent: %v", err)
		}
		if !virtualSelectedParent.Equal(expectedOrder[len(expectedOrder)-1]) {
			t.Fatalf("Expected %s to be selectedParent, instead found: %s", expectedOrder[len(expectedOrder)-1], virtualSelectedParent)
		}

		requestSelectedParent := getBlocks(virtualSelectedParent)
		if !reflect.DeepEqual(requestSelectedParent.BlockHashes, hashes.ToStrings([]*externalapi.DomainHash{virtualSelectedParent})) {
			t.Fatalf("TestHandleGetBlocks expected:\n%v\nactual:\n%v", virtualSelectedParent, requestSelectedParent.BlockHashes)
		}

		// For every block in the DAG, the handler must return that block
		// followed by everything outside its past.
		for i, blockHash := range expectedOrder {
			expectedBlocks := filterAntiPast(blockHash, expectedOrder)
			expectedBlocks = append([]*externalapi.DomainHash{blockHash}, expectedBlocks...)

			actualBlocks := getBlocks(blockHash)
			if !reflect.DeepEqual(actualBlocks.BlockHashes, hashes.ToStrings(expectedBlocks)) {
				t.Fatalf("TestHandleGetBlocks %d \nexpected: \n%v\nactual:\n%v", i,
					hashes.ToStrings(expectedBlocks), actualBlocks.BlockHashes)
			}
		}

		// Make explicitly sure that if lowHash==highHash we get a slice with a single hash.
		actualBlocks := getBlocks(virtualSelectedParent)
		if !reflect.DeepEqual(actualBlocks.BlockHashes, []string{virtualSelectedParent.String()}) {
			t.Fatalf("TestHandleGetBlocks expected blocks to contain just '%s', instead got: \n%v",
				virtualSelectedParent, actualBlocks.BlockHashes)
		}

		// A nil lowHash and an explicit genesis lowHash must both return the
		// full expected order, genesis included.
		expectedOrder = append([]*externalapi.DomainHash{consensusConfig.GenesisHash}, expectedOrder...)
		actualOrder := getBlocks(nil)
		if !reflect.DeepEqual(actualOrder.BlockHashes, hashes.ToStrings(expectedOrder)) {
			t.Fatalf("TestHandleGetBlocks \nexpected: %v \nactual:\n%v", expectedOrder, actualOrder.BlockHashes)
		}

		requestAllExplictly := getBlocks(consensusConfig.GenesisHash)
		if !reflect.DeepEqual(requestAllExplictly.BlockHashes, hashes.ToStrings(expectedOrder)) {
			t.Fatalf("TestHandleGetBlocks \nexpected: \n%v\n. actual:\n%v", expectedOrder, requestAllExplictly.BlockHashes)
		}
	})
}

// HandleGetCoinSupply handles the respectively named RPC command.
// Requires the node to run with --utxoindex.
func HandleGetCoinSupply(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) {
	if !context.Config.UTXOIndex {
		errorMessage := &appmessage.GetCoinSupplyResponseMessage{}
		errorMessage.Error = appmessage.RPCErrorf("Method unavailable when spectred is run without --utxoindex")
		return errorMessage, nil
	}

	// Circulating supply comes straight from the UTXO index.
	circulatingSompiSupply, err := context.UTXOIndex.GetCirculatingSompiSupply()
	if err != nil {
		return nil, err
	}

	response := appmessage.NewGetCoinSupplyResponseMessage(
		constants.MaxSompi,
		circulatingSompiSupply,
	)

	return response, nil
}
ibdPeer := context.ProtocolManager.IBDPeer() + infos := make([]*appmessage.GetConnectedPeerInfoMessage, 0, len(peers)) + for _, peer := range peers { + info := &appmessage.GetConnectedPeerInfoMessage{ + ID: peer.ID().String(), + Address: peer.Address(), + LastPingDuration: peer.LastPingDuration().Milliseconds(), + IsOutbound: peer.IsOutbound(), + TimeOffset: peer.TimeOffset().Milliseconds(), + UserAgent: peer.UserAgent(), + AdvertisedProtocolVersion: peer.AdvertisedProtocolVersion(), + TimeConnected: peer.TimeConnected().Milliseconds(), + IsIBDPeer: peer == ibdPeer, + } + infos = append(infos, info) + } + response := appmessage.NewGetConnectedPeerInfoResponseMessage(infos) + return response, nil +} diff --git a/app/rpc/rpchandlers/get_current_network.go b/app/rpc/rpchandlers/get_current_network.go new file mode 100644 index 0000000..5fb5f02 --- /dev/null +++ b/app/rpc/rpchandlers/get_current_network.go @@ -0,0 +1,13 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetCurrentNetwork handles the respectively named RPC command +func HandleGetCurrentNetwork(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + response := appmessage.NewGetCurrentNetworkResponseMessage(context.Config.ActiveNetParams.Net.String()) + return response, nil +} diff --git a/app/rpc/rpchandlers/get_headers.go b/app/rpc/rpchandlers/get_headers.go new file mode 100644 index 0000000..9f8ad9a --- /dev/null +++ b/app/rpc/rpchandlers/get_headers.go @@ -0,0 +1,14 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetHeaders handles the respectively named RPC command 
+func HandleGetHeaders(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + response := &appmessage.GetHeadersResponseMessage{} + response.Error = appmessage.RPCErrorf("not implemented") + return response, nil +} diff --git a/app/rpc/rpchandlers/get_info.go b/app/rpc/rpchandlers/get_info.go new file mode 100644 index 0000000..dc009ab --- /dev/null +++ b/app/rpc/rpchandlers/get_info.go @@ -0,0 +1,26 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/version" +) + +// HandleGetInfo handles the respectively named RPC command +func HandleGetInfo(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + isNearlySynced, err := context.Domain.Consensus().IsNearlySynced() + if err != nil { + return nil, err + } + + response := appmessage.NewGetInfoResponseMessage( + context.NetAdapter.ID().String(), + uint64(context.Domain.MiningManager().TransactionCount(true, false)), + version.Version(), + context.Config.UTXOIndex, + context.ProtocolManager.Context().HasPeers() && isNearlySynced, + ) + + return response, nil +} diff --git a/app/rpc/rpchandlers/get_mempool_entries.go b/app/rpc/rpchandlers/get_mempool_entries.go new file mode 100644 index 0000000..49f1e12 --- /dev/null +++ b/app/rpc/rpchandlers/get_mempool_entries.go @@ -0,0 +1,47 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetMempoolEntries handles the respectively named RPC command +func HandleGetMempoolEntries(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) 
{ + getMempoolEntriesRequest := request.(*appmessage.GetMempoolEntriesRequestMessage) + + entries := make([]*appmessage.MempoolEntry, 0) + + transactionPoolTransactions, orphanPoolTransactions := context.Domain.MiningManager().AllTransactions(!getMempoolEntriesRequest.FilterTransactionPool, getMempoolEntriesRequest.IncludeOrphanPool) + + if !getMempoolEntriesRequest.FilterTransactionPool { + for _, transaction := range transactionPoolTransactions { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + entries = append(entries, &appmessage.MempoolEntry{ + Fee: transaction.Fee, + Transaction: rpcTransaction, + IsOrphan: false, + }) + } + } + if getMempoolEntriesRequest.IncludeOrphanPool { + for _, transaction := range orphanPoolTransactions { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + entries = append(entries, &appmessage.MempoolEntry{ + Fee: transaction.Fee, + Transaction: rpcTransaction, + IsOrphan: true, + }) + } + } + + return appmessage.NewGetMempoolEntriesResponseMessage(entries), nil +} diff --git a/app/rpc/rpchandlers/get_mempool_entries_by_addresses.go b/app/rpc/rpchandlers/get_mempool_entries_by_addresses.go new file mode 100644 index 0000000..816ddfd --- /dev/null +++ b/app/rpc/rpchandlers/get_mempool_entries_by_addresses.go @@ -0,0 +1,122 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util" +) + +// HandleGetMempoolEntriesByAddresses handles the respectively named RPC command 
+func HandleGetMempoolEntriesByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + + getMempoolEntriesByAddressesRequest := request.(*appmessage.GetMempoolEntriesByAddressesRequestMessage) + + mempoolEntriesByAddresses := make([]*appmessage.MempoolEntryByAddress, 0) + + sendingInTransactionPool, receivingInTransactionPool, sendingInOrphanPool, receivingInOrphanPool, err := context.Domain.MiningManager().GetTransactionsByAddresses(!getMempoolEntriesByAddressesRequest.FilterTransactionPool, getMempoolEntriesByAddressesRequest.IncludeOrphanPool) + if err != nil { + return nil, err + } + + for _, addressString := range getMempoolEntriesByAddressesRequest.Addresses { + + address, err := util.DecodeAddress(addressString, context.Config.NetParams().Prefix) + if err != nil { + errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err) + return errorMessage, nil + } + + sending := make([]*appmessage.MempoolEntry, 0) + receiving := make([]*appmessage.MempoolEntry, 0) + + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + errorMessage := &appmessage.GetMempoolEntriesByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not extract scriptPublicKey from address '%s': %s", addressString, err) + return errorMessage, nil + } + + if !getMempoolEntriesByAddressesRequest.FilterTransactionPool { + + if transaction, found := sendingInTransactionPool[scriptPublicKey.String()]; found { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + + sending = append(sending, &appmessage.MempoolEntry{ + Fee: transaction.Fee, + Transaction: rpcTransaction, + IsOrphan: false, + }, + ) + } + + if transaction, found := 
receivingInTransactionPool[scriptPublicKey.String()]; found { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + + receiving = append(receiving, &appmessage.MempoolEntry{ + Fee: transaction.Fee, + Transaction: rpcTransaction, + IsOrphan: false, + }, + ) + } + } + if getMempoolEntriesByAddressesRequest.IncludeOrphanPool { + + if transaction, found := sendingInOrphanPool[scriptPublicKey.String()]; found { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + + sending = append(sending, &appmessage.MempoolEntry{ + Fee: transaction.Fee, + Transaction: rpcTransaction, + IsOrphan: true, + }, + ) + } + + if transaction, found := receivingInOrphanPool[scriptPublicKey.String()]; found { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err := context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + + receiving = append(receiving, &appmessage.MempoolEntry{ + Fee: transaction.Fee, + Transaction: rpcTransaction, + IsOrphan: true, + }, + ) + } + + } + + if len(sending) > 0 || len(receiving) > 0 { + mempoolEntriesByAddresses = append( + mempoolEntriesByAddresses, + &appmessage.MempoolEntryByAddress{ + Address: address.String(), + Sending: sending, + Receiving: receiving, + }, + ) + } + } + + return appmessage.NewGetMempoolEntriesByAddressesResponseMessage(mempoolEntriesByAddresses), nil +} diff --git a/app/rpc/rpchandlers/get_mempool_entry.go b/app/rpc/rpchandlers/get_mempool_entry.go new file mode 100644 index 0000000..3034919 --- /dev/null +++ b/app/rpc/rpchandlers/get_mempool_entry.go @@ -0,0 +1,41 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + 
"github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetMempoolEntry handles the respectively named RPC command +func HandleGetMempoolEntry(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + + var transaction *externalapi.DomainTransaction + var found bool + var isOrphan bool + + getMempoolEntryRequest := request.(*appmessage.GetMempoolEntryRequestMessage) + + transactionID, err := transactionid.FromString(getMempoolEntryRequest.TxID) + if err != nil { + errorMessage := &appmessage.GetMempoolEntryResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Transaction ID could not be parsed: %s", err) + return errorMessage, nil + } + + transaction, isOrphan, found = context.Domain.MiningManager().GetTransaction(transactionID, !getMempoolEntryRequest.FilterTransactionPool, getMempoolEntryRequest.IncludeOrphanPool) + + if !found { + errorMessage := &appmessage.GetMempoolEntryResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Transaction %s was not found", transactionID) + return errorMessage, nil + } + + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + err = context.PopulateTransactionWithVerboseData(rpcTransaction, nil) + if err != nil { + return nil, err + } + return appmessage.NewGetMempoolEntryResponseMessage(transaction.Fee, rpcTransaction, isOrphan), nil +} diff --git a/app/rpc/rpchandlers/get_peer_addresses.go b/app/rpc/rpchandlers/get_peer_addresses.go new file mode 100644 index 0000000..67df9e3 --- /dev/null +++ b/app/rpc/rpchandlers/get_peer_addresses.go @@ -0,0 +1,30 @@ +package rpchandlers + +import ( + "net" + "strconv" + + "github.com/spectre-project/spectred/app/appmessage" + 
"github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetPeerAddresses handles the respectively named RPC command +func HandleGetPeerAddresses(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + netAddresses := context.AddressManager.Addresses() + addressMessages := make([]*appmessage.GetPeerAddressesKnownAddressMessage, len(netAddresses)) + for i, netAddress := range netAddresses { + addressWithPort := net.JoinHostPort(netAddress.IP.String(), strconv.FormatUint(uint64(netAddress.Port), 10)) + addressMessages[i] = &appmessage.GetPeerAddressesKnownAddressMessage{Addr: addressWithPort} + } + + bannedAddresses := context.AddressManager.BannedAddresses() + bannedAddressMessages := make([]*appmessage.GetPeerAddressesKnownAddressMessage, len(bannedAddresses)) + for i, netAddress := range bannedAddresses { + addressWithPort := net.JoinHostPort(netAddress.IP.String(), strconv.FormatUint(uint64(netAddress.Port), 10)) + bannedAddressMessages[i] = &appmessage.GetPeerAddressesKnownAddressMessage{Addr: addressWithPort} + } + + response := appmessage.NewGetPeerAddressesResponseMessage(addressMessages, bannedAddressMessages) + return response, nil +} diff --git a/app/rpc/rpchandlers/get_selected_tip_hash.go b/app/rpc/rpchandlers/get_selected_tip_hash.go new file mode 100644 index 0000000..cdd5a57 --- /dev/null +++ b/app/rpc/rpchandlers/get_selected_tip_hash.go @@ -0,0 +1,19 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetSelectedTipHash handles the respectively named RPC command +func HandleGetSelectedTipHash(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + selectedTip, err := 
context.Domain.Consensus().GetVirtualSelectedParent() + if err != nil { + return nil, err + } + + response := appmessage.NewGetSelectedTipHashResponseMessage(selectedTip.String()) + + return response, nil +} diff --git a/app/rpc/rpchandlers/get_subnetwork.go b/app/rpc/rpchandlers/get_subnetwork.go new file mode 100644 index 0000000..365672e --- /dev/null +++ b/app/rpc/rpchandlers/get_subnetwork.go @@ -0,0 +1,14 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetSubnetwork handles the respectively named RPC command +func HandleGetSubnetwork(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + response := &appmessage.GetSubnetworkResponseMessage{} + response.Error = appmessage.RPCErrorf("not implemented") + return response, nil +} diff --git a/app/rpc/rpchandlers/get_utxos_by_addresses.go b/app/rpc/rpchandlers/get_utxos_by_addresses.go new file mode 100644 index 0000000..0a6faad --- /dev/null +++ b/app/rpc/rpchandlers/get_utxos_by_addresses.go @@ -0,0 +1,45 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util" +) + +// HandleGetUTXOsByAddresses handles the respectively named RPC command +func HandleGetUTXOsByAddresses(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + if !context.Config.UTXOIndex { + errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Method unavailable when spectred is run without --utxoindex") + 
return errorMessage, nil + } + + getUTXOsByAddressesRequest := request.(*appmessage.GetUTXOsByAddressesRequestMessage) + + allEntries := make([]*appmessage.UTXOsByAddressesEntry, 0) + for _, addressString := range getUTXOsByAddressesRequest.Addresses { + address, err := util.DecodeAddress(addressString, context.Config.ActiveNetParams.Prefix) + if err != nil { + errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not decode address '%s': %s", addressString, err) + return errorMessage, nil + } + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + errorMessage := &appmessage.GetUTXOsByAddressesResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not create a scriptPublicKey for address '%s': %s", addressString, err) + return errorMessage, nil + } + utxoOutpointEntryPairs, err := context.UTXOIndex.UTXOs(scriptPublicKey) + if err != nil { + return nil, err + } + entries := rpccontext.ConvertUTXOOutpointEntryPairsToUTXOsByAddressesEntries(addressString, utxoOutpointEntryPairs) + allEntries = append(allEntries, entries...) 
+ } + + response := appmessage.NewGetUTXOsByAddressesResponseMessage(allEntries) + return response, nil +} diff --git a/app/rpc/rpchandlers/get_virtual_selected_parent_blue_score.go b/app/rpc/rpchandlers/get_virtual_selected_parent_blue_score.go new file mode 100644 index 0000000..d3678b6 --- /dev/null +++ b/app/rpc/rpchandlers/get_virtual_selected_parent_blue_score.go @@ -0,0 +1,21 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetVirtualSelectedParentBlueScore handles the respectively named RPC command +func HandleGetVirtualSelectedParentBlueScore(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + c := context.Domain.Consensus() + selectedParent, err := c.GetVirtualSelectedParent() + if err != nil { + return nil, err + } + blockInfo, err := c.GetBlockInfo(selectedParent) + if err != nil { + return nil, err + } + return appmessage.NewGetVirtualSelectedParentBlueScoreResponseMessage(blockInfo.BlueScore), nil +} diff --git a/app/rpc/rpchandlers/get_virtual_selected_parent_chain_from_block.go b/app/rpc/rpchandlers/get_virtual_selected_parent_chain_from_block.go new file mode 100644 index 0000000..befbddb --- /dev/null +++ b/app/rpc/rpchandlers/get_virtual_selected_parent_chain_from_block.go @@ -0,0 +1,39 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleGetVirtualSelectedParentChainFromBlock handles the respectively named RPC command +func HandleGetVirtualSelectedParentChainFromBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) 
(appmessage.Message, error) { + getVirtualSelectedParentChainFromBlockRequest := request.(*appmessage.GetVirtualSelectedParentChainFromBlockRequestMessage) + + startHash, err := externalapi.NewDomainHashFromString(getVirtualSelectedParentChainFromBlockRequest.StartHash) + if err != nil { + errorMessage := &appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not parse startHash: %s", err) + return errorMessage, nil + } + + virtualSelectedParentChain, err := context.Domain.Consensus().GetVirtualSelectedParentChainFromBlock(startHash) + if err != nil { + response := &appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage{} + response.Error = appmessage.RPCErrorf("Could not build virtual "+ + "selected parent chain from %s: %s", getVirtualSelectedParentChainFromBlockRequest.StartHash, err) + return response, nil + } + + chainChangedNotification, err := context.ConvertVirtualSelectedParentChainChangesToChainChangedNotificationMessage( + virtualSelectedParentChain, getVirtualSelectedParentChainFromBlockRequest.IncludeAcceptedTransactionIDs) + if err != nil { + return nil, err + } + + response := appmessage.NewGetVirtualSelectedParentChainFromBlockResponseMessage( + chainChangedNotification.RemovedChainBlockHashes, chainChangedNotification.AddedChainBlockHashes, + chainChangedNotification.AcceptedTransactionIDs) + return response, nil +} diff --git a/app/rpc/rpchandlers/log.go b/app/rpc/rpchandlers/log.go new file mode 100644 index 0000000..035fbb1 --- /dev/null +++ b/app/rpc/rpchandlers/log.go @@ -0,0 +1,9 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("RPCS") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/app/rpc/rpchandlers/notify_block_added.go b/app/rpc/rpchandlers/notify_block_added.go new file mode 100644 index 0000000..a44cb66 --- 
/dev/null +++ b/app/rpc/rpchandlers/notify_block_added.go @@ -0,0 +1,19 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyBlockAdded handles the respectively named RPC command +func HandleNotifyBlockAdded(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagateBlockAddedNotifications() + + response := appmessage.NewNotifyBlockAddedResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_finality_conflicts.go b/app/rpc/rpchandlers/notify_finality_conflicts.go new file mode 100644 index 0000000..6242fbb --- /dev/null +++ b/app/rpc/rpchandlers/notify_finality_conflicts.go @@ -0,0 +1,20 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyFinalityConflicts handles the respectively named RPC command +func HandleNotifyFinalityConflicts(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagateFinalityConflictNotifications() + listener.PropagateFinalityConflictResolvedNotifications() + + response := appmessage.NewNotifyFinalityConflictsResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_new_block_template.go b/app/rpc/rpchandlers/notify_new_block_template.go new file mode 100644 index 0000000..f5286a3 --- /dev/null +++ b/app/rpc/rpchandlers/notify_new_block_template.go @@ -0,0 +1,19 @@ 
+package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyNewBlockTemplate handles the respectively named RPC command +func HandleNotifyNewBlockTemplate(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagateNewBlockTemplateNotifications() + + response := appmessage.NewNotifyNewBlockTemplateResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_pruning_point_utxo_set_overrides.go b/app/rpc/rpchandlers/notify_pruning_point_utxo_set_overrides.go new file mode 100644 index 0000000..dc15e2d --- /dev/null +++ b/app/rpc/rpchandlers/notify_pruning_point_utxo_set_overrides.go @@ -0,0 +1,19 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyPruningPointUTXOSetOverrideRequest handles the respectively named RPC command +func HandleNotifyPruningPointUTXOSetOverrideRequest(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagatePruningPointUTXOSetOverrideNotifications() + + response := appmessage.NewNotifyPruningPointUTXOSetOverrideResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_utxos_changed.go b/app/rpc/rpchandlers/notify_utxos_changed.go new file mode 100644 index 0000000..3730a23 --- /dev/null +++ b/app/rpc/rpchandlers/notify_utxos_changed.go @@ -0,0 +1,33 @@ +package rpchandlers + 
+import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyUTXOsChanged handles the respectively named RPC command +func HandleNotifyUTXOsChanged(context *rpccontext.Context, router *router.Router, request appmessage.Message) (appmessage.Message, error) { + if !context.Config.UTXOIndex { + errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage() + errorMessage.Error = appmessage.RPCErrorf("Method unavailable when spectred is run without --utxoindex") + return errorMessage, nil + } + + notifyUTXOsChangedRequest := request.(*appmessage.NotifyUTXOsChangedRequestMessage) + addresses, err := context.ConvertAddressStringsToUTXOsChangedNotificationAddresses(notifyUTXOsChangedRequest.Addresses) + if err != nil { + errorMessage := appmessage.NewNotifyUTXOsChangedResponseMessage() + errorMessage.Error = appmessage.RPCErrorf("Parsing error: %s", err) + return errorMessage, nil + } + + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + context.NotificationManager.PropagateUTXOsChangedNotifications(listener, addresses) + + response := appmessage.NewNotifyUTXOsChangedResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_virtual_daa_score_changed.go b/app/rpc/rpchandlers/notify_virtual_daa_score_changed.go new file mode 100644 index 0000000..16db095 --- /dev/null +++ b/app/rpc/rpchandlers/notify_virtual_daa_score_changed.go @@ -0,0 +1,19 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyVirtualDaaScoreChanged handles the respectively named RPC command +func HandleNotifyVirtualDaaScoreChanged(context *rpccontext.Context, router 
*router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagateVirtualDaaScoreChangedNotifications() + + response := appmessage.NewNotifyVirtualDaaScoreChangedResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_virtual_selected_parent_blue_score_changed.go b/app/rpc/rpchandlers/notify_virtual_selected_parent_blue_score_changed.go new file mode 100644 index 0000000..66d3062 --- /dev/null +++ b/app/rpc/rpchandlers/notify_virtual_selected_parent_blue_score_changed.go @@ -0,0 +1,19 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleNotifyVirtualSelectedParentBlueScoreChanged handles the respectively named RPC command +func HandleNotifyVirtualSelectedParentBlueScoreChanged(context *rpccontext.Context, router *router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagateVirtualSelectedParentBlueScoreChangedNotifications() + + response := appmessage.NewNotifyVirtualSelectedParentBlueScoreChangedResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/notify_virtual_selected_parent_chain_changed.go b/app/rpc/rpchandlers/notify_virtual_selected_parent_chain_changed.go new file mode 100644 index 0000000..68327b0 --- /dev/null +++ b/app/rpc/rpchandlers/notify_virtual_selected_parent_chain_changed.go @@ -0,0 +1,24 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// 
HandleNotifyVirtualSelectedParentChainChanged handles the respectively named RPC command +func HandleNotifyVirtualSelectedParentChainChanged(context *rpccontext.Context, router *router.Router, + request appmessage.Message) (appmessage.Message, error) { + + notifyVirtualSelectedParentChainChangedRequest := request.(*appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage) + + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.PropagateVirtualSelectedParentChainChangedNotifications( + notifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIDs) + + response := appmessage.NewNotifyVirtualSelectedParentChainChangedResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/resolve_finality_conflict.go b/app/rpc/rpchandlers/resolve_finality_conflict.go new file mode 100644 index 0000000..48c0a66 --- /dev/null +++ b/app/rpc/rpchandlers/resolve_finality_conflict.go @@ -0,0 +1,22 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleResolveFinalityConflict handles the respectively named RPC command +func HandleResolveFinalityConflict(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + if context.Config.SafeRPC { + log.Warn("ResolveFinalityConflict RPC command called while node in safe RPC mode -- ignoring.") + response := &appmessage.ResolveFinalityConflictResponseMessage{} + response.Error = + appmessage.RPCErrorf("ResolveFinalityConflict RPC command called while node in safe RPC mode") + return response, nil + } + + response := &appmessage.ResolveFinalityConflictResponseMessage{} + response.Error = appmessage.RPCErrorf("not implemented") + return response, nil +} diff --git a/app/rpc/rpchandlers/shut_down.go 
b/app/rpc/rpchandlers/shut_down.go new file mode 100644 index 0000000..3b46116 --- /dev/null +++ b/app/rpc/rpchandlers/shut_down.go @@ -0,0 +1,33 @@ +package rpchandlers + +import ( + "time" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +const pauseBeforeShutDown = time.Second + +// HandleShutDown handles the respectively named RPC command +func HandleShutDown(context *rpccontext.Context, _ *router.Router, _ appmessage.Message) (appmessage.Message, error) { + if context.Config.SafeRPC { + log.Warn("ShutDown RPC command called while node in safe RPC mode -- ignoring.") + response := appmessage.NewShutDownResponseMessage() + response.Error = + appmessage.RPCErrorf("ShutDown RPC command called while node in safe RPC mode") + return response, nil + } + + log.Warn("ShutDown RPC called.") + + // Wait a second before shutting down, to allow time to return the response to the caller + spawn("HandleShutDown-pauseAndShutDown", func() { + <-time.After(pauseBeforeShutDown) + close(context.ShutDownChan) + }) + + response := appmessage.NewShutDownResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/stop_notifying_pruning_point_utxo_set_overrides.go b/app/rpc/rpchandlers/stop_notifying_pruning_point_utxo_set_overrides.go new file mode 100644 index 0000000..e9243a9 --- /dev/null +++ b/app/rpc/rpchandlers/stop_notifying_pruning_point_utxo_set_overrides.go @@ -0,0 +1,19 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleStopNotifyingPruningPointUTXOSetOverrideRequest handles the respectively named RPC command +func HandleStopNotifyingPruningPointUTXOSetOverrideRequest(context *rpccontext.Context, router 
*router.Router, _ appmessage.Message) (appmessage.Message, error) { + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + listener.StopPropagatingPruningPointUTXOSetOverrideNotifications() + + response := appmessage.NewStopNotifyingPruningPointUTXOSetOverrideResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/stop_notifying_utxos_changed.go b/app/rpc/rpchandlers/stop_notifying_utxos_changed.go new file mode 100644 index 0000000..5ee18ea --- /dev/null +++ b/app/rpc/rpchandlers/stop_notifying_utxos_changed.go @@ -0,0 +1,33 @@ +package rpchandlers + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleStopNotifyingUTXOsChanged handles the respectively named RPC command +func HandleStopNotifyingUTXOsChanged(context *rpccontext.Context, router *router.Router, request appmessage.Message) (appmessage.Message, error) { + if !context.Config.UTXOIndex { + errorMessage := appmessage.NewStopNotifyingUTXOsChangedResponseMessage() + errorMessage.Error = appmessage.RPCErrorf("Method unavailable when spectred is run without --utxoindex") + return errorMessage, nil + } + + stopNotifyingUTXOsChangedRequest := request.(*appmessage.StopNotifyingUTXOsChangedRequestMessage) + addresses, err := context.ConvertAddressStringsToUTXOsChangedNotificationAddresses(stopNotifyingUTXOsChangedRequest.Addresses) + if err != nil { + errorMessage := appmessage.NewStopNotifyingUTXOsChangedResponseMessage() + errorMessage.Error = appmessage.RPCErrorf("Parsing error: %s", err) + return errorMessage, nil + } + + listener, err := context.NotificationManager.Listener(router) + if err != nil { + return nil, err + } + context.NotificationManager.StopPropagatingUTXOsChangedNotifications(listener, addresses) + + response := 
appmessage.NewStopNotifyingUTXOsChangedResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/submit_block.go b/app/rpc/rpchandlers/submit_block.go new file mode 100644 index 0000000..64a8a0d --- /dev/null +++ b/app/rpc/rpchandlers/submit_block.go @@ -0,0 +1,84 @@ +package rpchandlers + +import ( + "encoding/json" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleSubmitBlock handles the respectively named RPC command +func HandleSubmitBlock(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + submitBlockRequest := request.(*appmessage.SubmitBlockRequestMessage) + + var err error + isSynced := false + // The node is considered synced if it has peers and consensus state is nearly synced + if context.ProtocolManager.Context().HasPeers() { + isSynced, err = context.ProtocolManager.Context().IsNearlySynced() + if err != nil { + return nil, err + } + } + + if !context.Config.AllowSubmitBlockWhenNotSynced && !isSynced { + return &appmessage.SubmitBlockResponseMessage{ + Error: appmessage.RPCErrorf("Block not submitted - node is not synced"), + RejectReason: appmessage.RejectReasonIsInIBD, + }, nil + } + + domainBlock, err := appmessage.RPCBlockToDomainBlock(submitBlockRequest.Block) + if err != nil { + return &appmessage.SubmitBlockResponseMessage{ + Error: appmessage.RPCErrorf("Could not parse block: %s", err), + RejectReason: appmessage.RejectReasonBlockInvalid, + }, nil + } + + if !submitBlockRequest.AllowNonDAABlocks { + virtualDAAScore, err := context.Domain.Consensus().GetVirtualDAAScore() + if 
err != nil { + return nil, err + } + // A simple heuristic check which signals that the mined block is out of date + // and should not be accepted unless user explicitly requests + daaWindowSize := uint64(context.Config.NetParams().DifficultyAdjustmentWindowSize) + if virtualDAAScore > daaWindowSize && domainBlock.Header.DAAScore() < virtualDAAScore-daaWindowSize { + return &appmessage.SubmitBlockResponseMessage{ + Error: appmessage.RPCErrorf("Block rejected. Reason: block DAA score %d is too far "+ + "behind virtual's DAA score %d", domainBlock.Header.DAAScore(), virtualDAAScore), + RejectReason: appmessage.RejectReasonBlockInvalid, + }, nil + } + } + + err = context.ProtocolManager.AddBlock(domainBlock) + if err != nil { + isProtocolOrRuleError := errors.As(err, &ruleerrors.RuleError{}) || errors.As(err, &protocolerrors.ProtocolError{}) + if !isProtocolOrRuleError { + return nil, err + } + + jsonBytes, _ := json.MarshalIndent(submitBlockRequest.Block.Header, "", " ") + if jsonBytes != nil { + log.Warnf("The RPC submitted block triggered a rule/protocol error (%s), printing "+ + "the full header for debug purposes: \n%s", err, string(jsonBytes)) + } + + return &appmessage.SubmitBlockResponseMessage{ + Error: appmessage.RPCErrorf("Block rejected. 
Reason: %s", err), + RejectReason: appmessage.RejectReasonBlockInvalid, + }, nil + } + + log.Infof("Accepted block %s via submitBlock", consensushashing.BlockHash(domainBlock)) + + response := appmessage.NewSubmitBlockResponseMessage() + return response, nil +} diff --git a/app/rpc/rpchandlers/submit_transaction.go b/app/rpc/rpchandlers/submit_transaction.go new file mode 100644 index 0000000..3bc1bcd --- /dev/null +++ b/app/rpc/rpchandlers/submit_transaction.go @@ -0,0 +1,38 @@ +package rpchandlers + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/rpc/rpccontext" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// HandleSubmitTransaction handles the respectively named RPC command +func HandleSubmitTransaction(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) { + submitTransactionRequest := request.(*appmessage.SubmitTransactionRequestMessage) + + domainTransaction, err := appmessage.RPCTransactionToDomainTransaction(submitTransactionRequest.Transaction) + if err != nil { + errorMessage := &appmessage.SubmitTransactionResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Could not parse transaction: %s", err) + return errorMessage, nil + } + + transactionID := consensushashing.TransactionID(domainTransaction) + err = context.ProtocolManager.AddTransaction(domainTransaction, submitTransactionRequest.AllowOrphan) + if err != nil { + if !errors.As(err, &mempool.RuleError{}) { + return nil, err + } + + log.Debugf("Rejected transaction %s: %s", transactionID, err) + errorMessage := &appmessage.SubmitTransactionResponseMessage{} + errorMessage.Error = appmessage.RPCErrorf("Rejected transaction %s: %s", transactionID, err) + return 
errorMessage, nil
+	}
+
+	response := appmessage.NewSubmitTransactionResponseMessage(transactionID.String())
+	return response, nil
+}
diff --git a/app/rpc/rpchandlers/unban.go b/app/rpc/rpchandlers/unban.go
new file mode 100644
index 0000000..b1ac3cb
--- /dev/null
+++ b/app/rpc/rpchandlers/unban.go
@@ -0,0 +1,40 @@
+package rpchandlers
+
+import (
+	"net"
+
+	"github.com/spectre-project/spectred/app/appmessage"
+	"github.com/spectre-project/spectred/app/rpc/rpccontext"
+	"github.com/spectre-project/spectred/infrastructure/network/netadapter/router"
+)
+
+// HandleUnban handles the respectively named RPC command
+func HandleUnban(context *rpccontext.Context, _ *router.Router, request appmessage.Message) (appmessage.Message, error) {
+	if context.Config.SafeRPC {
+		log.Warn("Unban RPC command called while node in safe RPC mode -- ignoring.")
+		response := appmessage.NewUnbanResponseMessage()
+		response.Error =
+			appmessage.RPCErrorf("Unban RPC command called while node in safe RPC mode")
+		return response, nil
+	}
+
+	unbanRequest := request.(*appmessage.UnbanRequestMessage)
+	ip := net.ParseIP(unbanRequest.IP)
+	if ip == nil {
+		hint := ""
+		if len(unbanRequest.IP) > 0 && unbanRequest.IP[0] == '[' {
+			hint = " (try to remove “[” and “]” symbols)"
+		}
+		errorMessage := &appmessage.UnbanResponseMessage{}
+		errorMessage.Error = appmessage.RPCErrorf("Could not parse IP%s: %s", hint, unbanRequest.IP)
+		return errorMessage, nil
+	}
+	err := context.AddressManager.Unban(appmessage.NewNetAddressIPPort(ip, 0))
+	if err != nil {
+		errorMessage := &appmessage.UnbanResponseMessage{}
+		errorMessage.Error = appmessage.RPCErrorf("Could not unban IP: %s", err)
+		return errorMessage, nil
+	}
+	response := appmessage.NewUnbanResponseMessage()
+	return response, nil
+}
diff --git a/build_and_test.sh b/build_and_test.sh
new file mode 100755
index 0000000..0f5a9e8
--- /dev/null
+++ b/build_and_test.sh
@@ -0,0 +1,30 @@
+#!/bin/bash -ex
+
+# add go binary path.
+export PATH="${PATH}:$(go env GOPATH)/bin" + +# go preparation. +go version +go get -v -t -d ./... +go get -v golang.org/x/lint/golint +go install -v golang.org/x/lint/golint +go install -v honnef.co/go/tools/cmd/staticcheck@latest + +# list files whose formatting differs. +test -z "$(go fmt ./...)" + +# look for style mistakes. +golint -set_exit_status ./... + +# static analysis. +staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./... + +# build spectre daemon. +go build -v -o spectred . + +# check if parallel tests are enabled. +[ -n "${NO_PARALLEL}" ] && { + go test -timeout 20m -parallel=1 -v ./... +} || { + go test -timeout 20m -v ./... +} diff --git a/changelog.txt b/changelog.txt new file mode 100644 index 0000000..882df8b --- /dev/null +++ b/changelog.txt @@ -0,0 +1,4 @@ +spectred v0.3.10 - 2024-04-25 +============================= + +* First version with RandomX for CPU mining. diff --git a/cmd/genkeypair/README.md b/cmd/genkeypair/README.md new file mode 100644 index 0000000..b234635 --- /dev/null +++ b/cmd/genkeypair/README.md @@ -0,0 +1,9 @@ +# genkeypair + +A tool for generating private-key-address pairs. + +Note: This tool prints unencrypted private keys and is not +recommended for day to day use, and is intended mainly for tests. 
+ +In order to manage your funds it's recommended to use +[spectrewallet](../spectrewallet) diff --git a/cmd/genkeypair/config.go b/cmd/genkeypair/config.go new file mode 100644 index 0000000..b88e322 --- /dev/null +++ b/cmd/genkeypair/config.go @@ -0,0 +1,26 @@ +package main + +import ( + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/infrastructure/config" +) + +type configFlags struct { + config.NetworkFlags +} + +func parseConfig() (*configFlags, error) { + cfg := &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + if err != nil { + return nil, err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/cmd/genkeypair/main.go b/cmd/genkeypair/main.go new file mode 100644 index 0000000..4f573a4 --- /dev/null +++ b/cmd/genkeypair/main.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/util" +) + +func main() { + cfg, err := parseConfig() + if err != nil { + panic(err) + } + + privateKey, publicKey, err := libspectrewallet.CreateKeyPair(false) + if err != nil { + panic(err) + } + + addr, err := util.NewAddressPublicKey(publicKey, cfg.NetParams().Prefix) + if err != nil { + panic(err) + } + + fmt.Printf("Private key: %x\n", privateKey) + fmt.Printf("Address: %s\n", addr) +} diff --git a/cmd/spectrectl/README.md b/cmd/spectrectl/README.md new file mode 100644 index 0000000..dd83805 --- /dev/null +++ b/cmd/spectrectl/README.md @@ -0,0 +1,53 @@ +# spectrectl + +`spectrectl` is an RPC client for `spectred`. + +## Requirements + +Go 1.19 or later. 
+ +## Build from Source + +* Install Go according to the installation instructions here: + http://golang.org/doc/install + +* Ensure Go was installed properly and is a supported version: + +```bash +go version +``` + +* Run the following commands to obtain and install `spectred` + including all dependencies: + +```bash +git clone https://github.com/spectre-project/spectred +cd spectred/cmd/spectrectl +go install . +``` + +* `spectrectl` should now be installed in `$(go env GOPATH)/bin`. If + you did not already add the bin directory to your system path + during Go installation, you are encouraged to do so now. + +## Usage + +The full `spectrectl` configuration options can be seen with: + +```bash +spectrectl --help +``` + +But the minimum configuration needed to run it is: + +```bash +spectrectl +``` + +For example: + +``` +spectrectl '{"getBlockDagInfoRequest":{}}' +``` + +For a list of all available requests check out the [RPC documentation](infrastructure/network/netadapter/server/grpcserver/protowire/rpc.md) diff --git a/cmd/spectrectl/command_parser.go b/cmd/spectrectl/command_parser.go new file mode 100644 index 0000000..285e405 --- /dev/null +++ b/cmd/spectrectl/command_parser.go @@ -0,0 +1,188 @@ +package main + +import ( + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" +) + +func parseCommand(args []string, commandDescs []*commandDescription) (*protowire.SpectredMessage, error) { + commandName, parameterStrings := args[0], args[1:] + + var commandDesc *commandDescription + for _, cd := range commandDescs { + if cd.name == commandName { + commandDesc = cd + break + } + } + if commandDesc == nil { + return nil, errors.Errorf("unknown command: %s. 
Use --list-commands to list all commands", commandName) + } + if len(parameterStrings) != len(commandDesc.parameters) { + return nil, errors.Errorf("command '%s' expects %d parameters but got %d", + commandName, len(commandDesc.parameters), len(parameterStrings)) + } + + commandValue := reflect.New(unwrapCommandType(commandDesc.typeof)) + for i, parameterDesc := range commandDesc.parameters { + parameterValue, err := stringToValue(parameterDesc, parameterStrings[i]) + if err != nil { + return nil, err + } + setField(commandValue, parameterValue, parameterDesc) + } + + return generateSpectredMessage(commandValue, commandDesc) +} + +func setField(commandValue reflect.Value, parameterValue reflect.Value, parameterDesc *parameterDescription) { + parameterField := commandValue.Elem().FieldByName(parameterDesc.name) + + parameterField.Set(parameterValue) +} + +func stringToValue(parameterDesc *parameterDescription, valueStr string) (reflect.Value, error) { + if valueStr == "-" { + return reflect.Zero(parameterDesc.typeof), nil + } + + var value interface{} + var err error + switch parameterDesc.typeof.Kind() { + case reflect.Bool: + value, err = strconv.ParseBool(valueStr) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + case reflect.Int8: + var valueInt64 int64 + valueInt64, err = strconv.ParseInt(valueStr, 10, 8) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = int8(valueInt64) + case reflect.Int16: + var valueInt64 int64 + valueInt64, err = strconv.ParseInt(valueStr, 10, 16) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = int16(valueInt64) + case reflect.Int32: + var valueInt64 int64 + valueInt64, err = strconv.ParseInt(valueStr, 10, 32) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = int32(valueInt64) + case reflect.Int64: + value, err = strconv.ParseInt(valueStr, 10, 64) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + case 
reflect.Uint8: + var valueUInt64 uint64 + valueUInt64, err = strconv.ParseUint(valueStr, 10, 8) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = uint8(valueUInt64) + case reflect.Uint16: + var valueUInt64 uint64 + valueUInt64, err = strconv.ParseUint(valueStr, 10, 16) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = uint16(valueUInt64) + case reflect.Uint32: + var valueUInt64 uint64 + valueUInt64, err = strconv.ParseUint(valueStr, 10, 32) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = uint32(valueUInt64) + case reflect.Uint64: + value, err = strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + case reflect.Float32: + var valueFloat64 float64 + valueFloat64, err = strconv.ParseFloat(valueStr, 32) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + value = float32(valueFloat64) + case reflect.Float64: + value, err = strconv.ParseFloat(valueStr, 64) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + case reflect.String: + value = valueStr + case reflect.Struct: + pointer := reflect.New(parameterDesc.typeof) // create pointer to this type + fieldInterface := pointer.Interface().(proto.Message) + err := protojson.Unmarshal([]byte(valueStr), fieldInterface) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + // Unpointer the value once it's ready + fieldInterfaceValue := reflect.ValueOf(fieldInterface) + value = fieldInterfaceValue.Elem().Interface() + case reflect.Ptr: + dummyParameterDesc := ¶meterDescription{ + name: "valuePointedTo", + typeof: parameterDesc.typeof.Elem(), + } + valuePointedTo, err := stringToValue(dummyParameterDesc, valueStr) + if err != nil { + return reflect.Value{}, errors.WithStack(err) + } + pointer := pointerToValue(valuePointedTo) + + value = pointer.Interface() + + case reflect.Slice: + sliceType := parameterDesc.typeof.Elem() + if 
sliceType.Kind() != reflect.String { + return reflect.Value{}, + errors.Errorf("Unsupported slice type '%s' for parameter '%s'", + sliceType, + parameterDesc.name) + } + if valueStr == "" { + value = []string{} + } else { + value = strings.Split(valueStr, ",") + } + // Int and uint are not supported because their size is platform-dependant + case reflect.Int, + reflect.Uint, + // Other types are not supported simply because they are not used in any command right now + // but support can be added if and when needed + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.UnsafePointer, + reflect.Invalid, + reflect.Uintptr, + reflect.Complex64, + reflect.Complex128, + reflect.Array, + reflect.Chan: + fallthrough + default: + return reflect.Value{}, + errors.Errorf("Unsupported type '%s' for parameter '%s'", parameterDesc.typeof.Kind(), parameterDesc.name) + } + + return reflect.ValueOf(value), nil +} diff --git a/cmd/spectrectl/commands.go b/cmd/spectrectl/commands.go new file mode 100644 index 0000000..7758d69 --- /dev/null +++ b/cmd/spectrectl/commands.go @@ -0,0 +1,96 @@ +package main + +import ( + "fmt" + "reflect" + "strings" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" +) + +var commandTypes = []reflect.Type{ + reflect.TypeOf(protowire.SpectredMessage_AddPeerRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetConnectedPeerInfoRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetPeerAddressesRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetCurrentNetworkRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetInfoRequest{}), + + reflect.TypeOf(protowire.SpectredMessage_GetBlockRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetBlocksRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetHeadersRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetBlockCountRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetBlockDagInfoRequest{}), + 
reflect.TypeOf(protowire.SpectredMessage_GetSelectedTipHashRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetVirtualSelectedParentBlueScoreRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest{}), + reflect.TypeOf(protowire.SpectredMessage_ResolveFinalityConflictRequest{}), + reflect.TypeOf(protowire.SpectredMessage_EstimateNetworkHashesPerSecondRequest{}), + + reflect.TypeOf(protowire.SpectredMessage_GetBlockTemplateRequest{}), + reflect.TypeOf(protowire.SpectredMessage_SubmitBlockRequest{}), + + reflect.TypeOf(protowire.SpectredMessage_GetMempoolEntryRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetMempoolEntriesRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetMempoolEntriesByAddressesRequest{}), + + reflect.TypeOf(protowire.SpectredMessage_SubmitTransactionRequest{}), + + reflect.TypeOf(protowire.SpectredMessage_GetUtxosByAddressesRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetBalanceByAddressRequest{}), + reflect.TypeOf(protowire.SpectredMessage_GetCoinSupplyRequest{}), + + reflect.TypeOf(protowire.SpectredMessage_BanRequest{}), + reflect.TypeOf(protowire.SpectredMessage_UnbanRequest{}), +} + +type commandDescription struct { + name string + parameters []*parameterDescription + typeof reflect.Type +} + +type parameterDescription struct { + name string + typeof reflect.Type +} + +func commandDescriptions() []*commandDescription { + commandDescriptions := make([]*commandDescription, len(commandTypes)) + + for i, commandTypeWrapped := range commandTypes { + commandType := unwrapCommandType(commandTypeWrapped) + + name := strings.TrimSuffix(commandType.Name(), "RequestMessage") + numFields := commandType.NumField() + + var parameters []*parameterDescription + for i := 0; i < numFields; i++ { + field := commandType.Field(i) + + if !isFieldExported(field) { + continue + } + + parameters = append(parameters, ¶meterDescription{ + name: field.Name, + typeof: field.Type, + }) + } + 
commandDescriptions[i] = &commandDescription{ + name: name, + parameters: parameters, + typeof: commandTypeWrapped, + } + } + + return commandDescriptions +} + +func (cd *commandDescription) help() string { + sb := &strings.Builder{} + sb.WriteString(cd.name) + for _, parameter := range cd.parameters { + _, _ = fmt.Fprintf(sb, " [%s]", parameter.name) + } + return sb.String() +} diff --git a/cmd/spectrectl/config.go b/cmd/spectrectl/config.go new file mode 100644 index 0000000..c8f5932 --- /dev/null +++ b/cmd/spectrectl/config.go @@ -0,0 +1,55 @@ +package main + +import ( + "github.com/jessevdk/go-flags" + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/config" +) + +var ( + defaultRPCServer = "localhost" + defaultTimeout uint64 = 30 +) + +type configFlags struct { + RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"` + Timeout uint64 `short:"t" long:"timeout" description:"Timeout for the request (in seconds)"` + RequestJSON string `short:"j" long:"json" description:"The request in JSON format"` + ListCommands bool `short:"l" long:"list-commands" description:"List all commands and exit"` + AllowConnectionToDifferentVersions bool `short:"a" long:"allow-connection-to-different-versions" description:"Allow connections to versions different than spectrectl's version'"` + CommandAndParameters []string + config.NetworkFlags +} + +func parseConfig() (*configFlags, error) { + cfg := &configFlags{ + RPCServer: defaultRPCServer, + Timeout: defaultTimeout, + } + parser := flags.NewParser(cfg, flags.HelpFlag) + parser.Usage = "spectrectl [OPTIONS] [COMMAND] [COMMAND PARAMETERS].\n\nCommand can be supplied only if --json is not used." + + "\n\nUse `spectrectl --list-commands` to get a list of all commands and their parameters." 
+ + "\nFor optional parameters- use '-' without quotes to not pass the parameter.\n" + remainingArgs, err := parser.Parse() + if err != nil { + return nil, err + } + + if cfg.ListCommands { + return cfg, nil + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return nil, err + } + + cfg.CommandAndParameters = remainingArgs + if len(cfg.CommandAndParameters) == 0 && cfg.RequestJSON == "" || + len(cfg.CommandAndParameters) > 0 && cfg.RequestJSON != "" { + + return nil, errors.New("Exactly one of --json or a command must be specified") + } + + return cfg, nil +} diff --git a/cmd/spectrectl/docker/Dockerfile b/cmd/spectrectl/docker/Dockerfile new file mode 100644 index 0000000..6648798 --- /dev/null +++ b/cmd/spectrectl/docker/Dockerfile @@ -0,0 +1,30 @@ +# -- multistage docker build: stage #1: build stage +FROM golang:1.19-alpine AS build + +RUN mkdir -p /go/src/github.com/spectre-project/spectred/ + +WORKDIR /go/src/github.com/spectre-project/spectred/ + +RUN apk add --no-cache curl git openssh binutils gcc musl-dev + +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . + +WORKDIR /go/src/github.com/spectre-project/spectred/cmd/spectrectl + +RUN GOOS=linux go build -a -installsuffix cgo -o spectrectl . 
+ +# --- multistage docker build: stage #2: runtime image +FROM alpine +WORKDIR /app + +RUN apk add --no-cache ca-certificates tini + +COPY --from=build /go/src/github.com/spectre-project/spectred/cmd/spectrectl/spectrectl /app/ + +USER nobody +ENTRYPOINT [ "/sbin/tini", "--" ] diff --git a/cmd/spectrectl/main.go b/cmd/spectrectl/main.go new file mode 100644 index 0000000..f5782a9 --- /dev/null +++ b/cmd/spectrectl/main.go @@ -0,0 +1,119 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/spectre-project/spectred/version" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" + + "github.com/pkg/errors" + "google.golang.org/protobuf/encoding/protojson" + + "github.com/spectre-project/spectred/infrastructure/network/rpcclient/grpcclient" +) + +func main() { + cfg, err := parseConfig() + if err != nil { + printErrorAndExit(fmt.Sprintf("error parsing command-line arguments: %s", err)) + } + if cfg.ListCommands { + printAllCommands() + return + } + + rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer) + if err != nil { + printErrorAndExit(fmt.Sprintf("error parsing RPC server address: %s", err)) + } + client, err := grpcclient.Connect(rpcAddress) + if err != nil { + printErrorAndExit(fmt.Sprintf("error connecting to the RPC server: %s", err)) + } + defer client.Disconnect() + + if !cfg.AllowConnectionToDifferentVersions { + spectredMessage, err := client.Post(&protowire.SpectredMessage{Payload: &protowire.SpectredMessage_GetInfoRequest{GetInfoRequest: &protowire.GetInfoRequestMessage{}}}) + if err != nil { + printErrorAndExit(fmt.Sprintf("Cannot post GetInfo message: %s", err)) + } + + localVersion := version.Version() + remoteVersion := spectredMessage.GetGetInfoResponse().ServerVersion + + if localVersion != remoteVersion { + printErrorAndExit(fmt.Sprintf("Server version mismatch, expect: %s, got: %s", localVersion, remoteVersion)) + } + } + + responseChan := make(chan string) + 
+ if cfg.RequestJSON != "" { + go postJSON(cfg, client, responseChan) + } else { + go postCommand(cfg, client, responseChan) + } + + timeout := time.Duration(cfg.Timeout) * time.Second + select { + case responseString := <-responseChan: + prettyResponseString := prettifyResponse(responseString) + fmt.Println(prettyResponseString) + case <-time.After(timeout): + printErrorAndExit(fmt.Sprintf("timeout of %s has been exceeded", timeout)) + } +} + +func printAllCommands() { + requestDescs := commandDescriptions() + for _, requestDesc := range requestDescs { + fmt.Printf("\t%s\n", requestDesc.help()) + } +} + +func postCommand(cfg *configFlags, client *grpcclient.GRPCClient, responseChan chan string) { + message, err := parseCommand(cfg.CommandAndParameters, commandDescriptions()) + if err != nil { + printErrorAndExit(fmt.Sprintf("error parsing command: %s", err)) + } + + response, err := client.Post(message) + if err != nil { + printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err)) + } + responseBytes, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(response) + if err != nil { + printErrorAndExit(errors.Wrapf(err, "error parsing the response from the RPC server").Error()) + } + + responseChan <- string(responseBytes) +} + +func postJSON(cfg *configFlags, client *grpcclient.GRPCClient, doneChan chan string) { + responseString, err := client.PostJSON(cfg.RequestJSON) + if err != nil { + printErrorAndExit(fmt.Sprintf("error posting the request to the RPC server: %s", err)) + } + doneChan <- responseString +} + +func prettifyResponse(response string) string { + spectredMessage := &protowire.SpectredMessage{} + err := protojson.Unmarshal([]byte(response), spectredMessage) + if err != nil { + printErrorAndExit(fmt.Sprintf("error parsing the response from the RPC server: %s", err)) + } + + marshalOptions := &protojson.MarshalOptions{} + marshalOptions.Indent = " " + marshalOptions.EmitUnpopulated = true + return 
marshalOptions.Format(spectredMessage)
+}
+
+func printErrorAndExit(message string) {
+	fmt.Fprintf(os.Stderr, "%s\n", message)
+	os.Exit(1)
+}
diff --git a/cmd/spectrectl/reflection_helpers.go b/cmd/spectrectl/reflection_helpers.go
new file mode 100644
index 0000000..e05d592
--- /dev/null
+++ b/cmd/spectrectl/reflection_helpers.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+	"reflect"
+	"unicode"
+
+	"github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire"
+)
+
+// protobuf generates the command types with two types:
+// 1. A concrete type that holds the fields of the command bearing the name of the command with `RequestMessage` as suffix
+// 2. A wrapper that implements isSpectredMessage_Payload, having a single field pointing to the concrete command
+// bearing the name of the command with `SpectredMessage_` prefix and `Request` suffix
+
+// unwrapCommandType converts a reflect.Type signifying a wrapper type into the concrete request type
+func unwrapCommandType(requestTypeWrapped reflect.Type) reflect.Type {
+	return requestTypeWrapped.Field(0).Type.Elem()
+}
+
+// unwrapCommandValue converts a reflect.Value of a pointer to a wrapped command into a concrete command
+func unwrapCommandValue(commandValueWrapped reflect.Value) reflect.Value {
+	return commandValueWrapped.Elem().Field(0)
+}
+
+// isFieldExported returns true if the given field is exported.
+// Currently the only way to check this is to check if the first rune in the field's name is upper case.
+func isFieldExported(field reflect.StructField) bool { + return unicode.IsUpper(rune(field.Name[0])) +} + +// generateSpectredMessage generates a wrapped SpectredMessage with the given `commandValue` +func generateSpectredMessage(commandValue reflect.Value, commandDesc *commandDescription) (*protowire.SpectredMessage, error) { + commandWrapper := reflect.New(commandDesc.typeof) + unwrapCommandValue(commandWrapper).Set(commandValue) + + spectredMessage := reflect.New(reflect.TypeOf(protowire.SpectredMessage{})) + spectredMessage.Elem().FieldByName("Payload").Set(commandWrapper) + return spectredMessage.Interface().(*protowire.SpectredMessage), nil +} + +// pointerToValue returns a reflect.Value that represents a pointer to the given value +func pointerToValue(valuePointedTo reflect.Value) reflect.Value { + pointer := reflect.New(valuePointedTo.Type()) + pointer.Elem().Set(valuePointedTo) + return pointer +} diff --git a/cmd/spectreminer/README.md b/cmd/spectreminer/README.md new file mode 100644 index 0000000..d5ca000 --- /dev/null +++ b/cmd/spectreminer/README.md @@ -0,0 +1,45 @@ +# spectreminer + +`spectreminer` is a CPU-based miner for `spectred`. + +## Requirements + +Go 1.19 or later. + +## Build from Source + +* Install Go according to the installation instructions here: + http://golang.org/doc/install + +* Ensure Go was installed properly and is a supported version: + +```bash +go version +``` + +* Run the following commands to obtain and install `spectred` + including all dependencies: + +```bash +git clone https://github.com/spectre-project/spectred +cd spectred/cmd/spectreminer +go install . +``` + +* `spectreminer` should now be installed in `$(go env GOPATH)/bin`. + If you did not already add the bin directory to your system path + during Go installation, you are encouraged to do so now. 
+ +## Usage + +The full `spectreminer` configuration options can be seen with: + +```bash +spectreminer --help +``` + +But the minimum configuration needed to run it is: + +```bash +spectreminer --miningaddr= +``` diff --git a/cmd/spectreminer/client.go b/cmd/spectreminer/client.go new file mode 100644 index 0000000..abc985b --- /dev/null +++ b/cmd/spectreminer/client.go @@ -0,0 +1,61 @@ +package main + +import ( + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" +) + +const minerTimeout = 10 * time.Second + +type minerClient struct { + *rpcclient.RPCClient + + cfg *configFlags + newBlockTemplateNotificationChan chan struct{} +} + +func (mc *minerClient) connect() error { + rpcAddress, err := mc.cfg.NetParams().NormalizeRPCServerAddress(mc.cfg.RPCServer) + if err != nil { + return err + } + rpcClient, err := rpcclient.NewRPCClient(rpcAddress) + if err != nil { + return err + } + mc.RPCClient = rpcClient + mc.SetTimeout(minerTimeout) + mc.SetLogger(backendLog, logger.LevelTrace) + + err = mc.RegisterForNewBlockTemplateNotifications(func(_ *appmessage.NewBlockTemplateNotificationMessage) { + select { + case mc.newBlockTemplateNotificationChan <- struct{}{}: + default: + } + }) + if err != nil { + return errors.Wrapf(err, "error requesting new-block-template notifications") + } + + log.Infof("Connected to %s", rpcAddress) + + return nil +} + +func newMinerClient(cfg *configFlags) (*minerClient, error) { + minerClient := &minerClient{ + cfg: cfg, + newBlockTemplateNotificationChan: make(chan struct{}), + } + + err := minerClient.connect() + if err != nil { + return nil, err + } + + return minerClient, nil +} diff --git a/cmd/spectreminer/config.go b/cmd/spectreminer/config.go new file mode 100644 index 0000000..0e977d6 --- /dev/null +++ b/cmd/spectreminer/config.go @@ -0,0 +1,93 @@ +package main 
+ +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/spectre-project/spectred/infrastructure/config" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/util" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/version" +) + +const ( + defaultLogFilename = "spectreminer.log" + defaultErrLogFilename = "spectreminer_err.log" + defaultTargetBlockRateRatio = 2.0 +) + +var ( + // Default configuration options + defaultAppDir = util.AppDir("spectreminer", false) + defaultLogFile = filepath.Join(defaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(defaultAppDir, defaultErrLogFilename) + defaultRPCServer = "localhost" +) + +type configFlags struct { + ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` + RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"` + MiningAddr string `long:"miningaddr" description:"Address to mine to"` + NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine. If omitted, will mine until the process is interrupted."` + MineWhenNotSynced bool `long:"mine-when-not-synced" description:"Mine even if the node is not synced with the rest of the network."` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + TargetBlocksPerSecond *float64 `long:"target-blocks-per-second" description:"Sets a maximum block rate. 
0 means no limit (The default one is 2 * target network block rate)"` + config.NetworkFlags +} + +func parseConfig() (*configFlags, error) { + cfg := &configFlags{ + RPCServer: defaultRPCServer, + } + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + + // If special error ErrHelp catched by -h or --help + if ourErr, ok := err.(*flags.Error); ok && ourErr.Type == flags.ErrHelp { + os.Exit(0) + } + + // Show the version and exit if the version flag was specified. + if cfg.ShowVersion { + appName := filepath.Base(os.Args[0]) + appName = strings.TrimSuffix(appName, filepath.Ext(appName)) + fmt.Println(appName, "version", version.Version()) + os.Exit(0) + } + + if err != nil { + return nil, err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return nil, err + } + + if cfg.TargetBlocksPerSecond == nil { + targetBlocksPerSecond := defaultTargetBlockRateRatio / cfg.NetParams().TargetTimePerBlock.Seconds() + cfg.TargetBlocksPerSecond = &targetBlocksPerSecond + } + + if cfg.Profile != "" { + profilePort, err := strconv.Atoi(cfg.Profile) + if err != nil || profilePort < 1024 || profilePort > 65535 { + return nil, errors.New("The profile port must be between 1024 and 65535") + } + } + + if cfg.MiningAddr == "" { + fmt.Fprintln(os.Stderr, errors.New("Error parsing command-line arguments: --miningaddr is required")) + os.Exit(1) + } + + initLog(defaultLogFile, defaultErrLogFile) + + return cfg, nil +} diff --git a/cmd/spectreminer/docker/Dockerfile b/cmd/spectreminer/docker/Dockerfile new file mode 100644 index 0000000..a266ff2 --- /dev/null +++ b/cmd/spectreminer/docker/Dockerfile @@ -0,0 +1,29 @@ +# -- multistage docker build: stage #1: build stage +FROM golang:1.19-alpine AS build + +RUN mkdir -p /go/src/github.com/spectre-project/spectred/ + +WORKDIR /go/src/github.com/spectre-project/spectred/ + +RUN apk add --no-cache curl git openssh binutils gcc musl-dev + +COPY go.mod . +COPY go.sum . 
+ +RUN go mod download + +COPY . . + +WORKDIR /go/src/github.com/spectre-project/spectred/cmd/spectreminer +RUN GOOS=linux go build -a -installsuffix cgo -o spectreminer . + +# --- multistage docker build: stage #2: runtime image +FROM alpine +WORKDIR /app + +RUN apk add --no-cache ca-certificates tini + +COPY --from=build /go/src/github.com/spectre-project/spectred/cmd/spectreminer/spectreminer /app/ + +USER nobody +ENTRYPOINT [ "/sbin/tini", "--" ] diff --git a/cmd/spectreminer/log.go b/cmd/spectreminer/log.go new file mode 100644 index 0000000..5718ac1 --- /dev/null +++ b/cmd/spectreminer/log.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("SXMN") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, errLogFile string) { + log.SetLevel(logger.LevelDebug) + err := backendLog.AddLogFile(logFile, logger.LevelTrace) + if err != nil { + fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logger.LevelTrace, err) + os.Exit(1) + } + err = backendLog.AddLogFile(errLogFile, logger.LevelWarn) + if err != nil { + fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logger.LevelWarn, err) + os.Exit(1) + } + err = backendLog.AddLogWriter(os.Stdout, logger.LevelInfo) + if err != nil { + fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", logger.LevelWarn, err) + os.Exit(1) + } + err = backendLog.Run() + if err != nil { + fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err) + os.Exit(1) + } + +} diff --git a/cmd/spectreminer/main.go b/cmd/spectreminer/main.go new file mode 100644 index 0000000..fa734df --- /dev/null +++ b/cmd/spectreminer/main.go @@ -0,0 +1,67 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/util" 
+ + "github.com/spectre-project/spectred/version" + + "github.com/pkg/errors" + + _ "net/http/pprof" + + "github.com/spectre-project/spectred/infrastructure/os/signal" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + defer panics.HandlePanic(log, "MAIN", nil) + interrupt := signal.InterruptListener() + + cfg, err := parseConfig() + if err != nil { + printErrorAndExit(errors.Errorf("Error parsing command-line arguments: %s", err)) + } + defer backendLog.Close() + + // Show version at startup. + log.Infof("Version %s", version.Version()) + + // Enable http profiling server if requested. + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + client, err := newMinerClient(cfg) + if err != nil { + panic(errors.Wrap(err, "error connecting to the RPC server")) + } + defer client.Disconnect() + + miningAddr, err := util.DecodeAddress(cfg.MiningAddr, cfg.ActiveNetParams.Prefix) + if err != nil { + printErrorAndExit(errors.Errorf("Error decoding mining address: %s", err)) + } + + doneChan := make(chan struct{}) + spawn("mineLoop", func() { + err = mineLoop(client, cfg.NumberOfBlocks, *cfg.TargetBlocksPerSecond, cfg.MineWhenNotSynced, miningAddr) + if err != nil { + panic(errors.Wrap(err, "error in mine loop")) + } + doneChan <- struct{}{} + }) + + select { + case <-doneChan: + case <-interrupt: + } +} + +func printErrorAndExit(err error) { + fmt.Fprintf(os.Stderr, "%+v\n", err) + os.Exit(1) +} diff --git a/cmd/spectreminer/mineloop.go b/cmd/spectreminer/mineloop.go new file mode 100644 index 0000000..6434a72 --- /dev/null +++ b/cmd/spectreminer/mineloop.go @@ -0,0 +1,229 @@ +package main + +import ( + nativeerrors "errors" + "math/rand" + "sync/atomic" + "time" + + "github.com/spectre-project/spectred/version" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/cmd/spectreminer/templatemanager" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/util" +) + +var hashesTried uint64 + +const logHashRateInterval = 10 * time.Second + +func mineLoop(client *minerClient, numberOfBlocks uint64, targetBlocksPerSecond float64, mineWhenNotSynced bool, + miningAddr util.Address) error { + rand.Seed(time.Now().UnixNano()) // Seed the global concurrent-safe random source. + + errChan := make(chan error) + doneChan := make(chan struct{}) + + // We don't want to send router.DefaultMaxMessages blocks at once because there's + // a high chance we'll get disconnected from the node, so we make the channel + // capacity router.DefaultMaxMessages/2 (we give some slack for getBlockTemplate + // requests) + foundBlockChan := make(chan *externalapi.DomainBlock, router.DefaultMaxMessages/2) + + spawn("templatesLoop", func() { + templatesLoop(client, miningAddr, errChan) + }) + + spawn("blocksLoop", func() { + const windowSize = 10 + hasBlockRateTarget := targetBlocksPerSecond != 0 + var windowTicker, blockTicker *time.Ticker + // We use tickers to limit the block rate: + // 1. windowTicker -> makes sure that the last windowSize blocks take at least windowSize*targetBlocksPerSecond. + // 2. blockTicker -> makes sure that each block takes at least targetBlocksPerSecond/windowSize. 
+ // that way we both allow for fluctuation in block rate but also make sure they're not too big (by an order of magnitude) + if hasBlockRateTarget { + windowRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond / windowSize)) + blockRate := time.Duration(float64(time.Second) / (targetBlocksPerSecond * windowSize)) + log.Infof("Minimum average time per %d blocks: %s, smaller minimum time per block: %s", windowSize, windowRate, blockRate) + windowTicker = time.NewTicker(windowRate) + blockTicker = time.NewTicker(blockRate) + defer windowTicker.Stop() + defer blockTicker.Stop() + } + windowStart := time.Now() + for blockIndex := uint64(0); numberOfBlocks == 0 || blockIndex < numberOfBlocks; blockIndex++ { + foundBlockChan <- mineNextBlock(mineWhenNotSynced) + if hasBlockRateTarget { + <-blockTicker.C + if (blockIndex % windowSize) == 0 { + tickerStart := time.Now() + <-windowTicker.C + log.Infof("Finished mining %d blocks in: %s. slept for: %s", windowSize, time.Since(windowStart), time.Since(tickerStart)) + windowStart = time.Now() + } + } + } + }) + + spawn("handleFoundBlock", func() { + for i := uint64(0); numberOfBlocks == 0 || i < numberOfBlocks; i++ { + block := <-foundBlockChan + err := handleFoundBlock(client, block) + if err != nil { + errChan <- err + return + } + } + doneChan <- struct{}{} + }) + + logHashRate() + + select { + case err := <-errChan: + return err + case <-doneChan: + return nil + } +} + +func logHashRate() { + spawn("logHashRate", func() { + lastCheck := time.Now() + for range time.Tick(logHashRateInterval) { + currentHashesTried := atomic.LoadUint64(&hashesTried) + currentTime := time.Now() + kiloHashesTried := float64(currentHashesTried) / 1000.0 + hashRate := kiloHashesTried / currentTime.Sub(lastCheck).Seconds() + log.Infof("Current hash rate is %.2f Khash/s", hashRate) + lastCheck = currentTime + // subtract from hashesTried the hashes we already sampled + atomic.AddUint64(&hashesTried, -currentHashesTried) + } + }) +} + 
+func handleFoundBlock(client *minerClient, block *externalapi.DomainBlock) error { + blockHash := consensushashing.BlockHash(block) + log.Infof("Submitting block %s to %s", blockHash, client.Address()) + + rejectReason, err := client.SubmitBlock(block) + if err != nil { + if nativeerrors.Is(err, router.ErrTimeout) { + log.Warnf("Got timeout while submitting block %s to %s: %s", blockHash, client.Address(), err) + return client.Reconnect() + } + if nativeerrors.Is(err, router.ErrRouteClosed) { + log.Debugf("Got route is closed while requesting block template from %s. "+ + "The client is most likely reconnecting", client.Address()) + return nil + } + if rejectReason == appmessage.RejectReasonIsInIBD { + const waitTime = 1 * time.Second + log.Warnf("Block %s was rejected because the node is in IBD. Waiting for %s", blockHash, waitTime) + time.Sleep(waitTime) + return nil + } + return errors.Wrapf(err, "Error submitting block %s to %s", blockHash, client.Address()) + } + return nil +} + +func mineNextBlock(mineWhenNotSynced bool) *externalapi.DomainBlock { + nonce := rand.Uint64() // Use the global concurrent-safe random source. + for { + nonce++ + // For each nonce we try to build a block from the most up to date + // block template. + // In the rare case where the nonce space is exhausted for a specific + // block, it'll keep looping the nonce until a new block template + // is discovered. 
+ block, state := getBlockForMining(mineWhenNotSynced) + state.Nonce = nonce + atomic.AddUint64(&hashesTried, 1) + if state.CheckProofOfWork() { + mutHeader := block.Header.ToMutable() + mutHeader.SetNonce(nonce) + block.Header = mutHeader.ToImmutable() + log.Infof("Found block %s with parents %s", consensushashing.BlockHash(block), block.Header.DirectParents()) + return block + } + } +} + +func getBlockForMining(mineWhenNotSynced bool) (*externalapi.DomainBlock, *pow.State) { + tryCount := 0 + + const sleepTime = 500 * time.Millisecond + const sleepTimeWhenNotSynced = 5 * time.Second + + for { + tryCount++ + + shouldLog := (tryCount-1)%10 == 0 + template, state, isSynced := templatemanager.Get() + if template == nil { + if shouldLog { + log.Info("Waiting for the initial template") + } + time.Sleep(sleepTime) + continue + } + if !isSynced && !mineWhenNotSynced { + if shouldLog { + log.Warnf("Spectred is not synced. Skipping current block template") + } + time.Sleep(sleepTimeWhenNotSynced) + continue + } + + return template, state + } +} + +func templatesLoop(client *minerClient, miningAddr util.Address, errChan chan error) { + getBlockTemplate := func() { + template, err := client.GetBlockTemplate(miningAddr.String(), "spectreminer-"+version.Version()) + if nativeerrors.Is(err, router.ErrTimeout) { + log.Warnf("Got timeout while requesting block template from %s: %s", client.Address(), err) + reconnectErr := client.Reconnect() + if reconnectErr != nil { + errChan <- reconnectErr + } + return + } + if nativeerrors.Is(err, router.ErrRouteClosed) { + log.Debugf("Got route is closed while requesting block template from %s. 
"+ + "The client is most likely reconnecting", client.Address()) + return + } + if err != nil { + errChan <- errors.Wrapf(err, "Error getting block template from %s", client.Address()) + return + } + err = templatemanager.Set(template) + if err != nil { + errChan <- errors.Wrapf(err, "Error setting block template from %s", client.Address()) + return + } + } + + getBlockTemplate() + const tickerTime = 500 * time.Millisecond + ticker := time.NewTicker(tickerTime) + for { + select { + case <-client.newBlockTemplateNotificationChan: + getBlockTemplate() + ticker.Reset(tickerTime) + case <-ticker.C: + getBlockTemplate() + } + } +} diff --git a/cmd/spectreminer/templatemanager/templatemanager.go b/cmd/spectreminer/templatemanager/templatemanager.go new file mode 100644 index 0000000..deae2c2 --- /dev/null +++ b/cmd/spectreminer/templatemanager/templatemanager.go @@ -0,0 +1,41 @@ +package templatemanager + +import ( + "sync" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" +) + +var currentTemplate *externalapi.DomainBlock +var currentState *pow.State +var isSynced bool +var lock = &sync.Mutex{} + +// Get returns the template to work on +func Get() (*externalapi.DomainBlock, *pow.State, bool) { + lock.Lock() + defer lock.Unlock() + // Shallow copy the block so when the user replaces the header it won't affect the template here. 
+ if currentTemplate == nil { + return nil, nil, false + } + block := *currentTemplate + state := *currentState + return &block, &state, isSynced +} + +// Set sets the current template to work on +func Set(template *appmessage.GetBlockTemplateResponseMessage) error { + block, err := appmessage.RPCBlockToDomainBlock(template.Block) + if err != nil { + return err + } + lock.Lock() + defer lock.Unlock() + currentTemplate = block + currentState = pow.NewState(block.Header.ToMutable()) + isSynced = template.IsSynced + return nil +} diff --git a/cmd/spectrewallet/balance.go b/cmd/spectrewallet/balance.go new file mode 100644 index 0000000..42aaa28 --- /dev/null +++ b/cmd/spectrewallet/balance.go @@ -0,0 +1,43 @@ +package main + +import ( + "context" + "fmt" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" +) + +func balance(conf *balanceConfig) error { + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + response, err := daemonClient.GetBalance(ctx, &pb.GetBalanceRequest{}) + if err != nil { + return err + } + + pendingSuffix := "" + if response.Pending > 0 { + pendingSuffix = " (pending)" + } + if conf.Verbose { + pendingSuffix = "" + println("Address Available Pending") + println("-----------------------------------------------------------------------------------------------------------") + for _, addressBalance := range response.AddressBalances { + fmt.Printf("%s %s %s\n", addressBalance.Address, utils.FormatSpr(addressBalance.Available), utils.FormatSpr(addressBalance.Pending)) + } + println("-----------------------------------------------------------------------------------------------------------") + print(" ") + } + fmt.Printf("Total balance, SPR %s 
%s%s\n", utils.FormatSpr(response.Available), utils.FormatSpr(response.Pending), pendingSuffix) + + return nil +} diff --git a/cmd/spectrewallet/broadcast.go b/cmd/spectrewallet/broadcast.go new file mode 100644 index 0000000..a9c58ff --- /dev/null +++ b/cmd/spectrewallet/broadcast.go @@ -0,0 +1,56 @@ +package main + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" +) + +func broadcast(conf *broadcastConfig) error { + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + + if conf.Transactions == "" && conf.TransactionsFile == "" { + return errors.Errorf("Either --transaction or --transaction-file is required") + } + if conf.Transactions != "" && conf.TransactionsFile != "" { + return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time") + } + + transactionsHex := conf.Transactions + if conf.TransactionsFile != "" { + transactionHexBytes, err := ioutil.ReadFile(conf.TransactionsFile) + if err != nil { + return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionsFile) + } + transactionsHex = strings.TrimSpace(string(transactionHexBytes)) + } + + transactions, err := decodeTransactionsFromHex(transactionsHex) + if err != nil { + return err + } + + response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{Transactions: transactions}) + if err != nil { + return err + } + fmt.Println("Transactions were sent successfully") + fmt.Println("Transaction ID(s): ") + for _, txID := range response.TxIDs { + fmt.Printf("\t%s\n", txID) + } + + return nil +} diff --git a/cmd/spectrewallet/common.go b/cmd/spectrewallet/common.go new file mode 100644 index 0000000..5f3dcd5 --- /dev/null +++ 
b/cmd/spectrewallet/common.go @@ -0,0 +1,14 @@ +package main + +import ( + "fmt" + "os" + "time" +) + +const daemonTimeout = 2 * time.Minute + +func printErrorAndExit(err error) { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) +} diff --git a/cmd/spectrewallet/config.go b/cmd/spectrewallet/config.go new file mode 100644 index 0000000..25b64f1 --- /dev/null +++ b/cmd/spectrewallet/config.go @@ -0,0 +1,323 @@ +package main + +import ( + "os" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/config" + + "github.com/jessevdk/go-flags" +) + +const ( + createSubCmd = "create" + balanceSubCmd = "balance" + sendSubCmd = "send" + sweepSubCmd = "sweep" + createUnsignedTransactionSubCmd = "create-unsigned-transaction" + signSubCmd = "sign" + broadcastSubCmd = "broadcast" + parseSubCmd = "parse" + showAddressesSubCmd = "show-addresses" + newAddressSubCmd = "new-address" + dumpUnencryptedDataSubCmd = "dump-unencrypted-data" + startDaemonSubCmd = "start-daemon" +) + +const ( + defaultListen = "localhost:8882" + defaultRPCServer = "localhost" +) + +type configFlags struct { + config.NetworkFlags +} + +type createConfig struct { + KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.spectrewallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\spectrewallet\\key.json (Windows))"` + Password string `long:"password" short:"p" description:"Wallet password"` + Yes bool `long:"yes" short:"y" description:"Assume \"yes\" to all questions"` + MinimumSignatures uint32 `long:"min-signatures" short:"m" description:"Minimum required signatures" default:"1"` + NumPrivateKeys uint32 `long:"num-private-keys" short:"k" description:"Number of private keys" default:"1"` + NumPublicKeys uint32 `long:"num-public-keys" short:"n" description:"Total number of keys" default:"1"` + ECDSA bool `long:"ecdsa" description:"Create an ECDSA wallet"` + Import bool `long:"import" short:"i" description:"Import private keys (as opposed to 
generating them)"` + config.NetworkFlags +} + +type balanceConfig struct { + DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + Verbose bool `long:"verbose" short:"v" description:"Verbose: show addresses with balance"` + config.NetworkFlags +} + +type sendConfig struct { + KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.spectrewallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\spectrewallet\\key.json (Windows))"` + Password string `long:"password" short:"p" description:"Wallet password"` + DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + ToAddress string `long:"to-address" short:"t" description:"The public address to send Spectre to" required:"true"` + FromAddresses []string `long:"from-address" short:"a" description:"Specific public address to send Spectre from. Use multiple times to accept several addresses" required:"false"` + SendAmount string `long:"send-amount" short:"v" description:"An amount to send in Spectre (e.g. 
1234.12345678)"` + IsSendAll bool `long:"send-all" description:"Send all the Spectre in the wallet (mutually exclusive with --send-amount)"` + UseExistingChangeAddress bool `long:"use-existing-change-address" short:"u" description:"Will use an existing change address (in case no change address was ever used, it will use a new one)"` + Verbose bool `long:"show-serialized" short:"s" description:"Show a list of hex encoded sent transactions"` + config.NetworkFlags +} + +type sweepConfig struct { + PrivateKey string `long:"private-key" short:"k" description:"Private key in hex format"` + DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + config.NetworkFlags +} + +type createUnsignedTransactionConfig struct { + DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + ToAddress string `long:"to-address" short:"t" description:"The public address to send Spectre to" required:"true"` + FromAddresses []string `long:"from-address" short:"a" description:"Specific public address to send Spectre from. Use multiple times to accept several addresses" required:"false"` + SendAmount string `long:"send-amount" short:"v" description:"An amount to send in Spectre (e.g. 
1234.12345678)"` + IsSendAll bool `long:"send-all" description:"Send all the Spectre in the wallet (mutually exclusive with --send-amount)"` + UseExistingChangeAddress bool `long:"use-existing-change-address" short:"u" description:"Will use an existing change address (in case no change address was ever used, it will use a new one)"` + config.NetworkFlags +} + +type signConfig struct { + KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.spectrewallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\spectrewallet\\key.json (Windows))"` + Password string `long:"password" short:"p" description:"Wallet password"` + Transaction string `long:"transaction" short:"t" description:"The unsigned transaction(s) to sign on (encoded in hex)"` + TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction(s) to sign on (encoded in hex)"` + config.NetworkFlags +} + +type broadcastConfig struct { + DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + Transactions string `long:"transaction" short:"t" description:"The signed transaction to broadcast (encoded in hex)"` + TransactionsFile string `long:"transaction-file" short:"F" description:"The file containing the unsigned transaction to sign on (encoded in hex)"` + config.NetworkFlags +} + +type parseConfig struct { + Transaction string `long:"transaction" short:"t" description:"The transaction to parse (encoded in hex)"` + TransactionFile string `long:"transaction-file" short:"F" description:"The file containing the transaction to parse (encoded in hex)"` + Verbose bool `long:"verbose" short:"v" description:"Verbose: show transaction inputs"` + config.NetworkFlags +} + +type showAddressesConfig struct { + DaemonAddress string `long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + config.NetworkFlags +} + +type newAddressConfig struct { + DaemonAddress string 
`long:"daemonaddress" short:"d" description:"Wallet daemon server to connect to"` + config.NetworkFlags +} + +type startDaemonConfig struct { + KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.spectrewallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\spectrewallet\\key.json (Windows))"` + Password string `long:"password" short:"p" description:"Wallet password"` + RPCServer string `long:"rpcserver" short:"s" description:"RPC server to connect to"` + Listen string `long:"listen" short:"l" description:"Address to listen on (default: 0.0.0.0:8882)"` + Timeout uint32 `long:"wait-timeout" short:"w" description:"Waiting timeout for RPC calls, seconds (default: 30 s)"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + config.NetworkFlags +} + +type dumpUnencryptedDataConfig struct { + KeysFile string `long:"keys-file" short:"f" description:"Keys file location (default: ~/.spectrewallet/keys.json (*nix), %USERPROFILE%\\AppData\\Local\\spectrewallet\\key.json (Windows))"` + Password string `long:"password" short:"p" description:"Wallet password"` + Yes bool `long:"yes" short:"y" description:"Assume \"yes\" to all questions"` + config.NetworkFlags +} + +func parseCommandLine() (subCommand string, config interface{}) { + cfg := &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + + createConf := &createConfig{} + parser.AddCommand(createSubCmd, "Creates a new wallet", + "Creates a private key and 3 public addresses, one for each of MainNet, TestNet and DevNet", createConf) + + balanceConf := &balanceConfig{DaemonAddress: defaultListen} + parser.AddCommand(balanceSubCmd, "Shows the balance of a public address", + "Shows the balance for a public address in Spectre", balanceConf) + + sendConf := &sendConfig{DaemonAddress: defaultListen} + parser.AddCommand(sendSubCmd, "Sends a Spectre transaction to a public address", + "Sends 
a Spectre transaction to a public address", sendConf) + + sweepConf := &sweepConfig{DaemonAddress: defaultListen} + parser.AddCommand(sweepSubCmd, "Sends all funds associated with the given schnorr private key to a new address of the current wallet", + "Sends all funds associated with the given schnorr private key to a newly created external (i.e. not a change) address of the "+ + "keyfile that is under the daemon's contol. Can be used with a private key generated with the genkeypair utilily "+ + "to send funds to your main wallet.", sweepConf) + + createUnsignedTransactionConf := &createUnsignedTransactionConfig{DaemonAddress: defaultListen} + parser.AddCommand(createUnsignedTransactionSubCmd, "Create an unsigned Spectre transaction", + "Create an unsigned Spectre transaction", createUnsignedTransactionConf) + + signConf := &signConfig{} + parser.AddCommand(signSubCmd, "Sign the given partially signed transaction", + "Sign the given partially signed transaction", signConf) + + broadcastConf := &broadcastConfig{DaemonAddress: defaultListen} + parser.AddCommand(broadcastSubCmd, "Broadcast the given transaction", + "Broadcast the given transaction", broadcastConf) + + parseConf := &parseConfig{} + parser.AddCommand(parseSubCmd, "Parse the given transaction and print its contents", + "Parse the given transaction and print its contents", parseConf) + + showAddressesConf := &showAddressesConfig{DaemonAddress: defaultListen} + parser.AddCommand(showAddressesSubCmd, "Shows all generated public addresses of the current wallet", + "Shows all generated public addresses of the current wallet", showAddressesConf) + + newAddressConf := &newAddressConfig{DaemonAddress: defaultListen} + parser.AddCommand(newAddressSubCmd, "Generates new public address of the current wallet and shows it", + "Generates new public address of the current wallet and shows it", newAddressConf) + + dumpUnencryptedDataConf := &dumpUnencryptedDataConfig{} + parser.AddCommand(dumpUnencryptedDataSubCmd, 
"Prints the unencrypted wallet data", + "Prints the unencrypted wallet data including its private keys. Anyone that sees it can access "+ + "the funds. Use only on safe environment.", dumpUnencryptedDataConf) + + startDaemonConf := &startDaemonConfig{ + RPCServer: defaultRPCServer, + Listen: defaultListen, + } + parser.AddCommand(startDaemonSubCmd, "Start the wallet daemon", "Start the wallet daemon", startDaemonConf) + + _, err := parser.Parse() + if err != nil { + var flagsErr *flags.Error + if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp { + os.Exit(0) + } else { + os.Exit(1) + } + return "", nil + } + + switch parser.Command.Active.Name { + case createSubCmd: + combineNetworkFlags(&createConf.NetworkFlags, &cfg.NetworkFlags) + err := createConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = createConf + case balanceSubCmd: + combineNetworkFlags(&balanceConf.NetworkFlags, &cfg.NetworkFlags) + err := balanceConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = balanceConf + case sendSubCmd: + combineNetworkFlags(&sendConf.NetworkFlags, &cfg.NetworkFlags) + err := sendConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + err = validateSendConfig(sendConf) + if err != nil { + printErrorAndExit(err) + } + config = sendConf + case sweepSubCmd: + combineNetworkFlags(&sweepConf.NetworkFlags, &cfg.NetworkFlags) + err := sweepConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = sweepConf + case createUnsignedTransactionSubCmd: + combineNetworkFlags(&createUnsignedTransactionConf.NetworkFlags, &cfg.NetworkFlags) + err := createUnsignedTransactionConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + err = validateCreateUnsignedTransactionConf(createUnsignedTransactionConf) + if err != nil { + printErrorAndExit(err) + } + config = createUnsignedTransactionConf + case signSubCmd: + 
combineNetworkFlags(&signConf.NetworkFlags, &cfg.NetworkFlags) + err := signConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = signConf + case broadcastSubCmd: + combineNetworkFlags(&broadcastConf.NetworkFlags, &cfg.NetworkFlags) + err := broadcastConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = broadcastConf + case parseSubCmd: + combineNetworkFlags(&parseConf.NetworkFlags, &cfg.NetworkFlags) + err := parseConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = parseConf + case showAddressesSubCmd: + combineNetworkFlags(&showAddressesConf.NetworkFlags, &cfg.NetworkFlags) + err := showAddressesConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = showAddressesConf + case newAddressSubCmd: + combineNetworkFlags(&newAddressConf.NetworkFlags, &cfg.NetworkFlags) + err := newAddressConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = newAddressConf + case dumpUnencryptedDataSubCmd: + combineNetworkFlags(&dumpUnencryptedDataConf.NetworkFlags, &cfg.NetworkFlags) + err := dumpUnencryptedDataConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = dumpUnencryptedDataConf + case startDaemonSubCmd: + combineNetworkFlags(&startDaemonConf.NetworkFlags, &cfg.NetworkFlags) + err := startDaemonConf.ResolveNetwork(parser) + if err != nil { + printErrorAndExit(err) + } + config = startDaemonConf + } + + return parser.Command.Active.Name, config +} + +func validateCreateUnsignedTransactionConf(conf *createUnsignedTransactionConfig) error { + if (!conf.IsSendAll && conf.SendAmount == "") || + (conf.IsSendAll && conf.SendAmount != "") { + + return errors.New("exactly one of '--send-amount' or '--all' must be specified") + } + return nil +} + +func validateSendConfig(conf *sendConfig) error { + if (!conf.IsSendAll && conf.SendAmount == "") || + (conf.IsSendAll && conf.SendAmount != "") { + + 
return errors.New("exactly one of '--send-amount' or '--all' must be specified") + } + return nil +} + +func combineNetworkFlags(dst, src *config.NetworkFlags) { + dst.Testnet = dst.Testnet || src.Testnet + dst.Simnet = dst.Simnet || src.Simnet + dst.Devnet = dst.Devnet || src.Devnet + if dst.OverrideDAGParamsFile == "" { + dst.OverrideDAGParamsFile = src.OverrideDAGParamsFile + } +} diff --git a/cmd/spectrewallet/create.go b/cmd/spectrewallet/create.go new file mode 100644 index 0000000..b9468a3 --- /dev/null +++ b/cmd/spectrewallet/create.go @@ -0,0 +1,93 @@ +package main + +import ( + "bufio" + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/bip32" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" + + "github.com/spectre-project/spectred/cmd/spectrewallet/keys" +) + +func create(conf *createConfig) error { + var encryptedMnemonics []*keys.EncryptedMnemonic + var signerExtendedPublicKeys []string + var err error + isMultisig := conf.NumPublicKeys > 1 + if !conf.Import { + encryptedMnemonics, signerExtendedPublicKeys, err = keys.CreateMnemonics(conf.NetParams(), conf.NumPrivateKeys, conf.Password, isMultisig) + } else { + encryptedMnemonics, signerExtendedPublicKeys, err = keys.ImportMnemonics(conf.NetParams(), conf.NumPrivateKeys, conf.Password, isMultisig) + } + if err != nil { + return err + } + + for i, extendedPublicKey := range signerExtendedPublicKeys { + fmt.Printf("Extended public key of mnemonic #%d:\n%s\n\n", i+1, extendedPublicKey) + } + + fmt.Printf("Notice the above is neither a secret key to your wallet " + + "(use \"spectrewallet dump-unencrypted-data\" to see a secret seed phrase) " + + "nor a wallet public address (use \"spectrewallet new-address\" to create and see one)\n\n") + + extendedPublicKeys := make([]string, conf.NumPrivateKeys, conf.NumPublicKeys) + copy(extendedPublicKeys, 
signerExtendedPublicKeys) + reader := bufio.NewReader(os.Stdin) + for i := conf.NumPrivateKeys; i < conf.NumPublicKeys; i++ { + fmt.Printf("Enter public key #%d here:\n", i+1) + extendedPublicKey, err := utils.ReadLine(reader) + if err != nil { + return err + } + + _, err = bip32.DeserializeExtendedKey(string(extendedPublicKey)) + if err != nil { + return errors.Wrapf(err, "%s is invalid extended public key", string(extendedPublicKey)) + } + + fmt.Println() + + extendedPublicKeys = append(extendedPublicKeys, string(extendedPublicKey)) + } + + // For a read only wallet the cosigner index is 0 + cosignerIndex := uint32(0) + if len(signerExtendedPublicKeys) > 0 { + cosignerIndex, err = libspectrewallet.MinimumCosignerIndex(signerExtendedPublicKeys, extendedPublicKeys) + if err != nil { + return err + } + } + + file := keys.File{ + Version: keys.LastVersion, + EncryptedMnemonics: encryptedMnemonics, + ExtendedPublicKeys: extendedPublicKeys, + MinimumSignatures: conf.MinimumSignatures, + CosignerIndex: cosignerIndex, + ECDSA: conf.ECDSA, + } + + err = file.SetPath(conf.NetParams(), conf.KeysFile, conf.Yes) + if err != nil { + return err + } + + err = file.TryLock() + if err != nil { + return err + } + + err = file.Save() + if err != nil { + return err + } + + fmt.Printf("Wrote the keys into %s\n", file.Path()) + return nil +} diff --git a/cmd/spectrewallet/create_unsigned_tx.go b/cmd/spectrewallet/create_unsigned_tx.go new file mode 100644 index 0000000..9d2be6a --- /dev/null +++ b/cmd/spectrewallet/create_unsigned_tx.go @@ -0,0 +1,44 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" +) + +func createUnsignedTransaction(conf *createUnsignedTransactionConfig) error { + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return 
err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + + sendAmountSompi, err := utils.SprToSompi(conf.SendAmount) + + if err != nil { + return err + } + + response, err := daemonClient.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{ + From: conf.FromAddresses, + Address: conf.ToAddress, + Amount: sendAmountSompi, + IsSendAll: conf.IsSendAll, + UseExistingChangeAddress: conf.UseExistingChangeAddress, + }) + if err != nil { + return err + } + + fmt.Fprintln(os.Stderr, "Created unsigned transaction") + fmt.Println(encodeTransactionsToHex(response.UnsignedTransactions)) + + return nil +} diff --git a/cmd/spectrewallet/daemon/client/client.go b/cmd/spectrewallet/daemon/client/client.go new file mode 100644 index 0000000..a37d788 --- /dev/null +++ b/cmd/spectrewallet/daemon/client/client.go @@ -0,0 +1,32 @@ +package client + +import ( + "context" + "time" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/server" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "google.golang.org/grpc" +) + +// Connect connects to the spectrewalletd server, and returns the client instance +func Connect(address string) (pb.SpectrewalletdClient, func(), error) { + // Connection is local, so 1 second timeout is sufficient + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + conn, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(server.MaxDaemonSendMsgSize))) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return nil, nil, errors.New("spectrewallet daemon is not running, start it with `spectrewallet start-daemon`") + } + return nil, nil, err + } + + return pb.NewSpectrewalletdClient(conn), func() { + conn.Close() + }, nil +} diff --git a/cmd/spectrewallet/daemon/pb/generate.go 
b/cmd/spectrewallet/daemon/pb/generate.go new file mode 100644 index 0000000..f357210 --- /dev/null +++ b/cmd/spectrewallet/daemon/pb/generate.go @@ -0,0 +1,3 @@ +//go:generate protoc --go_out=. --go-grpc_out=. --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative spectrewalletd.proto + +package pb diff --git a/cmd/spectrewallet/daemon/pb/spectrewalletd.pb.go b/cmd/spectrewallet/daemon/pb/spectrewalletd.pb.go new file mode 100644 index 0000000..2e92b05 --- /dev/null +++ b/cmd/spectrewallet/daemon/pb/spectrewalletd.pb.go @@ -0,0 +1,1806 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: spectrewalletd.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetBalanceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetBalanceRequest) Reset() { + *x = GetBalanceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBalanceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBalanceRequest) ProtoMessage() {} + +func (x *GetBalanceRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBalanceRequest.ProtoReflect.Descriptor instead. +func (*GetBalanceRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{0} +} + +type GetBalanceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Available uint64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"` + Pending uint64 `protobuf:"varint,2,opt,name=pending,proto3" json:"pending,omitempty"` + AddressBalances []*AddressBalances `protobuf:"bytes,3,rep,name=addressBalances,proto3" json:"addressBalances,omitempty"` +} + +func (x *GetBalanceResponse) Reset() { + *x = GetBalanceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBalanceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBalanceResponse) ProtoMessage() {} + +func (x *GetBalanceResponse) ProtoReflect() protoreflect.Message { + mi 
:= &file_spectrewalletd_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBalanceResponse.ProtoReflect.Descriptor instead. +func (*GetBalanceResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{1} +} + +func (x *GetBalanceResponse) GetAvailable() uint64 { + if x != nil { + return x.Available + } + return 0 +} + +func (x *GetBalanceResponse) GetPending() uint64 { + if x != nil { + return x.Pending + } + return 0 +} + +func (x *GetBalanceResponse) GetAddressBalances() []*AddressBalances { + if x != nil { + return x.AddressBalances + } + return nil +} + +type AddressBalances struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Available uint64 `protobuf:"varint,2,opt,name=available,proto3" json:"available,omitempty"` + Pending uint64 `protobuf:"varint,3,opt,name=pending,proto3" json:"pending,omitempty"` +} + +func (x *AddressBalances) Reset() { + *x = AddressBalances{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddressBalances) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddressBalances) ProtoMessage() {} + +func (x *AddressBalances) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddressBalances.ProtoReflect.Descriptor instead. 
+func (*AddressBalances) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{2} +} + +func (x *AddressBalances) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *AddressBalances) GetAvailable() uint64 { + if x != nil { + return x.Available + } + return 0 +} + +func (x *AddressBalances) GetPending() uint64 { + if x != nil { + return x.Pending + } + return 0 +} + +type CreateUnsignedTransactionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + From []string `protobuf:"bytes,3,rep,name=from,proto3" json:"from,omitempty"` + UseExistingChangeAddress bool `protobuf:"varint,4,opt,name=useExistingChangeAddress,proto3" json:"useExistingChangeAddress,omitempty"` + IsSendAll bool `protobuf:"varint,5,opt,name=isSendAll,proto3" json:"isSendAll,omitempty"` +} + +func (x *CreateUnsignedTransactionsRequest) Reset() { + *x = CreateUnsignedTransactionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUnsignedTransactionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUnsignedTransactionsRequest) ProtoMessage() {} + +func (x *CreateUnsignedTransactionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUnsignedTransactionsRequest.ProtoReflect.Descriptor instead. 
+func (*CreateUnsignedTransactionsRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateUnsignedTransactionsRequest) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *CreateUnsignedTransactionsRequest) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *CreateUnsignedTransactionsRequest) GetFrom() []string { + if x != nil { + return x.From + } + return nil +} + +func (x *CreateUnsignedTransactionsRequest) GetUseExistingChangeAddress() bool { + if x != nil { + return x.UseExistingChangeAddress + } + return false +} + +func (x *CreateUnsignedTransactionsRequest) GetIsSendAll() bool { + if x != nil { + return x.IsSendAll + } + return false +} + +type CreateUnsignedTransactionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UnsignedTransactions [][]byte `protobuf:"bytes,1,rep,name=unsignedTransactions,proto3" json:"unsignedTransactions,omitempty"` +} + +func (x *CreateUnsignedTransactionsResponse) Reset() { + *x = CreateUnsignedTransactionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUnsignedTransactionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUnsignedTransactionsResponse) ProtoMessage() {} + +func (x *CreateUnsignedTransactionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUnsignedTransactionsResponse.ProtoReflect.Descriptor instead. 
+func (*CreateUnsignedTransactionsResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateUnsignedTransactionsResponse) GetUnsignedTransactions() [][]byte { + if x != nil { + return x.UnsignedTransactions + } + return nil +} + +type ShowAddressesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShowAddressesRequest) Reset() { + *x = ShowAddressesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShowAddressesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShowAddressesRequest) ProtoMessage() {} + +func (x *ShowAddressesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShowAddressesRequest.ProtoReflect.Descriptor instead. 
+func (*ShowAddressesRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{5} +} + +type ShowAddressesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address []string `protobuf:"bytes,1,rep,name=address,proto3" json:"address,omitempty"` +} + +func (x *ShowAddressesResponse) Reset() { + *x = ShowAddressesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShowAddressesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShowAddressesResponse) ProtoMessage() {} + +func (x *ShowAddressesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShowAddressesResponse.ProtoReflect.Descriptor instead. 
+func (*ShowAddressesResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{6} +} + +func (x *ShowAddressesResponse) GetAddress() []string { + if x != nil { + return x.Address + } + return nil +} + +type NewAddressRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NewAddressRequest) Reset() { + *x = NewAddressRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewAddressRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewAddressRequest) ProtoMessage() {} + +func (x *NewAddressRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewAddressRequest.ProtoReflect.Descriptor instead. 
+func (*NewAddressRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{7} +} + +type NewAddressResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *NewAddressResponse) Reset() { + *x = NewAddressResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewAddressResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewAddressResponse) ProtoMessage() {} + +func (x *NewAddressResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewAddressResponse.ProtoReflect.Descriptor instead. 
+func (*NewAddressResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{8} +} + +func (x *NewAddressResponse) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type BroadcastRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IsDomain bool `protobuf:"varint,1,opt,name=isDomain,proto3" json:"isDomain,omitempty"` + Transactions [][]byte `protobuf:"bytes,2,rep,name=transactions,proto3" json:"transactions,omitempty"` +} + +func (x *BroadcastRequest) Reset() { + *x = BroadcastRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BroadcastRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BroadcastRequest) ProtoMessage() {} + +func (x *BroadcastRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BroadcastRequest.ProtoReflect.Descriptor instead. 
+func (*BroadcastRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{9} +} + +func (x *BroadcastRequest) GetIsDomain() bool { + if x != nil { + return x.IsDomain + } + return false +} + +func (x *BroadcastRequest) GetTransactions() [][]byte { + if x != nil { + return x.Transactions + } + return nil +} + +type BroadcastResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TxIDs []string `protobuf:"bytes,1,rep,name=txIDs,proto3" json:"txIDs,omitempty"` +} + +func (x *BroadcastResponse) Reset() { + *x = BroadcastResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BroadcastResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BroadcastResponse) ProtoMessage() {} + +func (x *BroadcastResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BroadcastResponse.ProtoReflect.Descriptor instead. 
+func (*BroadcastResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{10} +} + +func (x *BroadcastResponse) GetTxIDs() []string { + if x != nil { + return x.TxIDs + } + return nil +} + +type ShutdownRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShutdownRequest) Reset() { + *x = ShutdownRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownRequest) ProtoMessage() {} + +func (x *ShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownRequest.ProtoReflect.Descriptor instead. 
+func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{11} +} + +type ShutdownResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShutdownResponse) Reset() { + *x = ShutdownResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownResponse) ProtoMessage() {} + +func (x *ShutdownResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownResponse.ProtoReflect.Descriptor instead. 
+func (*ShutdownResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{12} +} + +type Outpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionId string `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` +} + +func (x *Outpoint) Reset() { + *x = Outpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Outpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Outpoint) ProtoMessage() {} + +func (x *Outpoint) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Outpoint.ProtoReflect.Descriptor instead. 
+func (*Outpoint) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{13} +} + +func (x *Outpoint) GetTransactionId() string { + if x != nil { + return x.TransactionId + } + return "" +} + +func (x *Outpoint) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +type UtxosByAddressesEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Outpoint *Outpoint `protobuf:"bytes,2,opt,name=outpoint,proto3" json:"outpoint,omitempty"` + UtxoEntry *UtxoEntry `protobuf:"bytes,3,opt,name=utxoEntry,proto3" json:"utxoEntry,omitempty"` +} + +func (x *UtxosByAddressesEntry) Reset() { + *x = UtxosByAddressesEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UtxosByAddressesEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UtxosByAddressesEntry) ProtoMessage() {} + +func (x *UtxosByAddressesEntry) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UtxosByAddressesEntry.ProtoReflect.Descriptor instead. 
+func (*UtxosByAddressesEntry) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{14} +} + +func (x *UtxosByAddressesEntry) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *UtxosByAddressesEntry) GetOutpoint() *Outpoint { + if x != nil { + return x.Outpoint + } + return nil +} + +func (x *UtxosByAddressesEntry) GetUtxoEntry() *UtxoEntry { + if x != nil { + return x.UtxoEntry + } + return nil +} + +type ScriptPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + ScriptPublicKey string `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` +} + +func (x *ScriptPublicKey) Reset() { + *x = ScriptPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScriptPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScriptPublicKey) ProtoMessage() {} + +func (x *ScriptPublicKey) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScriptPublicKey.ProtoReflect.Descriptor instead. 
+func (*ScriptPublicKey) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{15} +} + +func (x *ScriptPublicKey) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ScriptPublicKey) GetScriptPublicKey() string { + if x != nil { + return x.ScriptPublicKey + } + return "" +} + +type UtxoEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + ScriptPublicKey *ScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` + BlockDaaScore uint64 `protobuf:"varint,3,opt,name=blockDaaScore,proto3" json:"blockDaaScore,omitempty"` + IsCoinbase bool `protobuf:"varint,4,opt,name=isCoinbase,proto3" json:"isCoinbase,omitempty"` +} + +func (x *UtxoEntry) Reset() { + *x = UtxoEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UtxoEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UtxoEntry) ProtoMessage() {} + +func (x *UtxoEntry) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UtxoEntry.ProtoReflect.Descriptor instead. 
+func (*UtxoEntry) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{16} +} + +func (x *UtxoEntry) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *UtxoEntry) GetScriptPublicKey() *ScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +func (x *UtxoEntry) GetBlockDaaScore() uint64 { + if x != nil { + return x.BlockDaaScore + } + return 0 +} + +func (x *UtxoEntry) GetIsCoinbase() bool { + if x != nil { + return x.IsCoinbase + } + return false +} + +type GetExternalSpendableUTXOsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *GetExternalSpendableUTXOsRequest) Reset() { + *x = GetExternalSpendableUTXOsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetExternalSpendableUTXOsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExternalSpendableUTXOsRequest) ProtoMessage() {} + +func (x *GetExternalSpendableUTXOsRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExternalSpendableUTXOsRequest.ProtoReflect.Descriptor instead. 
+func (*GetExternalSpendableUTXOsRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{17} +} + +func (x *GetExternalSpendableUTXOsRequest) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type GetExternalSpendableUTXOsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*UtxosByAddressesEntry `protobuf:"bytes,1,rep,name=Entries,proto3" json:"Entries,omitempty"` +} + +func (x *GetExternalSpendableUTXOsResponse) Reset() { + *x = GetExternalSpendableUTXOsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetExternalSpendableUTXOsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetExternalSpendableUTXOsResponse) ProtoMessage() {} + +func (x *GetExternalSpendableUTXOsResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetExternalSpendableUTXOsResponse.ProtoReflect.Descriptor instead. 
+func (*GetExternalSpendableUTXOsResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{18} +} + +func (x *GetExternalSpendableUTXOsResponse) GetEntries() []*UtxosByAddressesEntry { + if x != nil { + return x.Entries + } + return nil +} + +// Since SendRequest contains a password - this command should only be used on a trusted or secure connection +type SendRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ToAddress string `protobuf:"bytes,1,opt,name=toAddress,proto3" json:"toAddress,omitempty"` + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` + From []string `protobuf:"bytes,4,rep,name=from,proto3" json:"from,omitempty"` + UseExistingChangeAddress bool `protobuf:"varint,5,opt,name=useExistingChangeAddress,proto3" json:"useExistingChangeAddress,omitempty"` + IsSendAll bool `protobuf:"varint,6,opt,name=isSendAll,proto3" json:"isSendAll,omitempty"` +} + +func (x *SendRequest) Reset() { + *x = SendRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendRequest) ProtoMessage() {} + +func (x *SendRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendRequest.ProtoReflect.Descriptor instead. 
+func (*SendRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{19} +} + +func (x *SendRequest) GetToAddress() string { + if x != nil { + return x.ToAddress + } + return "" +} + +func (x *SendRequest) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *SendRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *SendRequest) GetFrom() []string { + if x != nil { + return x.From + } + return nil +} + +func (x *SendRequest) GetUseExistingChangeAddress() bool { + if x != nil { + return x.UseExistingChangeAddress + } + return false +} + +func (x *SendRequest) GetIsSendAll() bool { + if x != nil { + return x.IsSendAll + } + return false +} + +type SendResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TxIDs []string `protobuf:"bytes,1,rep,name=txIDs,proto3" json:"txIDs,omitempty"` + SignedTransactions [][]byte `protobuf:"bytes,2,rep,name=signedTransactions,proto3" json:"signedTransactions,omitempty"` +} + +func (x *SendResponse) Reset() { + *x = SendResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendResponse) ProtoMessage() {} + +func (x *SendResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendResponse.ProtoReflect.Descriptor instead. 
+func (*SendResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{20} +} + +func (x *SendResponse) GetTxIDs() []string { + if x != nil { + return x.TxIDs + } + return nil +} + +func (x *SendResponse) GetSignedTransactions() [][]byte { + if x != nil { + return x.SignedTransactions + } + return nil +} + +// Since SignRequest contains a password - this command should only be used on a trusted or secure connection +type SignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UnsignedTransactions [][]byte `protobuf:"bytes,1,rep,name=unsignedTransactions,proto3" json:"unsignedTransactions,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *SignRequest) Reset() { + *x = SignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignRequest) ProtoMessage() {} + +func (x *SignRequest) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignRequest.ProtoReflect.Descriptor instead. 
+func (*SignRequest) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{21} +} + +func (x *SignRequest) GetUnsignedTransactions() [][]byte { + if x != nil { + return x.UnsignedTransactions + } + return nil +} + +func (x *SignRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +type SignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SignedTransactions [][]byte `protobuf:"bytes,1,rep,name=signedTransactions,proto3" json:"signedTransactions,omitempty"` +} + +func (x *SignResponse) Reset() { + *x = SignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spectrewalletd_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignResponse) ProtoMessage() {} + +func (x *SignResponse) ProtoReflect() protoreflect.Message { + mi := &file_spectrewalletd_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignResponse.ProtoReflect.Descriptor instead. 
+func (*SignResponse) Descriptor() ([]byte, []int) { + return file_spectrewalletd_proto_rawDescGZIP(), []int{22} +} + +func (x *SignResponse) GetSignedTransactions() [][]byte { + if x != nil { + return x.SignedTransactions + } + return nil +} + +var File_spectrewalletd_proto protoreflect.FileDescriptor + +var file_spectrewalletd_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, + 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x97, 0x01, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x63, 0x0a, 0x0f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 
0x62, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x22, 0xc3, 0x01, 0x0a, 0x21, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x6c, 0x6c, + 0x22, 0x58, 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, + 0x61, 0x6e, 
0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53, 0x68, + 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x31, 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x13, 0x0a, 0x11, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x12, 0x4e, 0x65, + 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x52, 0x0a, 0x10, 0x42, 0x72, + 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x69, 0x73, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x73, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x29, + 0x0a, 0x11, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x22, 0x11, 0x0a, 0x0f, 0x53, 0x68, 0x75, + 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x12, 0x0a, 0x10, + 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x46, 0x0a, 0x08, 0x4f, 0x75, 0x74, 0x70, 
0x6f, 0x69, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x55, 0x74, 0x78, + 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x34, 0x0a, 0x08, + 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, + 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, + 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x55, 0x0a, 0x0f, 0x53, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x22, 0xb4, 0x01, 0x0a, 0x09, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 
0x72, 0x79, + 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, + 0x74, 0x64, 0x2e, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, + 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, + 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, + 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x22, 0x3c, 0x0a, 0x20, 0x47, 0x65, 0x74, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, + 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x64, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, + 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x07, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x55, + 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 
0x52, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0xcd, 0x01, + 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x22, 0x54, 0x0a, + 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x78, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x74, 0x78, + 0x49, 0x44, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x5d, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x32, 0x0a, 0x14, 0x75, 0x6e, 0x73, 0x69, 
0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x14, 0x75, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, + 0x72, 0x64, 0x22, 0x3e, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x12, + 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x32, 0xda, 0x06, 0x0a, 0x0e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, + 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x12, 0x55, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, + 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x82, 0x01, 0x0a, + 0x19, 0x47, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, + 0x64, 0x61, 0x62, 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x12, 0x30, 0x2e, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, + 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x73, + 
0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x47, 0x65, + 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, + 0x6c, 0x65, 0x55, 0x54, 0x58, 0x4f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x85, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x31, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, + 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, + 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0d, 0x53, 0x68, 0x6f, + 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, 0x6f, 0x77, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x25, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, + 0x64, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0a, 0x4e, 0x65, 0x77, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, + 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x4e, 0x65, 0x77, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x4e, 0x65, 0x77, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4f, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x1f, 0x2e, 0x73, + 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x52, 0x0a, 0x09, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x12, 0x20, + 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, + 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, + 0x64, 0x2e, 0x42, 0x72, 0x6f, 0x61, 0x64, 0x63, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x1b, 0x2e, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, + 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x64, 0x2e, 0x53, 0x65, 0x6e, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x04, 0x53, 0x69, + 0x67, 0x6e, 0x12, 0x1b, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, + 0x65, 0x74, 0x64, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1c, 0x2e, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 
0x6c, 0x65, 0x74, 0x64, + 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, + 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x73, 0x70, + 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x2f, 0x63, 0x6d, 0x64, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2f, + 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spectrewalletd_proto_rawDescOnce sync.Once + file_spectrewalletd_proto_rawDescData = file_spectrewalletd_proto_rawDesc +) + +func file_spectrewalletd_proto_rawDescGZIP() []byte { + file_spectrewalletd_proto_rawDescOnce.Do(func() { + file_spectrewalletd_proto_rawDescData = protoimpl.X.CompressGZIP(file_spectrewalletd_proto_rawDescData) + }) + return file_spectrewalletd_proto_rawDescData +} + +var file_spectrewalletd_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_spectrewalletd_proto_goTypes = []interface{}{ + (*GetBalanceRequest)(nil), // 0: spectrewalletd.GetBalanceRequest + (*GetBalanceResponse)(nil), // 1: spectrewalletd.GetBalanceResponse + (*AddressBalances)(nil), // 2: spectrewalletd.AddressBalances + (*CreateUnsignedTransactionsRequest)(nil), // 3: spectrewalletd.CreateUnsignedTransactionsRequest + (*CreateUnsignedTransactionsResponse)(nil), // 4: spectrewalletd.CreateUnsignedTransactionsResponse + (*ShowAddressesRequest)(nil), // 5: spectrewalletd.ShowAddressesRequest + (*ShowAddressesResponse)(nil), // 6: spectrewalletd.ShowAddressesResponse + (*NewAddressRequest)(nil), // 7: spectrewalletd.NewAddressRequest + (*NewAddressResponse)(nil), // 8: spectrewalletd.NewAddressResponse + (*BroadcastRequest)(nil), // 9: spectrewalletd.BroadcastRequest + (*BroadcastResponse)(nil), // 10: spectrewalletd.BroadcastResponse + (*ShutdownRequest)(nil), // 11: 
spectrewalletd.ShutdownRequest + (*ShutdownResponse)(nil), // 12: spectrewalletd.ShutdownResponse + (*Outpoint)(nil), // 13: spectrewalletd.Outpoint + (*UtxosByAddressesEntry)(nil), // 14: spectrewalletd.UtxosByAddressesEntry + (*ScriptPublicKey)(nil), // 15: spectrewalletd.ScriptPublicKey + (*UtxoEntry)(nil), // 16: spectrewalletd.UtxoEntry + (*GetExternalSpendableUTXOsRequest)(nil), // 17: spectrewalletd.GetExternalSpendableUTXOsRequest + (*GetExternalSpendableUTXOsResponse)(nil), // 18: spectrewalletd.GetExternalSpendableUTXOsResponse + (*SendRequest)(nil), // 19: spectrewalletd.SendRequest + (*SendResponse)(nil), // 20: spectrewalletd.SendResponse + (*SignRequest)(nil), // 21: spectrewalletd.SignRequest + (*SignResponse)(nil), // 22: spectrewalletd.SignResponse +} +var file_spectrewalletd_proto_depIdxs = []int32{ + 2, // 0: spectrewalletd.GetBalanceResponse.addressBalances:type_name -> spectrewalletd.AddressBalances + 13, // 1: spectrewalletd.UtxosByAddressesEntry.outpoint:type_name -> spectrewalletd.Outpoint + 16, // 2: spectrewalletd.UtxosByAddressesEntry.utxoEntry:type_name -> spectrewalletd.UtxoEntry + 15, // 3: spectrewalletd.UtxoEntry.scriptPublicKey:type_name -> spectrewalletd.ScriptPublicKey + 14, // 4: spectrewalletd.GetExternalSpendableUTXOsResponse.Entries:type_name -> spectrewalletd.UtxosByAddressesEntry + 0, // 5: spectrewalletd.spectrewalletd.GetBalance:input_type -> spectrewalletd.GetBalanceRequest + 17, // 6: spectrewalletd.spectrewalletd.GetExternalSpendableUTXOs:input_type -> spectrewalletd.GetExternalSpendableUTXOsRequest + 3, // 7: spectrewalletd.spectrewalletd.CreateUnsignedTransactions:input_type -> spectrewalletd.CreateUnsignedTransactionsRequest + 5, // 8: spectrewalletd.spectrewalletd.ShowAddresses:input_type -> spectrewalletd.ShowAddressesRequest + 7, // 9: spectrewalletd.spectrewalletd.NewAddress:input_type -> spectrewalletd.NewAddressRequest + 11, // 10: spectrewalletd.spectrewalletd.Shutdown:input_type -> 
spectrewalletd.ShutdownRequest + 9, // 11: spectrewalletd.spectrewalletd.Broadcast:input_type -> spectrewalletd.BroadcastRequest + 19, // 12: spectrewalletd.spectrewalletd.Send:input_type -> spectrewalletd.SendRequest + 21, // 13: spectrewalletd.spectrewalletd.Sign:input_type -> spectrewalletd.SignRequest + 1, // 14: spectrewalletd.spectrewalletd.GetBalance:output_type -> spectrewalletd.GetBalanceResponse + 18, // 15: spectrewalletd.spectrewalletd.GetExternalSpendableUTXOs:output_type -> spectrewalletd.GetExternalSpendableUTXOsResponse + 4, // 16: spectrewalletd.spectrewalletd.CreateUnsignedTransactions:output_type -> spectrewalletd.CreateUnsignedTransactionsResponse + 6, // 17: spectrewalletd.spectrewalletd.ShowAddresses:output_type -> spectrewalletd.ShowAddressesResponse + 8, // 18: spectrewalletd.spectrewalletd.NewAddress:output_type -> spectrewalletd.NewAddressResponse + 12, // 19: spectrewalletd.spectrewalletd.Shutdown:output_type -> spectrewalletd.ShutdownResponse + 10, // 20: spectrewalletd.spectrewalletd.Broadcast:output_type -> spectrewalletd.BroadcastResponse + 20, // 21: spectrewalletd.spectrewalletd.Send:output_type -> spectrewalletd.SendResponse + 22, // 22: spectrewalletd.spectrewalletd.Sign:output_type -> spectrewalletd.SignResponse + 14, // [14:23] is the sub-list for method output_type + 5, // [5:14] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_spectrewalletd_proto_init() } +func file_spectrewalletd_proto_init() { + if File_spectrewalletd_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spectrewalletd_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBalanceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_spectrewalletd_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBalanceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddressBalances); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateUnsignedTransactionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateUnsignedTransactionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowAddressesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShowAddressesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NewAddressRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*NewAddressResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BroadcastRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BroadcastResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Outpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UtxosByAddressesEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScriptPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_spectrewalletd_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UtxoEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetExternalSpendableUTXOsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetExternalSpendableUTXOsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spectrewalletd_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_spectrewalletd_proto_rawDesc, + NumEnums: 0, + NumMessages: 23, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_spectrewalletd_proto_goTypes, + DependencyIndexes: file_spectrewalletd_proto_depIdxs, + MessageInfos: file_spectrewalletd_proto_msgTypes, + }.Build() + File_spectrewalletd_proto = out.File + file_spectrewalletd_proto_rawDesc = nil + file_spectrewalletd_proto_goTypes = nil + file_spectrewalletd_proto_depIdxs = nil +} diff --git a/cmd/spectrewallet/daemon/pb/spectrewalletd.proto b/cmd/spectrewallet/daemon/pb/spectrewalletd.proto new file mode 100644 index 0000000..4bbf08d --- /dev/null +++ b/cmd/spectrewallet/daemon/pb/spectrewalletd.proto @@ -0,0 +1,129 @@ +syntax = "proto3"; + +option go_package = "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb"; +package spectrewalletd; + +service spectrewalletd { + rpc GetBalance (GetBalanceRequest) returns (GetBalanceResponse) {} + rpc GetExternalSpendableUTXOs (GetExternalSpendableUTXOsRequest) returns (GetExternalSpendableUTXOsResponse) {} + rpc CreateUnsignedTransactions (CreateUnsignedTransactionsRequest) returns (CreateUnsignedTransactionsResponse) {} + rpc ShowAddresses (ShowAddressesRequest) returns (ShowAddressesResponse) {} + rpc NewAddress (NewAddressRequest) returns (NewAddressResponse) {} + rpc Shutdown (ShutdownRequest) returns (ShutdownResponse) {} + rpc Broadcast (BroadcastRequest) returns (BroadcastResponse) {} + // Since SendRequest contains a password - this command should only be used on a trusted or secure connection + rpc Send(SendRequest) returns (SendResponse) {} + // Since SignRequest contains a password - this command should only be used on a trusted or secure connection + rpc Sign(SignRequest) returns (SignResponse) {} +} + +message GetBalanceRequest { +} + +message GetBalanceResponse { + uint64 available = 1; + uint64 pending = 2; + repeated AddressBalances addressBalances = 3; +} + +message AddressBalances { + string address = 1; + uint64 available = 2; + 
uint64 pending = 3; +} + +message CreateUnsignedTransactionsRequest { + string address = 1; + uint64 amount = 2; + repeated string from = 3; + bool useExistingChangeAddress = 4; + bool isSendAll = 5; +} + +message CreateUnsignedTransactionsResponse { + repeated bytes unsignedTransactions = 1; +} + +message ShowAddressesRequest { +} + +message ShowAddressesResponse { + repeated string address = 1; +} + +message NewAddressRequest { +} + +message NewAddressResponse { + string address = 1; +} + +message BroadcastRequest { + bool isDomain = 1; + repeated bytes transactions = 2; +} + +message BroadcastResponse { + repeated string txIDs = 1; +} + +message ShutdownRequest { +} + +message ShutdownResponse { +} + +message Outpoint { + string transactionId = 1; + uint32 index = 2; +} + +message UtxosByAddressesEntry { + string address = 1; + Outpoint outpoint = 2; + UtxoEntry utxoEntry = 3; +} + +message ScriptPublicKey { + uint32 version = 1; + string scriptPublicKey = 2; +} + +message UtxoEntry { + uint64 amount = 1; + ScriptPublicKey scriptPublicKey = 2; + uint64 blockDaaScore = 3; + bool isCoinbase = 4; +} + +message GetExternalSpendableUTXOsRequest{ + string address = 1; +} + +message GetExternalSpendableUTXOsResponse{ + repeated UtxosByAddressesEntry Entries = 1; +} +// Since SendRequest contains a password - this command should only be used on a trusted or secure connection +message SendRequest{ + string toAddress = 1; + uint64 amount = 2; + string password = 3; + repeated string from = 4; + bool useExistingChangeAddress = 5; + bool isSendAll = 6; +} + +message SendResponse{ + repeated string txIDs = 1; + repeated bytes signedTransactions = 2; +} + +// Since SignRequest contains a password - this command should only be used on a trusted or secure connection +message SignRequest{ + repeated bytes unsignedTransactions = 1; + string password = 2; +} + +message SignResponse{ + repeated bytes signedTransactions = 1; +} diff --git 
a/cmd/spectrewallet/daemon/pb/spectrewalletd_grpc.pb.go b/cmd/spectrewallet/daemon/pb/spectrewalletd_grpc.pb.go new file mode 100644 index 0000000..665c62d --- /dev/null +++ b/cmd/spectrewallet/daemon/pb/spectrewalletd_grpc.pb.go @@ -0,0 +1,409 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.3 +// source: spectrewalletd.proto + +package pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Spectrewalletd_GetBalance_FullMethodName = "/spectrewalletd.spectrewalletd/GetBalance" + Spectrewalletd_GetExternalSpendableUTXOs_FullMethodName = "/spectrewalletd.spectrewalletd/GetExternalSpendableUTXOs" + Spectrewalletd_CreateUnsignedTransactions_FullMethodName = "/spectrewalletd.spectrewalletd/CreateUnsignedTransactions" + Spectrewalletd_ShowAddresses_FullMethodName = "/spectrewalletd.spectrewalletd/ShowAddresses" + Spectrewalletd_NewAddress_FullMethodName = "/spectrewalletd.spectrewalletd/NewAddress" + Spectrewalletd_Shutdown_FullMethodName = "/spectrewalletd.spectrewalletd/Shutdown" + Spectrewalletd_Broadcast_FullMethodName = "/spectrewalletd.spectrewalletd/Broadcast" + Spectrewalletd_Send_FullMethodName = "/spectrewalletd.spectrewalletd/Send" + Spectrewalletd_Sign_FullMethodName = "/spectrewalletd.spectrewalletd/Sign" +) + +// SpectrewalletdClient is the client API for Spectrewalletd service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type SpectrewalletdClient interface { + GetBalance(ctx context.Context, in *GetBalanceRequest, opts ...grpc.CallOption) (*GetBalanceResponse, error) + GetExternalSpendableUTXOs(ctx context.Context, in *GetExternalSpendableUTXOsRequest, opts ...grpc.CallOption) (*GetExternalSpendableUTXOsResponse, error) + CreateUnsignedTransactions(ctx context.Context, in *CreateUnsignedTransactionsRequest, opts ...grpc.CallOption) (*CreateUnsignedTransactionsResponse, error) + ShowAddresses(ctx context.Context, in *ShowAddressesRequest, opts ...grpc.CallOption) (*ShowAddressesResponse, error) + NewAddress(ctx context.Context, in *NewAddressRequest, opts ...grpc.CallOption) (*NewAddressResponse, error) + Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) + Broadcast(ctx context.Context, in *BroadcastRequest, opts ...grpc.CallOption) (*BroadcastResponse, error) + // Since SendRequest contains a password - this command should only be used on a trusted or secure connection + Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) + // Since SignRequest contains a password - this command should only be used on a trusted or secure connection + Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) +} + +type spectrewalletdClient struct { + cc grpc.ClientConnInterface +} + +func NewSpectrewalletdClient(cc grpc.ClientConnInterface) SpectrewalletdClient { + return &spectrewalletdClient{cc} +} + +func (c *spectrewalletdClient) GetBalance(ctx context.Context, in *GetBalanceRequest, opts ...grpc.CallOption) (*GetBalanceResponse, error) { + out := new(GetBalanceResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_GetBalance_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) GetExternalSpendableUTXOs(ctx context.Context, in *GetExternalSpendableUTXOsRequest, opts ...grpc.CallOption) (*GetExternalSpendableUTXOsResponse, error) { + out := new(GetExternalSpendableUTXOsResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_GetExternalSpendableUTXOs_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) CreateUnsignedTransactions(ctx context.Context, in *CreateUnsignedTransactionsRequest, opts ...grpc.CallOption) (*CreateUnsignedTransactionsResponse, error) { + out := new(CreateUnsignedTransactionsResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_CreateUnsignedTransactions_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) ShowAddresses(ctx context.Context, in *ShowAddressesRequest, opts ...grpc.CallOption) (*ShowAddressesResponse, error) { + out := new(ShowAddressesResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_ShowAddresses_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) NewAddress(ctx context.Context, in *NewAddressRequest, opts ...grpc.CallOption) (*NewAddressResponse, error) { + out := new(NewAddressResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_NewAddress_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) { + out := new(ShutdownResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_Shutdown_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) Broadcast(ctx context.Context, in *BroadcastRequest, opts ...grpc.CallOption) (*BroadcastResponse, error) { + out := new(BroadcastResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_Broadcast_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) Send(ctx context.Context, in *SendRequest, opts ...grpc.CallOption) (*SendResponse, error) { + out := new(SendResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_Send_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *spectrewalletdClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) { + out := new(SignResponse) + err := c.cc.Invoke(ctx, Spectrewalletd_Sign_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SpectrewalletdServer is the server API for Spectrewalletd service. 
+// All implementations must embed UnimplementedSpectrewalletdServer +// for forward compatibility +type SpectrewalletdServer interface { + GetBalance(context.Context, *GetBalanceRequest) (*GetBalanceResponse, error) + GetExternalSpendableUTXOs(context.Context, *GetExternalSpendableUTXOsRequest) (*GetExternalSpendableUTXOsResponse, error) + CreateUnsignedTransactions(context.Context, *CreateUnsignedTransactionsRequest) (*CreateUnsignedTransactionsResponse, error) + ShowAddresses(context.Context, *ShowAddressesRequest) (*ShowAddressesResponse, error) + NewAddress(context.Context, *NewAddressRequest) (*NewAddressResponse, error) + Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) + Broadcast(context.Context, *BroadcastRequest) (*BroadcastResponse, error) + // Since SendRequest contains a password - this command should only be used on a trusted or secure connection + Send(context.Context, *SendRequest) (*SendResponse, error) + // Since SignRequest contains a password - this command should only be used on a trusted or secure connection + Sign(context.Context, *SignRequest) (*SignResponse, error) + mustEmbedUnimplementedSpectrewalletdServer() +} + +// UnimplementedSpectrewalletdServer must be embedded to have forward compatible implementations. 
+type UnimplementedSpectrewalletdServer struct { +} + +func (UnimplementedSpectrewalletdServer) GetBalance(context.Context, *GetBalanceRequest) (*GetBalanceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBalance not implemented") +} +func (UnimplementedSpectrewalletdServer) GetExternalSpendableUTXOs(context.Context, *GetExternalSpendableUTXOsRequest) (*GetExternalSpendableUTXOsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetExternalSpendableUTXOs not implemented") +} +func (UnimplementedSpectrewalletdServer) CreateUnsignedTransactions(context.Context, *CreateUnsignedTransactionsRequest) (*CreateUnsignedTransactionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUnsignedTransactions not implemented") +} +func (UnimplementedSpectrewalletdServer) ShowAddresses(context.Context, *ShowAddressesRequest) (*ShowAddressesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ShowAddresses not implemented") +} +func (UnimplementedSpectrewalletdServer) NewAddress(context.Context, *NewAddressRequest) (*NewAddressResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NewAddress not implemented") +} +func (UnimplementedSpectrewalletdServer) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} +func (UnimplementedSpectrewalletdServer) Broadcast(context.Context, *BroadcastRequest) (*BroadcastResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Broadcast not implemented") +} +func (UnimplementedSpectrewalletdServer) Send(context.Context, *SendRequest) (*SendResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Send not implemented") +} +func (UnimplementedSpectrewalletdServer) Sign(context.Context, *SignRequest) (*SignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Sign 
not implemented") +} +func (UnimplementedSpectrewalletdServer) mustEmbedUnimplementedSpectrewalletdServer() {} + +// UnsafeSpectrewalletdServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SpectrewalletdServer will +// result in compilation errors. +type UnsafeSpectrewalletdServer interface { + mustEmbedUnimplementedSpectrewalletdServer() +} + +func RegisterSpectrewalletdServer(s grpc.ServiceRegistrar, srv SpectrewalletdServer) { + s.RegisterService(&Spectrewalletd_ServiceDesc, srv) +} + +func _Spectrewalletd_GetBalance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBalanceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).GetBalance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_GetBalance_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).GetBalance(ctx, req.(*GetBalanceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_GetExternalSpendableUTXOs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetExternalSpendableUTXOsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).GetExternalSpendableUTXOs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_GetExternalSpendableUTXOs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).GetExternalSpendableUTXOs(ctx, req.(*GetExternalSpendableUTXOsRequest)) + } + return interceptor(ctx, in, info, handler) +} 
+ +func _Spectrewalletd_CreateUnsignedTransactions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUnsignedTransactionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).CreateUnsignedTransactions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_CreateUnsignedTransactions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).CreateUnsignedTransactions(ctx, req.(*CreateUnsignedTransactionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_ShowAddresses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShowAddressesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).ShowAddresses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_ShowAddresses_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).ShowAddresses(ctx, req.(*ShowAddressesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_NewAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NewAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).NewAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_NewAddress_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(SpectrewalletdServer).NewAddress(ctx, req.(*NewAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_Shutdown_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).Shutdown(ctx, req.(*ShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_Broadcast_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BroadcastRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).Broadcast(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_Broadcast_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).Broadcast(ctx, req.(*BroadcastRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).Send(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_Send_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).Send(ctx, 
req.(*SendRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Spectrewalletd_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpectrewalletdServer).Sign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Spectrewalletd_Sign_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpectrewalletdServer).Sign(ctx, req.(*SignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Spectrewalletd_ServiceDesc is the grpc.ServiceDesc for Spectrewalletd service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Spectrewalletd_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "spectrewalletd.spectrewalletd", + HandlerType: (*SpectrewalletdServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetBalance", + Handler: _Spectrewalletd_GetBalance_Handler, + }, + { + MethodName: "GetExternalSpendableUTXOs", + Handler: _Spectrewalletd_GetExternalSpendableUTXOs_Handler, + }, + { + MethodName: "CreateUnsignedTransactions", + Handler: _Spectrewalletd_CreateUnsignedTransactions_Handler, + }, + { + MethodName: "ShowAddresses", + Handler: _Spectrewalletd_ShowAddresses_Handler, + }, + { + MethodName: "NewAddress", + Handler: _Spectrewalletd_NewAddress_Handler, + }, + { + MethodName: "Shutdown", + Handler: _Spectrewalletd_Shutdown_Handler, + }, + { + MethodName: "Broadcast", + Handler: _Spectrewalletd_Broadcast_Handler, + }, + { + MethodName: "Send", + Handler: _Spectrewalletd_Send_Handler, + }, + { + MethodName: "Sign", + Handler: _Spectrewalletd_Sign_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "spectrewalletd.proto", +} diff --git 
a/cmd/spectrewallet/daemon/server/address.go b/cmd/spectrewallet/daemon/server/address.go new file mode 100644 index 0000000..5b07410 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/address.go @@ -0,0 +1,125 @@ +package server + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/util" +) + +func (s *server) changeAddress(useExisting bool, fromAddresses []*walletAddress) (util.Address, *walletAddress, error) { + var walletAddr *walletAddress + if len(fromAddresses) != 0 && useExisting { + walletAddr = fromAddresses[0] + } else { + internalIndex := uint32(0) + if !useExisting { + err := s.keysFile.SetLastUsedInternalIndex(s.keysFile.LastUsedInternalIndex() + 1) + if err != nil { + return nil, nil, err + } + + err = s.keysFile.Save() + if err != nil { + return nil, nil, err + } + + internalIndex = s.keysFile.LastUsedInternalIndex() + } + + walletAddr = &walletAddress{ + index: internalIndex, + cosignerIndex: s.keysFile.CosignerIndex, + keyChain: libspectrewallet.InternalKeychain, + } + } + + path := s.walletAddressPath(walletAddr) + address, err := libspectrewallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, path, s.keysFile.ECDSA) + if err != nil { + return nil, nil, err + } + return address, walletAddr, nil +} + +func (s *server) ShowAddresses(_ context.Context, request *pb.ShowAddressesRequest) (*pb.ShowAddressesResponse, error) { + s.lock.Lock() + defer s.lock.Unlock() + + if !s.isSynced() { + return nil, errors.Errorf("wallet daemon is not synced yet, %s", s.formatSyncStateReport()) + } + + addresses := make([]string, s.keysFile.LastUsedExternalIndex()) + for i := uint32(1); i <= s.keysFile.LastUsedExternalIndex(); i++ { + walletAddr := &walletAddress{ + index: i, + cosignerIndex: s.keysFile.CosignerIndex, + keyChain: 
libspectrewallet.ExternalKeychain, + } + path := s.walletAddressPath(walletAddr) + address, err := libspectrewallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, path, s.keysFile.ECDSA) + if err != nil { + return nil, err + } + addresses[i-1] = address.String() + } + + return &pb.ShowAddressesResponse{Address: addresses}, nil +} + +func (s *server) NewAddress(_ context.Context, request *pb.NewAddressRequest) (*pb.NewAddressResponse, error) { + s.lock.Lock() + defer s.lock.Unlock() + + if !s.isSynced() { + return nil, errors.Errorf("wallet daemon is not synced yet, %s", s.formatSyncStateReport()) + } + + err := s.keysFile.SetLastUsedExternalIndex(s.keysFile.LastUsedExternalIndex() + 1) + if err != nil { + return nil, err + } + + err = s.keysFile.Save() + if err != nil { + return nil, err + } + + walletAddr := &walletAddress{ + index: s.keysFile.LastUsedExternalIndex(), + cosignerIndex: s.keysFile.CosignerIndex, + keyChain: libspectrewallet.ExternalKeychain, + } + path := s.walletAddressPath(walletAddr) + address, err := libspectrewallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, path, s.keysFile.ECDSA) + if err != nil { + return nil, err + } + + return &pb.NewAddressResponse{Address: address.String()}, nil +} + +func (s *server) walletAddressString(wAddr *walletAddress) (string, error) { + path := s.walletAddressPath(wAddr) + addr, err := libspectrewallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, path, s.keysFile.ECDSA) + if err != nil { + return "", err + } + + return addr.String(), nil +} + +func (s *server) walletAddressPath(wAddr *walletAddress) string { + if s.isMultisig() { + return fmt.Sprintf("m/%d/%d/%d", wAddr.cosignerIndex, wAddr.keyChain, wAddr.index) + } + return fmt.Sprintf("m/%d/%d", wAddr.keyChain, wAddr.index) +} + +func (s *server) isMultisig() bool { + return len(s.keysFile.ExtendedPublicKeys) > 1 +} diff --git 
a/cmd/spectrewallet/daemon/server/balance.go b/cmd/spectrewallet/daemon/server/balance.go new file mode 100644 index 0000000..5b1d7ae --- /dev/null +++ b/cmd/spectrewallet/daemon/server/balance.go @@ -0,0 +1,70 @@ +package server + +import ( + "context" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" +) + +type balancesType struct{ available, pending uint64 } +type balancesMapType map[*walletAddress]*balancesType + +func (s *server) GetBalance(_ context.Context, _ *pb.GetBalanceRequest) (*pb.GetBalanceResponse, error) { + s.lock.RLock() + defer s.lock.RUnlock() + + dagInfo, err := s.rpcClient.GetBlockDAGInfo() + if err != nil { + return nil, err + } + daaScore := dagInfo.VirtualDAAScore + maturity := s.params.BlockCoinbaseMaturity + + balancesMap := make(balancesMapType, 0) + for _, entry := range s.utxosSortedByAmount { + amount := entry.UTXOEntry.Amount() + address := entry.address + balances, ok := balancesMap[address] + if !ok { + balances = new(balancesType) + balancesMap[address] = balances + } + if isUTXOSpendable(entry, daaScore, maturity) { + balances.available += amount + } else { + balances.pending += amount + } + } + + addressBalances := make([]*pb.AddressBalances, len(balancesMap)) + i := 0 + var available, pending uint64 + for walletAddress, balances := range balancesMap { + address, err := libspectrewallet.Address(s.params, s.keysFile.ExtendedPublicKeys, s.keysFile.MinimumSignatures, s.walletAddressPath(walletAddress), s.keysFile.ECDSA) + if err != nil { + return nil, err + } + addressBalances[i] = &pb.AddressBalances{ + Address: address.String(), + Available: balances.available, + Pending: balances.pending, + } + i++ + available += balances.available + pending += balances.pending + } + + return &pb.GetBalanceResponse{ + Available: available, + Pending: pending, + AddressBalances: addressBalances, + }, nil +} + +func isUTXOSpendable(entry *walletUTXO, 
virtualDAAScore uint64, coinbaseMaturity uint64) bool { + if !entry.UTXOEntry.IsCoinbase() { + return true + } + return entry.UTXOEntry.BlockDAAScore()+coinbaseMaturity < virtualDAAScore +} diff --git a/cmd/spectrewallet/daemon/server/broadcast.go b/cmd/spectrewallet/daemon/server/broadcast.go new file mode 100644 index 0000000..8fe3887 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/broadcast.go @@ -0,0 +1,72 @@ +package server + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" +) + +func (s *server) Broadcast(_ context.Context, request *pb.BroadcastRequest) (*pb.BroadcastResponse, error) { + s.lock.Lock() + defer s.lock.Unlock() + + txIDs, err := s.broadcast(request.Transactions, request.IsDomain) + if err != nil { + return nil, err + } + + return &pb.BroadcastResponse{TxIDs: txIDs}, nil +} + +func (s *server) broadcast(transactions [][]byte, isDomain bool) ([]string, error) { + + txIDs := make([]string, len(transactions)) + var tx *externalapi.DomainTransaction + var err error + + for i, transaction := range transactions { + + if isDomain { + tx, err = serialization.DeserializeDomainTransaction(transaction) + if err != nil { + return nil, err + } + } else if !isDomain { //default in proto3 is false + tx, err = libspectrewallet.ExtractTransaction(transaction, s.keysFile.ECDSA) + if err != nil { + return nil, err + } + } + + txIDs[i], err = sendTransaction(s.rpcClient, tx) + if err != nil { + return nil, err + } + + for _, input := range tx.Inputs { + s.usedOutpoints[input.PreviousOutpoint] = time.Now() + } + } + + err = 
s.refreshUTXOs() + if err != nil { + return nil, err + } + + return txIDs, nil +} + +func sendTransaction(client *rpcclient.RPCClient, tx *externalapi.DomainTransaction) (string, error) { + submitTransactionResponse, err := client.SubmitTransaction(appmessage.DomainTransactionToRPCTransaction(tx), false) + if err != nil { + return "", errors.Wrapf(err, "error submitting transaction") + } + return submitTransactionResponse.TransactionID, nil +} diff --git a/cmd/spectrewallet/daemon/server/common.go b/cmd/spectrewallet/daemon/server/common.go new file mode 100644 index 0000000..b29b071 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/common.go @@ -0,0 +1,15 @@ +package server + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +type walletUTXO struct { + Outpoint *externalapi.DomainOutpoint + UTXOEntry externalapi.UTXOEntry + address *walletAddress +} + +type walletAddress struct { + index uint32 + cosignerIndex uint32 + keyChain uint8 +} diff --git a/cmd/spectrewallet/daemon/server/create_unsigned_transaction.go b/cmd/spectrewallet/daemon/server/create_unsigned_transaction.go new file mode 100644 index 0000000..9c02554 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/create_unsigned_transaction.go @@ -0,0 +1,158 @@ +package server + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/util" + "golang.org/x/exp/slices" +) + +// TODO: Implement a better fee estimation mechanism +const feePerInput = 10000 + +func (s *server) CreateUnsignedTransactions(_ context.Context, request *pb.CreateUnsignedTransactionsRequest) ( + *pb.CreateUnsignedTransactionsResponse, error, +) { + s.lock.Lock() + defer s.lock.Unlock() + + unsignedTransactions, err := 
s.createUnsignedTransactions(request.Address, request.Amount, request.IsSendAll, + request.From, request.UseExistingChangeAddress) + if err != nil { + return nil, err + } + + return &pb.CreateUnsignedTransactionsResponse{UnsignedTransactions: unsignedTransactions}, nil +} + +func (s *server) createUnsignedTransactions(address string, amount uint64, isSendAll bool, fromAddressesString []string, useExistingChangeAddress bool) ([][]byte, error) { + if !s.isSynced() { + return nil, errors.Errorf("wallet daemon is not synced yet, %s", s.formatSyncStateReport()) + } + + // make sure address string is correct before proceeding to a + // potentially long UTXO refreshment operation + toAddress, err := util.DecodeAddress(address, s.params.Prefix) + if err != nil { + return nil, err + } + + err = s.refreshUTXOs() + if err != nil { + return nil, err + } + + var fromAddresses []*walletAddress + for _, from := range fromAddressesString { + fromAddress, exists := s.addressSet[from] + if !exists { + return nil, fmt.Errorf("Specified from address %s does not exists", from) + } + fromAddresses = append(fromAddresses, fromAddress) + } + + selectedUTXOs, spendValue, changeSompi, err := s.selectUTXOs(amount, isSendAll, feePerInput, fromAddresses) + if err != nil { + return nil, err + } + + if len(selectedUTXOs) == 0 { + return nil, errors.Errorf("couldn't find funds to spend") + } + + changeAddress, changeWalletAddress, err := s.changeAddress(useExistingChangeAddress, fromAddresses) + if err != nil { + return nil, err + } + + payments := []*libspectrewallet.Payment{{ + Address: toAddress, + Amount: spendValue, + }} + if changeSompi > 0 { + payments = append(payments, &libspectrewallet.Payment{ + Address: changeAddress, + Amount: changeSompi, + }) + } + unsignedTransaction, err := libspectrewallet.CreateUnsignedTransaction(s.keysFile.ExtendedPublicKeys, + s.keysFile.MinimumSignatures, + payments, selectedUTXOs) + if err != nil { + return nil, err + } + + unsignedTransactions, err := 
s.maybeAutoCompoundTransaction(unsignedTransaction, toAddress, changeAddress, changeWalletAddress) + if err != nil { + return nil, err + } + return unsignedTransactions, nil +} + +func (s *server) selectUTXOs(spendAmount uint64, isSendAll bool, feePerInput uint64, fromAddresses []*walletAddress) ( + selectedUTXOs []*libspectrewallet.UTXO, totalReceived uint64, changeSompi uint64, err error) { + + selectedUTXOs = []*libspectrewallet.UTXO{} + totalValue := uint64(0) + + dagInfo, err := s.rpcClient.GetBlockDAGInfo() + if err != nil { + return nil, 0, 0, err + } + + coinbaseMaturity := s.params.BlockCoinbaseMaturity + if dagInfo.NetworkName == "spectre-testnet-1" { + coinbaseMaturity = 1000 + } + + for _, utxo := range s.utxosSortedByAmount { + if (fromAddresses != nil && !slices.Contains(fromAddresses, utxo.address)) || + !isUTXOSpendable(utxo, dagInfo.VirtualDAAScore, coinbaseMaturity) { + continue + } + + if broadcastTime, ok := s.usedOutpoints[*utxo.Outpoint]; ok { + if time.Since(broadcastTime) > time.Minute { + delete(s.usedOutpoints, *utxo.Outpoint) + } else { + continue + } + } + + selectedUTXOs = append(selectedUTXOs, &libspectrewallet.UTXO{ + Outpoint: utxo.Outpoint, + UTXOEntry: utxo.UTXOEntry, + DerivationPath: s.walletAddressPath(utxo.address), + }) + + totalValue += utxo.UTXOEntry.Amount() + + fee := feePerInput * uint64(len(selectedUTXOs)) + totalSpend := spendAmount + fee + if !isSendAll && totalValue >= totalSpend { + break + } + } + + fee := feePerInput * uint64(len(selectedUTXOs)) + var totalSpend uint64 + if isSendAll { + totalSpend = totalValue + totalReceived = totalValue - fee + } else { + totalSpend = spendAmount + fee + totalReceived = spendAmount + } + if totalValue < totalSpend { + return nil, 0, 0, errors.Errorf("Insufficient funds for send: %f required, while only %f available", + float64(totalSpend)/constants.SompiPerSpectre, float64(totalValue)/constants.SompiPerSpectre) + } + + return selectedUTXOs, totalReceived, totalValue - 
totalSpend, nil +} diff --git a/cmd/spectrewallet/daemon/server/external_spendable_utxos.go b/cmd/spectrewallet/daemon/server/external_spendable_utxos.go new file mode 100644 index 0000000..d80d216 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/external_spendable_utxos.go @@ -0,0 +1,62 @@ +package server + +import ( + "context" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/util" +) + +func (s *server) GetExternalSpendableUTXOs(_ context.Context, request *pb.GetExternalSpendableUTXOsRequest) (*pb.GetExternalSpendableUTXOsResponse, error) { + s.lock.RLock() + defer s.lock.RUnlock() + + _, err := util.DecodeAddress(request.Address, s.params.Prefix) + if err != nil { + return nil, err + } + externalUTXOs, err := s.rpcClient.GetUTXOsByAddresses([]string{request.Address}) + if err != nil { + return nil, err + } + selectedUTXOs, err := s.selectExternalSpendableUTXOs(externalUTXOs, request.Address) + if err != nil { + return nil, err + } + return &pb.GetExternalSpendableUTXOsResponse{ + Entries: selectedUTXOs, + }, nil +} + +func (s *server) selectExternalSpendableUTXOs(externalUTXOs *appmessage.GetUTXOsByAddressesResponseMessage, address string) ([]*pb.UtxosByAddressesEntry, error) { + dagInfo, err := s.rpcClient.GetBlockDAGInfo() + if err != nil { + return nil, err + } + + daaScore := dagInfo.VirtualDAAScore + maturity := s.params.BlockCoinbaseMaturity + + //we do not make because we do not know size, because of unspendable utxos + var selectedExternalUtxos []*pb.UtxosByAddressesEntry + + for _, entry := range externalUTXOs.Entries { + if !isExternalUTXOSpendable(entry, daaScore, maturity) { + continue + } + selectedExternalUtxos = append(selectedExternalUtxos, libspectrewallet.AppMessageUTXOToSpectrewalletdUTXO(entry)) + } + + return selectedExternalUtxos, nil +} + +func 
isExternalUTXOSpendable(entry *appmessage.UTXOsByAddressesEntry, virtualDAAScore uint64, coinbaseMaturity uint64) bool { + if !entry.UTXOEntry.IsCoinbase { + return true + } else if entry.UTXOEntry.Amount <= feePerInput { + return false + } + return entry.UTXOEntry.BlockDAAScore+coinbaseMaturity < virtualDAAScore +} diff --git a/cmd/spectrewallet/daemon/server/log.go b/cmd/spectrewallet/daemon/server/log.go new file mode 100644 index 0000000..a5d8f7e --- /dev/null +++ b/cmd/spectrewallet/daemon/server/log.go @@ -0,0 +1,47 @@ +package server + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spectre-project/spectred/util" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("SXWD") + spawn = panics.GoroutineWrapperFunc(log) + + defaultAppDir = util.AppDir("spectrewallet", false) + defaultLogFile = filepath.Join(defaultAppDir, "daemon.log") + defaultErrLogFile = filepath.Join(defaultAppDir, "daemon_err.log") +) + +func initLog(logFile, errLogFile string) { + log.SetLevel(logger.LevelDebug) + err := backendLog.AddLogFile(logFile, logger.LevelTrace) + if err != nil { + fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, logger.LevelTrace, err) + os.Exit(1) + } + err = backendLog.AddLogFile(errLogFile, logger.LevelWarn) + if err != nil { + fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, logger.LevelWarn, err) + os.Exit(1) + } + err = backendLog.AddLogWriter(os.Stdout, logger.LevelInfo) + if err != nil { + fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", logger.LevelWarn, err) + os.Exit(1) + } + err = backendLog.Run() + if err != nil { + fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err) + os.Exit(1) + } + +} diff --git a/cmd/spectrewallet/daemon/server/rpc.go b/cmd/spectrewallet/daemon/server/rpc.go new 
file mode 100644 index 0000000..20c22ae --- /dev/null +++ b/cmd/spectrewallet/daemon/server/rpc.go @@ -0,0 +1,26 @@ +package server + +import ( + "time" + + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" +) + +func connectToRPC(params *dagconfig.Params, rpcServer string, timeout uint32) (*rpcclient.RPCClient, error) { + rpcAddress, err := params.NormalizeRPCServerAddress(rpcServer) + if err != nil { + return nil, err + } + + rpcClient, err := rpcclient.NewRPCClient(rpcAddress) + if err != nil { + return nil, err + } + + if timeout != 0 { + rpcClient.SetTimeout(time.Duration(timeout) * time.Second) + } + + return rpcClient, err +} diff --git a/cmd/spectrewallet/daemon/server/send.go b/cmd/spectrewallet/daemon/server/send.go new file mode 100644 index 0000000..5a0cef5 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/send.go @@ -0,0 +1,31 @@ +package server + +import ( + "context" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" +) + +func (s *server) Send(_ context.Context, request *pb.SendRequest) (*pb.SendResponse, error) { + s.lock.Lock() + defer s.lock.Unlock() + + unsignedTransactions, err := s.createUnsignedTransactions(request.ToAddress, request.Amount, request.IsSendAll, + request.From, request.UseExistingChangeAddress) + + if err != nil { + return nil, err + } + + signedTransactions, err := s.signTransactions(unsignedTransactions, request.Password) + if err != nil { + return nil, err + } + + txIDs, err := s.broadcast(signedTransactions, false) + if err != nil { + return nil, err + } + + return &pb.SendResponse{TxIDs: txIDs, SignedTransactions: signedTransactions}, nil +} diff --git a/cmd/spectrewallet/daemon/server/server.go b/cmd/spectrewallet/daemon/server/server.go new file mode 100644 index 0000000..79e1550 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/server.go @@ -0,0 +1,143 @@ +package server + +import ( + "fmt" + "net" + "os" + "sync" + 
"time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/util/txmass" + + "github.com/spectre-project/spectred/util/profiling" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/keys" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" + "github.com/spectre-project/spectred/infrastructure/os/signal" + "github.com/spectre-project/spectred/util/panics" + + "google.golang.org/grpc" +) + +type server struct { + pb.UnimplementedSpectrewalletdServer + + rpcClient *rpcclient.RPCClient + params *dagconfig.Params + + lock sync.RWMutex + utxosSortedByAmount []*walletUTXO + nextSyncStartIndex uint32 + keysFile *keys.File + shutdown chan struct{} + addressSet walletAddressSet + txMassCalculator *txmass.Calculator + usedOutpoints map[externalapi.DomainOutpoint]time.Time + + isLogFinalProgressLineShown bool + maxUsedAddressesForLog uint32 + maxProcessedAddressesForLog uint32 +} + +// MaxDaemonSendMsgSize is the max send message size used for the daemon server. 
+// Currently, set to 100MB +const MaxDaemonSendMsgSize = 100_000_000 + +// Start starts the spectrewalletd server +func Start(params *dagconfig.Params, listen, rpcServer string, keysFilePath string, profile string, timeout uint32) error { + initLog(defaultLogFile, defaultErrLogFile) + + defer panics.HandlePanic(log, "MAIN", nil) + interrupt := signal.InterruptListener() + + if profile != "" { + profiling.Start(profile, log) + } + + listener, err := net.Listen("tcp", listen) + if err != nil { + return (errors.Wrapf(err, "Error listening to TCP on %s", listen)) + } + log.Infof("Listening to TCP on %s", listen) + + log.Infof("Connecting to a node at %s...", rpcServer) + rpcClient, err := connectToRPC(params, rpcServer, timeout) + if err != nil { + return (errors.Wrapf(err, "Error connecting to RPC server %s", rpcServer)) + } + + log.Infof("Connected, reading keys file %s...", keysFilePath) + keysFile, err := keys.ReadKeysFile(params, keysFilePath) + if err != nil { + return (errors.Wrapf(err, "Error reading keys file %s", keysFilePath)) + } + + err = keysFile.TryLock() + if err != nil { + return err + } + + serverInstance := &server{ + rpcClient: rpcClient, + params: params, + utxosSortedByAmount: []*walletUTXO{}, + nextSyncStartIndex: 0, + keysFile: keysFile, + shutdown: make(chan struct{}), + addressSet: make(walletAddressSet), + txMassCalculator: txmass.NewCalculator(params.MassPerTxByte, params.MassPerScriptPubKeyByte, params.MassPerSigOp), + usedOutpoints: map[externalapi.DomainOutpoint]time.Time{}, + isLogFinalProgressLineShown: false, + maxUsedAddressesForLog: 0, + maxProcessedAddressesForLog: 0, + } + + log.Infof("Read, syncing the wallet...") + spawn("serverInstance.sync", func() { + err := serverInstance.sync() + if err != nil { + printErrorAndExit(errors.Wrap(err, "error syncing the wallet")) + } + }) + + grpcServer := grpc.NewServer(grpc.MaxSendMsgSize(MaxDaemonSendMsgSize)) + pb.RegisterSpectrewalletdServer(grpcServer, serverInstance) + + 
spawn("grpcServer.Serve", func() { + err := grpcServer.Serve(listener) + if err != nil { + printErrorAndExit(errors.Wrap(err, "Error serving gRPC")) + } + }) + + select { + case <-serverInstance.shutdown: + case <-interrupt: + const stopTimeout = 2 * time.Second + + stopChan := make(chan interface{}) + spawn("gRPCServer.Stop", func() { + grpcServer.GracefulStop() + close(stopChan) + }) + + select { + case <-stopChan: + case <-time.After(stopTimeout): + log.Warnf("Could not gracefully stop: timed out after %s", stopTimeout) + grpcServer.Stop() + } + } + + return nil +} + +func printErrorAndExit(err error) { + fmt.Fprintf(os.Stderr, "%+v\n", err) + os.Exit(1) +} diff --git a/cmd/spectrewallet/daemon/server/shutdown.go b/cmd/spectrewallet/daemon/server/shutdown.go new file mode 100644 index 0000000..deae9b4 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/shutdown.go @@ -0,0 +1,14 @@ +package server + +import ( + "context" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" +) + +func (s *server) Shutdown(ctx context.Context, request *pb.ShutdownRequest) (*pb.ShutdownResponse, error) { + s.lock.Lock() + defer s.lock.Unlock() + close(s.shutdown) + return &pb.ShutdownResponse{}, nil +} diff --git a/cmd/spectrewallet/daemon/server/sign.go b/cmd/spectrewallet/daemon/server/sign.go new file mode 100644 index 0000000..103f904 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/sign.go @@ -0,0 +1,36 @@ +package server + +import ( + "context" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" +) + +func (s *server) Sign(_ context.Context, request *pb.SignRequest) (*pb.SignResponse, error) { + s.lock.Lock() + defer s.lock.Unlock() + + signedTransactions, err := s.signTransactions(request.UnsignedTransactions, request.Password) + if err != nil { + return nil, err + } + return &pb.SignResponse{SignedTransactions: signedTransactions}, nil +} + +func (s *server) 
signTransactions(unsignedTransactions [][]byte, password string) ([][]byte, error) { + mnemonics, err := s.keysFile.DecryptMnemonics(password) + if err != nil { + return nil, err + } + signedTransactions := make([][]byte, len(unsignedTransactions)) + for i, unsignedTransaction := range unsignedTransactions { + signedTransaction, err := libspectrewallet.Sign(s.params, mnemonics, unsignedTransaction, s.keysFile.ECDSA) + if err != nil { + return nil, err + } + signedTransactions[i] = signedTransaction + } + return signedTransactions, nil +} diff --git a/cmd/spectrewallet/daemon/server/split_transaction.go b/cmd/spectrewallet/daemon/server/split_transaction.go new file mode 100644 index 0000000..475c017 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/split_transaction.go @@ -0,0 +1,284 @@ +package server + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/util" +) + +// maybeAutoCompoundTransaction checks if a transaction's mass is higher that what is allowed for a standard +// transaction. +// If it is - the transaction is split into multiple transactions, each with a portion of the inputs and a single output +// into a change address. +// An additional `mergeTransaction` is generated - which merges the outputs of the above splits into a single output +// paying to the original transaction's payee. 
+func (s *server) maybeAutoCompoundTransaction(transactionBytes []byte, toAddress util.Address, + changeAddress util.Address, changeWalletAddress *walletAddress) ([][]byte, error) { + transaction, err := serialization.DeserializePartiallySignedTransaction(transactionBytes) + if err != nil { + return nil, err + } + + splitTransactions, err := s.maybeSplitAndMergeTransaction(transaction, toAddress, changeAddress, changeWalletAddress) + if err != nil { + return nil, err + } + splitTransactionsBytes := make([][]byte, len(splitTransactions)) + for i, splitTransaction := range splitTransactions { + splitTransactionsBytes[i], err = serialization.SerializePartiallySignedTransaction(splitTransaction) + if err != nil { + return nil, err + } + } + return splitTransactionsBytes, nil +} + +func (s *server) mergeTransaction( + splitTransactions []*serialization.PartiallySignedTransaction, + originalTransaction *serialization.PartiallySignedTransaction, + toAddress util.Address, + changeAddress util.Address, + changeWalletAddress *walletAddress, +) (*serialization.PartiallySignedTransaction, error) { + numOutputs := len(originalTransaction.Tx.Outputs) + if numOutputs > 2 || numOutputs == 0 { + // This is a sanity check to make sure originalTransaction has either 1 or 2 outputs: + // 1. For the payment itself + // 2. 
(optional) for change + return nil, errors.Errorf("original transaction has %d outputs, while 1 or 2 are expected", + len(originalTransaction.Tx.Outputs)) + } + + totalValue := uint64(0) + sentValue := originalTransaction.Tx.Outputs[0].Value + utxos := make([]*libspectrewallet.UTXO, len(splitTransactions)) + for i, splitTransaction := range splitTransactions { + output := splitTransaction.Tx.Outputs[0] + utxos[i] = &libspectrewallet.UTXO{ + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(splitTransaction.Tx), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(output.Value, output.ScriptPublicKey, false, constants.UnacceptedDAAScore), + DerivationPath: s.walletAddressPath(changeWalletAddress), + } + totalValue += output.Value + totalValue -= feePerInput + } + + if totalValue < sentValue { + // sometimes the fees from compound transactions make the total output higher than what's available from selected + // utxos, in such cases - find one more UTXO and use it. + additionalUTXOs, totalValueAdded, err := s.moreUTXOsForMergeTransaction(utxos, sentValue-totalValue) + if err != nil { + return nil, err + } + utxos = append(utxos, additionalUTXOs...) 
+ totalValue += totalValueAdded + } + + payments := []*libspectrewallet.Payment{{ + Address: toAddress, + Amount: sentValue, + }} + if totalValue > sentValue { + payments = append(payments, &libspectrewallet.Payment{ + Address: changeAddress, + Amount: totalValue - sentValue, + }) + } + + mergeTransactionBytes, err := libspectrewallet.CreateUnsignedTransaction(s.keysFile.ExtendedPublicKeys, + s.keysFile.MinimumSignatures, payments, utxos) + if err != nil { + return nil, err + } + + return serialization.DeserializePartiallySignedTransaction(mergeTransactionBytes) +} + +func (s *server) maybeSplitAndMergeTransaction(transaction *serialization.PartiallySignedTransaction, toAddress util.Address, + changeAddress util.Address, changeWalletAddress *walletAddress) ([]*serialization.PartiallySignedTransaction, error) { + + transactionMass, err := s.estimateMassAfterSignatures(transaction) + if err != nil { + return nil, err + } + + if transactionMass < mempool.MaximumStandardTransactionMass { + return []*serialization.PartiallySignedTransaction{transaction}, nil + } + + splitCount, inputCountPerSplit, err := s.splitAndInputPerSplitCounts(transaction, transactionMass, changeAddress) + if err != nil { + return nil, err + } + + splitTransactions := make([]*serialization.PartiallySignedTransaction, splitCount) + for i := 0; i < splitCount; i++ { + startIndex := i * inputCountPerSplit + endIndex := startIndex + inputCountPerSplit + var err error + splitTransactions[i], err = s.createSplitTransaction(transaction, changeAddress, startIndex, endIndex) + if err != nil { + return nil, err + } + } + + if len(splitTransactions) > 1 { + mergeTransaction, err := s.mergeTransaction(splitTransactions, transaction, toAddress, changeAddress, changeWalletAddress) + if err != nil { + return nil, err + } + // Recursion will be 2-3 iterations deep even in the rarest` cases, so considered safe.. 
+ splitMergeTransaction, err := s.maybeSplitAndMergeTransaction(mergeTransaction, toAddress, changeAddress, changeWalletAddress) + if err != nil { + return nil, err + } + splitTransactions = append(splitTransactions, splitMergeTransaction...) + + } + + return splitTransactions, nil +} + +// splitAndInputPerSplitCounts calculates the number of splits to create, and the number of inputs to assign per split. +func (s *server) splitAndInputPerSplitCounts(transaction *serialization.PartiallySignedTransaction, transactionMass uint64, + changeAddress util.Address) (splitCount, inputsPerSplitCount int, err error) { + + // Create a dummy transaction which is a clone of the original transaction, but without inputs, + // to calculate how much mass do all the inputs have + transactionWithoutInputs := transaction.Tx.Clone() + transactionWithoutInputs.Inputs = []*externalapi.DomainTransactionInput{} + massWithoutInputs := s.txMassCalculator.CalculateTransactionMass(transactionWithoutInputs) + + massOfAllInputs := transactionMass - massWithoutInputs + + // Since the transaction was generated by spectrewallet, we assume all inputs have the same number of signatures, and + // thus - the same mass. 
+ inputCount := len(transaction.Tx.Inputs) + massPerInput := massOfAllInputs / uint64(inputCount) + if massOfAllInputs%uint64(inputCount) > 0 { + massPerInput++ + } + + // Create another dummy transaction, this time one similar to the split transactions we wish to generate, + // but with 0 inputs, to calculate how much mass for inputs do we have available in the split transactions + splitTransactionWithoutInputs, err := s.createSplitTransaction(transaction, changeAddress, 0, 0) + if err != nil { + return 0, 0, err + } + massForEverythingExceptInputsInSplitTransaction := + s.txMassCalculator.CalculateTransactionMass(splitTransactionWithoutInputs.Tx) + massForInputsInSplitTransaction := mempool.MaximumStandardTransactionMass - massForEverythingExceptInputsInSplitTransaction + + inputsPerSplitCount = int(massForInputsInSplitTransaction / massPerInput) + splitCount = inputCount / inputsPerSplitCount + if inputCount%inputsPerSplitCount > 0 { + splitCount++ + } + + return splitCount, inputsPerSplitCount, nil +} + +func (s *server) createSplitTransaction(transaction *serialization.PartiallySignedTransaction, + changeAddress util.Address, startIndex int, endIndex int) (*serialization.PartiallySignedTransaction, error) { + + selectedUTXOs := make([]*libspectrewallet.UTXO, 0, endIndex-startIndex) + totalSompi := uint64(0) + + for i := startIndex; i < endIndex && i < len(transaction.PartiallySignedInputs); i++ { + partiallySignedInput := transaction.PartiallySignedInputs[i] + selectedUTXOs = append(selectedUTXOs, &libspectrewallet.UTXO{ + Outpoint: &transaction.Tx.Inputs[i].PreviousOutpoint, + UTXOEntry: utxo.NewUTXOEntry( + partiallySignedInput.PrevOutput.Value, partiallySignedInput.PrevOutput.ScriptPublicKey, + false, constants.UnacceptedDAAScore), + DerivationPath: partiallySignedInput.DerivationPath, + }) + + totalSompi += selectedUTXOs[i-startIndex].UTXOEntry.Amount() + totalSompi -= feePerInput + } + unsignedTransactionBytes, err := 
libspectrewallet.CreateUnsignedTransaction(s.keysFile.ExtendedPublicKeys, + s.keysFile.MinimumSignatures, + []*libspectrewallet.Payment{{ + Address: changeAddress, + Amount: totalSompi, + }}, selectedUTXOs) + if err != nil { + return nil, err + } + + return serialization.DeserializePartiallySignedTransaction(unsignedTransactionBytes) +} + +func (s *server) estimateMassAfterSignatures(transaction *serialization.PartiallySignedTransaction) (uint64, error) { + transaction = transaction.Clone() + var signatureSize uint64 + if s.keysFile.ECDSA { + signatureSize = secp256k1.SerializedECDSASignatureSize + } else { + signatureSize = secp256k1.SerializedSchnorrSignatureSize + } + + for i, input := range transaction.PartiallySignedInputs { + for j, pubKeyPair := range input.PubKeySignaturePairs { + if uint32(j) >= s.keysFile.MinimumSignatures { + break + } + pubKeyPair.Signature = make([]byte, signatureSize+1) // +1 for SigHashType + } + transaction.Tx.Inputs[i].SigOpCount = byte(len(input.PubKeySignaturePairs)) + } + + transactionWithSignatures, err := libspectrewallet.ExtractTransactionDeserialized(transaction, s.keysFile.ECDSA) + if err != nil { + return 0, err + } + + return s.txMassCalculator.CalculateTransactionMass(transactionWithSignatures), nil +} + +func (s *server) moreUTXOsForMergeTransaction(alreadySelectedUTXOs []*libspectrewallet.UTXO, requiredAmount uint64) ( + additionalUTXOs []*libspectrewallet.UTXO, totalValueAdded uint64, err error) { + + dagInfo, err := s.rpcClient.GetBlockDAGInfo() + if err != nil { + return nil, 0, err + } + alreadySelectedUTXOsMap := make(map[externalapi.DomainOutpoint]struct{}, len(alreadySelectedUTXOs)) + for _, alreadySelectedUTXO := range alreadySelectedUTXOs { + alreadySelectedUTXOsMap[*alreadySelectedUTXO.Outpoint] = struct{}{} + } + + for _, utxo := range s.utxosSortedByAmount { + if _, ok := alreadySelectedUTXOsMap[*utxo.Outpoint]; ok { + continue + } + if !isUTXOSpendable(utxo, dagInfo.VirtualDAAScore, 
s.params.BlockCoinbaseMaturity) { + continue + } + additionalUTXOs = append(additionalUTXOs, &libspectrewallet.UTXO{ + Outpoint: utxo.Outpoint, + UTXOEntry: utxo.UTXOEntry, + DerivationPath: s.walletAddressPath(utxo.address)}) + totalValueAdded += utxo.UTXOEntry.Amount() - feePerInput + if totalValueAdded >= requiredAmount { + break + } + } + if totalValueAdded < requiredAmount { + return nil, 0, errors.Errorf("Insufficient funds for merge transaction") + } + + return additionalUTXOs, totalValueAdded, nil +} diff --git a/cmd/spectrewallet/daemon/server/split_transaction_test.go b/cmd/spectrewallet/daemon/server/split_transaction_test.go new file mode 100644 index 0000000..9a7ffb1 --- /dev/null +++ b/cmd/spectrewallet/daemon/server/split_transaction_test.go @@ -0,0 +1,152 @@ +package server + +import ( + "testing" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + + "github.com/spectre-project/spectred/cmd/spectrewallet/keys" + "github.com/spectre-project/spectred/util/txmass" + + "github.com/spectre-project/spectred/domain/dagconfig" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestEstimateMassAfterSignatures(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + unsignedTransactionBytes, mnemonics, params, teardown := testEstimateMassIncreaseForSignaturesSetUp(t, consensusConfig) + defer teardown(false) + + serverInstance := &server{ + params: params, + keysFile: &keys.File{MinimumSignatures: 2}, + shutdown: 
make(chan struct{}), + addressSet: make(walletAddressSet), + txMassCalculator: txmass.NewCalculator(params.MassPerTxByte, params.MassPerScriptPubKeyByte, params.MassPerSigOp), + } + + unsignedTransaction, err := serialization.DeserializePartiallySignedTransaction(unsignedTransactionBytes) + if err != nil { + t.Fatalf("Error deserializing unsignedTransaction: %s", err) + } + + estimatedMassAfterSignatures, err := serverInstance.estimateMassAfterSignatures(unsignedTransaction) + if err != nil { + t.Fatalf("Error from estimateMassAfterSignatures: %s", err) + } + + signedTxStep1Bytes, err := libspectrewallet.Sign(params, mnemonics[:1], unsignedTransactionBytes, false) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + signedTxStep2Bytes, err := libspectrewallet.Sign(params, mnemonics[1:2], signedTxStep1Bytes, false) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + extractedSignedTx, err := libspectrewallet.ExtractTransaction(signedTxStep2Bytes, false) + if err != nil { + t.Fatalf("ExtractTransaction: %+v", err) + } + + actualMassAfterSignatures := serverInstance.txMassCalculator.CalculateTransactionMass(extractedSignedTx) + + if estimatedMassAfterSignatures != actualMassAfterSignatures { + t.Errorf("Estimated mass after signatures: %d but actually got %d", + estimatedMassAfterSignatures, actualMassAfterSignatures) + } + }) +} + +func testEstimateMassIncreaseForSignaturesSetUp(t *testing.T, consensusConfig *consensus.Config) ( + []byte, []string, *dagconfig.Params, func(keepDataDir bool)) { + + consensusConfig.BlockCoinbaseMaturity = 0 + params := &consensusConfig.Params + + tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestMultisig") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + + const numKeys = 3 + mnemonics := make([]string, numKeys) + publicKeys := make([]string, numKeys) + for i := 0; i < numKeys; i++ { + var err error + mnemonics[i], err = libspectrewallet.CreateMnemonic() + if err != nil { + 
t.Fatalf("CreateMnemonic: %+v", err) + } + + publicKeys[i], err = libspectrewallet.MasterPublicKeyFromMnemonic(&consensusConfig.Params, mnemonics[i], true) + if err != nil { + t.Fatalf("MasterPublicKeyFromMnemonic: %+v", err) + } + } + + const minimumSignatures = 2 + path := "m/1/2/3" + address, err := libspectrewallet.Address(params, publicKeys, minimumSignatures, path, false) + if err != nil { + t.Fatalf("Address: %+v", err) + } + + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + t.Fatalf("PayToAddrScript: %+v", err) + } + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + ExtraData: nil, + } + + fundingBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1, _, err := tc.GetBlock(block1Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + block1Tx := block1.Transactions[0] + block1TxOut := block1Tx.Outputs[0] + selectedUTXOs := []*libspectrewallet.UTXO{ + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block1.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(block1TxOut.Value, block1TxOut.ScriptPublicKey, true, 0), + DerivationPath: path, + }, + } + + unsignedTransaction, err := libspectrewallet.CreateUnsignedTransaction(publicKeys, minimumSignatures, + []*libspectrewallet.Payment{{ + Address: address, + Amount: 10, + }}, selectedUTXOs) + if err != nil { + t.Fatalf("CreateUnsignedTransactions: %+v", err) + } + + return unsignedTransaction, mnemonics, params, teardown +} diff --git a/cmd/spectrewallet/daemon/server/sync.go b/cmd/spectrewallet/daemon/server/sync.go new file mode 100644 index 0000000..a05241c --- /dev/null +++ b/cmd/spectrewallet/daemon/server/sync.go 
@@ -0,0 +1,323 @@ +package server + +import ( + "fmt" + "sort" + "time" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +var keyChains = []uint8{libspectrewallet.ExternalKeychain, libspectrewallet.InternalKeychain} + +type walletAddressSet map[string]*walletAddress + +func (was walletAddressSet) strings() []string { + addresses := make([]string, 0, len(was)) + for addr := range was { + addresses = append(addresses, addr) + } + return addresses +} + +func (s *server) sync() error { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + err := s.collectRecentAddresses() + if err != nil { + return err + } + + err = s.refreshExistingUTXOsWithLock() + if err != nil { + return err + } + + for range ticker.C { + err = s.collectFarAddresses() + if err != nil { + return err + } + + err = s.collectRecentAddresses() + if err != nil { + return err + } + + err = s.refreshExistingUTXOsWithLock() + if err != nil { + return err + } + } + + return nil +} + +const ( + numIndexesToQueryForFarAddresses = 100 + numIndexesToQueryForRecentAddresses = 1000 +) + +// addressesToQuery scans the addresses in the given range. Because +// each cosigner in a multisig has its own unique path for generating +// addresses it goes over all the cosigners and add their addresses +// for each key chain. 
+func (s *server) addressesToQuery(start, end uint32) (walletAddressSet, error) { + addresses := make(walletAddressSet) + for index := start; index < end; index++ { + for cosignerIndex := uint32(0); cosignerIndex < uint32(len(s.keysFile.ExtendedPublicKeys)); cosignerIndex++ { + for _, keychain := range keyChains { + address := &walletAddress{ + index: index, + cosignerIndex: cosignerIndex, + keyChain: keychain, + } + addressString, err := s.walletAddressString(address) + if err != nil { + return nil, err + } + addresses[addressString] = address + } + } + } + + return addresses, nil +} + +// collectFarAddresses collects numIndexesToQueryForFarAddresses addresses +// from the last point it stopped in the previous call. +func (s *server) collectFarAddresses() error { + s.lock.Lock() + defer s.lock.Unlock() + err := s.collectAddresses(s.nextSyncStartIndex, s.nextSyncStartIndex+numIndexesToQueryForFarAddresses) + if err != nil { + return err + } + + s.nextSyncStartIndex += numIndexesToQueryForFarAddresses + return nil +} + +func (s *server) maxUsedIndexWithLock() uint32 { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.maxUsedIndex() +} + +func (s *server) maxUsedIndex() uint32 { + maxUsedIndex := s.keysFile.LastUsedExternalIndex() + if s.keysFile.LastUsedInternalIndex() > maxUsedIndex { + maxUsedIndex = s.keysFile.LastUsedInternalIndex() + } + + return maxUsedIndex +} + +// collectRecentAddresses collects addresses from used addresses until +// the address with the index of the last used address + numIndexesToQueryForRecentAddresses. +// collectRecentAddresses scans addresses in batches of numIndexesToQueryForRecentAddresses, +// and releases the lock between scans. 
+func (s *server) collectRecentAddresses() error { + index := uint32(0) + maxUsedIndex := uint32(0) + for ; index < maxUsedIndex+numIndexesToQueryForRecentAddresses; index += numIndexesToQueryForRecentAddresses { + err := s.collectAddressesWithLock(index, index+numIndexesToQueryForRecentAddresses) + + if err != nil { + return err + } + maxUsedIndex = s.maxUsedIndexWithLock() + + s.updateSyncingProgressLog(index, maxUsedIndex) + } + + s.lock.Lock() + if index > s.nextSyncStartIndex { + s.nextSyncStartIndex = index + } + s.lock.Unlock() + + return nil +} + +func (s *server) collectAddressesWithLock(start, end uint32) error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.collectAddresses(start, end) +} + +func (s *server) collectAddresses(start, end uint32) error { + addressSet, err := s.addressesToQuery(start, end) + if err != nil { + return err + } + + getBalancesByAddressesResponse, err := s.rpcClient.GetBalancesByAddresses(addressSet.strings()) + if err != nil { + return err + } + + err = s.updateAddressesAndLastUsedIndexes(addressSet, getBalancesByAddressesResponse) + if err != nil { + return err + } + + return nil +} + +func (s *server) updateAddressesAndLastUsedIndexes(requestedAddressSet walletAddressSet, + getBalancesByAddressesResponse *appmessage.GetBalancesByAddressesResponseMessage) error { + lastUsedExternalIndex := s.keysFile.LastUsedExternalIndex() + lastUsedInternalIndex := s.keysFile.LastUsedInternalIndex() + + for _, entry := range getBalancesByAddressesResponse.Entries { + walletAddress, ok := requestedAddressSet[entry.Address] + if !ok { + return errors.Errorf("Got result from address %s even though it wasn't requested", entry.Address) + } + + if entry.Balance == 0 { + continue + } + + s.addressSet[entry.Address] = walletAddress + + if walletAddress.keyChain == libspectrewallet.ExternalKeychain { + if walletAddress.index > lastUsedExternalIndex { + lastUsedExternalIndex = walletAddress.index + } + continue + } + + if walletAddress.index > 
lastUsedInternalIndex { + lastUsedInternalIndex = walletAddress.index + } + } + + err := s.keysFile.SetLastUsedExternalIndex(lastUsedExternalIndex) + if err != nil { + return err + } + + return s.keysFile.SetLastUsedInternalIndex(lastUsedInternalIndex) +} + +func (s *server) refreshExistingUTXOsWithLock() error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.refreshUTXOs() +} + +// updateUTXOSet clears the current UTXO set, and re-fills it with the given entries +func (s *server) updateUTXOSet(entries []*appmessage.UTXOsByAddressesEntry, mempoolEntries []*appmessage.MempoolEntryByAddress) error { + utxos := make([]*walletUTXO, 0, len(entries)) + + exclude := make(map[appmessage.RPCOutpoint]struct{}) + for _, entriesByAddress := range mempoolEntries { + for _, entry := range entriesByAddress.Sending { + for _, input := range entry.Transaction.Inputs { + exclude[*input.PreviousOutpoint] = struct{}{} + } + } + } + + for _, entry := range entries { + if _, ok := exclude[*entry.Outpoint]; ok { + continue + } + + outpoint, err := appmessage.RPCOutpointToDomainOutpoint(entry.Outpoint) + if err != nil { + return err + } + + utxoEntry, err := appmessage.RPCUTXOEntryToUTXOEntry(entry.UTXOEntry) + if err != nil { + return err + } + + address, ok := s.addressSet[entry.Address] + if !ok { + return errors.Errorf("Got result from address %s even though it wasn't requested", entry.Address) + } + utxos = append(utxos, &walletUTXO{ + Outpoint: outpoint, + UTXOEntry: utxoEntry, + address: address, + }) + } + + sort.Slice(utxos, func(i, j int) bool { return utxos[i].UTXOEntry.Amount() > utxos[j].UTXOEntry.Amount() }) + + s.utxosSortedByAmount = utxos + + return nil +} + +func (s *server) refreshUTXOs() error { + // It's important to check the mempool before calling `GetUTXOsByAddresses`: + // If we would do it the other way around an output can be spent in the mempool + // and not in consensus, and between the calls its spending transaction will be + // added to consensus and 
removed from the mempool, so `getUTXOsByAddressesResponse` + // will include an obsolete output. + mempoolEntriesByAddresses, err := s.rpcClient.GetMempoolEntriesByAddresses(s.addressSet.strings(), true, true) + if err != nil { + return err + } + + getUTXOsByAddressesResponse, err := s.rpcClient.GetUTXOsByAddresses(s.addressSet.strings()) + if err != nil { + return err + } + + return s.updateUTXOSet(getUTXOsByAddressesResponse.Entries, mempoolEntriesByAddresses.Entries) +} + +func (s *server) isSynced() bool { + return s.nextSyncStartIndex > s.maxUsedIndex() +} + +func (s *server) formatSyncStateReport() string { + maxUsedIndex := s.maxUsedIndex() + + if s.nextSyncStartIndex > maxUsedIndex { + maxUsedIndex = s.nextSyncStartIndex + } + + return fmt.Sprintf("scanned %d out of %d addresses (%.2f%%)", + s.nextSyncStartIndex, maxUsedIndex, float64(s.nextSyncStartIndex)*100.0/float64(maxUsedIndex)) +} + +func (s *server) updateSyncingProgressLog(currProcessedAddresses, currMaxUsedAddresses uint32) { + if currMaxUsedAddresses > s.maxUsedAddressesForLog { + s.maxUsedAddressesForLog = currMaxUsedAddresses + if s.isLogFinalProgressLineShown { + log.Infof("An additional set of previously used addresses found, processing...") + s.maxProcessedAddressesForLog = 0 + s.isLogFinalProgressLineShown = false + } + } + + if currProcessedAddresses > s.maxProcessedAddressesForLog { + s.maxProcessedAddressesForLog = currProcessedAddresses + } + + if s.maxProcessedAddressesForLog >= s.maxUsedAddressesForLog { + if !s.isLogFinalProgressLineShown { + log.Infof("Wallet is synced, ready for queries") + s.isLogFinalProgressLineShown = true + } + } else { + percentProcessed := float64(s.maxProcessedAddressesForLog) / float64(s.maxUsedAddressesForLog) * 100.0 + + log.Infof("%d addresses of %d processed (%.2f%%)...", + s.maxProcessedAddressesForLog, s.maxUsedAddressesForLog, percentProcessed) + } +} diff --git a/cmd/spectrewallet/docker/Dockerfile b/cmd/spectrewallet/docker/Dockerfile new file 
mode 100644 index 0000000..76bce59 --- /dev/null +++ b/cmd/spectrewallet/docker/Dockerfile @@ -0,0 +1,29 @@ +# -- multistage docker build: stage #1: build stage +FROM golang:1.18-alpine AS build + +RUN mkdir -p /go/src/github.com/spectre-project/spectred/ + +WORKDIR /go/src/github.com/spectre-project/spectred/ + +RUN apk add --no-cache curl git openssh binutils gcc musl-dev + +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . + +WORKDIR /go/src/github.com/spectre-project/spectred/cmd/spectrewallet +RUN GOOS=linux go build -a -installsuffix cgo -o spectrewallet . + +# --- multistage docker build: stage #2: runtime image +FROM alpine +WORKDIR /app + +RUN apk add --no-cache ca-certificates tini + +COPY --from=build /go/src/github.com/spectre-project/spectred/cmd/spectrewallet/spectrewallet /app/ + +USER nobody +ENTRYPOINT [ "/sbin/tini", "--" ] diff --git a/cmd/spectrewallet/dump_unencrypted_data.go b/cmd/spectrewallet/dump_unencrypted_data.go new file mode 100644 index 0000000..aa20184 --- /dev/null +++ b/cmd/spectrewallet/dump_unencrypted_data.go @@ -0,0 +1,77 @@ +package main + +import ( + "bufio" + "fmt" + "os" + + "github.com/spectre-project/spectred/cmd/spectrewallet/keys" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" + + "github.com/pkg/errors" +) + +func dumpUnencryptedData(conf *dumpUnencryptedDataConfig) error { + if !conf.Yes { + err := confirmDump() + if err != nil { + return err + } + } + + keysFile, err := keys.ReadKeysFile(conf.NetParams(), conf.KeysFile) + if err != nil { + return err + } + + if len(conf.Password) == 0 { + conf.Password = keys.GetPassword("Password:") + } + mnemonics, err := keysFile.DecryptMnemonics(conf.Password) + if err != nil { + return err + } + + mnemonicPublicKeys := make(map[string]struct{}) + for i, mnemonic := range mnemonics { + fmt.Printf("Mnemonic #%d:\n%s\n\n", i+1, mnemonic) + publicKey, err := 
libspectrewallet.MasterPublicKeyFromMnemonic(conf.NetParams(), mnemonic, len(keysFile.ExtendedPublicKeys) > 1) + if err != nil { + return err + } + + mnemonicPublicKeys[publicKey] = struct{}{} + } + + i := 1 + for _, extendedPublicKey := range keysFile.ExtendedPublicKeys { + if _, exists := mnemonicPublicKeys[extendedPublicKey]; exists { + continue + } + + fmt.Printf("Extended Public key #%d:\n%s\n\n", i, extendedPublicKey) + i++ + } + + fmt.Printf("Minimum number of signatures: %d\n", keysFile.MinimumSignatures) + return nil +} + +func confirmDump() error { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("This operation will print your unencrypted keys on the screen. Anyone that sees this information " + + "will be able to steal your funds. Are you sure you want to proceed (y/N)? ") + line, err := utils.ReadLine(reader) + if err != nil { + return err + } + + fmt.Println() + + if string(line) != "y" { + return errors.Errorf("Dump aborted by user") + } + + return nil +} diff --git a/cmd/spectrewallet/keys/create.go b/cmd/spectrewallet/keys/create.go new file mode 100644 index 0000000..6eb77dc --- /dev/null +++ b/cmd/spectrewallet/keys/create.go @@ -0,0 +1,121 @@ +package keys + +import ( + "bufio" + "crypto/rand" + "crypto/subtle" + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/tyler-smith/go-bip39" +) + +// CreateMnemonics generates `numKeys` number of mnemonics. 
+func CreateMnemonics(params *dagconfig.Params, numKeys uint32, cmdLinePassword string, isMultisig bool) (encryptedPrivateKeys []*EncryptedMnemonic, extendedPublicKeys []string, err error) { + mnemonics := make([]string, numKeys) + for i := uint32(0); i < numKeys; i++ { + var err error + mnemonics[i], err = libspectrewallet.CreateMnemonic() + if err != nil { + return nil, nil, err + } + } + + return encryptedMnemonicExtendedPublicKeyPairs(params, mnemonics, cmdLinePassword, isMultisig) +} + +// ImportMnemonics imports a `numKeys` of mnemonics. +func ImportMnemonics(params *dagconfig.Params, numKeys uint32, cmdLinePassword string, isMultisig bool) (encryptedPrivateKeys []*EncryptedMnemonic, extendedPublicKeys []string, err error) { + mnemonics := make([]string, numKeys) + for i := uint32(0); i < numKeys; i++ { + fmt.Printf("Enter mnemonic #%d here:\n", i+1) + reader := bufio.NewReader(os.Stdin) + mnemonic, err := utils.ReadLine(reader) + if err != nil { + return nil, nil, err + } + + if !bip39.IsMnemonicValid(string(mnemonic)) { + return nil, nil, errors.Errorf("mnemonic is invalid") + } + + mnemonics[i] = string(mnemonic) + } + return encryptedMnemonicExtendedPublicKeyPairs(params, mnemonics, cmdLinePassword, isMultisig) +} + +func encryptedMnemonicExtendedPublicKeyPairs(params *dagconfig.Params, mnemonics []string, cmdLinePassword string, isMultisig bool) ( + encryptedPrivateKeys []*EncryptedMnemonic, extendedPublicKeys []string, err error) { + password := []byte(cmdLinePassword) + if len(password) == 0 { + + password = []byte(GetPassword("Enter password for the key file:")) + confirmPassword := []byte(GetPassword("Confirm password:")) + + if subtle.ConstantTimeCompare(password, confirmPassword) != 1 { + return nil, nil, errors.New("Passwords are not identical") + } + } + + encryptedPrivateKeys = make([]*EncryptedMnemonic, 0, len(mnemonics)) + extendedPublicKeys = make([]string, 0, len(mnemonics)) + + for _, mnemonic := range mnemonics { + extendedPublicKey, err 
:= libspectrewallet.MasterPublicKeyFromMnemonic(params, mnemonic, isMultisig) + if err != nil { + return nil, nil, err + } + + extendedPublicKeys = append(extendedPublicKeys, extendedPublicKey) + + encryptedPrivateKey, err := encryptMnemonic(mnemonic, password) + if err != nil { + return nil, nil, err + } + encryptedPrivateKeys = append(encryptedPrivateKeys, encryptedPrivateKey) + } + + return encryptedPrivateKeys, extendedPublicKeys, nil +} + +func generateSalt() ([]byte, error) { + salt := make([]byte, 16) + _, err := rand.Read(salt) + if err != nil { + return nil, err + } + + return salt, nil +} + +func encryptMnemonic(mnemonic string, password []byte) (*EncryptedMnemonic, error) { + mnemonicBytes := []byte(mnemonic) + + salt, err := generateSalt() + if err != nil { + return nil, err + } + + aead, err := getAEAD(defaultNumThreads, password, salt) + if err != nil { + return nil, err + } + + // Select a random nonce, and leave capacity for the ciphertext. + nonce := make([]byte, aead.NonceSize(), aead.NonceSize()+len(mnemonicBytes)+aead.Overhead()) + if _, err := rand.Read(nonce); err != nil { + return nil, err + } + + // Encrypt the message and append the ciphertext to the nonce. + cipher := aead.Seal(nonce, nonce, []byte(mnemonicBytes), nil) + + return &EncryptedMnemonic{ + cipher: cipher, + salt: salt, + }, nil +} diff --git a/cmd/spectrewallet/keys/get_password.go b/cmd/spectrewallet/keys/get_password.go new file mode 100644 index 0000000..9ae9784 --- /dev/null +++ b/cmd/spectrewallet/keys/get_password.go @@ -0,0 +1,42 @@ +package keys + +import ( + "fmt" + "os" + "os/signal" + "syscall" + + "golang.org/x/term" +) + +// GetPassword was adapted from https://gist.github.com/jlinoff/e8e26b4ffa38d379c7f1891fd174a6d0#file-getpassword2-go +func GetPassword(prompt string) string { + // Get the initial state of the terminal. + initialTermState, e1 := term.GetState(int(syscall.Stdin)) + if e1 != nil { + panic(e1) + } + + // Restore it in the event of an interrupt. 
+ // CITATION: Konstantin Shaposhnikov - https://groups.google.com/forum/#!topic/golang-nuts/kTVAbtee9UA + c := make(chan os.Signal) + signal.Notify(c, os.Interrupt, os.Kill) + go func() { + <-c + _ = term.Restore(int(syscall.Stdin), initialTermState) + os.Exit(1) + }() + + // Now get the password. + fmt.Print(prompt) + p, err := term.ReadPassword(int(syscall.Stdin)) + fmt.Println() + if err != nil { + panic(err) + } + + // Stop looking for ^C on the channel. + signal.Stop(c) + + return string(p) +} diff --git a/cmd/spectrewallet/keys/keys.go b/cmd/spectrewallet/keys/keys.go new file mode 100644 index 0000000..56a968c --- /dev/null +++ b/cmd/spectrewallet/keys/keys.go @@ -0,0 +1,434 @@ +package keys + +import ( + "bufio" + "crypto/cipher" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/gofrs/flock" + + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/chacha20poly1305" +) + +var ( + defaultAppDir = util.AppDir("spectrewallet", false) +) + +// LastVersion is the most up to date file format version +const LastVersion = 1 + +func defaultKeysFile(netParams *dagconfig.Params) string { + return filepath.Join(defaultAppDir, netParams.Name, "keys.json") +} + +type encryptedPrivateKeyJSON struct { + Cipher string `json:"cipher"` + Salt string `json:"salt"` +} + +type keysFileJSON struct { + Version uint32 `json:"version"` + NumThreads uint8 `json:"numThreads,omitempty"` // This field is ignored for versions different from 0. See more details at the function `numThreads`. 
+ EncryptedPrivateKeys []*encryptedPrivateKeyJSON `json:"encryptedMnemonics"` + ExtendedPublicKeys []string `json:"publicKeys"` + MinimumSignatures uint32 `json:"minimumSignatures"` + CosignerIndex uint32 `json:"cosignerIndex"` + LastUsedExternalIndex uint32 `json:"lastUsedExternalIndex"` + LastUsedInternalIndex uint32 `json:"lastUsedInternalIndex"` + ECDSA bool `json:"ecdsa"` +} + +// EncryptedMnemonic represents an encrypted mnemonic +type EncryptedMnemonic struct { + cipher []byte + salt []byte +} + +// File holds all the data related to the wallet keys +type File struct { + Version uint32 + NumThreads uint8 // This field is ignored for versions different than 0 + EncryptedMnemonics []*EncryptedMnemonic + ExtendedPublicKeys []string + MinimumSignatures uint32 + CosignerIndex uint32 + lastUsedExternalIndex uint32 + lastUsedInternalIndex uint32 + ECDSA bool + path string +} + +func (d *File) toJSON() *keysFileJSON { + encryptedPrivateKeysJSON := make([]*encryptedPrivateKeyJSON, len(d.EncryptedMnemonics)) + for i, encryptedPrivateKey := range d.EncryptedMnemonics { + encryptedPrivateKeysJSON[i] = &encryptedPrivateKeyJSON{ + Cipher: hex.EncodeToString(encryptedPrivateKey.cipher), + Salt: hex.EncodeToString(encryptedPrivateKey.salt), + } + } + + return &keysFileJSON{ + Version: d.Version, + NumThreads: d.NumThreads, + EncryptedPrivateKeys: encryptedPrivateKeysJSON, + ExtendedPublicKeys: d.ExtendedPublicKeys, + MinimumSignatures: d.MinimumSignatures, + ECDSA: d.ECDSA, + CosignerIndex: d.CosignerIndex, + LastUsedExternalIndex: d.lastUsedExternalIndex, + LastUsedInternalIndex: d.lastUsedInternalIndex, + } +} + +// NewFileFromMnemonic generates a new File from the given mnemonic string +func NewFileFromMnemonic(params *dagconfig.Params, mnemonic string, password string) (*File, error) { + encryptedMnemonics, extendedPublicKeys, err := + encryptedMnemonicExtendedPublicKeyPairs(params, []string{mnemonic}, password, false) + if err != nil { + return nil, err + } + return 
&File{ + Version: LastVersion, + NumThreads: defaultNumThreads, + EncryptedMnemonics: encryptedMnemonics, + ExtendedPublicKeys: extendedPublicKeys, + MinimumSignatures: 1, + ECDSA: false, + }, nil +} + +func (d *File) fromJSON(fileJSON *keysFileJSON) error { + d.Version = fileJSON.Version + d.NumThreads = fileJSON.NumThreads + d.MinimumSignatures = fileJSON.MinimumSignatures + d.ECDSA = fileJSON.ECDSA + d.ExtendedPublicKeys = fileJSON.ExtendedPublicKeys + d.CosignerIndex = fileJSON.CosignerIndex + d.lastUsedExternalIndex = fileJSON.LastUsedExternalIndex + d.lastUsedInternalIndex = fileJSON.LastUsedInternalIndex + + d.EncryptedMnemonics = make([]*EncryptedMnemonic, len(fileJSON.EncryptedPrivateKeys)) + for i, encryptedPrivateKeyJSON := range fileJSON.EncryptedPrivateKeys { + cipher, err := hex.DecodeString(encryptedPrivateKeyJSON.Cipher) + if err != nil { + return err + } + + salt, err := hex.DecodeString(encryptedPrivateKeyJSON.Salt) + if err != nil { + return err + } + + d.EncryptedMnemonics[i] = &EncryptedMnemonic{ + cipher: cipher, + salt: salt, + } + } + + return nil +} + +// SetPath sets the path where the file is saved to. +func (d *File) SetPath(params *dagconfig.Params, path string, forceOverride bool) error { + if path == "" { + path = defaultKeysFile(params) + } + + if !forceOverride { + exists, err := pathExists(path) + if err != nil { + return err + } + + if exists { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("The file %s already exists. Are you sure you want to override it (type 'y' to approve)? ", d.path) + line, err := utils.ReadLine(reader) + if err != nil { + return err + } + + if string(line) != "y" { + return errors.Errorf("aborted setting the file path to %s", path) + } + } + } + d.path = path + return nil +} + +// Path returns the file path. +func (d *File) Path() string { + return d.path +} + +// SetLastUsedExternalIndex sets the last used index in the external key +// chain, and saves the file with the updated data. 
+func (d *File) SetLastUsedExternalIndex(index uint32) error { + if d.lastUsedExternalIndex == index { + return nil + } + + d.lastUsedExternalIndex = index + return d.Save() +} + +// LastUsedExternalIndex returns the last used index in the external key +// chain and saves the file with the updated data. +func (d *File) LastUsedExternalIndex() uint32 { + return d.lastUsedExternalIndex +} + +// SetLastUsedInternalIndex sets the last used index in the internal key chain, and saves the file. +func (d *File) SetLastUsedInternalIndex(index uint32) error { + if d.lastUsedInternalIndex == index { + return nil + } + + d.lastUsedInternalIndex = index + return d.Save() +} + +// LastUsedInternalIndex returns the last used index in the internal key chain +func (d *File) LastUsedInternalIndex() uint32 { + return d.lastUsedInternalIndex +} + +// DecryptMnemonics asks the user to enter the password for the private keys and +// returns the decrypted private keys. +func (d *File) DecryptMnemonics(password string) ([]string, error) { + passwordBytes := []byte(password) + + var numThreads uint8 + if len(d.EncryptedMnemonics) > 0 { + var err error + numThreads, err = d.numThreads(passwordBytes) + if err != nil { + return nil, err + } + } + + privateKeys := make([]string, len(d.EncryptedMnemonics)) + for i, encryptedPrivateKey := range d.EncryptedMnemonics { + var err error + privateKeys[i], err = decryptMnemonic(numThreads, encryptedPrivateKey, passwordBytes) + if err != nil { + return nil, err + } + } + + return privateKeys, nil +} + +// ReadKeysFile returns the data related to the keys file +func ReadKeysFile(netParams *dagconfig.Params, path string) (*File, error) { + if path == "" { + path = defaultKeysFile(netParams) + } + + file, err := os.Open(path) + if err != nil { + return nil, err + } + + decoder := json.NewDecoder(file) + decoder.DisallowUnknownFields() + decodedFile := &keysFileJSON{} + err = decoder.Decode(&decodedFile) + if err != nil { + return nil, err + } + + keysFile 
:= &File{ + path: path, + } + err = keysFile.fromJSON(decodedFile) + if err != nil { + return nil, err + } + + return keysFile, nil +} + +func createFileDirectoryIfDoesntExist(path string) error { + dir := filepath.Dir(path) + exists, err := pathExists(dir) + if err != nil { + return err + } + + if exists { + return nil + } + + return os.MkdirAll(dir, 0700) +} + +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + + if err == nil { + return true, nil + } + + if os.IsNotExist(err) { + return false, nil + + } + + return false, err +} + +// Save writes the file contents to the disk. +func (d *File) Save() error { + if d.path == "" { + return errors.New("cannot save a file with uninitialized path") + } + + err := createFileDirectoryIfDoesntExist(d.path) + if err != nil { + return err + } + + file, err := os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer file.Close() + + encoder := json.NewEncoder(file) + err = encoder.Encode(d.toJSON()) + if err != nil { + return err + } + + return nil +} + +const defaultNumThreads = 8 + +func (d *File) numThreads(password []byte) (uint8, error) { + // There's a bug in v0 wallets where the number of threads + // was determined by the number of logical CPUs at the machine, + // which made the authentication non-deterministic across platforms. + // In order to solve it we introduce v1 where the number of threads + // is constant, and brute force the number of threads in v0. After we + // find the right amount via brute force we save the result to the file. 
+ + if d.Version != 0 { + return defaultNumThreads, nil + } + + numThreads, err := d.detectNumThreads(password, d.EncryptedMnemonics[0]) + if err != nil { + return 0, err + } + + d.NumThreads = numThreads + err = d.Save() + if err != nil { + return 0, err + } + + return numThreads, nil +} + +func (d *File) detectNumThreads(password []byte, encryptedMnemonic *EncryptedMnemonic) (uint8, error) { + firstGuessNumThreads := d.NumThreads + if d.NumThreads == 0 { + firstGuessNumThreads = uint8(runtime.NumCPU()) + } + _, err := decryptMnemonic(firstGuessNumThreads, encryptedMnemonic, password) + if err != nil { + if !strings.Contains(err.Error(), "message authentication failed") { + return 0, err + } + } else { + return firstGuessNumThreads, nil + } + + for numThreadsGuess := uint8(1); ; numThreadsGuess++ { + if numThreadsGuess == firstGuessNumThreads { + continue + } + + _, err := decryptMnemonic(numThreadsGuess, encryptedMnemonic, password) + if err != nil { + const maxTries = 255 + if numThreadsGuess == maxTries || !strings.Contains(err.Error(), "message authentication failed") { + return 0, err + } + } else { + return numThreadsGuess, nil + } + } +} + +func getAEAD(threads uint8, password, salt []byte) (cipher.AEAD, error) { + key := argon2.IDKey(password, salt, 1, 64*1024, threads, 32) + return chacha20poly1305.NewX(key) +} + +func decryptMnemonic(numThreads uint8, encryptedPrivateKey *EncryptedMnemonic, password []byte) (string, error) { + aead, err := getAEAD(numThreads, password, encryptedPrivateKey.salt) + if err != nil { + return "", err + } + + if len(encryptedPrivateKey.cipher) < aead.NonceSize() { + return "", errors.New("ciphertext too short") + } + + // Split nonce and ciphertext. + nonce, ciphertext := encryptedPrivateKey.cipher[:aead.NonceSize()], encryptedPrivateKey.cipher[aead.NonceSize():] + + // Decrypt the message and check it wasn't tampered with. 
+ decrypted, err := aead.Open(nil, nonce, ciphertext, nil) + if err != nil { + return "", err + } + + return string(decrypted), nil +} + +// flockMap is a map that holds all lock file handlers. This map guarantees that +// the associated locked file handler will never get cleaned by the GC, because +// once they are cleaned the associated file will be unlocked. +var flockMap = make(map[string]*flock.Flock) + +// TryLock tries to acquire an exclusive lock for the file. +func (d *File) TryLock() error { + if _, ok := flockMap[d.path]; ok { + return errors.Errorf("file %s is already locked", d.path) + } + + lockFile := flock.New(d.path + ".lock") + err := createFileDirectoryIfDoesntExist(lockFile.Path()) + if err != nil { + return err + } + + flockMap[d.path] = lockFile + + success, err := lockFile.TryLock() + if err != nil { + return err + } + + if !success { + return errors.Errorf("%s is locked and cannot be used. Make sure that no other active wallet command is using it.", d.path) + } + return nil +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/alphabet.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/alphabet.go new file mode 100644 index 0000000..6bb39fe --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/alphabet.go @@ -0,0 +1,49 @@ +// Copyright (c) 2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// AUTOGENERATED by genalphabet.go; do not edit. + +package base58 + +const ( + // alphabet is the modified base58 alphabet used by Bitcoin. 
+ alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" + + alphabetIdx0 = '1' +) + +var b58 = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 255, 255, 255, 255, 255, 255, + 255, 9, 10, 11, 12, 13, 14, 15, + 16, 255, 17, 18, 19, 20, 21, 255, + 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 255, 255, 255, 255, 255, + 255, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 255, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/base58.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58.go new file mode 100644 index 0000000..19a72de --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58.go @@ -0,0 +1,75 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package base58 + +import ( + "math/big" +) + +//go:generate go run genalphabet.go + +var bigRadix = big.NewInt(58) +var bigZero = big.NewInt(0) + +// Decode decodes a modified base58 string to a byte slice. +func Decode(b string) []byte { + answer := big.NewInt(0) + j := big.NewInt(1) + + scratch := new(big.Int) + for i := len(b) - 1; i >= 0; i-- { + tmp := b58[b[i]] + if tmp == 255 { + return []byte("") + } + scratch.SetInt64(int64(tmp)) + scratch.Mul(j, scratch) + answer.Add(answer, scratch) + j.Mul(j, bigRadix) + } + + tmpval := answer.Bytes() + + var numZeros int + for numZeros = 0; numZeros < len(b); numZeros++ { + if b[numZeros] != alphabetIdx0 { + break + } + } + flen := numZeros + len(tmpval) + val := make([]byte, flen) + copy(val[numZeros:], tmpval) + + return val +} + +// Encode encodes a byte slice to a modified base58 string. +func Encode(b []byte) string { + x := new(big.Int) + x.SetBytes(b) + + answer := make([]byte, 0, len(b)*136/100) + for x.Cmp(bigZero) > 0 { + mod := new(big.Int) + x.DivMod(x, bigRadix, mod) + answer = append(answer, alphabet[mod.Int64()]) + } + + // leading zero bytes + for _, i := range b { + if i != 0 { + break + } + answer = append(answer, alphabetIdx0) + } + + // reverse + alen := len(answer) + for i := 0; i < alen/2; i++ { + answer[i], answer[alen-1-i] = answer[alen-1-i], answer[i] + } + + return string(answer) +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/base58_test.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58_test.go new file mode 100644 index 0000000..eb72d36 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58_test.go @@ -0,0 +1,98 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package base58_test + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/btcsuite/btcutil/base58" +) + +var stringTests = []struct { + in string + out string +}{ + {"", ""}, + {" ", "Z"}, + {"-", "n"}, + {"0", "q"}, + {"1", "r"}, + {"-1", "4SU"}, + {"11", "4k8"}, + {"abc", "ZiCa"}, + {"1234598760", "3mJr7AoUXx2Wqd"}, + {"abcdefghijklmnopqrstuvwxyz", "3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f"}, + {"00000000000000000000000000000000000000000000000000000000000000", "3sN2THZeE9Eh9eYrwkvZqNstbHGvrxSAM7gXUXvyFQP8XvQLUqNCS27icwUeDT7ckHm4FUHM2mTVh1vbLmk7y"}, +} + +var invalidStringTests = []struct { + in string + out string +}{ + {"0", ""}, + {"O", ""}, + {"I", ""}, + {"l", ""}, + {"3mJr0", ""}, + {"O3yxU", ""}, + {"3sNI", ""}, + {"4kl8", ""}, + {"0OIl", ""}, + {"!@#$%^&*()-_=+~`", ""}, +} + +var hexTests = []struct { + in string + out string +}{ + {"61", "2g"}, + {"626262", "a3gV"}, + {"636363", "aPEr"}, + {"73696d706c792061206c6f6e6720737472696e67", "2cFupjhnEsSn59qHXstmK2ffpLv2"}, + {"00eb15231dfceb60925886b67d065299925915aeb172c06647", "1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L"}, + {"516b6fcd0f", "ABnLTmg"}, + {"bf4f89001e670274dd", "3SEo3LWLoPntC"}, + {"572e4794", "3EFU7m"}, + {"ecac89cad93923c02321", "EJDM8drfXA6uyA"}, + {"10c8511e", "Rt5zm"}, + {"00000000000000000000", "1111111111"}, +} + +func TestBase58(t *testing.T) { + // Encode tests + for x, test := range stringTests { + tmp := []byte(test.in) + if res := base58.Encode(tmp); res != test.out { + t.Errorf("Encode test #%d failed: got: %s want: %s", + x, res, test.out) + continue + } + } + + // Decode tests + for x, test := range hexTests { + b, err := hex.DecodeString(test.in) + if err != nil { + t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in) + continue + } + if res := base58.Decode(test.out); !bytes.Equal(res, b) { + t.Errorf("Decode test #%d failed: got: %q want: %q", + x, res, test.in) + continue + } + } + + // Decode with invalid input + for x, test := range invalidStringTests { + 
if res := base58.Decode(test.in); string(res) != test.out { + t.Errorf("Decode invalidString test #%d failed: got: %q want: %q", + x, res, test.out) + continue + } + } +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/base58bench_test.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58bench_test.go new file mode 100644 index 0000000..2ab8fca --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58bench_test.go @@ -0,0 +1,35 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package base58_test + +import ( + "bytes" + "testing" + + "github.com/btcsuite/btcutil/base58" +) + +func BenchmarkBase58Encode(b *testing.B) { + b.StopTimer() + data := bytes.Repeat([]byte{0xff}, 5000) + b.SetBytes(int64(len(data))) + b.StartTimer() + + for i := 0; i < b.N; i++ { + base58.Encode(data) + } +} + +func BenchmarkBase58Decode(b *testing.B) { + b.StopTimer() + data := bytes.Repeat([]byte{0xff}, 5000) + encoded := base58.Encode(data) + b.SetBytes(int64(len(encoded))) + b.StartTimer() + + for i := 0; i < b.N; i++ { + base58.Decode(encoded) + } +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/base58check.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58check.go new file mode 100644 index 0000000..7cdafee --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58check.go @@ -0,0 +1,52 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package base58 + +import ( + "crypto/sha256" + "errors" +) + +// ErrChecksum indicates that the checksum of a check-encoded string does not verify against +// the checksum. +var ErrChecksum = errors.New("checksum error") + +// ErrInvalidFormat indicates that the check-encoded string has an invalid format. 
+var ErrInvalidFormat = errors.New("invalid format: version and/or checksum bytes missing") + +// checksum: first four bytes of sha256^2 +func checksum(input []byte) (cksum [4]byte) { + h := sha256.Sum256(input) + h2 := sha256.Sum256(h[:]) + copy(cksum[:], h2[:4]) + return +} + +// CheckEncode prepends a version byte and appends a four byte checksum. +func CheckEncode(input []byte, version byte) string { + b := make([]byte, 0, 1+len(input)+4) + b = append(b, version) + b = append(b, input[:]...) + cksum := checksum(b) + b = append(b, cksum[:]...) + return Encode(b) +} + +// CheckDecode decodes a string that was encoded with CheckEncode and verifies the checksum. +func CheckDecode(input string) (result []byte, version byte, err error) { + decoded := Decode(input) + if len(decoded) < 5 { + return nil, 0, ErrInvalidFormat + } + version = decoded[0] + var cksum [4]byte + copy(cksum[:], decoded[len(decoded)-4:]) + if checksum(decoded[:len(decoded)-4]) != cksum { + return nil, 0, ErrChecksum + } + payload := decoded[1 : len(decoded)-4] + result = append(result, payload...) + return +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/base58check_test.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58check_test.go new file mode 100644 index 0000000..21087cf --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/base58check_test.go @@ -0,0 +1,66 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package base58_test + +import ( + "testing" + + "github.com/btcsuite/btcutil/base58" +) + +var checkEncodingStringTests = []struct { + version byte + in string + out string +}{ + {20, "", "3MNQE1X"}, + {20, " ", "B2Kr6dBE"}, + {20, "-", "B3jv1Aft"}, + {20, "0", "B482yuaX"}, + {20, "1", "B4CmeGAC"}, + {20, "-1", "mM7eUf6kB"}, + {20, "11", "mP7BMTDVH"}, + {20, "abc", "4QiVtDjUdeq"}, + {20, "1234598760", "ZmNb8uQn5zvnUohNCEPP"}, + {20, "abcdefghijklmnopqrstuvwxyz", "K2RYDcKfupxwXdWhSAxQPCeiULntKm63UXyx5MvEH2"}, + {20, "00000000000000000000000000000000000000000000000000000000000000", "bi1EWXwJay2udZVxLJozuTb8Meg4W9c6xnmJaRDjg6pri5MBAxb9XwrpQXbtnqEoRV5U2pixnFfwyXC8tRAVC8XxnjK"}, +} + +func TestBase58Check(t *testing.T) { + for x, test := range checkEncodingStringTests { + // test encoding + if res := base58.CheckEncode([]byte(test.in), test.version); res != test.out { + t.Errorf("CheckEncode test #%d failed: got %s, want: %s", x, res, test.out) + } + + // test decoding + res, version, err := base58.CheckDecode(test.out) + if err != nil { + t.Errorf("CheckDecode test #%d failed with err: %v", x, err) + } else if version != test.version { + t.Errorf("CheckDecode test #%d failed: got version: %d want: %d", x, version, test.version) + } else if string(res) != test.in { + t.Errorf("CheckDecode test #%d failed: got: %s want: %s", x, res, test.in) + } + } + + // test the two decoding failure cases + // case 1: checksum error + _, _, err := base58.CheckDecode("3MNQE1Y") + if err != base58.ErrChecksum { + t.Error("Checkdecode test failed, expected ErrChecksum") + } + // case 2: invalid formats (string lengths below 5 mean the version byte and/or the checksum + // bytes are missing). 
+ testString := "" + for len := 0; len < 4; len++ { + // make a string of length `len` + _, _, err = base58.CheckDecode(testString) + if err != base58.ErrInvalidFormat { + t.Error("Checkdecode test failed, expected ErrInvalidFormat") + } + } + +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/cov_report.sh b/cmd/spectrewallet/libspectrewallet/bip32/base58/cov_report.sh new file mode 100644 index 0000000..307f05b --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/cov_report.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +type gocov >/dev/null 2>&1 +if [ $? -ne 0 ]; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi +gocov test | gocov report diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/doc.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/doc.go new file mode 100644 index 0000000..d657f05 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) 2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package base58 provides an API for working with modified base58 and Base58Check +encodings. + +# Modified Base58 Encoding + +Standard base58 encoding is similar to standard base64 encoding except, as the +name implies, it uses a 58 character alphabet which results in an alphanumeric +string and allows some characters which are problematic for humans to be +excluded. Due to this, there can be various base58 alphabets. 
+
+The modified base58 alphabet used by Bitcoin, and hence this package, omits the
+0, O, I, and l characters that look the same in many fonts and are therefore
+hard for humans to distinguish.
+
+# Base58Check Encoding Scheme
+
+The Base58Check encoding scheme is primarily used for Bitcoin addresses at the
+time of this writing, however it can be used to generically encode arbitrary
+byte arrays into human-readable strings along with a version byte that can be
+used to differentiate the same payload. For Bitcoin addresses, the extra
+version is used to differentiate the network of otherwise identical public keys
+which helps prevent using an address intended for one network on another.
+*/
+package base58
diff --git a/cmd/spectrewallet/libspectrewallet/bip32/base58/example_test.go b/cmd/spectrewallet/libspectrewallet/bip32/base58/example_test.go
new file mode 100644
index 0000000..230a784
--- /dev/null
+++ b/cmd/spectrewallet/libspectrewallet/bip32/base58/example_test.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2014 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package base58_test
+
+import (
+	"fmt"
+
+	"github.com/btcsuite/btcutil/base58"
+)
+
+// This example demonstrates how to decode modified base58 encoded data.
+func ExampleDecode() {
+	// Decode example modified base58 encoded data.
+	encoded := "25JnwSn7XKfNQ"
+	decoded := base58.Decode(encoded)
+
+	// Show the decoded data.
+	fmt.Println("Decoded Data:", string(decoded))
+
+	// Output:
+	// Decoded Data: Test data
+}
+
+// This example demonstrates how to encode data using the modified base58
+// encoding scheme.
+func ExampleEncode() {
+	// Encode example data with the modified base58 encoding scheme.
+	data := []byte("Test data")
+	encoded := base58.Encode(data)
+
+	// Show the encoded data.
+ fmt.Println("Encoded Data:", encoded) + + // Output: + // Encoded Data: 25JnwSn7XKfNQ +} + +// This example demonstrates how to decode Base58Check encoded data. +func ExampleCheckDecode() { + // Decode an example Base58Check encoded data. + encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa" + decoded, version, err := base58.CheckDecode(encoded) + if err != nil { + fmt.Println(err) + return + } + + // Show the decoded data. + fmt.Printf("Decoded data: %x\n", decoded) + fmt.Println("Version Byte:", version) + + // Output: + // Decoded data: 62e907b15cbf27d5425399ebf6f0fb50ebb88f18 + // Version Byte: 0 +} + +// This example demonstrates how to encode data using the Base58Check encoding +// scheme. +func ExampleCheckEncode() { + // Encode example data with the Base58Check encoding scheme. + data := []byte("Test data") + encoded := base58.CheckEncode(data, 0) + + // Show the encoded data. + fmt.Println("Encoded Data:", encoded) + + // Output: + // Encoded Data: 182iP79GRURMp7oMHDU +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/bip32.go b/cmd/spectrewallet/libspectrewallet/bip32/bip32.go new file mode 100644 index 0000000..f3a7a34 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/bip32.go @@ -0,0 +1,46 @@ +package bip32 + +import "crypto/rand" + +// GenerateSeed generates seed that can be used to initialize a master key. +func GenerateSeed() ([]byte, error) { + randBytes := make([]byte, 32) + _, err := rand.Read(randBytes) + if err != nil { + return nil, err + } + + return randBytes, nil +} + +// NewMasterWithPath returns a new master key based on the given seed and version, with a derivation +// to the given path. 
+func NewMasterWithPath(seed []byte, version [4]byte, pathString string) (*ExtendedKey, error) { + masterKey, err := NewMaster(seed, version) + if err != nil { + return nil, err + } + + return masterKey.DeriveFromPath(pathString) +} + +// NewPublicMasterWithPath returns a new public master key based on the given seed and version, with a derivation +// to the given path. +func NewPublicMasterWithPath(seed []byte, version [4]byte, pathString string) (*ExtendedKey, error) { + masterKey, err := NewMaster(seed, version) + if err != nil { + return nil, err + } + + path, err := parsePath(pathString) + if err != nil { + return nil, err + } + + descendantKey, err := masterKey.path(path) + if err != nil { + return nil, err + } + + return descendantKey.Public() +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/bip32_test.go b/cmd/spectrewallet/libspectrewallet/bip32/bip32_test.go new file mode 100644 index 0000000..7bd6736 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/bip32_test.go @@ -0,0 +1,420 @@ +package bip32 + +import ( + "encoding/hex" + "math/rand" + "strconv" + "strings" + "testing" +) + +func TestBIP32SpecVectors(t *testing.T) { + type testPath struct { + path string + extendedPublicKey string + extendedPrivateKey string + } + + type testVector struct { + seed string + version [4]byte + paths []testPath + } + + // test vectors are copied from https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Test_Vectors + testVectors := []testVector{ + { + seed: "000102030405060708090a0b0c0d0e0f", + version: BitcoinMainnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8", + extendedPrivateKey: "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi", + }, + { + path: "m/0'", + extendedPublicKey: 
"xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw", + extendedPrivateKey: "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7", + }, + { + path: "m/0'/1", + extendedPublicKey: "xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ", + extendedPrivateKey: "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs", + }, + { + path: "m/0'/1/2'", + extendedPublicKey: "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5", + extendedPrivateKey: "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM", + }, + { + path: "m/0'/1/2'/2", + extendedPublicKey: "xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV", + extendedPrivateKey: "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334", + }, + { + path: "m/0'/1/2'/2/1000000000", + extendedPublicKey: "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy", + extendedPrivateKey: "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76", + }, + }, + }, + { + seed: "000102030405060708090a0b0c0d0e0f", + version: SpectreMainnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "kpub2C2CKMtB3F5r4LEGRnS3o73omeQB3KJ5QfAzC5R3t9bpChBEZNitvn92JYeCTMtnR7oE1im7DhsxGqV72JErXFG9G3YnTHRnZPkGZLFE6PZ", + extendedPrivateKey: "kprv5y2qurMHCsXYqr9oKku3Ry75DcZgdraE3SFPPh1SKp4qKtr61qQeNypYTGztwUUiVauHWmjxaQXeUKHxj4QCuDG4ULpZHkvBoH9XX19ynXm", + }, + { + path: "m/0'", + extendedPublicKey: 
"kpub2EHcK5Be8WCqCwMydYJgg99v6TxXRPn66GbtAAoArLo6ZyUQycFz3vVS5pCuCfoKRL5nsxJXxLx3FETEyKyEb8isTgM3NbL15KsprxXRXYP", + extendedPrivateKey: "kprv61JFuZekJ8eXzTHWXWmgK1DBYS831w4Ej3gHMnPZJ1G7hB9GS4wjW8AxEYMEMBrgCdnyt54pxmNXC5KgNegPhHLaYDVhXid5WHnNxE7Nir6", + }, + { + path: "m/0'/1", + extendedPublicKey: "kpub2GTjWrjXXD5u3PQRMoCZGt3a9qwdRRWP2bGikSZynybJoWyYhQgJ1VPfVtfUccWfP3hqfNke4wSWqYC4Sf98GnYoktBtrELGi4Qc9xmGTUP", + extendedPrivateKey: "kprv63UP7MCdgqXbpuKxFmfYuk6qbp791xnXfNM7x4ANEe4KvieQ9sN3Th5BebYHx7dieiYfgtfG3UKwL1quVzUNUSq23zTRbUPwB66kV2rWPC8", + }, + { + path: "m/0'/1/2'", + extendedPublicKey: "kpub2K51ZPZPE5wJuZCWcPbvdt5iNzp9gy6NN8WPzms8xqxkDNAfWAWiuvwb3urK4UwyjZoaGkjFSt1VHsLM9kgfLEheLnA2wBPxRkKkFDqc9zP", + extendedPrivateKey: "kprv665f9t2VPiP1h583WN4vGk8ypxyfHWNWzuaoCPTXQWRmLZqWxdCUN8d7CdkuvM9DABa4HMcBTt9qZDaf61PZbYGgQc1ykQdsnMqy7fTCNrm", + }, + { + path: "m/0'/1/2'/2", + extendedPublicKey: "kpub2MJQPpgLQZcHz2gEJep1XPF2Tp6tKZQZocPhFjPcHHXMaTo2ZwD67WQWjEqhUH6iCsvkQmDCVcubrHgMF47s3qAuFZiDmNHnSSEbPpuRWiZ", + extendedPrivateKey: "kprv68K3zK9SaC3zmYbmCdH1AFJHunGPv6giSPU6TLyziwzNhfTt2PtqZi62sxANP1YeDyhkuGqkNhc12QV7HRvunvrior75JVTawLK8d8zN34Z", + }, + { + path: "m/0'/1/2'/2/1000000000", + extendedPublicKey: "kpub2P2AsWHaXgzVWNTgRCNjq6F2G3gC94DbbrnFW1mkVMurHbCR6MTkNcZaN4keKYBRgaDHv7912pcCSi5NLuchu6L2878JZqsRFPrWduDKq9i", + extendedPrivateKey: "kprv6A2pTzkghKSCHtPDKAqjTxJHi1qhjbVkEdrehdN8w2NsQnsGYp9VppF6WowaHvfiqP71gdphDk982aVUpVwdutWG9LsJRQDJDfsVNMbtSap", + }, + }, + }, + { + seed: "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542", + version: BitcoinMainnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB", + extendedPrivateKey: "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U", + }, + { + path: 
"m/0", + extendedPublicKey: "xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH", + extendedPrivateKey: "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt", + }, + { + path: "m/0/2147483647'", + extendedPublicKey: "xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a", + extendedPrivateKey: "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9", + }, + { + path: "m/0/2147483647'/1", + extendedPublicKey: "xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon", + extendedPrivateKey: "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef", + }, + { + path: "m/0/2147483647'/1/2147483646'", + extendedPublicKey: "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL", + extendedPrivateKey: "xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc", + }, + { + path: "m/0/2147483647'/1/2147483646'/2", + extendedPublicKey: "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt", + extendedPrivateKey: "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j", + }, + }, + }, + { + seed: "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542", + version: SpectreMainnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "kpub2C2CKMtB3F5r3wjbXwWLFJma1qYbiwYu6ExiV29srXigVgCRjXVqMgJceejBAcFkKg31vPRGcnPCzdDL9VA1fAG67ykFHmvSsmRNqKZg1po", + extendedPrivateKey: 
"kprv5y2qurMHCsXYqTf8RuyKtApqToi7KUq3j237gdkGJCBhcssHBzBaosz8oLmx7z2ojdeiG4CQrWZqZr24mUnuWaapvktoS6pvNXmkszbHsFE", + }, + { + path: "m/0", + extendedPublicKey: "kpub2FHwb5a8XFuvaDKtfitDK7B6NoHrRv3BeQi5eqBKvwaeBeeQJnquWWssE7h4xhGBXzXBncR21sEB9ne22drRzkvNQ2UvC84q1FY3GVzjZr1", + extendedPrivateKey: "kprv62JbBa3EgtMdMjFRZhMCwyEMpmTN2TKLHBnUrSmiNc3fJrKFmFXexiZPNr3km3se9HtYA4c9HfyxvMetKmHxSokDvwJrpazfVwgKFEAdr1L", + }, + { + path: "m/0/2147483647'", + extendedPublicKey: "kpub2GSzqgbeuA62k5Y56AsrnSremYWkyQCsjZncaE66agM2dwsrvgGDiafTqVwBiRsHKWSjSTGdK5empTWMoYLYiuNzw76yYrKqsdoe7KSjW9n", + extendedPrivateKey: "kprv63TeSB4m4nXjXbTbz9LrRJuvDWgGZwV2NLs1mqgV2Lp3m9YiP8wyAnLyzCXjVJc83XDRw5onLgV5MbPf48u627BnMfYCb6ivHj1r1gJwAAq", + }, + { + path: "m/0/2147483647'/1", + extendedPublicKey: "kpub2KFyFhab4oPDqhDD9q2RkPnt75PG5b8941HURHkRtZhUJmk2EBnvcV3qgJ8KWJZZuguHH6MrxCxbuFmNiSmVzEquXPJpmPm3oQUbMkjZU7h", + extendedPrivateKey: "kprv66GcrC3hERpvdD8k3oVRPFr9Z3Ymg8QHgnMscuLpLEAVRyQsgeUg4gjMpzjMX1opMUa8gNtAkEAHgJAp72RU2b15VS51SChJmXSaVHSHVgJ", + }, + { + path: "m/0/2147483647'/1/2147483646'", + extendedPublicKey: "kpub2LS1AfWwgCLw8eSotJqy7uV51ord8Zke5i1Mx1SqjKxim84xKriw91QwJxFphg61s8Yv5bRZzpHTYtvmQKt1hbYMoHdKKgrTfdZAtem6FS7", + extendedPrivateKey: "kprv67Sem9z3qpndvANLnHJxkmYLTn28j72niV5m9d3EAzRjtKjonKQgbD6TThTk9SC6u3rpzCfA8bjsVRGBcyKxiRgFKNcKaQiw77T6Z6V751r", + }, + { + path: "m/0/2147483647'/1/2147483646'/2", + extendedPublicKey: "kpub2Mo386jTCNfAsudhcNyf6es3QsPjNtfijsdFMnoLN7pJqKQXVVehKaMwPML6qFSiPBm9MWvytXJT3KzGERZv1rPwSTTQG49CLvkMZGaHgA1", + extendedPrivateKey: "kprv68ogibCZN16sfRZEWMSejWvJrqZEyRwsNeheZQPionHKxX5NwxLSmn3TY78kJTHAwMiZGxHyahaZXy9hMHhBmQQy8E7pdpreoUnedk17vmK", + }, + }, + }, + { + seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be", + version: BitcoinMainnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: 
"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13", + extendedPrivateKey: "xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6", + }, + { + path: "m/0'", + extendedPublicKey: "xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y", + extendedPrivateKey: "xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L", + }, + }, + }, + { + seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be", + version: SpectreMainnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "kpub2C2CKMtB3F5r31Bm4L18TJ2btUshUoiAoajGtKBS8DoUTvRhPfjoJwY98eG9zCqPVknskPJH1TD4RvrEzCCT5VvEFDeU2LNHfUw5MkWwVFF", + extendedPrivateKey: "kprv5y2qurMHCsXYpX7HxJU86A5sLT3D5LzKSMog5vmpZtGVb86Yr8RYm9DfHLW851G8pLKTpytWkwJYvVdNzuwLJ465T3TSdYAtfFS7Xx2owSo", + }, + { + path: "m/0'", + extendedPublicKey: "kpub2EPQ4KiJicTCEYHAHULdWYnGaqV5df85D4yDhYsH4XiqiwZ9yAWfPQKrSN6fiZS8h8HiXM41rQQZ4PnavS8dekCAvKbMaBs69fHz2AFgp7S", + extendedPrivateKey: "kprv61Q3epBQtEtu24ChBSod9QqY2oebECQDqr3cuATfWCBrr9E1RdCQqc1Nb6rUQxb4GUxsqvgPQfw1a3GXa8X63pHhtBNVNhShnGPmVHW2UAU", + }, + }, + }, + { + seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be", + version: SpectreTestnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "ktub1vi816jr1DomD4Qct6C1n3qZ98kvP4TCgq21QUVkJ1xTADT74WfStAQAv5NA5ACsLYRLiAEs2YMz91LoWXh6YY6bFd6BidfhFE7dxhSVK7H", + extendedPrivateKey: "ktrv5himbbCxArFTzaL9n4f1Qutpb6vRybjMKc6Qc668jgRUHR7xWyMCLN5h4mc89xdcf7wvnkq6n2TUda7wXFRym6GSTSuAKqUJEzcg8smnbs9", + }, + { + path: "m/0'", + extendedPublicKey: 
"ktub1y5Kk4ZygbB7QbW27EXWqJbDqVNJXus76KFxDiBbEKspREaZe1SJxdBtDoCfoWocXuvBV7zbsVZUmUH9SmdH7nNXvj35GVAVjQUYd41EFiN", + extendedPrivateKey: "ktrv5k5yLZ35rDcpC7RZ1CzWUAeVHTXp8T9Fj6LMRKmyfzLqYSFR6U84QpsQNXxUVuxY7GbLohcyRm5wH7m66U1jWrU4tapD4zk7N1aL6C3rnp7", + }, + }, + }, + { + seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be", + version: SpectreDevnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "kdub4zZ57oeuxodRE6ZjEtX6KnUpww7hSaHbRg745QUF8GSe3Y6EKZa6tkjuJvCw8cTYni4dWNgh3tr5HoER7gTzpEyQcM4VNJmxyu9H2k8C3SL", + extendedPrivateKey: "kdrv8mZiiJ828S581cVG8rz5xeY6PuHD37Zk4TBTH24dZvufAjm5n2FrLxRRTcSuDQtJ7HbDayGvoNwZnN1Z8QCt2o9FpAsTyWaZyfeKCvnUDsn", + }, + { + path: "m/0'", + extendedPublicKey: "kdub52vGrmV3eAzmRdf8U2rbP3EVeHj5bRhVqALzteA64aN1JZDgu4LxyDXcce3Sry4Hz5ZUHLSRtr3ZvGAm3vQBPVFMHT1NvAGmU5WBhDEnN5e", + extendedPrivateKey: "kdrv8ovvTFx9ooSUD9afN1Kb1uHm6FtbBxyeTwRQ6FkUWEq2RktYMX2iRRD8mNoFZNDDZSEdbv4oT7a2RuehhcndnZLtFJnWifrP6gbyAN52eWX", + }, + }, + }, + { + seed: "4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be", + version: SpectreSimnetPrivate, + paths: []testPath{ + { + path: "m", + extendedPublicKey: "ksub8Dp9tLuSVjDpAnKQ1sEyyZfrLiYhUQ7jd5TD92tgeX7mTQdyCwZYDNuhiN2WC23BwhGKgKvoJaWSPERp5gSuV4h2QEt1JwHyrSCjoEf5VHR", + extendedPrivateKey: "ksrv3h87jvsCMMY9HKHc8DaPaA1jmh2mK4vq3mrRrWrPEFKNcvPCaYjx2tHqYCMLkgu1qonn9ScvgKMgeUG5pkgVdEgB579ytyAQBLrnezdttFb", + }, + { + path: "m/0'", + extendedPublicKey: "ksub8GBMdJjaB6bANKQoF1aV2pRX35A5dFXe2Zh9xGaXaq38iRmRnSLQHqhR25s1vNdw94mATHgY9Xhw1hNA1vP64Jxy5LptrnnnLcZeTbX9Sng", + extendedPrivateKey: "ksrv3jVKUthL2iuVUrP1MMutdQmQU3e9TvLjTG6NfkYEAZEjswWfA3Wp7M5Yqxhh6eDwHxSCAPQoL3z9J1uEPyGFNzsoWF52e8SDJMpScP8xuRw", + }, + }, + }, + } + + for i, vector := range testVectors { + seed, err := hex.DecodeString(vector.seed) + if err != nil { + 
t.Fatalf("DecodeString: %+v", err) + } + + masterKey, err := NewMaster(seed, vector.version) + if err != nil { + t.Fatalf("NewMaster: %+v", err) + } + + for j, path := range vector.paths { + extendedPrivateKey, err := masterKey.DeriveFromPath(path.path) + if err != nil { + t.Fatalf("Path: %+v", err) + } + + if extendedPrivateKey.String() != path.extendedPrivateKey { + t.Fatalf("Test (%d, %d): expected extended private key %s but got %s", i, j, path.extendedPrivateKey, extendedPrivateKey.String()) + } + + decodedExtendedPrivateKey, err := DeserializeExtendedKey(extendedPrivateKey.String()) + if err != nil { + t.Fatalf("DeserializeExtendedKey: %+v", err) + } + + if extendedPrivateKey.String() != decodedExtendedPrivateKey.String() { + t.Fatalf("Test (%d, %d): deserializing and serializing the extended private key didn't preserve the data", i, j) + } + + extendedPublicKey, err := extendedPrivateKey.Public() + if err != nil { + t.Fatalf("Public: %+v", err) + } + + if extendedPublicKey.String() != path.extendedPublicKey { + t.Fatalf("Test (%d, %d): expected extended public key %s but got %s", i, j, path.extendedPublicKey, extendedPublicKey.String()) + } + + decodedExtendedPublicKey, err := DeserializeExtendedKey(extendedPublicKey.String()) + if err != nil { + t.Fatalf("DeserializeExtendedPublicKey: %+v", err) + } + + if extendedPublicKey.String() != decodedExtendedPublicKey.String() { + t.Fatalf("Test (%d, %d): deserializing and serializing the ext pub didn't preserve the data", i, j) + } + } + } +} + +// TestExtendedKey_DeriveFromPath checks that path that derive from extended private key and extended +// public key lead to the same public keys. 
+func TestExtendedKey_DeriveFromPath(t *testing.T) { + r := rand.New(rand.NewSource(0)) + seed, err := GenerateSeed() + if err != nil { + t.Fatalf("GenerateSeed: %+v", err) + } + + master, err := NewMaster(seed, SpectreMainnetPrivate) + if err != nil { + t.Fatalf("GenerateSeed: %+v", err) + } + + masterPublic, err := master.Public() + if err != nil { + t.Fatalf("Public: %+v", err) + } + + for i := 0; i < 10; i++ { + numIndexes := 1 + r.Intn(100) + indexes := make([]string, numIndexes) + for i := 0; i < numIndexes; i++ { + index := r.Intn(hardenedIndexStart) + indexes[i] = strconv.Itoa(int(index)) + } + + indexesStr := strings.Join(indexes, "/") + pathPrivate := "m/" + indexesStr + pathPublic := "M/" + indexesStr + + extendedPrivateKey, err := master.DeriveFromPath(pathPrivate) + if err != nil { + t.Fatalf("Path: %+v", err) + } + + extendedPublicKeyFromPrivateKey, err := extendedPrivateKey.Public() + if err != nil { + t.Fatalf("Public: %+v", err) + } + + extendedPublicKey, err := masterPublic.DeriveFromPath(pathPublic) + if err != nil { + t.Fatalf("Path: %+v", err) + } + + if extendedPublicKeyFromPrivateKey.String() != extendedPublicKey.String() { + t.Fatalf("Path gives different result from private and public master keys") + } + } +} + +// TestPublicParentPublicChildDerivation was copied and modified from https://github.com/tyler-smith/go-bip32/blob/master/bip32_test.go +func TestPublicParentPublicChildDerivation(t *testing.T) { + // Generated using https://iancoleman.github.io/bip39/ + // Root key: + // xprv9s21ZrQH143K2Cfj4mDZBcEecBmJmawReGwwoAou2zZzG45bM6cFPJSvobVTCB55L6Ld2y8RzC61CpvadeAnhws3CHsMFhNjozBKGNgucYm + // Derivation Path m/44'/60'/0'/0: + // xprv9zy5o7z1GMmYdaeQdmabWFhUf52Ytbpe3G5hduA4SghboqWe7aDGWseN8BJy1GU72wPjkCbBE1hvbXYqpCecAYdaivxjNnBoSNxwYD4wHpW + // xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u + extendedMasterPublic, err := 
DeserializeExtendedKey("xpub6DxSCdWu6jKqr4isjo7bsPeDD6s3J4YVQV1JSHZg12Eagdqnf7XX4fxqyW2sLhUoFWutL7tAELU2LiGZrEXtjVbvYptvTX5Eoa4Mamdjm9u") + if err != nil { + t.Fatalf("DeserializeExtendedPublicKey: %+v", err) + } + + type testChildKey struct { + pathFragment uint32 + privKey string + pubKey string + hexPubKey string + } + + expectedChildren := []testChildKey{ + {pathFragment: 0, hexPubKey: "0243187e1a2ba9ba824f5f81090650c8f4faa82b7baf93060d10b81f4b705afd46"}, + {pathFragment: 1, hexPubKey: "023790d11eb715c4320d8e31fba3a09b700051dc2cdbcce03f44b11c274d1e220b"}, + {pathFragment: 2, hexPubKey: "0302c5749c3c75cea234878ae3f4d8f65b75d584bcd7ed0943b016d6f6b59a2bad"}, + {pathFragment: 3, hexPubKey: "03f0440c94e5b14ea5b15875934597afff541bec287c6e65dc1102cafc07f69699"}, + {pathFragment: 4, hexPubKey: "026419d0d8996707605508ac44c5871edc7fe206a79ef615b74f2eea09c5852e2b"}, + {pathFragment: 5, hexPubKey: "02f63c6f195eea98bdb163c4a094260dea71d264b21234bed4df3899236e6c2298"}, + {pathFragment: 6, hexPubKey: "02d74709cd522081064858f393d009ead5a0ecd43ede3a1f57befcc942025cb5f9"}, + {pathFragment: 7, hexPubKey: "03e54bb92630c943d38bbd8a4a2e65fca7605e672d30a0e545a7198cbb60729ceb"}, + {pathFragment: 8, hexPubKey: "027e9d5acd14d39c4938697fba388cd2e8f31fc1c5dc02fafb93a10a280de85199"}, + {pathFragment: 9, hexPubKey: "02a167a9f0d57468fb6abf2f3f7967e2cadf574314753a06a9ef29bc76c54638d2"}, + + {pathFragment: 100, hexPubKey: "020db9ba00ddf68428e3f5bfe54252bbcd75b21e42f51bf3bfc4172bf0e5fa7905"}, + {pathFragment: 101, hexPubKey: "0299e3790956570737d6164e6fcda5a3daa304065ca95ba46bc73d436b84f34d46"}, + {pathFragment: 102, hexPubKey: "0202e0732c4c5d2b1036af173640e01957998cfd4f9cdaefab6ffe76eb869e2c59"}, + {pathFragment: 103, hexPubKey: "03d050adbd996c0c5d737ff638402dfbb8c08e451fef10e6d62fb57887c1ac6cb2"}, + {pathFragment: 104, hexPubKey: "038d466399e2d68b4b16043ad4d88893b3b2f84fc443368729a973df1e66f4f530"}, + {pathFragment: 105, hexPubKey: 
"034811e2f0c8c50440c08c2c9799b99c911c036e877e8325386ff61723ae3ffdce"}, + {pathFragment: 106, hexPubKey: "026339fd5842921888e711a6ba9104a5f0c94cc0569855273cf5faefdfbcd3cc29"}, + {pathFragment: 107, hexPubKey: "02833705c1069fab2aa92c6b0dac27807290d72e9f52378d493ac44849ca003b22"}, + {pathFragment: 108, hexPubKey: "032d2639bde1eb7bdf8444bd4f6cc26a9d1bdecd8ea15fac3b992c3da68d9d1df5"}, + {pathFragment: 109, hexPubKey: "02479c6d4a64b93a2f4343aa862c938fbc658c99219dd7bebb4830307cbd76c9e9"}, + } + + for i, child := range expectedChildren { + extendedPublicKey, err := extendedMasterPublic.Child(child.pathFragment) + if err != nil { + t.Fatalf("Child: %+v", err) + } + + publicKey, err := extendedPublicKey.PublicKey() + if err != nil { + t.Fatalf("PublicKey: %+v", err) + } + + pubKeyBytes, err := publicKey.Serialize() + if err != nil { + t.Fatalf("Serialize: %+v", err) + } + + pubKeyHex := hex.EncodeToString(pubKeyBytes[:]) + if child.hexPubKey != pubKeyHex { + t.Fatalf("Test #%d: expected public key %s but got %s", i, child.hexPubKey, pubKeyHex) + } + } +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/child_key_derivation.go b/cmd/spectrewallet/libspectrewallet/bip32/child_key_derivation.go new file mode 100644 index 0000000..abae695 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/child_key_derivation.go @@ -0,0 +1,153 @@ +package bip32 + +import ( + "encoding/binary" + + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" +) + +const hardenedIndexStart = 0x80000000 + +// NewMaster returns a new extended private key based on the given seed and version +func NewMaster(seed []byte, version [4]byte) (*ExtendedKey, error) { + mac := newHMACWriter([]byte("Bitcoin seed")) + mac.InfallibleWrite(seed) + I := mac.Sum(nil) + + var iL, iR [32]byte + copy(iL[:], I[:32]) + copy(iR[:], I[32:]) + + privateKey, err := secp256k1.DeserializeECDSAPrivateKeyFromSlice(iL[:]) + if err != nil { + return nil, err + } + + return &ExtendedKey{ + privateKey: 
privateKey, + Version: version, + Depth: 0, + ParentFingerprint: [4]byte{}, + ChildNumber: 0, + ChainCode: iR, + }, nil +} + +func isHardened(i uint32) bool { + return i >= hardenedIndexStart +} + +// Child return the i'th derived child of extKey. +func (extKey *ExtendedKey) Child(i uint32) (*ExtendedKey, error) { + I, err := extKey.calcI(i) + if err != nil { + return nil, err + } + + var iL, iR [32]byte + copy(iL[:], I[:32]) + copy(iR[:], I[32:]) + + fingerPrint, err := extKey.calcFingerprint() + if err != nil { + return nil, err + } + + childExt := &ExtendedKey{ + Version: extKey.Version, + Depth: extKey.Depth + 1, + ParentFingerprint: fingerPrint, + ChildNumber: i, + ChainCode: iR, + } + + if extKey.privateKey != nil { + childExt.privateKey, err = privateKeyAdd(extKey.privateKey, iL) + if err != nil { + return nil, err + } + } else { + publicKey, err := extKey.PublicKey() + if err != nil { + return nil, err + } + + childExt.publicKey, err = pointAdd(publicKey, iL) + if err != nil { + return nil, err + } + } + + return childExt, nil +} + +func (extKey *ExtendedKey) calcFingerprint() ([4]byte, error) { + publicKey, err := extKey.PublicKey() + if err != nil { + return [4]byte{}, err + } + + serializedPoint, err := publicKey.Serialize() + if err != nil { + return [4]byte{}, err + } + + hash := hash160(serializedPoint[:]) + var fingerprint [4]byte + copy(fingerprint[:], hash[:4]) + return fingerprint, nil +} + +func privateKeyAdd(k *secp256k1.ECDSAPrivateKey, tweak [32]byte) (*secp256k1.ECDSAPrivateKey, error) { + kCopy := *k + err := kCopy.Add(tweak) + if err != nil { + return nil, err + } + + return &kCopy, nil +} + +func (extKey *ExtendedKey) calcI(i uint32) ([]byte, error) { + if isHardened(i) && !extKey.IsPrivate() { + return nil, errors.Errorf("Cannot calculate hardened child for public key") + } + + mac := newHMACWriter(extKey.ChainCode[:]) + if isHardened(i) { + mac.InfallibleWrite([]byte{0x00}) + mac.InfallibleWrite(extKey.privateKey.Serialize()[:]) + } else 
{ + publicKey, err := extKey.PublicKey() + if err != nil { + return nil, err + } + + serializedPublicKey, err := publicKey.Serialize() + if err != nil { + return nil, errors.Wrap(err, "error serializing public key") + } + + mac.InfallibleWrite(serializedPublicKey[:]) + } + + mac.InfallibleWrite(serializeUint32(i)) + return mac.Sum(nil), nil +} + +func serializeUint32(v uint32) []byte { + serialized := make([]byte, 4) + binary.BigEndian.PutUint32(serialized, v) + return serialized +} + +func pointAdd(point *secp256k1.ECDSAPublicKey, tweak [32]byte) (*secp256k1.ECDSAPublicKey, error) { + pointCopy := *point + err := pointCopy.Add(tweak) + if err != nil { + return nil, err + } + + return &pointCopy, nil +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/extended_key.go b/cmd/spectrewallet/libspectrewallet/bip32/extended_key.go new file mode 100644 index 0000000..d35aee9 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/extended_key.go @@ -0,0 +1,104 @@ +package bip32 + +import ( + "github.com/btcsuite/btcutil/base58" + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" +) + +// ExtendedKey is a bip32 extended key +type ExtendedKey struct { + privateKey *secp256k1.ECDSAPrivateKey + publicKey *secp256k1.ECDSAPublicKey + Version [4]byte + Depth uint8 + ParentFingerprint [4]byte + ChildNumber uint32 + ChainCode [32]byte +} + +// PrivateKey returns the ECDSA private key associated with the extended key +func (extKey *ExtendedKey) PrivateKey() *secp256k1.ECDSAPrivateKey { + return extKey.privateKey +} + +// PublicKey returns the ECDSA public key associated with the extended key +func (extKey *ExtendedKey) PublicKey() (*secp256k1.ECDSAPublicKey, error) { + if extKey.publicKey != nil { + return extKey.publicKey, nil + } + + publicKey, err := extKey.privateKey.ECDSAPublicKey() + if err != nil { + return nil, err + } + + extKey.publicKey = publicKey + return publicKey, nil +} + +// IsPrivate returns whether the extended key is private +func 
(extKey *ExtendedKey) IsPrivate() bool { + return extKey.privateKey != nil +} + +// Public returns public version of the extended key +func (extKey *ExtendedKey) Public() (*ExtendedKey, error) { + if !extKey.IsPrivate() { + return extKey, nil + } + + publicKey, err := extKey.PublicKey() + if err != nil { + return nil, errors.Wrap(err, "error calculating publicKey") + } + + version, err := toPublicVersion(extKey.Version) + if err != nil { + return nil, err + } + + return &ExtendedKey{ + publicKey: publicKey, + Version: version, + Depth: extKey.Depth, + ParentFingerprint: extKey.ParentFingerprint, + ChildNumber: extKey.ChildNumber, + ChainCode: extKey.ChainCode, + }, nil +} + +// DeriveFromPath returns the extended key derived from the given path +func (extKey *ExtendedKey) DeriveFromPath(pathString string) (*ExtendedKey, error) { + path, err := parsePath(pathString) + if err != nil { + return nil, err + } + + return extKey.path(path) +} + +func (extKey *ExtendedKey) path(path *path) (*ExtendedKey, error) { + descendantExtKey := extKey + for _, index := range path.indexes { + var err error + descendantExtKey, err = descendantExtKey.Child(index) + if err != nil { + return nil, err + } + } + + if path.isPublic { + return descendantExtKey.Public() + } + + return descendantExtKey, nil +} + +func (extKey *ExtendedKey) String() string { + serialized, err := extKey.serialize() + if err != nil { + panic(errors.Wrap(err, "error serializing key")) + } + return base58.Encode(serialized) +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/hash.go b/cmd/spectrewallet/libspectrewallet/bip32/hash.go new file mode 100644 index 0000000..e6f0f75 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/hash.go @@ -0,0 +1,58 @@ +package bip32 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "github.com/pkg/errors" + "golang.org/x/crypto/ripemd160" + "hash" +) + +func newHMACWriter(key []byte) hmacWriter { + return hmacWriter{ + Hash: 
hmac.New(sha512.New, key), + } +} + +type hmacWriter struct { + hash.Hash +} + +func (hw hmacWriter) InfallibleWrite(p []byte) { + _, err := hw.Write(p) + if err != nil { + panic(errors.Wrap(err, "writing to hmac should never fail")) + } +} + +func calcChecksum(data []byte) []byte { + return doubleSha256(data)[:checkSumLen] +} + +func doubleSha256(data []byte) []byte { + inner := sha256.Sum256(data) + outer := sha256.Sum256(inner[:]) + return outer[:] +} + +// validateChecksum validates that the last checkSumLen bytes of the +// given data are its valid checksum. +func validateChecksum(data []byte) error { + checksum := data[len(data)-checkSumLen:] + expectedChecksum := calcChecksum(data[:len(data)-checkSumLen]) + if !bytes.Equal(expectedChecksum, checksum) { + return errors.Errorf("expected checksum %x but got %x", expectedChecksum, checksum) + } + + return nil +} + +func hash160(data []byte) []byte { + sha := sha256.New() + ripe := ripemd160.New() + sha.Write(data) + ripe.Write(sha.Sum(nil)) + return ripe.Sum(nil) +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/path.go b/cmd/spectrewallet/libspectrewallet/bip32/path.go new file mode 100644 index 0000000..de6ccaa --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/path.go @@ -0,0 +1,60 @@ +package bip32 + +import ( + "github.com/pkg/errors" + "strconv" + "strings" +) + +type path struct { + isPublic bool + indexes []uint32 +} + +func parsePath(pathString string) (*path, error) { + parts := strings.Split(pathString, "/") + isPublic := false + switch parts[0] { + case "m": + isPublic = false + case "M": + isPublic = true + default: + return nil, errors.Errorf("%s is an invalid extended key type", parts[0]) + } + + indexParts := parts[1:] + indexes := make([]uint32, len(indexParts)) + for i, part := range indexParts { + var err error + indexes[i], err = parseIndex(part) + if err != nil { + return nil, err + } + } + + return &path{ + isPublic: isPublic, + indexes: indexes, + }, nil +} + +func 
parseIndex(indexString string) (uint32, error) { + const isHardenedSuffix = "'" + isHardened := strings.HasSuffix(indexString, isHardenedSuffix) + trimmedIndexString := strings.TrimSuffix(indexString, isHardenedSuffix) + index, err := strconv.Atoi(trimmedIndexString) + if err != nil { + return 0, err + } + + if index >= hardenedIndexStart { + return 0, errors.Errorf("max index value is %d but got %d", hardenedIndexStart, index) + } + + if isHardened { + return uint32(index) + hardenedIndexStart, nil + } + + return uint32(index), nil +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/serialization.go b/cmd/spectrewallet/libspectrewallet/bip32/serialization.go new file mode 100644 index 0000000..037a1ab --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/serialization.go @@ -0,0 +1,149 @@ +package bip32 + +import ( + "encoding/binary" + + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/bip32/base58" +) + +const ( + versionSerializationLen = 4 + depthSerializationLen = 1 + fingerprintSerializationLen = 4 + childNumberSerializationLen = 4 + chainCodeSerializationLen = 32 + keySerializationLen = 33 + checkSumLen = 4 +) + +const extendedKeySerializationLen = versionSerializationLen + + depthSerializationLen + + fingerprintSerializationLen + + childNumberSerializationLen + + chainCodeSerializationLen + + keySerializationLen + + checkSumLen + +// DeserializeExtendedKey deserialized the given base58 string and returns an extended key +func DeserializeExtendedKey(extKeyString string) (*ExtendedKey, error) { + serializedBytes := base58.Decode(extKeyString) + return deserializeExtendedPrivateKey(serializedBytes) +} + +func deserializeExtendedPrivateKey(serialized []byte) (*ExtendedKey, error) { + if len(serialized) != extendedKeySerializationLen { + return nil, errors.Errorf("key length must be %d bytes but got %d", extendedKeySerializationLen, len(serialized)) + } + + 
err := validateChecksum(serialized) + if err != nil { + return nil, err + } + + extKey := &ExtendedKey{} + + copy(extKey.Version[:], serialized[:versionSerializationLen]) + extKey.Depth = serialized[versionSerializationLen] + copy(extKey.ParentFingerprint[:], serialized[versionSerializationLen+depthSerializationLen:]) + extKey.ChildNumber = binary.BigEndian.Uint32( + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen:], + ) + copy( + extKey.ChainCode[:], + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen+childNumberSerializationLen:], + ) + + isPrivate := isPrivateVersion(extKey.Version) + if isPrivate { + privateKeyPadding := serialized[versionSerializationLen+ + depthSerializationLen+ + fingerprintSerializationLen+ + childNumberSerializationLen+ + chainCodeSerializationLen] + if privateKeyPadding != 0 { + return nil, errors.Errorf("expected 0 padding for private key but got %d", privateKeyPadding) + } + + extKey.privateKey, err = secp256k1.DeserializeECDSAPrivateKeyFromSlice(serialized[versionSerializationLen+ + depthSerializationLen+ + fingerprintSerializationLen+ + childNumberSerializationLen+ + chainCodeSerializationLen+1 : versionSerializationLen+ + depthSerializationLen+ + fingerprintSerializationLen+ + childNumberSerializationLen+ + chainCodeSerializationLen+ + keySerializationLen]) + if err != nil { + return nil, err + } + } else { + extKey.publicKey, err = secp256k1.DeserializeECDSAPubKey(serialized[versionSerializationLen+ + depthSerializationLen+ + fingerprintSerializationLen+ + childNumberSerializationLen+ + chainCodeSerializationLen : versionSerializationLen+ + depthSerializationLen+ + fingerprintSerializationLen+ + childNumberSerializationLen+ + chainCodeSerializationLen+ + keySerializationLen]) + if err != nil { + return nil, err + } + } + + return extKey, nil +} + +func (extKey *ExtendedKey) serialize() ([]byte, error) { + var serialized [extendedKeySerializationLen]byte + 
copy(serialized[:], extKey.Version[:]) + serialized[versionSerializationLen] = extKey.Depth + copy(serialized[versionSerializationLen+depthSerializationLen:], extKey.ParentFingerprint[:]) + binary.BigEndian.PutUint32( + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen:], + extKey.ChildNumber, + ) + copy( + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen+childNumberSerializationLen:], + extKey.ChainCode[:], + ) + + if extKey.IsPrivate() { + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen+childNumberSerializationLen+chainCodeSerializationLen] = 0 + copy( + serialized[versionSerializationLen+ + depthSerializationLen+ + fingerprintSerializationLen+ + childNumberSerializationLen+ + chainCodeSerializationLen+ + 1:], + extKey.privateKey.Serialize()[:], + ) + } else { + publicKey, err := extKey.PublicKey() + if err != nil { + return nil, err + } + + serializedPublicKey, err := publicKey.Serialize() + if err != nil { + return nil, err + } + + copy( + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen+childNumberSerializationLen+chainCodeSerializationLen:], + serializedPublicKey[:], + ) + } + + checkSum := doubleSha256(serialized[:len(serialized)-checkSumLen]) + copy( + serialized[versionSerializationLen+depthSerializationLen+fingerprintSerializationLen+childNumberSerializationLen+chainCodeSerializationLen+keySerializationLen:], + checkSum, + ) + return serialized[:], nil +} diff --git a/cmd/spectrewallet/libspectrewallet/bip32/version.go b/cmd/spectrewallet/libspectrewallet/bip32/version.go new file mode 100644 index 0000000..f6a1840 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip32/version.go @@ -0,0 +1,137 @@ +package bip32 + +import "github.com/pkg/errors" + +// BitcoinMainnetPrivate is the version that is used for +// bitcoin mainnet bip32 private extended keys. +// Ecnodes to xprv in base58. 
+var BitcoinMainnetPrivate = [4]byte{
+	0x04,
+	0x88,
+	0xad,
+	0xe4,
+}
+
+// BitcoinMainnetPublic is the version that is used for
+// bitcoin mainnet bip32 public extended keys.
+// Encodes to xpub in base58.
+var BitcoinMainnetPublic = [4]byte{
+	0x04,
+	0x88,
+	0xb2,
+	0x1e,
+}
+
+// SpectreMainnetPrivate is the version that is used for
+// spectre mainnet bip32 private extended keys.
+// Encodes to xprv in base58.
+var SpectreMainnetPrivate = [4]byte{
+	0x03,
+	0x8f,
+	0x2e,
+	0xf4,
+}
+
+// SpectreMainnetPublic is the version that is used for
+// spectre mainnet bip32 public extended keys.
+// Encodes to kpub in base58.
+var SpectreMainnetPublic = [4]byte{
+	0x03,
+	0x8f,
+	0x33,
+	0x2e,
+}
+
+// SpectreTestnetPrivate is the version that is used for
+// spectre testnet bip32 private extended keys.
+// Encodes to ktrv in base58.
+var SpectreTestnetPrivate = [4]byte{
+	0x03,
+	0x90,
+	0x9e,
+	0x07,
+}
+
+// SpectreTestnetPublic is the version that is used for
+// spectre testnet bip32 public extended keys.
+// Encodes to ktub in base58.
+var SpectreTestnetPublic = [4]byte{
+	0x03,
+	0x90,
+	0xa2,
+	0x41,
+}
+
+// SpectreDevnetPrivate is the version that is used for
+// spectre devnet bip32 private extended keys.
+// Encodes to kdrv in base58.
+var SpectreDevnetPrivate = [4]byte{
+	0x03,
+	0x8b,
+	0x3d,
+	0x80,
+}
+
+// SpectreDevnetPublic is the version that is used for
+// spectre devnet bip32 public extended keys.
+// Encodes to xdub in base58.
+var SpectreDevnetPublic = [4]byte{
+	0x03,
+	0x8b,
+	0x41,
+	0xba,
+}
+
+// SpectreSimnetPrivate is the version that is used for
+// spectre simnet bip32 private extended keys.
+// Encodes to ksrv in base58.
+var SpectreSimnetPrivate = [4]byte{
+	0x03,
+	0x90,
+	0x42,
+	0x42,
+}
+
+// SpectreSimnetPublic is the version that is used for
+// spectre simnet bip32 public extended keys.
+// Encodes to xsub in base58. 
+var SpectreSimnetPublic = [4]byte{ + 0x03, + 0x90, + 0x46, + 0x7d, +} + +func toPublicVersion(version [4]byte) ([4]byte, error) { + switch version { + case BitcoinMainnetPrivate: + return BitcoinMainnetPublic, nil + case SpectreMainnetPrivate: + return SpectreMainnetPublic, nil + case SpectreTestnetPrivate: + return SpectreTestnetPublic, nil + case SpectreDevnetPrivate: + return SpectreDevnetPublic, nil + case SpectreSimnetPrivate: + return SpectreSimnetPublic, nil + } + + return [4]byte{}, errors.Errorf("unknown version %x", version) +} + +func isPrivateVersion(version [4]byte) bool { + switch version { + case BitcoinMainnetPrivate: + return true + case SpectreMainnetPrivate: + return true + case SpectreTestnetPrivate: + return true + case SpectreDevnetPrivate: + return true + case SpectreSimnetPrivate: + return true + } + + return false +} diff --git a/cmd/spectrewallet/libspectrewallet/bip39.go b/cmd/spectrewallet/libspectrewallet/bip39.go new file mode 100644 index 0000000..6cc7ebc --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/bip39.go @@ -0,0 +1,82 @@ +package libspectrewallet + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/bip32" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/tyler-smith/go-bip39" +) + +// CreateMnemonic creates a new bip-39 compatible mnemonic +func CreateMnemonic() (string, error) { + const bip39BitSize = 256 + entropy, _ := bip39.NewEntropy(bip39BitSize) + return bip39.NewMnemonic(entropy) +} + +// Purpose and CoinType constants +const ( + SingleSignerPurpose = 44 + // Note: this is not entirely compatible to BIP 45 since + // BIP 45 doesn't have a coin type in its derivation path. 
+ MultiSigPurpose = 45 + // TODO: Register the coin type in https://github.com/satoshilabs/slips/blob/master/slip-0044.md + CoinType = 123456 +) + +func defaultPath(isMultisig bool) string { + purpose := SingleSignerPurpose + if isMultisig { + purpose = MultiSigPurpose + } + + return fmt.Sprintf("m/%d'/%d'/0'", purpose, CoinType) +} + +// MasterPublicKeyFromMnemonic returns the master public key with the correct derivation for the given mnemonic. +func MasterPublicKeyFromMnemonic(params *dagconfig.Params, mnemonic string, isMultisig bool) (string, error) { + path := defaultPath(isMultisig) + extendedKey, err := extendedKeyFromMnemonicAndPath(mnemonic, path, params) + if err != nil { + return "", err + } + + extendedPublicKey, err := extendedKey.Public() + if err != nil { + return "", err + } + + return extendedPublicKey.String(), nil +} + +func extendedKeyFromMnemonicAndPath(mnemonic string, path string, params *dagconfig.Params) (*bip32.ExtendedKey, error) { + seed := bip39.NewSeed(mnemonic, "") + version, err := versionFromParams(params) + if err != nil { + return nil, err + } + + master, err := bip32.NewMasterWithPath(seed, version, path) + if err != nil { + return nil, err + } + + return master, nil +} + +func versionFromParams(params *dagconfig.Params) ([4]byte, error) { + switch params.Name { + case dagconfig.MainnetParams.Name: + return bip32.SpectreMainnetPrivate, nil + case dagconfig.TestnetParams.Name: + return bip32.SpectreTestnetPrivate, nil + case dagconfig.DevnetParams.Name: + return bip32.SpectreDevnetPrivate, nil + case dagconfig.SimnetParams.Name: + return bip32.SpectreSimnetPrivate, nil + } + + return [4]byte{}, errors.Errorf("unknown network %s", params.Name) +} diff --git a/cmd/spectrewallet/libspectrewallet/converters.go b/cmd/spectrewallet/libspectrewallet/converters.go new file mode 100644 index 0000000..4a6594c --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/converters.go @@ -0,0 +1,61 @@ +package libspectrewallet + +import ( + 
"encoding/hex" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +// SpectrewalletdUTXOsTolibspectrewalletUTXOs converts a []*pb.UtxosByAddressesEntry to a []*libspectrewallet.UTXO +func SpectrewalletdUTXOsTolibspectrewalletUTXOs(spectrewalletdUtxoEntires []*pb.UtxosByAddressesEntry) ([]*UTXO, error) { + UTXOs := make([]*UTXO, len(spectrewalletdUtxoEntires)) + for i, entry := range spectrewalletdUtxoEntires { + script, err := hex.DecodeString(entry.UtxoEntry.ScriptPublicKey.ScriptPublicKey) + if err != nil { + return nil, err + } + transactionID, err := transactionid.FromString(entry.Outpoint.TransactionId) + if err != nil { + return nil, err + } + UTXOs[i] = &UTXO{ + UTXOEntry: utxo.NewUTXOEntry( + entry.UtxoEntry.Amount, + &externalapi.ScriptPublicKey{ + Script: script, + Version: uint16(entry.UtxoEntry.ScriptPublicKey.Version), + }, + entry.UtxoEntry.IsCoinbase, + entry.UtxoEntry.BlockDaaScore, + ), + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *transactionID, + Index: entry.Outpoint.Index, + }, + } + } + return UTXOs, nil +} + +// AppMessageUTXOToSpectrewalletdUTXO converts an appmessage.UTXOsByAddressesEntry to a pb.UtxosByAddressesEntry +func AppMessageUTXOToSpectrewalletdUTXO(appUTXOsByAddressesEntry *appmessage.UTXOsByAddressesEntry) *pb.UtxosByAddressesEntry { + return &pb.UtxosByAddressesEntry{ + Outpoint: &pb.Outpoint{ + TransactionId: appUTXOsByAddressesEntry.Outpoint.TransactionID, + Index: appUTXOsByAddressesEntry.Outpoint.Index, + }, + UtxoEntry: &pb.UtxoEntry{ + Amount: appUTXOsByAddressesEntry.UTXOEntry.Amount, + ScriptPublicKey: &pb.ScriptPublicKey{ + Version: uint32(appUTXOsByAddressesEntry.UTXOEntry.ScriptPublicKey.Version), + 
ScriptPublicKey: appUTXOsByAddressesEntry.UTXOEntry.ScriptPublicKey.Script, + }, + BlockDaaScore: appUTXOsByAddressesEntry.UTXOEntry.BlockDAAScore, + IsCoinbase: appUTXOsByAddressesEntry.UTXOEntry.IsCoinbase, + }, + } +} diff --git a/cmd/spectrewallet/libspectrewallet/keychains.go b/cmd/spectrewallet/libspectrewallet/keychains.go new file mode 100644 index 0000000..874f260 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/keychains.go @@ -0,0 +1,8 @@ +package libspectrewallet + +const ( + // ExternalKeychain is the key chain that is used to create receive addresses + ExternalKeychain = 0 + // InternalKeychain is used to create change addresses + InternalKeychain = 1 +) diff --git a/cmd/spectrewallet/libspectrewallet/keypair.go b/cmd/spectrewallet/libspectrewallet/keypair.go new file mode 100644 index 0000000..bb1767e --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/keypair.go @@ -0,0 +1,169 @@ +package libspectrewallet + +import ( + "math" + "sort" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/bip32" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" +) + +// CreateKeyPair generates a private-public key pair +func CreateKeyPair(ecdsa bool) ([]byte, []byte, error) { + if ecdsa { + return createKeyPairECDSA() + } + + return createKeyPair() +} + +func createKeyPair() ([]byte, []byte, error) { + keyPair, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to generate private key") + } + publicKey, err := keyPair.SchnorrPublicKey() + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to generate public key") + } + publicKeySerialized, err := publicKey.Serialize() + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to serialize public key") + } + + return keyPair.SerializePrivateKey()[:], publicKeySerialized[:], nil +} + +func 
createKeyPairECDSA() ([]byte, []byte, error) { + keyPair, err := secp256k1.GenerateECDSAPrivateKey() + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to generate private key") + } + publicKey, err := keyPair.ECDSAPublicKey() + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to generate public key") + } + publicKeySerialized, err := publicKey.Serialize() + if err != nil { + return nil, nil, errors.Wrap(err, "Failed to serialize public key") + } + + return keyPair.Serialize()[:], publicKeySerialized[:], nil +} + +// PublicKeyFromPrivateKey returns the public key associated with a private key +func PublicKeyFromPrivateKey(privateKeyBytes []byte) ([]byte, error) { + keyPair, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes) + if err != nil { + return nil, errors.Wrap(err, "Failed to deserialize private key") + } + + publicKey, err := keyPair.SchnorrPublicKey() + if err != nil { + return nil, errors.Wrap(err, "Failed to generate public key") + } + + publicKeySerialized, err := publicKey.Serialize() + if err != nil { + return nil, errors.Wrap(err, "Failed to serialize public key") + } + + return publicKeySerialized[:], nil +} + +// Address returns the address associated with the given public keys and minimum signatures parameters. 
+func Address(params *dagconfig.Params, extendedPublicKeys []string, minimumSignatures uint32, path string, ecdsa bool) (util.Address, error) { + sortPublicKeys(extendedPublicKeys) + if uint32(len(extendedPublicKeys)) < minimumSignatures { + return nil, errors.Errorf("The minimum amount of signatures (%d) is greater than the amount of "+ + "provided public keys (%d)", minimumSignatures, len(extendedPublicKeys)) + } + + if len(extendedPublicKeys) == 1 { + return p2pkAddress(params, extendedPublicKeys[0], path, ecdsa) + } + + redeemScript, err := multiSigRedeemScript(extendedPublicKeys, minimumSignatures, path, ecdsa) + if err != nil { + return nil, err + } + + return util.NewAddressScriptHash(redeemScript, params.Prefix) +} + +func p2pkAddress(params *dagconfig.Params, extendedPublicKey string, path string, ecdsa bool) (util.Address, error) { + extendedKey, err := bip32.DeserializeExtendedKey(extendedPublicKey) + if err != nil { + return nil, err + } + + derivedKey, err := extendedKey.DeriveFromPath(path) + if err != nil { + return nil, err + } + + publicKey, err := derivedKey.PublicKey() + if err != nil { + return nil, err + } + + if ecdsa { + serializedECDSAPublicKey, err := publicKey.Serialize() + if err != nil { + return nil, err + } + return util.NewAddressPublicKeyECDSA(serializedECDSAPublicKey[:], params.Prefix) + } + + schnorrPublicKey, err := publicKey.ToSchnorr() + if err != nil { + return nil, err + } + + serializedSchnorrPublicKey, err := schnorrPublicKey.Serialize() + if err != nil { + return nil, err + } + + return util.NewAddressPublicKey(serializedSchnorrPublicKey[:], params.Prefix) +} + +func sortPublicKeys(extendedPublicKeys []string) { + sort.Slice(extendedPublicKeys, func(i, j int) bool { + return strings.Compare(extendedPublicKeys[i], extendedPublicKeys[j]) < 0 + }) +} + +func cosignerIndex(extendedPublicKey string, sortedExtendedPublicKeys []string) (uint32, error) { + cosignerIndex := sort.SearchStrings(sortedExtendedPublicKeys, 
extendedPublicKey) + if cosignerIndex == len(sortedExtendedPublicKeys) { + return 0, errors.Errorf("couldn't find extended public key %s", extendedPublicKey) + } + + return uint32(cosignerIndex), nil +} + +// MinimumCosignerIndex returns the minimum index for the cosigner from the set of all extended public keys. +func MinimumCosignerIndex(cosignerExtendedPublicKeys, allExtendedPublicKeys []string) (uint32, error) { + allExtendedPublicKeysCopy := make([]string, len(allExtendedPublicKeys)) + copy(allExtendedPublicKeysCopy, allExtendedPublicKeys) + sortPublicKeys(allExtendedPublicKeysCopy) + + min := uint32(math.MaxUint32) + for _, extendedPublicKey := range cosignerExtendedPublicKeys { + cosignerIndex, err := cosignerIndex(extendedPublicKey, allExtendedPublicKeysCopy) + if err != nil { + return 0, err + } + + if cosignerIndex < min { + min = cosignerIndex + } + } + + return min, nil +} diff --git a/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/generate.go b/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/generate.go new file mode 100644 index 0000000..b54db9f --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/generate.go @@ -0,0 +1,3 @@ +//go:generate protoc --go_out=. --go-grpc_out=. --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative wallet.proto + +package protoserialization diff --git a/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/wallet.pb.go b/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/wallet.pb.go new file mode 100644 index 0000000..5f5df6d --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/wallet.pb.go @@ -0,0 +1,932 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: wallet.proto + +package protoserialization + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PartiallySignedTransaction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tx *TransactionMessage `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + PartiallySignedInputs []*PartiallySignedInput `protobuf:"bytes,2,rep,name=partiallySignedInputs,proto3" json:"partiallySignedInputs,omitempty"` +} + +func (x *PartiallySignedTransaction) Reset() { + *x = PartiallySignedTransaction{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PartiallySignedTransaction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PartiallySignedTransaction) ProtoMessage() {} + +func (x *PartiallySignedTransaction) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PartiallySignedTransaction.ProtoReflect.Descriptor instead. 
+func (*PartiallySignedTransaction) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{0} +} + +func (x *PartiallySignedTransaction) GetTx() *TransactionMessage { + if x != nil { + return x.Tx + } + return nil +} + +func (x *PartiallySignedTransaction) GetPartiallySignedInputs() []*PartiallySignedInput { + if x != nil { + return x.PartiallySignedInputs + } + return nil +} + +type PartiallySignedInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RedeemScript []byte `protobuf:"bytes,1,opt,name=redeemScript,proto3" json:"redeemScript,omitempty"` + PrevOutput *TransactionOutput `protobuf:"bytes,2,opt,name=prevOutput,proto3" json:"prevOutput,omitempty"` + MinimumSignatures uint32 `protobuf:"varint,3,opt,name=minimumSignatures,proto3" json:"minimumSignatures,omitempty"` + PubKeySignaturePairs []*PubKeySignaturePair `protobuf:"bytes,4,rep,name=pubKeySignaturePairs,proto3" json:"pubKeySignaturePairs,omitempty"` + DerivationPath string `protobuf:"bytes,5,opt,name=derivationPath,proto3" json:"derivationPath,omitempty"` +} + +func (x *PartiallySignedInput) Reset() { + *x = PartiallySignedInput{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PartiallySignedInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PartiallySignedInput) ProtoMessage() {} + +func (x *PartiallySignedInput) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PartiallySignedInput.ProtoReflect.Descriptor instead. 
+func (*PartiallySignedInput) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{1} +} + +func (x *PartiallySignedInput) GetRedeemScript() []byte { + if x != nil { + return x.RedeemScript + } + return nil +} + +func (x *PartiallySignedInput) GetPrevOutput() *TransactionOutput { + if x != nil { + return x.PrevOutput + } + return nil +} + +func (x *PartiallySignedInput) GetMinimumSignatures() uint32 { + if x != nil { + return x.MinimumSignatures + } + return 0 +} + +func (x *PartiallySignedInput) GetPubKeySignaturePairs() []*PubKeySignaturePair { + if x != nil { + return x.PubKeySignaturePairs + } + return nil +} + +func (x *PartiallySignedInput) GetDerivationPath() string { + if x != nil { + return x.DerivationPath + } + return "" +} + +type PubKeySignaturePair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExtendedPubKey string `protobuf:"bytes,1,opt,name=extendedPubKey,proto3" json:"extendedPubKey,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *PubKeySignaturePair) Reset() { + *x = PubKeySignaturePair{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PubKeySignaturePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PubKeySignaturePair) ProtoMessage() {} + +func (x *PubKeySignaturePair) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PubKeySignaturePair.ProtoReflect.Descriptor instead. 
+func (*PubKeySignaturePair) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{2} +} + +func (x *PubKeySignaturePair) GetExtendedPubKey() string { + if x != nil { + return x.ExtendedPubKey + } + return "" +} + +func (x *PubKeySignaturePair) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +type SubnetworkId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *SubnetworkId) Reset() { + *x = SubnetworkId{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubnetworkId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubnetworkId) ProtoMessage() {} + +func (x *SubnetworkId) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubnetworkId.ProtoReflect.Descriptor instead. 
+func (*SubnetworkId) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{3} +} + +func (x *SubnetworkId) GetBytes() []byte { + if x != nil { + return x.Bytes + } + return nil +} + +type TransactionMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Inputs []*TransactionInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*TransactionOutput `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + LockTime uint64 `protobuf:"varint,4,opt,name=lockTime,proto3" json:"lockTime,omitempty"` + SubnetworkId *SubnetworkId `protobuf:"bytes,5,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` + Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"` + Payload []byte `protobuf:"bytes,8,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *TransactionMessage) Reset() { + *x = TransactionMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionMessage) ProtoMessage() {} + +func (x *TransactionMessage) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionMessage.ProtoReflect.Descriptor instead. 
+func (*TransactionMessage) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{4} +} + +func (x *TransactionMessage) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *TransactionMessage) GetInputs() []*TransactionInput { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *TransactionMessage) GetOutputs() []*TransactionOutput { + if x != nil { + return x.Outputs + } + return nil +} + +func (x *TransactionMessage) GetLockTime() uint64 { + if x != nil { + return x.LockTime + } + return 0 +} + +func (x *TransactionMessage) GetSubnetworkId() *SubnetworkId { + if x != nil { + return x.SubnetworkId + } + return nil +} + +func (x *TransactionMessage) GetGas() uint64 { + if x != nil { + return x.Gas + } + return 0 +} + +func (x *TransactionMessage) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type TransactionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousOutpoint *Outpoint `protobuf:"bytes,1,opt,name=previousOutpoint,proto3" json:"previousOutpoint,omitempty"` + SignatureScript []byte `protobuf:"bytes,2,opt,name=signatureScript,proto3" json:"signatureScript,omitempty"` + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` + SigOpCount uint32 `protobuf:"varint,4,opt,name=sigOpCount,proto3" json:"sigOpCount,omitempty"` +} + +func (x *TransactionInput) Reset() { + *x = TransactionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionInput) ProtoMessage() {} + +func (x *TransactionInput) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionInput.ProtoReflect.Descriptor instead. +func (*TransactionInput) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{5} +} + +func (x *TransactionInput) GetPreviousOutpoint() *Outpoint { + if x != nil { + return x.PreviousOutpoint + } + return nil +} + +func (x *TransactionInput) GetSignatureScript() []byte { + if x != nil { + return x.SignatureScript + } + return nil +} + +func (x *TransactionInput) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *TransactionInput) GetSigOpCount() uint32 { + if x != nil { + return x.SigOpCount + } + return 0 +} + +type Outpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionId *TransactionId `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` +} + +func (x *Outpoint) Reset() { + *x = Outpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Outpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Outpoint) ProtoMessage() {} + +func (x *Outpoint) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Outpoint.ProtoReflect.Descriptor instead. 
+func (*Outpoint) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{6} +} + +func (x *Outpoint) GetTransactionId() *TransactionId { + if x != nil { + return x.TransactionId + } + return nil +} + +func (x *Outpoint) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +type TransactionId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *TransactionId) Reset() { + *x = TransactionId{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionId) ProtoMessage() {} + +func (x *TransactionId) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionId.ProtoReflect.Descriptor instead. 
+func (*TransactionId) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{7} +} + +func (x *TransactionId) GetBytes() []byte { + if x != nil { + return x.Bytes + } + return nil +} + +type ScriptPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Script []byte `protobuf:"bytes,1,opt,name=script,proto3" json:"script,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ScriptPublicKey) Reset() { + *x = ScriptPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScriptPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScriptPublicKey) ProtoMessage() {} + +func (x *ScriptPublicKey) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScriptPublicKey.ProtoReflect.Descriptor instead. 
+func (*ScriptPublicKey) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{8} +} + +func (x *ScriptPublicKey) GetScript() []byte { + if x != nil { + return x.Script + } + return nil +} + +func (x *ScriptPublicKey) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +type TransactionOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + ScriptPublicKey *ScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` +} + +func (x *TransactionOutput) Reset() { + *x = TransactionOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_wallet_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionOutput) ProtoMessage() {} + +func (x *TransactionOutput) ProtoReflect() protoreflect.Message { + mi := &file_wallet_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionOutput.ProtoReflect.Descriptor instead. 
+func (*TransactionOutput) Descriptor() ([]byte, []int) { + return file_wallet_proto_rawDescGZIP(), []int{9} +} + +func (x *TransactionOutput) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *TransactionOutput) GetScriptPublicKey() *ScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +var File_wallet_proto protoreflect.FileDescriptor + +var file_wallet_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0xb4, 0x01, 0x0a, 0x1a, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, + 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x36, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x02, 0x74, 0x78, 0x12, 0x5e, 0x0a, 0x15, 0x70, 0x61, 0x72, + 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0xb4, 0x02, 0x0a, 0x14, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x65, 0x65, 0x6d, 0x53, 0x63, 
0x72, 0x69, + 0x70, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x64, 0x65, 0x65, 0x6d, + 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, + 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x14, 0x70, + 0x75, 0x62, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x50, 0x61, + 0x69, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, + 0x75, 0x62, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x50, 0x61, + 0x69, 0x72, 0x52, 0x14, 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x64, 0x65, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x64, 0x65, 0x72, 0x69, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, + 0x22, 0x5b, 0x0a, 0x13, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x26, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x65, 0x64, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x65, 0x78, 0x74, 
0x65, 0x6e, 0x64, 0x65, 0x64, 0x50, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, + 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x24, 0x0a, + 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0xbb, 0x02, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x44, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x22, 0xc2, 0x01, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x48, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, + 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x10, + 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, + 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, + 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x4f, 0x70, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x4f, + 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x69, 0x0a, 0x08, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, + 
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x0d, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x22, 0x25, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0f, 0x53, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x0a, + 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4d, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x69, 0x5a, 0x67, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x2f, 0x63, + 0x6d, 0x64, 0x2f, 0x73, 0x70, 0x65, 
0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, + 0x2f, 0x6c, 0x69, 0x62, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x65, + 0x74, 0x2f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_wallet_proto_rawDescOnce sync.Once + file_wallet_proto_rawDescData = file_wallet_proto_rawDesc +) + +func file_wallet_proto_rawDescGZIP() []byte { + file_wallet_proto_rawDescOnce.Do(func() { + file_wallet_proto_rawDescData = protoimpl.X.CompressGZIP(file_wallet_proto_rawDescData) + }) + return file_wallet_proto_rawDescData +} + +var file_wallet_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_wallet_proto_goTypes = []interface{}{ + (*PartiallySignedTransaction)(nil), // 0: protoserialization.PartiallySignedTransaction + (*PartiallySignedInput)(nil), // 1: protoserialization.PartiallySignedInput + (*PubKeySignaturePair)(nil), // 2: protoserialization.PubKeySignaturePair + (*SubnetworkId)(nil), // 3: protoserialization.SubnetworkId + (*TransactionMessage)(nil), // 4: protoserialization.TransactionMessage + (*TransactionInput)(nil), // 5: protoserialization.TransactionInput + (*Outpoint)(nil), // 6: protoserialization.Outpoint + (*TransactionId)(nil), // 7: protoserialization.TransactionId + (*ScriptPublicKey)(nil), // 8: protoserialization.ScriptPublicKey + (*TransactionOutput)(nil), // 9: protoserialization.TransactionOutput +} +var file_wallet_proto_depIdxs = []int32{ + 4, // 0: protoserialization.PartiallySignedTransaction.tx:type_name -> protoserialization.TransactionMessage + 1, // 1: protoserialization.PartiallySignedTransaction.partiallySignedInputs:type_name -> protoserialization.PartiallySignedInput + 9, // 2: protoserialization.PartiallySignedInput.prevOutput:type_name -> protoserialization.TransactionOutput + 2, // 3: 
protoserialization.PartiallySignedInput.pubKeySignaturePairs:type_name -> protoserialization.PubKeySignaturePair + 5, // 4: protoserialization.TransactionMessage.inputs:type_name -> protoserialization.TransactionInput + 9, // 5: protoserialization.TransactionMessage.outputs:type_name -> protoserialization.TransactionOutput + 3, // 6: protoserialization.TransactionMessage.subnetworkId:type_name -> protoserialization.SubnetworkId + 6, // 7: protoserialization.TransactionInput.previousOutpoint:type_name -> protoserialization.Outpoint + 7, // 8: protoserialization.Outpoint.transactionId:type_name -> protoserialization.TransactionId + 8, // 9: protoserialization.TransactionOutput.scriptPublicKey:type_name -> protoserialization.ScriptPublicKey + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_wallet_proto_init() } +func file_wallet_proto_init() { + if File_wallet_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_wallet_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PartiallySignedTransaction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PartiallySignedInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PubKeySignaturePair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[3].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*SubnetworkId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Outpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScriptPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_wallet_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_wallet_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_wallet_proto_goTypes, + DependencyIndexes: file_wallet_proto_depIdxs, + MessageInfos: file_wallet_proto_msgTypes, + }.Build() + File_wallet_proto = out.File + file_wallet_proto_rawDesc = nil + file_wallet_proto_goTypes = nil + file_wallet_proto_depIdxs = nil +} diff --git a/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/wallet.proto b/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/wallet.proto new file mode 100644 index 0000000..0c2d7d4 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/serialization/protoserialization/wallet.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; +package protoserialization; + +option go_package = "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization/protoserialization"; + +message PartiallySignedTransaction{ + TransactionMessage tx = 1; + repeated PartiallySignedInput partiallySignedInputs = 2; +} + +message PartiallySignedInput{ + bytes redeemScript = 1; + TransactionOutput prevOutput = 2; + uint32 minimumSignatures = 3; + repeated PubKeySignaturePair pubKeySignaturePairs = 4; + string derivationPath = 5; +} + +message PubKeySignaturePair{ + string extendedPubKey = 1; + bytes signature = 2; +} + +message SubnetworkId{ + bytes bytes = 1; +} + +message TransactionMessage{ + uint32 version = 1; + repeated TransactionInput inputs = 2; + repeated TransactionOutput outputs = 3; + uint64 lockTime = 4; + SubnetworkId subnetworkId = 5; + uint64 gas = 6; + bytes payload = 8; +} + +message TransactionInput{ + Outpoint previousOutpoint = 1; + bytes signatureScript = 2; + uint64 sequence = 3; + uint32 sigOpCount = 4; +} + +message Outpoint{ + TransactionId transactionId = 1; + uint32 index = 2; +} + +message TransactionId{ + bytes bytes = 1; +} +message ScriptPublicKey { + bytes script = 1; + uint32 version = 2; +} + +message TransactionOutput{ + uint64 value = 1; + ScriptPublicKey scriptPublicKey = 2; +} diff --git 
a/cmd/spectrewallet/libspectrewallet/serialization/serialization.go b/cmd/spectrewallet/libspectrewallet/serialization/serialization.go new file mode 100644 index 0000000..dcab4fb --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/serialization/serialization.go @@ -0,0 +1,334 @@ +package serialization + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization/protoserialization" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "google.golang.org/protobuf/proto" +) + +// PartiallySignedTransaction is a type that is intended +// to be transferred between multiple parties so each +// party will be able to sign the transaction before +// it's fully signed. +type PartiallySignedTransaction struct { + Tx *externalapi.DomainTransaction + PartiallySignedInputs []*PartiallySignedInput +} + +// PartiallySignedInput represents an input signed +// only by some of the relevant parties. 
+type PartiallySignedInput struct { + PrevOutput *externalapi.DomainTransactionOutput + MinimumSignatures uint32 + PubKeySignaturePairs []*PubKeySignaturePair + DerivationPath string +} + +// PubKeySignaturePair is a pair of public key and (potentially) its associated signature +type PubKeySignaturePair struct { + ExtendedPublicKey string + Signature []byte +} + +// Clone creates a deep-clone of this PartiallySignedTransaction +func (pst *PartiallySignedTransaction) Clone() *PartiallySignedTransaction { + clone := &PartiallySignedTransaction{ + Tx: pst.Tx.Clone(), + PartiallySignedInputs: make([]*PartiallySignedInput, len(pst.PartiallySignedInputs)), + } + for i, partiallySignedInput := range pst.PartiallySignedInputs { + clone.PartiallySignedInputs[i] = partiallySignedInput.Clone() + } + return clone +} + +// Clone creates a deep-clone of this PartiallySignedInput +func (psi PartiallySignedInput) Clone() *PartiallySignedInput { + clone := &PartiallySignedInput{ + PrevOutput: psi.PrevOutput.Clone(), + MinimumSignatures: psi.MinimumSignatures, + PubKeySignaturePairs: make([]*PubKeySignaturePair, len(psi.PubKeySignaturePairs)), + DerivationPath: psi.DerivationPath, + } + for i, pubKeySignaturePair := range psi.PubKeySignaturePairs { + clone.PubKeySignaturePairs[i] = pubKeySignaturePair.Clone() + } + return clone +} + +// Clone creates a deep-clone of this PubKeySignaturePair +func (psp PubKeySignaturePair) Clone() *PubKeySignaturePair { + clone := &PubKeySignaturePair{ + ExtendedPublicKey: psp.ExtendedPublicKey, + } + if psp.Signature != nil { + clone.Signature = make([]byte, len(psp.Signature)) + copy(clone.Signature, psp.Signature) + } + return clone +} + +// DeserializePartiallySignedTransaction deserializes a byte slice into PartiallySignedTransaction. 
+func DeserializePartiallySignedTransaction(serializedPartiallySignedTransaction []byte) (*PartiallySignedTransaction, error) { + protoPartiallySignedTransaction := &protoserialization.PartiallySignedTransaction{} + err := proto.Unmarshal(serializedPartiallySignedTransaction, protoPartiallySignedTransaction) + if err != nil { + return nil, err + } + + return partiallySignedTransactionFromProto(protoPartiallySignedTransaction) +} + +// SerializePartiallySignedTransaction serializes a PartiallySignedTransaction. +func SerializePartiallySignedTransaction(partiallySignedTransaction *PartiallySignedTransaction) ([]byte, error) { + return proto.Marshal(partiallySignedTransactionToProto(partiallySignedTransaction)) +} + +// DeserializeDomainTransaction Deserialize a Transaction to an *externalapi.DomainTransaction +func DeserializeDomainTransaction(serializedTransactionMessage []byte) (*externalapi.DomainTransaction, error) { + protoTransactionMessage := &protoserialization.TransactionMessage{} + err := proto.Unmarshal(serializedTransactionMessage, protoTransactionMessage) + if err != nil { + return nil, err + } + + return transactionFromProto(protoTransactionMessage) +} + +// SerializeDomainTransaction Serialize a *externalapi.DomainTransaction +func SerializeDomainTransaction(tx *externalapi.DomainTransaction) ([]byte, error) { + return proto.Marshal(transactionToProto(tx)) +} + +func partiallySignedTransactionFromProto(protoPartiallySignedTransaction *protoserialization.PartiallySignedTransaction) (*PartiallySignedTransaction, error) { + tx, err := transactionFromProto(protoPartiallySignedTransaction.Tx) + if err != nil { + return nil, err + } + + inputs := make([]*PartiallySignedInput, len(protoPartiallySignedTransaction.PartiallySignedInputs)) + for i, protoInput := range protoPartiallySignedTransaction.PartiallySignedInputs { + inputs[i], err = partiallySignedInputFromProto(protoInput) + if err != nil { + return nil, err + } + } + + return 
&PartiallySignedTransaction{ + Tx: tx, + PartiallySignedInputs: inputs, + }, nil +} + +func partiallySignedTransactionToProto(partiallySignedTransaction *PartiallySignedTransaction) *protoserialization.PartiallySignedTransaction { + protoInputs := make([]*protoserialization.PartiallySignedInput, len(partiallySignedTransaction.PartiallySignedInputs)) + for i, input := range partiallySignedTransaction.PartiallySignedInputs { + protoInputs[i] = partiallySignedInputToProto(input) + } + + return &protoserialization.PartiallySignedTransaction{ + Tx: transactionToProto(partiallySignedTransaction.Tx), + PartiallySignedInputs: protoInputs, + } +} + +func partiallySignedInputFromProto(protoPartiallySignedInput *protoserialization.PartiallySignedInput) (*PartiallySignedInput, error) { + output, err := transactionOutputFromProto(protoPartiallySignedInput.PrevOutput) + if err != nil { + return nil, err + } + + pubKeySignaturePairs := make([]*PubKeySignaturePair, len(protoPartiallySignedInput.PubKeySignaturePairs)) + for i, protoPair := range protoPartiallySignedInput.PubKeySignaturePairs { + pubKeySignaturePairs[i] = pubKeySignaturePairFromProto(protoPair) + } + + return &PartiallySignedInput{ + PrevOutput: output, + MinimumSignatures: protoPartiallySignedInput.MinimumSignatures, + PubKeySignaturePairs: pubKeySignaturePairs, + DerivationPath: protoPartiallySignedInput.DerivationPath, + }, nil +} + +func partiallySignedInputToProto(partiallySignedInput *PartiallySignedInput) *protoserialization.PartiallySignedInput { + protoPairs := make([]*protoserialization.PubKeySignaturePair, len(partiallySignedInput.PubKeySignaturePairs)) + for i, pair := range partiallySignedInput.PubKeySignaturePairs { + protoPairs[i] = pubKeySignaturePairToProto(pair) + } + + return &protoserialization.PartiallySignedInput{ + PrevOutput: transactionOutputToProto(partiallySignedInput.PrevOutput), + MinimumSignatures: partiallySignedInput.MinimumSignatures, + PubKeySignaturePairs: protoPairs, + 
DerivationPath: partiallySignedInput.DerivationPath, + } +} + +func pubKeySignaturePairFromProto(protoPubKeySignaturePair *protoserialization.PubKeySignaturePair) *PubKeySignaturePair { + return &PubKeySignaturePair{ + ExtendedPublicKey: protoPubKeySignaturePair.ExtendedPubKey, + Signature: protoPubKeySignaturePair.Signature, + } +} + +func pubKeySignaturePairToProto(pubKeySignaturePair *PubKeySignaturePair) *protoserialization.PubKeySignaturePair { + return &protoserialization.PubKeySignaturePair{ + ExtendedPubKey: pubKeySignaturePair.ExtendedPublicKey, + Signature: pubKeySignaturePair.Signature, + } +} + +func transactionFromProto(protoTransaction *protoserialization.TransactionMessage) (*externalapi.DomainTransaction, error) { + if protoTransaction.Version > math.MaxUint16 { + return nil, errors.Errorf("protoTransaction.Version is %d and is too big to be a uint16", protoTransaction.Version) + } + + inputs := make([]*externalapi.DomainTransactionInput, len(protoTransaction.Inputs)) + for i, protoInput := range protoTransaction.Inputs { + var err error + inputs[i], err = transactionInputFromProto(protoInput) + if err != nil { + return nil, err + } + } + + outputs := make([]*externalapi.DomainTransactionOutput, len(protoTransaction.Outputs)) + for i, protoOutput := range protoTransaction.Outputs { + var err error + outputs[i], err = transactionOutputFromProto(protoOutput) + if err != nil { + return nil, err + } + } + + subnetworkID, err := subnetworks.FromBytes(protoTransaction.SubnetworkId.Bytes) + if err != nil { + return nil, err + } + + return &externalapi.DomainTransaction{ + Version: uint16(protoTransaction.Version), + Inputs: inputs, + Outputs: outputs, + LockTime: protoTransaction.LockTime, + SubnetworkID: *subnetworkID, + Gas: protoTransaction.Gas, + Payload: protoTransaction.Payload, + }, nil +} + +func transactionToProto(tx *externalapi.DomainTransaction) *protoserialization.TransactionMessage { + protoInputs := 
make([]*protoserialization.TransactionInput, len(tx.Inputs)) + for i, input := range tx.Inputs { + protoInputs[i] = transactionInputToProto(input) + } + + protoOutputs := make([]*protoserialization.TransactionOutput, len(tx.Outputs)) + for i, output := range tx.Outputs { + protoOutputs[i] = transactionOutputToProto(output) + } + + return &protoserialization.TransactionMessage{ + Version: uint32(tx.Version), + Inputs: protoInputs, + Outputs: protoOutputs, + LockTime: tx.LockTime, + SubnetworkId: &protoserialization.SubnetworkId{Bytes: tx.SubnetworkID[:]}, + Gas: tx.Gas, + Payload: tx.Payload, + } +} + +func transactionInputFromProto(protoInput *protoserialization.TransactionInput) (*externalapi.DomainTransactionInput, error) { + if protoInput.SigOpCount > math.MaxUint8 { + return nil, errors.New("TransactionInput SigOpCount > math.MaxUint8") + } + + outpoint, err := outpointFromProto(protoInput.PreviousOutpoint) + if err != nil { + return nil, err + } + + return &externalapi.DomainTransactionInput{ + PreviousOutpoint: *outpoint, + SignatureScript: protoInput.SignatureScript, + Sequence: protoInput.Sequence, + SigOpCount: byte(protoInput.SigOpCount), + }, nil +} + +func transactionInputToProto(input *externalapi.DomainTransactionInput) *protoserialization.TransactionInput { + return &protoserialization.TransactionInput{ + PreviousOutpoint: outpointToProto(&input.PreviousOutpoint), + SignatureScript: input.SignatureScript, + Sequence: input.Sequence, + SigOpCount: uint32(input.SigOpCount), + } +} + +func outpointFromProto(protoOutpoint *protoserialization.Outpoint) (*externalapi.DomainOutpoint, error) { + txID, err := transactionIDFromProto(protoOutpoint.TransactionId) + if err != nil { + return nil, err + } + return &externalapi.DomainOutpoint{ + TransactionID: *txID, + Index: protoOutpoint.Index, + }, nil +} + +func outpointToProto(outpoint *externalapi.DomainOutpoint) *protoserialization.Outpoint { + return &protoserialization.Outpoint{ + TransactionId: 
&protoserialization.TransactionId{Bytes: outpoint.TransactionID.ByteSlice()}, + Index: outpoint.Index, + } +} + +func transactionIDFromProto(protoTxID *protoserialization.TransactionId) (*externalapi.DomainTransactionID, error) { + if protoTxID == nil { + return nil, errors.Errorf("protoTxID is nil") + } + + return externalapi.NewDomainTransactionIDFromByteSlice(protoTxID.Bytes) +} + +func transactionOutputFromProto(protoOutput *protoserialization.TransactionOutput) (*externalapi.DomainTransactionOutput, error) { + scriptPublicKey, err := scriptPublicKeyFromProto(protoOutput.ScriptPublicKey) + if err != nil { + return nil, err + } + + return &externalapi.DomainTransactionOutput{ + Value: protoOutput.Value, + ScriptPublicKey: scriptPublicKey, + }, nil +} + +func transactionOutputToProto(output *externalapi.DomainTransactionOutput) *protoserialization.TransactionOutput { + return &protoserialization.TransactionOutput{ + Value: output.Value, + ScriptPublicKey: scriptPublicKeyToProto(output.ScriptPublicKey), + } +} + +func scriptPublicKeyFromProto(protoScriptPublicKey *protoserialization.ScriptPublicKey) (*externalapi.ScriptPublicKey, error) { + if protoScriptPublicKey.Version > math.MaxUint16 { + return nil, errors.Errorf("protoOutput.ScriptPublicKey.Version is %d and is too big to be a uint16", protoScriptPublicKey.Version) + } + return &externalapi.ScriptPublicKey{ + Script: protoScriptPublicKey.Script, + Version: uint16(protoScriptPublicKey.Version), + }, nil +} + +func scriptPublicKeyToProto(scriptPublicKey *externalapi.ScriptPublicKey) *protoserialization.ScriptPublicKey { + return &protoserialization.ScriptPublicKey{ + Script: scriptPublicKey.Script, + Version: uint32(scriptPublicKey.Version), + } +} diff --git a/cmd/spectrewallet/libspectrewallet/sign.go b/cmd/spectrewallet/libspectrewallet/sign.go new file mode 100644 index 0000000..39dac8b --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/sign.go @@ -0,0 +1,99 @@ +package libspectrewallet + +import ( + 
"github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/bip32" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +func rawTxInSignature(extendedKey *bip32.ExtendedKey, tx *externalapi.DomainTransaction, idx int, hashType consensushashing.SigHashType, + sighashReusedValues *consensushashing.SighashReusedValues, ecdsa bool) ([]byte, error) { + + privateKey := extendedKey.PrivateKey() + if ecdsa { + return txscript.RawTxInSignatureECDSA(tx, idx, hashType, privateKey, sighashReusedValues) + } + + schnorrKeyPair, err := privateKey.ToSchnorr() + if err != nil { + return nil, err + } + + return txscript.RawTxInSignature(tx, idx, hashType, schnorrKeyPair, sighashReusedValues) +} + +// Sign signs the transaction with the given private keys +func Sign(params *dagconfig.Params, mnemonics []string, serializedPSTx []byte, ecdsa bool) ([]byte, error) { + partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(serializedPSTx) + if err != nil { + return nil, err + } + + for _, mnemonic := range mnemonics { + err = sign(params, mnemonic, partiallySignedTransaction, ecdsa) + if err != nil { + return nil, err + } + } + return serialization.SerializePartiallySignedTransaction(partiallySignedTransaction) +} + +func sign(params *dagconfig.Params, mnemonic string, partiallySignedTransaction *serialization.PartiallySignedTransaction, ecdsa bool) error { + if isTransactionFullySigned(partiallySignedTransaction) { + return nil + } + + sighashReusedValues := &consensushashing.SighashReusedValues{} + for i, partiallySignedInput := 
range partiallySignedTransaction.PartiallySignedInputs { + prevOut := partiallySignedInput.PrevOutput + partiallySignedTransaction.Tx.Inputs[i].UTXOEntry = utxo.NewUTXOEntry( + prevOut.Value, + prevOut.ScriptPublicKey, + false, // This is a fake value, because it's irrelevant for the signature + 0, // This is a fake value, because it's irrelevant for the signature + ) + partiallySignedTransaction.Tx.Inputs[i].SigOpCount = byte(len(partiallySignedInput.PubKeySignaturePairs)) + } + + signed := false + for i, partiallySignedInput := range partiallySignedTransaction.PartiallySignedInputs { + isMultisig := len(partiallySignedInput.PubKeySignaturePairs) > 1 + path := defaultPath(isMultisig) + extendedKey, err := extendedKeyFromMnemonicAndPath(mnemonic, path, params) + if err != nil { + return err + } + + derivedKey, err := extendedKey.DeriveFromPath(partiallySignedInput.DerivationPath) + if err != nil { + return err + } + + derivedPublicKey, err := derivedKey.Public() + if err != nil { + return err + } + + for _, pair := range partiallySignedInput.PubKeySignaturePairs { + if pair.ExtendedPublicKey == derivedPublicKey.String() { + pair.Signature, err = rawTxInSignature(derivedKey, partiallySignedTransaction.Tx, i, consensushashing.SigHashAll, sighashReusedValues, ecdsa) + if err != nil { + return err + } + + signed = true + } + } + } + + if !signed { + return errors.Errorf("Public key doesn't match any of the transaction public keys") + } + + return nil +} diff --git a/cmd/spectrewallet/libspectrewallet/transaction.go b/cmd/spectrewallet/libspectrewallet/transaction.go new file mode 100644 index 0000000..75ec9de --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/transaction.go @@ -0,0 +1,261 @@ +package libspectrewallet + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/bip32" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/util" +) + +// Payment contains a recipient payment details +type Payment struct { + Address util.Address + Amount uint64 +} + +// UTXO is a type that stores a UTXO and meta data +// that is needed in order to sign it and create +// transactions with it. +type UTXO struct { + Outpoint *externalapi.DomainOutpoint + UTXOEntry externalapi.UTXOEntry + DerivationPath string +} + +// CreateUnsignedTransaction creates an unsigned transaction +func CreateUnsignedTransaction( + extendedPublicKeys []string, + minimumSignatures uint32, + payments []*Payment, + selectedUTXOs []*UTXO) ([]byte, error) { + + sortPublicKeys(extendedPublicKeys) + unsignedTransaction, err := createUnsignedTransaction(extendedPublicKeys, minimumSignatures, payments, selectedUTXOs) + if err != nil { + return nil, err + } + + return serialization.SerializePartiallySignedTransaction(unsignedTransaction) +} + +func multiSigRedeemScript(extendedPublicKeys []string, minimumSignatures uint32, path string, ecdsa bool) ([]byte, error) { + scriptBuilder := txscript.NewScriptBuilder() + scriptBuilder.AddInt64(int64(minimumSignatures)) + for _, key := range extendedPublicKeys { + extendedKey, err := bip32.DeserializeExtendedKey(key) + if err != nil { + return nil, err + } + + derivedKey, err := extendedKey.DeriveFromPath(path) + if err != nil { + return nil, err + } + + publicKey, err := derivedKey.PublicKey() + if err != nil { + return nil, err + } + + var serializedPublicKey []byte + if ecdsa { + serializedECDSAPublicKey, err := publicKey.Serialize() + if err != nil { + return nil, err + } + serializedPublicKey = serializedECDSAPublicKey[:] + } else { + schnorrPublicKey, err := 
publicKey.ToSchnorr() + if err != nil { + return nil, err + } + + serializedSchnorrPublicKey, err := schnorrPublicKey.Serialize() + if err != nil { + return nil, err + } + serializedPublicKey = serializedSchnorrPublicKey[:] + } + + scriptBuilder.AddData(serializedPublicKey) + } + scriptBuilder.AddInt64(int64(len(extendedPublicKeys))) + + if ecdsa { + scriptBuilder.AddOp(txscript.OpCheckMultiSigECDSA) + } else { + scriptBuilder.AddOp(txscript.OpCheckMultiSig) + } + + return scriptBuilder.Script() +} + +func createUnsignedTransaction( + extendedPublicKeys []string, + minimumSignatures uint32, + payments []*Payment, + selectedUTXOs []*UTXO) (*serialization.PartiallySignedTransaction, error) { + + inputs := make([]*externalapi.DomainTransactionInput, len(selectedUTXOs)) + partiallySignedInputs := make([]*serialization.PartiallySignedInput, len(selectedUTXOs)) + for i, utxo := range selectedUTXOs { + emptyPubKeySignaturePairs := make([]*serialization.PubKeySignaturePair, len(extendedPublicKeys)) + for i, extendedPublicKey := range extendedPublicKeys { + extendedKey, err := bip32.DeserializeExtendedKey(extendedPublicKey) + if err != nil { + return nil, err + } + + derivedKey, err := extendedKey.DeriveFromPath(utxo.DerivationPath) + if err != nil { + return nil, err + } + + emptyPubKeySignaturePairs[i] = &serialization.PubKeySignaturePair{ + ExtendedPublicKey: derivedKey.String(), + } + } + + inputs[i] = &externalapi.DomainTransactionInput{PreviousOutpoint: *utxo.Outpoint} + partiallySignedInputs[i] = &serialization.PartiallySignedInput{ + PrevOutput: &externalapi.DomainTransactionOutput{ + Value: utxo.UTXOEntry.Amount(), + ScriptPublicKey: utxo.UTXOEntry.ScriptPublicKey(), + }, + MinimumSignatures: minimumSignatures, + PubKeySignaturePairs: emptyPubKeySignaturePairs, + DerivationPath: utxo.DerivationPath, + } + } + + outputs := make([]*externalapi.DomainTransactionOutput, len(payments)) + for i, payment := range payments { + scriptPublicKey, err := 
txscript.PayToAddrScript(payment.Address) + if err != nil { + return nil, err + } + + outputs[i] = &externalapi.DomainTransactionOutput{ + Value: payment.Amount, + ScriptPublicKey: scriptPublicKey, + } + } + + domainTransaction := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: inputs, + Outputs: outputs, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Payload: nil, + } + + return &serialization.PartiallySignedTransaction{ + Tx: domainTransaction, + PartiallySignedInputs: partiallySignedInputs, + }, nil + +} + +// IsTransactionFullySigned returns whether the transaction is fully signed and ready to broadcast. +func IsTransactionFullySigned(partiallySignedTransactionBytes []byte) (bool, error) { + partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(partiallySignedTransactionBytes) + if err != nil { + return false, err + } + + return isTransactionFullySigned(partiallySignedTransaction), nil +} + +func isTransactionFullySigned(partiallySignedTransaction *serialization.PartiallySignedTransaction) bool { + for _, input := range partiallySignedTransaction.PartiallySignedInputs { + numSignatures := 0 + for _, pair := range input.PubKeySignaturePairs { + if pair.Signature != nil { + numSignatures++ + } + } + if uint32(numSignatures) < input.MinimumSignatures { + return false + } + } + return true +} + +// ExtractTransaction extracts a domain transaction from partially signed transaction after all of the +// relevant parties have signed it. 
+func ExtractTransaction(partiallySignedTransactionBytes []byte, ecdsa bool) (*externalapi.DomainTransaction, error) { + partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(partiallySignedTransactionBytes) + if err != nil { + return nil, err + } + + return ExtractTransactionDeserialized(partiallySignedTransaction, ecdsa) +} + +// ExtractTransactionDeserialized does the same thing ExtractTransaction does, only receives the PartiallySignedTransaction +// in an already deserialized format +func ExtractTransactionDeserialized(partiallySignedTransaction *serialization.PartiallySignedTransaction, ecdsa bool) ( + *externalapi.DomainTransaction, error) { + + for i, input := range partiallySignedTransaction.PartiallySignedInputs { + isMultisig := len(input.PubKeySignaturePairs) > 1 + scriptBuilder := txscript.NewScriptBuilder() + if isMultisig { + signatureCount := 0 + for _, pair := range input.PubKeySignaturePairs { + if pair.Signature != nil { + scriptBuilder.AddData(pair.Signature) + signatureCount++ + } + } + if uint32(signatureCount) < input.MinimumSignatures { + return nil, errors.Errorf("missing %d signatures", input.MinimumSignatures-uint32(signatureCount)) + } + + redeemScript, err := partiallySignedInputMultisigRedeemScript(input, ecdsa) + if err != nil { + return nil, err + } + + scriptBuilder.AddData(redeemScript) + sigScript, err := scriptBuilder.Script() + if err != nil { + return nil, err + } + + partiallySignedTransaction.Tx.Inputs[i].SignatureScript = sigScript + } else { + if len(input.PubKeySignaturePairs) > 1 { + return nil, errors.Errorf("Cannot sign on P2PK when len(input.PubKeySignaturePairs) > 1") + } + + if input.PubKeySignaturePairs[0].Signature == nil { + return nil, errors.Errorf("missing signature") + } + + sigScript, err := txscript.NewScriptBuilder(). + AddData(input.PubKeySignaturePairs[0].Signature). 
+ Script() + if err != nil { + return nil, err + } + partiallySignedTransaction.Tx.Inputs[i].SignatureScript = sigScript + } + } + return partiallySignedTransaction.Tx, nil +} + +func partiallySignedInputMultisigRedeemScript(input *serialization.PartiallySignedInput, ecdsa bool) ([]byte, error) { + extendedPublicKeys := make([]string, len(input.PubKeySignaturePairs)) + for i, pair := range input.PubKeySignaturePairs { + extendedPublicKeys[i] = pair.ExtendedPublicKey + } + + return multiSigRedeemScript(extendedPublicKeys, input.MinimumSignatures, "m", ecdsa) +} diff --git a/cmd/spectrewallet/libspectrewallet/transaction_test.go b/cmd/spectrewallet/libspectrewallet/transaction_test.go new file mode 100644 index 0000000..361f1f0 --- /dev/null +++ b/cmd/spectrewallet/libspectrewallet/transaction_test.go @@ -0,0 +1,519 @@ +package libspectrewallet_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/util" +) + +func forSchnorrAndECDSA(t *testing.T, testFunc func(t *testing.T, ecdsa bool)) { + t.Run("schnorr", func(t *testing.T) { + testFunc(t, false) + }) + + t.Run("ecdsa", func(t *testing.T) { + testFunc(t, true) + }) +} + +func TestMultisig(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + params := &consensusConfig.Params + forSchnorrAndECDSA(t, func(t *testing.T, ecdsa bool) { + 
consensusConfig.BlockCoinbaseMaturity = 0 + tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestMultisig") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + defer teardown(false) + + const numKeys = 3 + mnemonics := make([]string, numKeys) + publicKeys := make([]string, numKeys) + for i := 0; i < numKeys; i++ { + var err error + mnemonics[i], err = libspectrewallet.CreateMnemonic() + if err != nil { + t.Fatalf("CreateMnemonic: %+v", err) + } + + publicKeys[i], err = libspectrewallet.MasterPublicKeyFromMnemonic(&consensusConfig.Params, mnemonics[i], true) + if err != nil { + t.Fatalf("MasterPublicKeyFromMnemonic: %+v", err) + } + } + + const minimumSignatures = 2 + path := "m/1/2/3" + address, err := libspectrewallet.Address(params, publicKeys, minimumSignatures, path, ecdsa) + if err != nil { + t.Fatalf("Address: %+v", err) + } + + if _, ok := address.(*util.AddressScriptHash); !ok { + t.Fatalf("The address is of unexpected type") + } + + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + t.Fatalf("PayToAddrScript: %+v", err) + } + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + ExtraData: nil, + } + + fundingBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1, _, err := tc.GetBlock(block1Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + block1Tx := block1.Transactions[0] + block1TxOut := block1Tx.Outputs[0] + selectedUTXOs := []*libspectrewallet.UTXO{ + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block1.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(block1TxOut.Value, block1TxOut.ScriptPublicKey, true, 
0), + DerivationPath: path, + }, + } + + unsignedTransaction, err := libspectrewallet.CreateUnsignedTransaction(publicKeys, minimumSignatures, + []*libspectrewallet.Payment{{ + Address: address, + Amount: 10, + }}, selectedUTXOs) + if err != nil { + t.Fatalf("CreateUnsignedTransactions: %+v", err) + } + + isFullySigned, err := libspectrewallet.IsTransactionFullySigned(unsignedTransaction) + if err != nil { + t.Fatalf("IsTransactionFullySigned: %+v", err) + } + + if isFullySigned { + t.Fatalf("Transaction is not expected to be signed") + } + + _, err = libspectrewallet.ExtractTransaction(unsignedTransaction, ecdsa) + if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("missing %d signatures", minimumSignatures)) { + t.Fatal("Unexpectedly succeed to extract a valid transaction out of unsigned transaction") + } + + signedTxStep1, err := libspectrewallet.Sign(params, mnemonics[:1], unsignedTransaction, ecdsa) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + isFullySigned, err = libspectrewallet.IsTransactionFullySigned(signedTxStep1) + if err != nil { + t.Fatalf("IsTransactionFullySigned: %+v", err) + } + + if isFullySigned { + t.Fatalf("Transaction is not expected to be fully signed") + } + + signedTxStep2, err := libspectrewallet.Sign(params, mnemonics[1:2], signedTxStep1, ecdsa) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + extractedSignedTxStep2, err := libspectrewallet.ExtractTransaction(signedTxStep2, ecdsa) + if err != nil { + t.Fatalf("ExtractTransaction: %+v", err) + } + + signedTxOneStep, err := libspectrewallet.Sign(params, mnemonics[:2], unsignedTransaction, ecdsa) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + extractedSignedTxOneStep, err := libspectrewallet.ExtractTransaction(signedTxOneStep, ecdsa) + if err != nil { + t.Fatalf("ExtractTransaction: %+v", err) + } + + // We check IDs instead of comparing the actual transactions because the actual transactions have different + // signature scripts due to non deterministic 
signature scheme. + if !consensushashing.TransactionID(extractedSignedTxStep2).Equal(consensushashing.TransactionID(extractedSignedTxOneStep)) { + t.Fatalf("Expected extractedSignedTxOneStep and extractedSignedTxStep2 IDs to be equal") + } + + _, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, []*externalapi.DomainTransaction{extractedSignedTxStep2}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + addedUTXO := &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(extractedSignedTxStep2), + Index: 0, + } + if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO) { + t.Fatalf("Transaction wasn't accepted in the DAG") + } + }) + }) +} + +func TestP2PK(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + params := &consensusConfig.Params + forSchnorrAndECDSA(t, func(t *testing.T, ecdsa bool) { + consensusConfig.BlockCoinbaseMaturity = 0 + tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestMultisig") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + defer teardown(false) + + const numKeys = 1 + mnemonics := make([]string, numKeys) + publicKeys := make([]string, numKeys) + for i := 0; i < numKeys; i++ { + var err error + mnemonics[i], err = libspectrewallet.CreateMnemonic() + if err != nil { + t.Fatalf("CreateMnemonic: %+v", err) + } + + publicKeys[i], err = libspectrewallet.MasterPublicKeyFromMnemonic(&consensusConfig.Params, mnemonics[i], false) + if err != nil { + t.Fatalf("MasterPublicKeyFromMnemonic: %+v", err) + } + } + + const minimumSignatures = 1 + path := "m/1/2/3" + address, err := libspectrewallet.Address(params, publicKeys, minimumSignatures, path, ecdsa) + if err != nil { + t.Fatalf("Address: %+v", err) + } + + if ecdsa { + if _, ok := address.(*util.AddressPublicKeyECDSA); !ok { + t.Fatalf("The address is of unexpected type") + } + } else { + if _, ok := 
address.(*util.AddressPublicKey); !ok { + t.Fatalf("The address is of unexpected type") + } + } + + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + t.Fatalf("PayToAddrScript: %+v", err) + } + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + ExtraData: nil, + } + + fundingBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1, _, err := tc.GetBlock(block1Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + block1Tx := block1.Transactions[0] + block1TxOut := block1Tx.Outputs[0] + selectedUTXOs := []*libspectrewallet.UTXO{ + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block1.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(block1TxOut.Value, block1TxOut.ScriptPublicKey, true, 0), + DerivationPath: path, + }, + } + + unsignedTransaction, err := libspectrewallet.CreateUnsignedTransaction(publicKeys, minimumSignatures, + []*libspectrewallet.Payment{{ + Address: address, + Amount: 10, + }}, selectedUTXOs) + if err != nil { + t.Fatalf("CreateUnsignedTransactions: %+v", err) + } + + isFullySigned, err := libspectrewallet.IsTransactionFullySigned(unsignedTransaction) + if err != nil { + t.Fatalf("IsTransactionFullySigned: %+v", err) + } + + if isFullySigned { + t.Fatalf("Transaction is not expected to be signed") + } + + _, err = libspectrewallet.ExtractTransaction(unsignedTransaction, ecdsa) + if err == nil || !strings.Contains(err.Error(), "missing signature") { + t.Fatal("Unexpectedly succeed to extract a valid transaction out of unsigned transaction") + } + + signedTx, err := libspectrewallet.Sign(params, mnemonics, unsignedTransaction, ecdsa) + if err != nil { + 
t.Fatalf("Sign: %+v", err) + } + + tx, err := libspectrewallet.ExtractTransaction(signedTx, ecdsa) + if err != nil { + t.Fatalf("ExtractTransaction: %+v", err) + } + + _, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, []*externalapi.DomainTransaction{tx}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + addedUTXO := &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(tx), + Index: 0, + } + if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO) { + t.Fatalf("Transaction wasn't accepted in the DAG") + } + }) + }) +} + +func TestMaxSompi(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + params := &consensusConfig.Params + cfg := *consensusConfig + cfg.BlockCoinbaseMaturity = 0 + cfg.PreDeflationaryPhaseBaseSubsidy = 20e6 * constants.SompiPerSpectre + tc, teardown, err := consensus.NewFactory().NewTestConsensus(&cfg, "TestMaxSompi") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + defer teardown(false) + + const numKeys = 1 + mnemonics := make([]string, numKeys) + publicKeys := make([]string, numKeys) + for i := 0; i < numKeys; i++ { + var err error + mnemonics[i], err = libspectrewallet.CreateMnemonic() + if err != nil { + t.Fatalf("CreateMnemonic: %+v", err) + } + + publicKeys[i], err = libspectrewallet.MasterPublicKeyFromMnemonic(&cfg.Params, mnemonics[i], false) + if err != nil { + t.Fatalf("MasterPublicKeyFromMnemonic: %+v", err) + } + } + + const minimumSignatures = 1 + path := "m/1/2/3" + address, err := libspectrewallet.Address(params, publicKeys, minimumSignatures, path, false) + if err != nil { + t.Fatalf("Address: %+v", err) + } + + scriptPublicKey, err := txscript.PayToAddrScript(address) + if err != nil { + t.Fatalf("PayToAddrScript: %+v", err) + } + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + ExtraData: nil, + } + + fundingBlock1Hash, _, err := 
tc.AddBlock([]*externalapi.DomainHash{cfg.GenesisHash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + fundingBlock2Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock1Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + fundingBlock3Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock2Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + fundingBlock4Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock3Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + fundingBlock2, _, err := tc.GetBlock(fundingBlock2Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + fundingBlock3, _, err := tc.GetBlock(fundingBlock3Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + fundingBlock4, _, err := tc.GetBlock(fundingBlock4Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock4Hash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1, _, err := tc.GetBlock(block1Hash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + txOut1 := fundingBlock2.Transactions[0].Outputs[0] + txOut2 := fundingBlock3.Transactions[0].Outputs[0] + txOut3 := fundingBlock4.Transactions[0].Outputs[0] + txOut4 := block1.Transactions[0].Outputs[0] + selectedUTXOsForTxWithLargeInputAmount := []*libspectrewallet.UTXO{ + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(fundingBlock2.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(txOut1.Value, txOut1.ScriptPublicKey, true, 0), + DerivationPath: path, + }, + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(fundingBlock3.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(txOut2.Value, txOut2.ScriptPublicKey, true, 0), + DerivationPath: path, + }, 
+ } + + unsignedTxWithLargeInputAmount, err := libspectrewallet.CreateUnsignedTransaction(publicKeys, minimumSignatures, + []*libspectrewallet.Payment{{ + Address: address, + Amount: 10, + }}, selectedUTXOsForTxWithLargeInputAmount) + if err != nil { + t.Fatalf("CreateUnsignedTransactions: %+v", err) + } + + signedTxWithLargeInputAmount, err := libspectrewallet.Sign(params, mnemonics, unsignedTxWithLargeInputAmount, false) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + txWithLargeInputAmount, err := libspectrewallet.ExtractTransaction(signedTxWithLargeInputAmount, false) + if err != nil { + t.Fatalf("ExtractTransaction: %+v", err) + } + + _, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, []*externalapi.DomainTransaction{txWithLargeInputAmount}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + addedUTXO1 := &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(txWithLargeInputAmount), + Index: 0, + } + if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO1) { + t.Fatalf("Transaction wasn't accepted in the DAG") + } + + selectedUTXOsForTxWithLargeInputAndOutputAmount := []*libspectrewallet.UTXO{ + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(fundingBlock4.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(txOut3.Value, txOut3.ScriptPublicKey, true, 0), + DerivationPath: path, + }, + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block1.Transactions[0]), + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry(txOut4.Value, txOut4.ScriptPublicKey, true, 0), + DerivationPath: path, + }, + } + + unsignedTxWithLargeInputAndOutputAmount, err := libspectrewallet.CreateUnsignedTransaction(publicKeys, minimumSignatures, + []*libspectrewallet.Payment{{ + Address: address, + Amount: 22e6 * constants.SompiPerSpectre, + }}, selectedUTXOsForTxWithLargeInputAndOutputAmount) + if err != nil { + 
t.Fatalf("CreateUnsignedTransactions: %+v", err) + } + + signedTxWithLargeInputAndOutputAmount, err := libspectrewallet.Sign(params, mnemonics, unsignedTxWithLargeInputAndOutputAmount, false) + if err != nil { + t.Fatalf("Sign: %+v", err) + } + + txWithLargeInputAndOutputAmount, err := libspectrewallet.ExtractTransaction(signedTxWithLargeInputAndOutputAmount, false) + if err != nil { + t.Fatalf("ExtractTransaction: %+v", err) + } + + // We're creating a new longer chain so we can double spend txWithLargeInputAmount + newChainRoot, _, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + _, virtualChangeSet, err = tc.AddBlock([]*externalapi.DomainHash{newChainRoot}, nil, []*externalapi.DomainTransaction{txWithLargeInputAndOutputAmount}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + addedUTXO2 := &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(txWithLargeInputAndOutputAmount), + Index: 0, + } + + if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(addedUTXO2) { + t.Fatalf("txWithLargeInputAndOutputAmount weren't accepted in the DAG") + } + }) +} diff --git a/cmd/spectrewallet/main.go b/cmd/spectrewallet/main.go new file mode 100644 index 0000000..4899592 --- /dev/null +++ b/cmd/spectrewallet/main.go @@ -0,0 +1,41 @@ +package main + +import "github.com/pkg/errors" + +func main() { + subCmd, config := parseCommandLine() + + var err error + switch subCmd { + case createSubCmd: + err = create(config.(*createConfig)) + case balanceSubCmd: + err = balance(config.(*balanceConfig)) + case sendSubCmd: + err = send(config.(*sendConfig)) + case createUnsignedTransactionSubCmd: + err = createUnsignedTransaction(config.(*createUnsignedTransactionConfig)) + case signSubCmd: + err = sign(config.(*signConfig)) + case broadcastSubCmd: + err = broadcast(config.(*broadcastConfig)) + case parseSubCmd: + err = parse(config.(*parseConfig)) + case showAddressesSubCmd: + err 
= showAddresses(config.(*showAddressesConfig)) + case newAddressSubCmd: + err = newAddress(config.(*newAddressConfig)) + case dumpUnencryptedDataSubCmd: + err = dumpUnencryptedData(config.(*dumpUnencryptedDataConfig)) + case startDaemonSubCmd: + err = startDaemon(config.(*startDaemonConfig)) + case sweepSubCmd: + err = sweep(config.(*sweepConfig)) + default: + err = errors.Errorf("Unknown sub-command '%s'\n", subCmd) + } + + if err != nil { + printErrorAndExit(err) + } +} diff --git a/cmd/spectrewallet/new_address.go b/cmd/spectrewallet/new_address.go new file mode 100644 index 0000000..0eefb13 --- /dev/null +++ b/cmd/spectrewallet/new_address.go @@ -0,0 +1,28 @@ +package main + +import ( + "context" + "fmt" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" +) + +func newAddress(conf *newAddressConfig) error { + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + + response, err := daemonClient.NewAddress(ctx, &pb.NewAddressRequest{}) + if err != nil { + return err + } + + fmt.Printf("New address:\n%s\n", response.Address) + return nil +} diff --git a/cmd/spectrewallet/parse.go b/cmd/spectrewallet/parse.go new file mode 100644 index 0000000..e8a8864 --- /dev/null +++ b/cmd/spectrewallet/parse.go @@ -0,0 +1,86 @@ +package main + +import ( + "encoding/hex" + "fmt" + "io/ioutil" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +func parse(conf *parseConfig) error { + if conf.Transaction == "" && 
conf.TransactionFile == "" { + return errors.Errorf("Either --transaction or --transaction-file is required") + } + if conf.Transaction != "" && conf.TransactionFile != "" { + return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time") + } + + transactionHex := conf.Transaction + if conf.TransactionFile != "" { + transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile) + if err != nil { + return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile) + } + transactionHex = strings.TrimSpace(string(transactionHexBytes)) + } + + transactions, err := decodeTransactionsFromHex(transactionHex) + if err != nil { + return err + } + for i, transaction := range transactions { + + partiallySignedTransaction, err := serialization.DeserializePartiallySignedTransaction(transaction) + if err != nil { + return err + } + + fmt.Printf("Transaction #%d ID: \t%s\n", i+1, consensushashing.TransactionID(partiallySignedTransaction.Tx)) + fmt.Println() + + allInputSompi := uint64(0) + for index, input := range partiallySignedTransaction.Tx.Inputs { + partiallySignedInput := partiallySignedTransaction.PartiallySignedInputs[index] + + if conf.Verbose { + fmt.Printf("Input %d: \tOutpoint: %s:%d \tAmount: %.2f Spectre\n", index, input.PreviousOutpoint.TransactionID, + input.PreviousOutpoint.Index, float64(partiallySignedInput.PrevOutput.Value)/float64(constants.SompiPerSpectre)) + } + + allInputSompi += partiallySignedInput.PrevOutput.Value + } + if conf.Verbose { + fmt.Println() + } + + allOutputSompi := uint64(0) + for index, output := range partiallySignedTransaction.Tx.Outputs { + scriptPublicKeyType, scriptPublicKeyAddress, err := txscript.ExtractScriptPubKeyAddress(output.ScriptPublicKey, conf.ActiveNetParams) + if err != nil { + return err + } + + addressString := scriptPublicKeyAddress.EncodeAddress() + if scriptPublicKeyType == txscript.NonStandardTy { + scriptPublicKeyHex := 
hex.EncodeToString(output.ScriptPublicKey.Script) + addressString = fmt.Sprintf("<Non-standard script public key: %s>", scriptPublicKeyHex) /* fix: format string was empty, discarding the hex and tripping go vet's printf check */ + } + + fmt.Printf("Output %d: \tRecipient: %s \tAmount: %.2f Spectre\n", + index, addressString, float64(output.Value)/float64(constants.SompiPerSpectre)) + + allOutputSompi += output.Value + } + fmt.Println() + + fmt.Printf("Fee:\t%d Sompi\n\n", allInputSompi-allOutputSompi) + } + + return nil +} diff --git a/cmd/spectrewallet/send.go new file mode 100644 index 0000000..ea3b3cf --- /dev/null +++ b/cmd/spectrewallet/send.go @@ -0,0 +1,105 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/keys" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" +) + +func send(conf *sendConfig) error { + keysFile, err := keys.ReadKeysFile(conf.NetParams(), conf.KeysFile) + if err != nil { + return err + } + + if len(keysFile.ExtendedPublicKeys) > len(keysFile.EncryptedMnemonics) { + return errors.Errorf("Cannot use 'send' command for multisig wallet without all of the keys") + } + + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + + var sendAmountSompi uint64 + if !conf.IsSendAll { + sendAmountSompi, err = utils.SprToSompi(conf.SendAmount) + + if err != nil { + return err + } + } + + createUnsignedTransactionsResponse, err := + daemonClient.CreateUnsignedTransactions(ctx, &pb.CreateUnsignedTransactionsRequest{ + From: conf.FromAddresses, + Address: conf.ToAddress, + Amount: sendAmountSompi, + IsSendAll: conf.IsSendAll, + UseExistingChangeAddress: 
conf.UseExistingChangeAddress, + }) + if err != nil { + return err + } + + if len(conf.Password) == 0 { + conf.Password = keys.GetPassword("Password:") + } + mnemonics, err := keysFile.DecryptMnemonics(conf.Password) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + fmt.Fprintf(os.Stderr, "Password decryption failed. Sometimes this is a result of not "+ + "specifying the same keys file used by the wallet daemon process.\n") + } + return err + } + + signedTransactions := make([][]byte, len(createUnsignedTransactionsResponse.UnsignedTransactions)) + for i, unsignedTransaction := range createUnsignedTransactionsResponse.UnsignedTransactions { + signedTransaction, err := libspectrewallet.Sign(conf.NetParams(), mnemonics, unsignedTransaction, keysFile.ECDSA) + if err != nil { + return err + } + signedTransactions[i] = signedTransaction + } + + if len(signedTransactions) > 1 { + fmt.Printf("Broadcasting %d transactions\n", len(signedTransactions)) + } + + // Since we waited for user input when getting the password, which could take unbound amount of time - + // create a new context for broadcast, to reset the timeout. 
+ broadcastCtx, broadcastCancel := context.WithTimeout(context.Background(), daemonTimeout) + defer broadcastCancel() + + response, err := daemonClient.Broadcast(broadcastCtx, &pb.BroadcastRequest{Transactions: signedTransactions}) + if err != nil { + return err + } + fmt.Println("Transactions were sent successfully") + fmt.Println("Transaction ID(s): ") + for _, txID := range response.TxIDs { + fmt.Printf("\t%s\n", txID) + } + + if conf.Verbose { + fmt.Println("Serialized Transaction(s) (can be parsed via the `parse` command or resent via `broadcast`): ") + for _, signedTx := range signedTransactions { + fmt.Printf("\t%x\n\n", signedTx) + } + } + + return nil +} diff --git a/cmd/spectrewallet/show_addresses.go b/cmd/spectrewallet/show_addresses.go new file mode 100644 index 0000000..4e267a1 --- /dev/null +++ b/cmd/spectrewallet/show_addresses.go @@ -0,0 +1,34 @@ +package main + +import ( + "context" + "fmt" + + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" +) + +func showAddresses(conf *showAddressesConfig) error { + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + + response, err := daemonClient.ShowAddresses(ctx, &pb.ShowAddressesRequest{}) + if err != nil { + return err + } + + fmt.Printf("Addresses (%d):\n", len(response.Address)) + for _, address := range response.Address { + fmt.Println(address) + } + + fmt.Printf("\nNote: the above are only addresses that were manually created by the 'new-address' command. 
If you want to see a list of all addresses, including change addresses, " + + "that have a positive balance, use the command 'balance -v'\n") + return nil +} diff --git a/cmd/spectrewallet/sign.go b/cmd/spectrewallet/sign.go new file mode 100644 index 0000000..0adea54 --- /dev/null +++ b/cmd/spectrewallet/sign.go @@ -0,0 +1,77 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/cmd/spectrewallet/keys" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" +) + +func sign(conf *signConfig) error { + if conf.Transaction == "" && conf.TransactionFile == "" { + return errors.Errorf("Either --transaction or --transaction-file is required") + } + if conf.Transaction != "" && conf.TransactionFile != "" { + return errors.Errorf("Both --transaction and --transaction-file cannot be passed at the same time") + } + + keysFile, err := keys.ReadKeysFile(conf.NetParams(), conf.KeysFile) + if err != nil { + return err + } + + if len(conf.Password) == 0 { + conf.Password = keys.GetPassword("Password:") + } + privateKeys, err := keysFile.DecryptMnemonics(conf.Password) + if err != nil { + return err + } + + transactionsHex := conf.Transaction + if conf.TransactionFile != "" { + transactionHexBytes, err := ioutil.ReadFile(conf.TransactionFile) + if err != nil { + return errors.Wrapf(err, "Could not read hex from %s", conf.TransactionFile) + } + transactionsHex = strings.TrimSpace(string(transactionHexBytes)) + } + partiallySignedTransactions, err := decodeTransactionsFromHex(transactionsHex) + if err != nil { + return err + } + + updatedPartiallySignedTransactions := make([][]byte, len(partiallySignedTransactions)) + for i, partiallySignedTransaction := range partiallySignedTransactions { + updatedPartiallySignedTransactions[i], err = + libspectrewallet.Sign(conf.NetParams(), privateKeys, partiallySignedTransaction, keysFile.ECDSA) + if err != nil { + return err + } + } + + 
areAllTransactionsFullySigned := true + for _, updatedPartiallySignedTransaction := range updatedPartiallySignedTransactions { + // This is somewhat redundant to check all transactions, but we do that just-in-case + isFullySigned, err := libspectrewallet.IsTransactionFullySigned(updatedPartiallySignedTransaction) + if err != nil { + return err + } + if !isFullySigned { + areAllTransactionsFullySigned = false + } + } + + if areAllTransactionsFullySigned { + fmt.Fprintln(os.Stderr, "The transaction is signed and ready to broadcast") + } else { + fmt.Fprintln(os.Stderr, "Successfully signed transaction") + } + + fmt.Println(encodeTransactionsToHex(updatedPartiallySignedTransactions)) + return nil +} diff --git a/cmd/spectrewallet/start_daemon.go b/cmd/spectrewallet/start_daemon.go new file mode 100644 index 0000000..4660d57 --- /dev/null +++ b/cmd/spectrewallet/start_daemon.go @@ -0,0 +1,7 @@ +package main + +import "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/server" + +func startDaemon(conf *startDaemonConfig) error { + return server.Start(conf.NetParams(), conf.Listen, conf.RPCServer, conf.KeysFile, conf.Profile, conf.Timeout) +} diff --git a/cmd/spectrewallet/sweep.go b/cmd/spectrewallet/sweep.go new file mode 100644 index 0000000..7a8203e --- /dev/null +++ b/cmd/spectrewallet/sweep.go @@ -0,0 +1,242 @@ +package main + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/client" + "github.com/spectre-project/spectred/cmd/spectrewallet/daemon/pb" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet/serialization" + "github.com/spectre-project/spectred/cmd/spectrewallet/utils" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + 
"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/util/txmass" +) + +const feePerInput = 10000 + +func sweep(conf *sweepConfig) error { + + privateKeyBytes, err := hex.DecodeString(conf.PrivateKey) + if err != nil { + return err + } + + publicKeybytes, err := libspectrewallet.PublicKeyFromPrivateKey(privateKeyBytes) + if err != nil { + return err + } + + addressPubKey, err := util.NewAddressPublicKey(publicKeybytes, conf.NetParams().Prefix) + if err != nil { + return err + } + + address, err := util.DecodeAddress(addressPubKey.String(), conf.NetParams().Prefix) + if err != nil { + return err + } + + daemonClient, tearDown, err := client.Connect(conf.DaemonAddress) + if err != nil { + return err + } + defer tearDown() + + ctx, cancel := context.WithTimeout(context.Background(), daemonTimeout) + defer cancel() + + getExternalSpendableUTXOsResponse, err := daemonClient.GetExternalSpendableUTXOs(ctx, &pb.GetExternalSpendableUTXOsRequest{ + Address: address.String(), + }) + if err != nil { + return err + } + + UTXOs, err := libspectrewallet.SpectrewalletdUTXOsTolibspectrewalletUTXOs(getExternalSpendableUTXOsResponse.Entries) + if err != nil { + return err + } + + paymentAmount := uint64(0) + + if len(UTXOs) == 0 { + return errors.Errorf("Could not find any spendable UTXOs in %s", addressPubKey) + } + + for _, UTXO := range UTXOs { + paymentAmount = paymentAmount + UTXO.UTXOEntry.Amount() + } + + newAddressResponse, err := daemonClient.NewAddress(ctx, 
&pb.NewAddressRequest{}) + if err != nil { + return err + } + + toAddress, err := util.DecodeAddress(newAddressResponse.Address, conf.ActiveNetParams.Prefix) + if err != nil { + return err + } + + splitTransactions, err := createSplitTransactionsWithSchnorrPrivteKey(conf.NetParams(), UTXOs, toAddress, feePerInput) + if err != nil { + return err + } + + serializedSplitTransactions, err := signWithSchnorrPrivateKey(conf.NetParams(), privateKeyBytes, splitTransactions) + if err != nil { + return err + } + + fmt.Println("\nSweeping...") + fmt.Println("\tFrom:\t", addressPubKey) + fmt.Println("\tTo:\t", toAddress) + + response, err := daemonClient.Broadcast(ctx, &pb.BroadcastRequest{ + IsDomain: true, + Transactions: serializedSplitTransactions, + }) + if err != nil { + return err + } + + totalExtracted := uint64(0) + + fmt.Println("\nTransaction ID(s):") + for i, txID := range response.TxIDs { + fmt.Printf("\t%s\n", txID) + fmt.Println("\tSwept:\t", utils.FormatSpr(splitTransactions[i].Outputs[0].Value), " SPR") + totalExtracted = totalExtracted + splitTransactions[i].Outputs[0].Value + } + + fmt.Println("\nTotal Funds swept (including transaction fees):") + fmt.Println("\t", utils.FormatSpr(totalExtracted), " SPR") + + return nil +} + +func newDummyTransaction() *externalapi.DomainTransaction { + return &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: make([]*externalapi.DomainTransactionInput, 0), //we create empty inputs + LockTime: 0, + Outputs: make([]*externalapi.DomainTransactionOutput, 1), // we should always have 1 output to the toAdress + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Payload: nil, + } +} + +func createSplitTransactionsWithSchnorrPrivteKey( + params *dagconfig.Params, + selectedUTXOs []*libspectrewallet.UTXO, + toAddress util.Address, + feePerInput int) ([]*externalapi.DomainTransaction, error) { + + var splitTransactions []*externalapi.DomainTransaction + + extraMass := uint64(7000) // Account 
for future signatures. + + massCalculater := txmass.NewCalculator(params.MassPerTxByte, params.MassPerScriptPubKeyByte, params.MassPerSigOp) + + scriptPublicKey, err := txscript.PayToAddrScript(toAddress) + if err != nil { + return nil, err + } + + totalSplitAmount := uint64(0) + + lastValidTx := newDummyTransaction() + currentTx := newDummyTransaction() //i.e. the tested tx + + //loop through utxos commit segments that don't violate max mass + for i, currentUTXO := range selectedUTXOs { + + totalSplitAmount = totalSplitAmount + currentUTXO.UTXOEntry.Amount() + + currentTx.Inputs = append( + currentTx.Inputs, + &externalapi.DomainTransactionInput{ + PreviousOutpoint: *currentUTXO.Outpoint, + UTXOEntry: utxo.NewUTXOEntry( + currentUTXO.UTXOEntry.Amount(), + currentUTXO.UTXOEntry.ScriptPublicKey(), + false, + constants.UnacceptedDAAScore, + ), + SigOpCount: 1, + }, + ) + + currentTx.Outputs[0] = &externalapi.DomainTransactionOutput{ + Value: totalSplitAmount - uint64(len(currentTx.Inputs)*feePerInput), + ScriptPublicKey: scriptPublicKey, + } + + if massCalculater.CalculateTransactionMass(currentTx)+extraMass >= mempool.MaximumStandardTransactionMass { + + //in this loop we assume a transaction with one input and one output cannot violate max transaction mass, hence a sanity check. 
+ if len(currentTx.Inputs) == 1 { + return nil, errors.Errorf("transaction with one input and one output violates transaction mass") + } + + splitTransactions = append(splitTransactions, lastValidTx) + totalSplitAmount = 0 + lastValidTx = newDummyTransaction() + currentTx = newDummyTransaction() + continue + } + + //Special case, end of inputs, with no violation, where we can assign currentTX to split and break + if i == len(selectedUTXOs)-1 { + splitTransactions = append(splitTransactions, currentTx) + break + + } + + lastValidTx = currentTx.Clone() + currentTx.Outputs = make([]*externalapi.DomainTransactionOutput, 1) + + } + return splitTransactions, nil +} + +func signWithSchnorrPrivateKey(params *dagconfig.Params, privateKeyBytes []byte, domainTransactions []*externalapi.DomainTransaction) ([][]byte, error) { + + schnorrkeyPair, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes) + if err != nil { + return nil, err + } + + serializedDomainTransactions := make([][]byte, len(domainTransactions)) + + for i1, domainTransaction := range domainTransactions { + + sighashReusedValues := &consensushashing.SighashReusedValues{} + + for i2, input := range domainTransaction.Inputs { + signature, err := txscript.SignatureScript(domainTransaction, i2, consensushashing.SigHashAll, schnorrkeyPair, sighashReusedValues) + if err != nil { + return nil, err + } + input.SignatureScript = signature + } + serializedDomainTransactions[i1], err = serialization.SerializeDomainTransaction(domainTransaction) + if err != nil { + return nil, err + } + } + + return serializedDomainTransactions, nil +} diff --git a/cmd/spectrewallet/transactions_hex_encoding.go b/cmd/spectrewallet/transactions_hex_encoding.go new file mode 100644 index 0000000..e7e857b --- /dev/null +++ b/cmd/spectrewallet/transactions_hex_encoding.go @@ -0,0 +1,33 @@ +package main + +import ( + "encoding/hex" + "strings" +) + +// hexTransactionsSeparator is used to mark the end of one transaction and the 
beginning of the next one. +// We use a separator that is not in the hex alphabet, but which will not split selection with a double click +const hexTransactionsSeparator = "_" + +func encodeTransactionsToHex(transactions [][]byte) string { + transactionsInHex := make([]string, len(transactions)) + for i, transaction := range transactions { + transactionsInHex[i] = hex.EncodeToString(transaction) + } + return strings.Join(transactionsInHex, hexTransactionsSeparator) +} + +func decodeTransactionsFromHex(transactionsHex string) ([][]byte, error) { + splitTransactionsHexes := strings.Split(transactionsHex, hexTransactionsSeparator) + transactions := make([][]byte, len(splitTransactionsHexes)) + + var err error + for i, transactionHex := range splitTransactionsHexes { + transactions[i], err = hex.DecodeString(transactionHex) + if err != nil { + return nil, err + } + } + + return transactions, nil +} diff --git a/cmd/spectrewallet/utils/format_spr.go b/cmd/spectrewallet/utils/format_spr.go new file mode 100644 index 0000000..b990f10 --- /dev/null +++ b/cmd/spectrewallet/utils/format_spr.go @@ -0,0 +1,68 @@ +package utils + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" +) + +// FormatSpr takes the amount of sompis as uint64, and returns amount of SPR with 8 decimal places +func FormatSpr(amount uint64) string { + res := " " + if amount > 0 { + res = fmt.Sprintf("%19.8f", float64(amount)/constants.SompiPerSpectre) + } + return res +} + +// SprToSompi takes in a string representation of the Spr value to convert to Sompi +func SprToSompi(amount string) (uint64, error) { + err := validateSPRAmountFormat(amount) + + if err != nil { + return 0, err + } + + // after validation, amount can only be either an int OR + // a float with an int component and decimal places + parts := strings.Split(amount, ".") + amountStr := "" + + if constants.SompiPerSpectre%10 != 0 { 
+ return 0, errors.Errorf("Unable to convert to sompi when SompiPerSpectre is not a multiple of 10") + } + + decimalPlaces := int(math.Log10(constants.SompiPerSpectre)) + decimalStr := "" + + if len(parts) == 2 { + decimalStr = parts[1] + } + + amountStr = fmt.Sprintf("%s%-*s", parts[0], decimalPlaces, decimalStr) // Padded with spaces at the end to fill for missing decimals: Sample "0.01234 " + amountStr = strings.ReplaceAll(amountStr, " ", "0") // Make the spaces be 0s. Sample "0.012340000" + + convertedAmount, err := strconv.ParseUint(amountStr, 10, 64) + + return convertedAmount, err +} + +func validateSPRAmountFormat(amount string) error { + // Check whether it's an integer, or a float with max 8 digits + match, err := regexp.MatchString("^([1-9]\\d{0,11}|0)(\\.\\d{0,8})?$", amount) + + if !match { + return errors.Errorf("Invalid amount") + } + + if err != nil { + return err + } + + return nil +} diff --git a/cmd/spectrewallet/utils/format_spr_test.go b/cmd/spectrewallet/utils/format_spr_test.go new file mode 100644 index 0000000..b55c218 --- /dev/null +++ b/cmd/spectrewallet/utils/format_spr_test.go @@ -0,0 +1,90 @@ +package utils + +import "testing" + +// Takes in a string representation of the Spr value to convert to Sompi +func TestSprToSompi(t *testing.T) { + type testVector struct { + originalAmount string + convertedAmount uint64 + } + + validCases := []testVector{ + {originalAmount: "0", convertedAmount: 0}, + {originalAmount: "1", convertedAmount: 100000000}, + {originalAmount: "33184.1489732", convertedAmount: 3318414897320}, + {originalAmount: "21.35808032", convertedAmount: 2135808032}, + {originalAmount: "184467440737.09551615", convertedAmount: 18446744073709551615}, + } + + for _, currentTestVector := range validCases { + convertedAmount, err := SprToSompi(currentTestVector.originalAmount) + + if err != nil { + t.Error(err) + } else if convertedAmount != currentTestVector.convertedAmount { + t.Errorf("Expected %s, to convert to %d. 
Got: %d", currentTestVector.originalAmount, currentTestVector.convertedAmount, convertedAmount) + } + } + + invalidCases := []string{ + "184467440737.09551616", // Bigger than max uint64 + "-1", + "a", + "", + } + + for _, currentTestVector := range invalidCases { + _, err := SprToSompi(currentTestVector) + + if err == nil { + t.Errorf("Expected an error but succeeded validation for test case %s", currentTestVector) + } + } +} + +func TestValidateAmountFormat(t *testing.T) { + validCases := []string{ + "0", + "1", + "1.0", + "0.1", + "0.12345678", + "111111111111.11111111", // 12 digits to the left of decimal, 8 digits to the right + "184467440737.09551615", // Maximum input that can be represented in sompi later + "184467440737.09551616", // Cannot be represented in sompi, but we'll acccept for "correct format" + "999999999999.99999999", // Cannot be represented in sompi, but we'll acccept for "correct format" + } + + for _, testCase := range validCases { + err := validateSPRAmountFormat(testCase) + + if err != nil { + t.Error(err) + } + } + + invalidCases := []string{ + "", + "a", + "-1", + "0.123456789", // 9 decimal digits + ".1", // decimal but no integer component + "0a", // Extra character + "0000000000000", // 13 zeros + "012", // Int padded with zero + "00.1", // Decimal padded with zeros + "111111111111111111111", // all digits + "111111111111A11111111", // non-period/non-digit where decimal would be + "000000000000.00000000", // all zeros + "spectre", // all text + } + + for _, testCase := range invalidCases { + err := validateSPRAmountFormat(testCase) + + if err == nil { + t.Errorf("Expected an error but succeeded validation for test case %s", testCase) + } + } +} diff --git a/cmd/spectrewallet/utils/readline.go b/cmd/spectrewallet/utils/readline.go new file mode 100644 index 0000000..7b83cb0 --- /dev/null +++ b/cmd/spectrewallet/utils/readline.go @@ -0,0 +1,18 @@ +package utils + +import ( + "bufio" + "strings" + + "github.com/pkg/errors" +) + +// 
ReadLine reads one line from the given reader with trimmed white space. +func ReadLine(reader *bufio.Reader) (string, error) { + line, err := reader.ReadBytes('\n') + if err != nil { + return "", errors.WithStack(err) + } + + return strings.TrimSpace(string(line)), nil +} diff --git a/doc.go b/doc.go new file mode 100644 index 0000000..6abfca3 --- /dev/null +++ b/doc.go @@ -0,0 +1,30 @@ +/* +Copyright (c) 2024-2024 The Spectre developers +Copyright (c) 2018-2019 The kaspanet developers +Copyright (c) 2013-2018 The btcsuite developers +Copyright (c) 2015-2016 The Decred developers +Copyright (c) 2013-2014 Conformal Systems LLC. +Use of this source code is governed by an ISC +license that can be found in the LICENSE file. + +Spectred is a full-node spectre implementation written in Go. + +The default options are sane for most users. This means spectred will work 'out of +the box' for most users. However, there are also a wide variety of flags that +can be used to control it. + +Usage: + + spectred [OPTIONS] + +For an up-to-date help message: + + spectred --help + +The long form of all option flags (except -C) can be specified in a configuration +file that is automatically parsed when spectred starts up. By default, the +configuration file is located at ~/.spectred/spectred.conf on POSIX-style operating +systems and %LOCALAPPDATA%\spectred\spectred.conf on Windows. The -C (--configfile) +flag can be used to override this location. +*/ +package main diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..c2c8e1c --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,31 @@ +# -- multistage docker build: stage #1: build stage +FROM golang:1.19-alpine AS build + +RUN mkdir -p /go/src/github.com/spectre-project/spectred/ + +WORKDIR /go/src/github.com/spectre-project/spectred/ + +RUN apk add --no-cache curl git openssh binutils gcc musl-dev + +COPY go.mod . +COPY go.sum . + + +# Cache spectred dependencies +RUN go mod download + +COPY . . 
+ +RUN go build $FLAGS -o spectred . + +# --- multistage docker build: stage #2: runtime image +FROM alpine +WORKDIR /app + +RUN apk add --no-cache ca-certificates tini + +COPY --from=build /go/src/github.com/spectre-project/spectred/spectred /app/ +COPY --from=build /go/src/github.com/spectre-project/spectred/infrastructure/config/sample-spectred.conf /app/ + +USER root +ENTRYPOINT [ "/sbin/tini", "--" ] diff --git a/docker/build.sh b/docker/build.sh new file mode 100644 index 0000000..ab47d9e --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,17 @@ +#!/bin/sh +PUSH=$1 +DOCKER_REPO=spectrenetwork/spectred + +set -e + +tag=$(git log -n1 --format="%cs.%h") + +cd $(dirname $(cd $(dirname $0); pwd)) +docker build --pull -t $DOCKER_REPO:$tag -f docker/Dockerfile . +docker tag $DOCKER_REPO:$tag $DOCKER_REPO:latest +echo Tagged $DOCKER_REPO:latest + +if [ "$PUSH" = "push" ]; then + docker push $DOCKER_REPO:$tag + docker push $DOCKER_REPO:latest +fi diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml new file mode 100644 index 0000000..5d9e984 --- /dev/null +++ b/docker/docker-compose.yaml @@ -0,0 +1,16 @@ +version: "3" + +services: + + spectred: + container_name: spectred + image: spectrenetwork/spectred:latest + restart: unless-stopped + ports: + - "18110:18110/tcp" + - "18111:18111/tcp" + volumes: + - $HOME/.spectred:/app/data/ +# use managed volumes +# - spectred:/app/data/ + command: ./spectred --utxoindex --connect=mainnet-dnsseed-1.spectre-network.org --appdir=/app/data diff --git a/domain/consensus/consensus.go b/domain/consensus/consensus.go new file mode 100644 index 0000000..0c038a4 --- /dev/null +++ b/domain/consensus/consensus.go @@ -0,0 +1,1148 @@ +package consensus + +import ( + "math/big" + "sync" + + "github.com/spectre-project/spectred/util/mstime" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/model" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/staging" +) + +type consensus struct { + lock *sync.Mutex + databaseContext model.DBManager + + genesisBlock *externalapi.DomainBlock + genesisHash *externalapi.DomainHash + + expectedDAAWindowDurationInMilliseconds int64 + + blockProcessor model.BlockProcessor + blockBuilder model.BlockBuilder + consensusStateManager model.ConsensusStateManager + transactionValidator model.TransactionValidator + syncManager model.SyncManager + pastMedianTimeManager model.PastMedianTimeManager + blockValidator model.BlockValidator + coinbaseManager model.CoinbaseManager + dagTopologyManagers []model.DAGTopologyManager + dagTraversalManager model.DAGTraversalManager + difficultyManager model.DifficultyManager + ghostdagManagers []model.GHOSTDAGManager + headerTipsManager model.HeadersSelectedTipManager + mergeDepthManager model.MergeDepthManager + pruningManager model.PruningManager + reachabilityManager model.ReachabilityManager + finalityManager model.FinalityManager + pruningProofManager model.PruningProofManager + + acceptanceDataStore model.AcceptanceDataStore + blockStore model.BlockStore + blockHeaderStore model.BlockHeaderStore + pruningStore model.PruningStore + ghostdagDataStores []model.GHOSTDAGDataStore + blockRelationStores []model.BlockRelationStore + blockStatusStore model.BlockStatusStore + consensusStateStore model.ConsensusStateStore + headersSelectedTipStore model.HeaderSelectedTipStore + multisetStore model.MultisetStore + reachabilityDataStore model.ReachabilityDataStore + utxoDiffStore model.UTXODiffStore + finalityStore model.FinalityStore + headersSelectedChainStore model.HeadersSelectedChainStore + daaBlocksStore model.DAABlocksStore + blocksWithTrustedDataDAAWindowStore model.BlocksWithTrustedDataDAAWindowStore + + 
consensusEventsChan chan externalapi.ConsensusEvent + virtualNotUpdated bool +} + +// In order to prevent a situation that the consensus lock is held for too much time, we +// release the lock each time we resolve 100 blocks. +// Note: `virtualResolveChunk` should be smaller than `params.FinalityDuration` in order to avoid a situation +// where UpdatePruningPointByVirtual skips a pruning point. +const virtualResolveChunk = 100 + +func (s *consensus) ValidateAndInsertBlockWithTrustedData(block *externalapi.BlockWithTrustedData, validateUTXO bool) error { + s.lock.Lock() + defer s.lock.Unlock() + + _, _, err := s.blockProcessor.ValidateAndInsertBlockWithTrustedData(block, validateUTXO) + if err != nil { + return err + } + return nil +} + +// Init initializes consensus +func (s *consensus) Init(skipAddingGenesis bool) error { + s.lock.Lock() + defer s.lock.Unlock() + + onEnd := logger.LogAndMeasureExecutionTime(log, "Init") + defer onEnd() + + stagingArea := model.NewStagingArea() + + exists, err := s.blockStatusStore.Exists(s.databaseContext, stagingArea, model.VirtualGenesisBlockHash) + if err != nil { + return err + } + + // There should always be a virtual genesis block. Initially only the genesis points to this block, but + // on a node with pruned header all blocks without known parents points to it. 
+ if !exists { + s.blockStatusStore.Stage(stagingArea, model.VirtualGenesisBlockHash, externalapi.StatusUTXOValid) + err = s.reachabilityManager.Init(stagingArea) + if err != nil { + return err + } + + for _, dagTopologyManager := range s.dagTopologyManagers { + err = dagTopologyManager.SetParents(stagingArea, model.VirtualGenesisBlockHash, nil) + if err != nil { + return err + } + } + + s.consensusStateStore.StageTips(stagingArea, []*externalapi.DomainHash{model.VirtualGenesisBlockHash}) + for _, ghostdagDataStore := range s.ghostdagDataStores { + ghostdagDataStore.Stage(stagingArea, model.VirtualGenesisBlockHash, externalapi.NewBlockGHOSTDAGData( + 0, + big.NewInt(0), + nil, + nil, + nil, + nil, + ), false) + } + + err = staging.CommitAllChanges(s.databaseContext, stagingArea) + if err != nil { + return err + } + } + + // The genesis should be added to the DAG if it's a fresh consensus, unless said otherwise (on a + // case where the consensus is used for a pruned headers node). + if !skipAddingGenesis && s.blockStore.Count(stagingArea) == 0 { + genesisWithTrustedData := &externalapi.BlockWithTrustedData{ + Block: s.genesisBlock, + DAAWindow: nil, + GHOSTDAGData: []*externalapi.BlockGHOSTDAGDataHashPair{ + { + GHOSTDAGData: externalapi.NewBlockGHOSTDAGData(0, big.NewInt(0), model.VirtualGenesisBlockHash, nil, nil, make(map[externalapi.DomainHash]externalapi.KType)), + Hash: s.genesisHash, + }, + }, + } + _, _, err = s.blockProcessor.ValidateAndInsertBlockWithTrustedData(genesisWithTrustedData, true) + if err != nil { + return err + } + } + + return nil +} + +func (s *consensus) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.pruningManager.PruningPointAndItsAnticone() +} + +// BuildBlock builds a block over the current state, with the transactions +// selected by the given transactionSelector +func (s *consensus) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, + transactions 
[]*externalapi.DomainTransaction) (*externalapi.DomainBlock, error) { + + s.lock.Lock() + defer s.lock.Unlock() + + block, _, err := s.blockBuilder.BuildBlock(coinbaseData, transactions) + return block, err +} + +// BuildBlockTemplate builds a block over the current state, with the transactions +// selected by the given transactionSelector plus metadata information related to +// coinbase rewards and node sync status +func (s *consensus) BuildBlockTemplate(coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlockTemplate, error) { + + s.lock.Lock() + defer s.lock.Unlock() + + block, hasRedReward, err := s.blockBuilder.BuildBlock(coinbaseData, transactions) + if err != nil { + return nil, err + } + + isNearlySynced, err := s.isNearlySyncedNoLock() + if err != nil { + return nil, err + } + + return &externalapi.DomainBlockTemplate{ + Block: block, + CoinbaseData: coinbaseData, + CoinbaseHasRedReward: hasRedReward, + IsNearlySynced: isNearlySynced, + }, nil +} + +// ValidateAndInsertBlock validates the given block and, if valid, applies it +// to the current state +func (s *consensus) ValidateAndInsertBlock(block *externalapi.DomainBlock, updateVirtual bool) error { + if updateVirtual { + s.lock.Lock() + if s.virtualNotUpdated { + // We enter the loop in locked state + for { + _, isCompletelyResolved, err := s.resolveVirtualChunkNoLock(virtualResolveChunk) + if err != nil { + s.lock.Unlock() + return err + } + if isCompletelyResolved { + // Make sure we enter the block insertion function w/o releasing the lock. 
+ // Otherwise, we might actually enter it in `s.virtualNotUpdated == true` state + _, err = s.validateAndInsertBlockNoLock(block, updateVirtual) + // Finally, unlock for the last iteration and return + s.lock.Unlock() + if err != nil { + return err + } + return nil + } + // Unlock to allow other threads to enter consensus + s.lock.Unlock() + // Lock for the next iteration + s.lock.Lock() + } + } + _, err := s.validateAndInsertBlockNoLock(block, updateVirtual) + s.lock.Unlock() + if err != nil { + return err + } + return nil + } + + return s.validateAndInsertBlockWithLock(block, updateVirtual) +} + +func (s *consensus) validateAndInsertBlockWithLock(block *externalapi.DomainBlock, updateVirtual bool) error { + s.lock.Lock() + defer s.lock.Unlock() + + _, err := s.validateAndInsertBlockNoLock(block, updateVirtual) + if err != nil { + return err + } + return nil +} + +func (s *consensus) validateAndInsertBlockNoLock(block *externalapi.DomainBlock, updateVirtual bool) (*externalapi.VirtualChangeSet, error) { + virtualChangeSet, blockStatus, err := s.blockProcessor.ValidateAndInsertBlock(block, updateVirtual) + if err != nil { + return nil, err + } + + // If block has a body, and yet virtual was not updated -- signify that virtual is in non-updated state + if !updateVirtual && blockStatus != externalapi.StatusHeaderOnly { + s.virtualNotUpdated = true + } + + err = s.sendBlockAddedEvent(block, blockStatus) + if err != nil { + return nil, err + } + + err = s.sendVirtualChangedEvent(virtualChangeSet, updateVirtual) + if err != nil { + return nil, err + } + + return virtualChangeSet, nil +} + +func (s *consensus) sendBlockAddedEvent(block *externalapi.DomainBlock, blockStatus externalapi.BlockStatus) error { + if s.consensusEventsChan != nil { + if blockStatus == externalapi.StatusHeaderOnly || blockStatus == externalapi.StatusInvalid { + return nil + } + + if len(s.consensusEventsChan) == cap(s.consensusEventsChan) { + return errors.Errorf("consensusEventsChan is full") + 
} + s.consensusEventsChan <- &externalapi.BlockAdded{Block: block} + } + return nil +} + +func (s *consensus) sendVirtualChangedEvent(virtualChangeSet *externalapi.VirtualChangeSet, wasVirtualUpdated bool) error { + if !wasVirtualUpdated || s.consensusEventsChan == nil || virtualChangeSet == nil { + return nil + } + + if len(s.consensusEventsChan) == cap(s.consensusEventsChan) { + return errors.Errorf("consensusEventsChan is full") + } + + stagingArea := model.NewStagingArea() + virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return err + } + + virtualSelectedParentGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, virtualGHOSTDAGData.SelectedParent(), false) + if err != nil { + return err + } + + virtualDAAScore, err := s.daaBlocksStore.DAAScore(s.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return err + } + + // Populate the change set with additional data before sending + virtualChangeSet.VirtualSelectedParentBlueScore = virtualSelectedParentGHOSTDAGData.BlueScore() + virtualChangeSet.VirtualDAAScore = virtualDAAScore + + s.consensusEventsChan <- virtualChangeSet + return nil +} + +// ValidateTransactionAndPopulateWithConsensusData validates the given transaction +// and populates it with any missing consensus data +func (s *consensus) ValidateTransactionAndPopulateWithConsensusData(transaction *externalapi.DomainTransaction) error { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + daaScore, err := s.daaBlocksStore.DAAScore(s.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return err + } + + err = s.transactionValidator.ValidateTransactionInIsolation(transaction, daaScore) + if err != nil { + return err + } + + err = s.consensusStateManager.PopulateTransactionWithUTXOEntries(stagingArea, transaction) + if err != nil { + return err + } + + 
virtualPastMedianTime, err := s.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash) + if err != nil { + return err + } + + err = s.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, transaction, model.VirtualBlockHash, virtualPastMedianTime) + if err != nil { + return err + } + return s.transactionValidator.ValidateTransactionInContextAndPopulateFee( + stagingArea, transaction, model.VirtualBlockHash) +} + +func (s *consensus) GetBlock(blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + block, err := s.blockStore.Block(s.databaseContext, stagingArea, blockHash) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return nil, false, nil + } + return nil, false, err + } + return block, true, nil +} + +func (s *consensus) GetBlockEvenIfHeaderOnly(blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + block, err := s.blockStore.Block(s.databaseContext, stagingArea, blockHash) + if err == nil { + return block, nil + } + if !errors.Is(err, database.ErrNotFound) { + return nil, err + } + + header, err := s.blockHeaderStore.BlockHeader(s.databaseContext, stagingArea, blockHash) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return nil, errors.Wrapf(err, "block %s does not exist", blockHash) + } + return nil, err + } + return &externalapi.DomainBlock{Header: header}, nil +} + +func (s *consensus) GetBlockHeader(blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + blockHeader, err := s.blockHeaderStore.BlockHeader(s.databaseContext, stagingArea, blockHash) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return nil, errors.Wrapf(err, "block header %s does not exist", blockHash) 
+ } + return nil, err + } + return blockHeader, nil +} + +func (s *consensus) GetBlockInfo(blockHash *externalapi.DomainHash) (*externalapi.BlockInfo, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + blockInfo := &externalapi.BlockInfo{} + + exists, err := s.blockStatusStore.Exists(s.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + blockInfo.Exists = exists + if !exists { + return blockInfo, nil + } + + blockStatus, err := s.blockStatusStore.Get(s.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + blockInfo.BlockStatus = blockStatus + + // If the status is invalid, then we don't have the necessary reachability data to check if it's in PruningPoint.Future. + if blockStatus == externalapi.StatusInvalid { + return blockInfo, nil + } + + ghostdagData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, err + } + + blockInfo.BlueScore = ghostdagData.BlueScore() + blockInfo.BlueWork = ghostdagData.BlueWork() + blockInfo.SelectedParent = ghostdagData.SelectedParent() + blockInfo.MergeSetBlues = ghostdagData.MergeSetBlues() + blockInfo.MergeSetReds = ghostdagData.MergeSetReds() + + return blockInfo, nil +} + +func (s *consensus) GetBlockRelations(blockHash *externalapi.DomainHash) ( + parents []*externalapi.DomainHash, children []*externalapi.DomainHash, err error) { + + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + blockRelation, err := s.blockRelationStores[0].BlockRelation(s.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, nil, err + } + + return blockRelation.Parents, blockRelation.Children, nil +} + +func (s *consensus) GetBlockAcceptanceData(blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := 
s.validateBlockHashExists(stagingArea, blockHash) + if err != nil { + return nil, err + } + + return s.acceptanceDataStore.Get(s.databaseContext, stagingArea, blockHash) +} + +func (s *consensus) GetBlocksAcceptanceData(blockHashes []*externalapi.DomainHash) ([]externalapi.AcceptanceData, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + blocksAcceptanceData := make([]externalapi.AcceptanceData, len(blockHashes)) + + for i, blockHash := range blockHashes { + err := s.validateBlockHashExists(stagingArea, blockHash) + if err != nil { + return nil, err + } + + acceptanceData, err := s.acceptanceDataStore.Get(s.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + + blocksAcceptanceData[i] = acceptanceData + } + + return blocksAcceptanceData, nil +} + +func (s *consensus) GetHashesBetween(lowHash, highHash *externalapi.DomainHash, maxBlocks uint64) ( + hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) { + + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err = s.validateBlockHashExists(stagingArea, lowHash) + if err != nil { + return nil, nil, err + } + err = s.validateBlockHashExists(stagingArea, highHash) + if err != nil { + return nil, nil, err + } + + return s.syncManager.GetHashesBetween(stagingArea, lowHash, highHash, maxBlocks) +} + +func (s *consensus) GetAnticone(blockHash, contextHash *externalapi.DomainHash, + maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err = s.validateBlockHashExists(stagingArea, blockHash) + if err != nil { + return nil, err + } + err = s.validateBlockHashExists(stagingArea, contextHash) + if err != nil { + return nil, err + } + + return s.syncManager.GetAnticone(stagingArea, blockHash, contextHash, maxBlocks) +} + +func (s *consensus) GetMissingBlockBodyHashes(highHash 
*externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := s.validateBlockHashExists(stagingArea, highHash) + if err != nil { + return nil, err + } + + return s.syncManager.GetMissingBlockBodyHashes(stagingArea, highHash) +} + +func (s *consensus) GetPruningPointUTXOs(expectedPruningPointHash *externalapi.DomainHash, + fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) { + + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + pruningPointHash, err := s.pruningStore.PruningPoint(s.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if !expectedPruningPointHash.Equal(pruningPointHash) { + return nil, errors.Wrapf(ruleerrors.ErrWrongPruningPointHash, "expected pruning point %s but got %s", + expectedPruningPointHash, + pruningPointHash) + } + + pruningPointUTXOs, err := s.pruningStore.PruningPointUTXOs(s.databaseContext, fromOutpoint, limit) + if err != nil { + return nil, err + } + return pruningPointUTXOs, nil +} + +func (s *consensus) GetVirtualUTXOs(expectedVirtualParents []*externalapi.DomainHash, + fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) { + + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + virtualParents, err := s.dagTopologyManagers[0].Parents(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + if !externalapi.HashesEqual(expectedVirtualParents, virtualParents) { + return nil, errors.Wrapf(ruleerrors.ErrGetVirtualUTXOsWrongVirtualParents, "expected virtual parents %s but got %s", + expectedVirtualParents, + virtualParents) + } + + virtualUTXOs, err := s.consensusStateStore.VirtualUTXOs(s.databaseContext, fromOutpoint, limit) + if err != nil { + return nil, err + } + return virtualUTXOs, nil +} + +func (s *consensus) PruningPoint() 
(*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.pruningStore.PruningPoint(s.databaseContext, stagingArea) +} + +func (s *consensus) PruningPointHeaders() ([]externalapi.BlockHeader, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + lastPruningPointIndex, err := s.pruningStore.CurrentPruningPointIndex(s.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + headers := make([]externalapi.BlockHeader, 0, lastPruningPointIndex) + for i := uint64(0); i <= lastPruningPointIndex; i++ { + pruningPoint, err := s.pruningStore.PruningPointByIndex(s.databaseContext, stagingArea, i) + if err != nil { + return nil, err + } + + header, err := s.blockHeaderStore.BlockHeader(s.databaseContext, stagingArea, pruningPoint) + if err != nil { + return nil, err + } + + headers = append(headers, header) + } + + return headers, nil +} + +func (s *consensus) ClearImportedPruningPointData() error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.pruningManager.ClearImportedPruningPointData() +} + +func (s *consensus) AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.pruningManager.AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs) +} + +func (s *consensus) ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainHash) error { + s.lock.Lock() + defer s.lock.Unlock() + + return s.blockProcessor.ValidateAndInsertImportedPruningPoint(newPruningPoint) +} + +func (s *consensus) GetVirtualSelectedParent() (*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + return 
virtualGHOSTDAGData.SelectedParent(), nil +} + +func (s *consensus) Tips() ([]*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.consensusStateStore.Tips(stagingArea, s.databaseContext) +} + +func (s *consensus) GetVirtualInfo() (*externalapi.VirtualInfo, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + blockRelations, err := s.blockRelationStores[0].BlockRelation(s.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + bits, err := s.difficultyManager.RequiredDifficulty(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + pastMedianTime, err := s.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + + daaScore, err := s.daaBlocksStore.DAAScore(s.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + return &externalapi.VirtualInfo{ + ParentHashes: blockRelations.Parents, + Bits: bits, + PastMedianTime: pastMedianTime, + BlueScore: virtualGHOSTDAGData.BlueScore(), + DAAScore: daaScore, + }, nil +} + +func (s *consensus) GetVirtualDAAScore() (uint64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.daaBlocksStore.DAAScore(s.databaseContext, stagingArea, model.VirtualBlockHash) +} + +func (s *consensus) CreateBlockLocatorFromPruningPoint(highHash *externalapi.DomainHash, limit uint32) (externalapi.BlockLocator, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := s.validateBlockHashExists(stagingArea, highHash) + if err != nil { + return nil, err + } + + pruningPoint, err := 
s.pruningStore.PruningPoint(s.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + return s.syncManager.CreateBlockLocator(stagingArea, pruningPoint, highHash, limit) +} + +func (s *consensus) CreateFullHeadersSelectedChainBlockLocator() (externalapi.BlockLocator, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + lowHash, err := s.pruningStore.PruningPoint(s.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + highHash, err := s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + return s.syncManager.CreateHeadersSelectedChainBlockLocator(stagingArea, lowHash, highHash) +} + +func (s *consensus) CreateHeadersSelectedChainBlockLocator(lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.syncManager.CreateHeadersSelectedChainBlockLocator(stagingArea, lowHash, highHash) +} + +func (s *consensus) GetSyncInfo() (*externalapi.SyncInfo, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.syncManager.GetSyncInfo(stagingArea) +} + +func (s *consensus) IsValidPruningPoint(blockHash *externalapi.DomainHash) (bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := s.validateBlockHashExists(stagingArea, blockHash) + if err != nil { + return false, err + } + + return s.pruningManager.IsValidPruningPoint(stagingArea, blockHash) +} + +func (s *consensus) ArePruningPointsViolatingFinality(pruningPoints []externalapi.BlockHeader) (bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.pruningManager.ArePruningPointsViolatingFinality(stagingArea, pruningPoints) +} + +func (s *consensus) ImportPruningPoints(pruningPoints []externalapi.BlockHeader) error { + 
s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + err := s.consensusStateManager.ImportPruningPoints(stagingArea, pruningPoints) + if err != nil { + return err + } + + err = staging.CommitAllChanges(s.databaseContext, stagingArea) + if err != nil { + return err + } + + return nil +} + +func (s *consensus) GetVirtualSelectedParentChainFromBlock(blockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := s.validateBlockHashExists(stagingArea, blockHash) + if err != nil { + return nil, err + } + + return s.consensusStateManager.GetVirtualSelectedParentChainFromBlock(stagingArea, blockHash) +} + +func (s *consensus) validateBlockHashExists(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error { + status, err := s.blockStatusStore.Get(s.databaseContext, stagingArea, blockHash) + if database.IsNotFoundError(err) { + return errors.Errorf("block %s does not exist", blockHash) + } + if err != nil { + return err + } + + if status == externalapi.StatusInvalid { + return errors.Errorf("block %s is invalid", blockHash) + } + return nil +} + +func (s *consensus) IsInSelectedParentChainOf(blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := s.validateBlockHashExists(stagingArea, blockHashA) + if err != nil { + return false, err + } + err = s.validateBlockHashExists(stagingArea, blockHashB) + if err != nil { + return false, err + } + + return s.dagTopologyManagers[0].IsInSelectedParentChainOf(stagingArea, blockHashA, blockHashB) +} + +func (s *consensus) GetHeadersSelectedTip() (*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + return s.headersSelectedTipStore.HeadersSelectedTip(s.databaseContext, stagingArea) +} + +func (s 
*consensus) Anticone(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + + err := s.validateBlockHashExists(stagingArea, blockHash) + if err != nil { + return nil, err + } + + tips, err := s.consensusStateStore.Tips(stagingArea, s.databaseContext) + if err != nil { + return nil, err + } + + return s.dagTraversalManager.AnticoneFromBlocks(stagingArea, tips, blockHash, 0) +} + +func (s *consensus) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.difficultyManager.EstimateNetworkHashesPerSecond(startHash, windowSize) +} + +func (s *consensus) PopulateMass(transaction *externalapi.DomainTransaction) { + s.transactionValidator.PopulateMass(transaction) +} + +func (s *consensus) ResolveVirtual(progressReportCallback func(uint64, uint64)) error { + virtualDAAScoreStart, err := s.GetVirtualDAAScore() + if err != nil { + return err + } + + for i := 0; ; i++ { + if i%10 == 0 && progressReportCallback != nil { + virtualDAAScore, err := s.GetVirtualDAAScore() + if err != nil { + return err + } + progressReportCallback(virtualDAAScoreStart, virtualDAAScore) + } + + _, isCompletelyResolved, err := s.resolveVirtualChunkWithLock(virtualResolveChunk) + if err != nil { + return err + } + if isCompletelyResolved { + break + } + } + + return nil +} + +func (s *consensus) resolveVirtualChunkWithLock(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.resolveVirtualChunkNoLock(maxBlocksToResolve) +} + +func (s *consensus) resolveVirtualChunkNoLock(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) { + virtualChangeSet, isCompletelyResolved, err := s.consensusStateManager.ResolveVirtual(maxBlocksToResolve) + if err != nil { + return nil, false, err + } + s.virtualNotUpdated = 
!isCompletelyResolved + + stagingArea := model.NewStagingArea() + err = s.pruningManager.UpdatePruningPointByVirtual(stagingArea) + if err != nil { + return nil, false, err + } + + err = staging.CommitAllChanges(s.databaseContext, stagingArea) + if err != nil { + return nil, false, err + } + + err = s.pruningManager.UpdatePruningPointIfRequired() + if err != nil { + return nil, false, err + } + + err = s.sendVirtualChangedEvent(virtualChangeSet, true) + if err != nil { + return nil, false, err + } + + return virtualChangeSet, isCompletelyResolved, nil +} + +func (s *consensus) BuildPruningPointProof() (*externalapi.PruningPointProof, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.pruningProofManager.BuildPruningPointProof(model.NewStagingArea()) +} + +func (s *consensus) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error { + s.lock.Lock() + defer s.lock.Unlock() + + log.Infof("Validating the pruning point proof") + err := s.pruningProofManager.ValidatePruningPointProof(pruningPointProof) + if err != nil { + return err + } + + log.Infof("Done validating the pruning point proof") + return nil +} + +func (s *consensus) ApplyPruningPointProof(pruningPointProof *externalapi.PruningPointProof) error { + s.lock.Lock() + defer s.lock.Unlock() + + log.Infof("Applying the pruning point proof") + err := s.pruningProofManager.ApplyPruningPointProof(pruningPointProof) + if err != nil { + return err + } + + log.Infof("Done applying the pruning point proof") + return nil +} + +func (s *consensus) BlockDAAWindowHashes(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + return s.dagTraversalManager.DAABlockWindow(stagingArea, blockHash) +} + +func (s *consensus) TrustedDataDataDAAHeader(trustedBlockHash, daaBlockHash *externalapi.DomainHash, daaBlockWindowIndex uint64) (*externalapi.TrustedDataDataDAAHeader, error) { + s.lock.Lock() + 
defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + header, err := s.blockHeaderStore.BlockHeader(s.databaseContext, stagingArea, daaBlockHash) + if err != nil { + return nil, err + } + + ghostdagData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, daaBlockHash, false) + isNotFoundError := database.IsNotFoundError(err) + if !isNotFoundError && err != nil { + return nil, err + } + + if !isNotFoundError { + return &externalapi.TrustedDataDataDAAHeader{ + Header: header, + GHOSTDAGData: ghostdagData, + }, nil + } + + ghostdagDataHashPair, err := s.blocksWithTrustedDataDAAWindowStore.DAAWindowBlock(s.databaseContext, stagingArea, trustedBlockHash, daaBlockWindowIndex) + if err != nil { + return nil, err + } + + return &externalapi.TrustedDataDataDAAHeader{ + Header: header, + GHOSTDAGData: ghostdagDataHashPair.GHOSTDAGData, + }, nil +} + +func (s *consensus) TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.pruningManager.TrustedBlockAssociatedGHOSTDAGDataBlockHashes(model.NewStagingArea(), blockHash) +} + +func (s *consensus) TrustedGHOSTDAGData(blockHash *externalapi.DomainHash) (*externalapi.BlockGHOSTDAGData, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + ghostdagData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, blockHash, false) + isNotFoundError := database.IsNotFoundError(err) + if isNotFoundError || ghostdagData.SelectedParent().Equal(model.VirtualGenesisBlockHash) { + return s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, blockHash, true) + } + + return ghostdagData, nil +} + +func (s *consensus) IsChainBlock(blockHash *externalapi.DomainHash) (bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, 
model.VirtualBlockHash, false) + if err != nil { + return false, err + } + + return s.dagTopologyManagers[0].IsInSelectedParentChainOf(stagingArea, blockHash, virtualGHOSTDAGData.SelectedParent()) +} + +func (s *consensus) VirtualMergeDepthRoot() (*externalapi.DomainHash, error) { + s.lock.Lock() + defer s.lock.Unlock() + + stagingArea := model.NewStagingArea() + return s.mergeDepthManager.VirtualMergeDepthRoot(stagingArea) +} + +// IsNearlySynced returns whether this consensus is considered synced or close to being synced. This info +// is used to determine if it's ok to use a block template from this node for mining purposes. +func (s *consensus) IsNearlySynced() (bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.isNearlySyncedNoLock() +} + +func (s *consensus) isNearlySyncedNoLock() (bool, error) { + stagingArea := model.NewStagingArea() + virtualGHOSTDAGData, err := s.ghostdagDataStores[0].Get(s.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return false, err + } + + if virtualGHOSTDAGData.SelectedParent().Equal(s.genesisHash) { + return false, nil + } + + virtualSelectedParentHeader, err := s.blockHeaderStore.BlockHeader(s.databaseContext, stagingArea, virtualGHOSTDAGData.SelectedParent()) + if err != nil { + return false, err + } + + now := mstime.Now().UnixMilliseconds() + // As a heuristic, we allow the node to mine if he is likely to be within the current DAA window of fully synced nodes. + // Such blocks contribute to security by maintaining the current difficulty despite possibly being slightly out of sync. 
+ if now-virtualSelectedParentHeader.TimeInMilliseconds() < s.expectedDAAWindowDurationInMilliseconds { + log.Debugf("The selected tip timestamp is recent (%d), so IsNearlySynced returns true", + virtualSelectedParentHeader.TimeInMilliseconds()) + return true, nil + } + + log.Debugf("The selected tip timestamp is old (%d), so IsNearlySynced returns false", + virtualSelectedParentHeader.TimeInMilliseconds()) + return false, nil +} diff --git a/domain/consensus/consensus_test.go b/domain/consensus/consensus_test.go new file mode 100644 index 0000000..799727e --- /dev/null +++ b/domain/consensus/consensus_test.go @@ -0,0 +1,76 @@ +package consensus_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestConsensus_GetBlockInfo(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestConsensus_GetBlockInfo") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + invalidBlock, _, err := consensus.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + newHeader := invalidBlock.Header.ToMutable() + newHeader.SetTimeInMilliseconds(0) + invalidBlock.Header = newHeader.ToImmutable() + err = consensus.ValidateAndInsertBlock(invalidBlock, true) + if !errors.Is(err, ruleerrors.ErrTimeTooOld) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrTimeTooOld, err) + } + + info, err := 
consensus.GetBlockInfo(consensushashing.BlockHash(invalidBlock)) + if err != nil { + t.Fatalf("Failed to get block info: %v", err) + } + + if !info.Exists { + t.Fatal("The block is missing") + } + if info.BlockStatus != externalapi.StatusInvalid { + t.Fatalf("Expected block status: %s, instead got: %s", externalapi.StatusInvalid, info.BlockStatus) + } + + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + validBlock, err := consensus.BuildBlock(&emptyCoinbase, nil) + if err != nil { + t.Fatalf("consensus.BuildBlock with an empty coinbase shouldn't fail: %v", err) + } + + err = consensus.ValidateAndInsertBlock(validBlock, true) + if err != nil { + t.Fatalf("consensus.ValidateAndInsertBlock with a block straight from consensus.BuildBlock should not fail: %v", err) + } + + info, err = consensus.GetBlockInfo(consensushashing.BlockHash(validBlock)) + if err != nil { + t.Fatalf("Failed to get block info: %v", err) + } + + if !info.Exists { + t.Fatal("The block is missing") + } + if info.BlockStatus != externalapi.StatusUTXOValid { + t.Fatalf("Expected block status: %s, instead got: %s", externalapi.StatusUTXOValid, info.BlockStatus) + } + + }) +} diff --git a/domain/consensus/constructors.go b/domain/consensus/constructors.go new file mode 100644 index 0000000..beb27fb --- /dev/null +++ b/domain/consensus/constructors.go @@ -0,0 +1,27 @@ +package consensus + +import ( + "math/big" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// GHOSTDAGManagerConstructor is the function signature for a constructor of a type implementing model.GHOSTDAGManager +type GHOSTDAGManagerConstructor func( + databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + ghostdagDataStore model.GHOSTDAGDataStore, + headerStore model.BlockHeaderStore, + k externalapi.KType, + genesisHash 
*externalapi.DomainHash) model.GHOSTDAGManager + +// DifficultyManagerConstructor is the function signature for a constructor of a type implementing model.DifficultyManager +type DifficultyManagerConstructor func(model.DBReader, model.GHOSTDAGManager, model.GHOSTDAGDataStore, + model.BlockHeaderStore, model.DAABlocksStore, model.DAGTopologyManager, model.DAGTraversalManager, *big.Int, int, bool, time.Duration, + *externalapi.DomainHash, uint32) model.DifficultyManager + +// PastMedianTimeManagerConstructor is the function signature for a constructor of a type implementing model.PastMedianTimeManager +type PastMedianTimeManagerConstructor func(int, model.DBReader, model.DAGTraversalManager, model.BlockHeaderStore, + model.GHOSTDAGDataStore, *externalapi.DomainHash) model.PastMedianTimeManager diff --git a/domain/consensus/database/binaryserialization/common.go b/domain/consensus/database/binaryserialization/common.go new file mode 100644 index 0000000..af499ca --- /dev/null +++ b/domain/consensus/database/binaryserialization/common.go @@ -0,0 +1,24 @@ +package binaryserialization + +import ( + "encoding/binary" + "github.com/pkg/errors" +) + +const uint64Length = 8 + +// SerializeUint64 serializes a uint64 +func SerializeUint64(value uint64) []byte { + var keyBytes [uint64Length]byte + binary.LittleEndian.PutUint64(keyBytes[:], value) + return keyBytes[:] +} + +// DeserializeUint64 deserializes bytes to uint64 +func DeserializeUint64(valueBytes []byte) (uint64, error) { + if len(valueBytes) != uint64Length { + return 0, errors.Errorf("the given value is %d bytes so it cannot be deserialized into uint64", + len(valueBytes)) + } + return binary.LittleEndian.Uint64(valueBytes), nil +} diff --git a/domain/consensus/database/binaryserialization/hash.go b/domain/consensus/database/binaryserialization/hash.go new file mode 100644 index 0000000..b12f8a7 --- /dev/null +++ b/domain/consensus/database/binaryserialization/hash.go @@ -0,0 +1,47 @@ +package binaryserialization + 
+import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// SerializeHash serializes hash to a slice of bytes +func SerializeHash(hash *externalapi.DomainHash) []byte { + return hash.ByteSlice() +} + +// DeserializeHash deserializes a slice of bytes to a hash +func DeserializeHash(hashBytes []byte) (*externalapi.DomainHash, error) { + return externalapi.NewDomainHashFromByteSlice(hashBytes) +} + +// SerializeHashes serializes a slice of hashes to a slice of bytes +func SerializeHashes(hashes []*externalapi.DomainHash) []byte { + buff := make([]byte, len(hashes)*externalapi.DomainHashSize) + for i, hash := range hashes { + copy(buff[externalapi.DomainHashSize*i:], hash.ByteSlice()) + } + + return buff +} + +// DeserializeHashes deserializes a slice of bytes to a slice of hashes +func DeserializeHashes(hashesBytes []byte) ([]*externalapi.DomainHash, error) { + if len(hashesBytes)%externalapi.DomainHashSize != 0 { + return nil, errors.Errorf("The length of hashBytes is not divisible by externalapi.DomainHashSize (%d)", + externalapi.DomainHashSize) + } + + numHashes := len(hashesBytes) / externalapi.DomainHashSize + hashes := make([]*externalapi.DomainHash, numHashes) + for i := 0; i < numHashes; i++ { + var err error + start := i * externalapi.DomainHashSize + end := i*externalapi.DomainHashSize + externalapi.DomainHashSize + hashes[i], err = externalapi.NewDomainHashFromByteSlice(hashesBytes[start:end]) + if err != nil { + return nil, err + } + } + return hashes, nil +} diff --git a/domain/consensus/database/bucket.go b/domain/consensus/database/bucket.go new file mode 100644 index 0000000..2b1afdf --- /dev/null +++ b/domain/consensus/database/bucket.go @@ -0,0 +1,39 @@ +package database + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func dbBucketToDatabaseBucket(bucket model.DBBucket) *database.Bucket { + 
if bucket, ok := bucket.(dbBucket); ok { + return bucket.bucket + } + // This assumes that MakeBucket(src).Path() == src. which is not promised anywhere. + return database.MakeBucket(bucket.Path()) +} + +// MakeBucket creates a new Bucket using the given path of buckets. +func MakeBucket(path []byte) model.DBBucket { + return dbBucket{bucket: database.MakeBucket(path)} +} + +type dbBucket struct { + bucket *database.Bucket +} + +func (d dbBucket) Bucket(bucketBytes []byte) model.DBBucket { + return newDBBucket(d.bucket.Bucket(bucketBytes)) +} + +func (d dbBucket) Key(suffix []byte) model.DBKey { + return newDBKey(d.bucket.Key(suffix)) +} + +func (d dbBucket) Path() []byte { + return d.bucket.Path() +} + +func newDBBucket(bucket *database.Bucket) model.DBBucket { + return dbBucket{bucket: bucket} +} diff --git a/domain/consensus/database/cursor.go b/domain/consensus/database/cursor.go new file mode 100644 index 0000000..3d8a30d --- /dev/null +++ b/domain/consensus/database/cursor.go @@ -0,0 +1,70 @@ +package database + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +type dbCursor struct { + cursor database.Cursor + isClosed bool +} + +func (d dbCursor) Next() bool { + if d.isClosed { + panic("Tried using a closed DBCursor") + } + + return d.cursor.Next() +} + +func (d dbCursor) First() bool { + if d.isClosed { + panic("Tried using a closed DBCursor") + } + return d.cursor.First() +} + +func (d dbCursor) Seek(key model.DBKey) error { + if d.isClosed { + return errors.New("Tried using a closed DBCursor") + } + return d.cursor.Seek(dbKeyToDatabaseKey(key)) +} + +func (d dbCursor) Key() (model.DBKey, error) { + if d.isClosed { + return nil, errors.New("Tried using a closed DBCursor") + } + key, err := d.cursor.Key() + if err != nil { + return nil, err + } + + return newDBKey(key), nil +} + +func (d dbCursor) Value() ([]byte, error) { + if 
d.isClosed { + return nil, errors.New("Tried using a closed DBCursor") + } + return d.cursor.Value() +} + +func (d dbCursor) Close() error { + if d.isClosed { + return errors.New("Tried using a closed DBCursor") + } + d.isClosed = true + err := d.cursor.Close() + if err != nil { + return err + } + d.cursor = nil + return nil +} + +func newDBCursor(cursor database.Cursor) model.DBCursor { + return &dbCursor{cursor: cursor} +} diff --git a/domain/consensus/database/dbmanager.go b/domain/consensus/database/dbmanager.go new file mode 100644 index 0000000..929e93e --- /dev/null +++ b/domain/consensus/database/dbmanager.go @@ -0,0 +1,48 @@ +package database + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +type dbManager struct { + db database.Database +} + +func (dbw *dbManager) Get(key model.DBKey) ([]byte, error) { + return dbw.db.Get(dbKeyToDatabaseKey(key)) +} + +func (dbw *dbManager) Has(key model.DBKey) (bool, error) { + return dbw.db.Has(dbKeyToDatabaseKey(key)) +} + +func (dbw *dbManager) Put(key model.DBKey, value []byte) error { + return dbw.db.Put(dbKeyToDatabaseKey(key), value) +} + +func (dbw *dbManager) Delete(key model.DBKey) error { + return dbw.db.Delete(dbKeyToDatabaseKey(key)) +} + +func (dbw *dbManager) Cursor(bucket model.DBBucket) (model.DBCursor, error) { + cursor, err := dbw.db.Cursor(dbBucketToDatabaseBucket(bucket)) + if err != nil { + return nil, err + } + + return newDBCursor(cursor), nil +} + +func (dbw *dbManager) Begin() (model.DBTransaction, error) { + transaction, err := dbw.db.Begin() + if err != nil { + return nil, err + } + return newDBTransaction(transaction), nil +} + +// New returns wraps the given database as an instance of model.DBManager +func New(db database.Database) model.DBManager { + return &dbManager{db: db} +} diff --git a/domain/consensus/database/errors.go b/domain/consensus/database/errors.go new file mode 100644 index 
0000000..bee37cd --- /dev/null +++ b/domain/consensus/database/errors.go @@ -0,0 +1,14 @@ +package database + +import ( + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +// ErrNotFound denotes that the requested item was not +// found in the database. +var ErrNotFound = database.ErrNotFound + +// IsNotFoundError checks whether an error is an ErrNotFound. +func IsNotFoundError(err error) bool { + return database.IsNotFoundError(err) +} diff --git a/domain/consensus/database/key.go b/domain/consensus/database/key.go new file mode 100644 index 0000000..777daff --- /dev/null +++ b/domain/consensus/database/key.go @@ -0,0 +1,36 @@ +package database + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func dbKeyToDatabaseKey(key model.DBKey) *database.Key { + if key, ok := key.(dbKey); ok { + return key.key + } + if key, ok := key.(*dbKey); ok { + return key.key + } + return dbBucketToDatabaseBucket(key.Bucket()).Key(key.Suffix()) +} + +type dbKey struct { + key *database.Key +} + +func (d dbKey) Bytes() []byte { + return d.key.Bytes() +} + +func (d dbKey) Bucket() model.DBBucket { + return newDBBucket(d.key.Bucket()) +} + +func (d dbKey) Suffix() []byte { + return d.key.Suffix() +} + +func newDBKey(key *database.Key) model.DBKey { + return dbKey{key: key} +} diff --git a/domain/consensus/database/serialization/README.md b/domain/consensus/database/serialization/README.md new file mode 100644 index 0000000..83685d4 --- /dev/null +++ b/domain/consensus/database/serialization/README.md @@ -0,0 +1,6 @@ +# Serialization + +1. Download and place in your PATH: https://github.com/protocolbuffers/protobuf/releases/download/v3.12.3/protoc-3.12.3-linux-x86_64.zip +2. `go get github.com/golang/protobuf/protoc-gen-go` +3. `go get google.golang.org/grpc/cmd/protoc-gen-go-grpc` +4. 
In the protowire directory: `go generate .` diff --git a/domain/consensus/database/serialization/acceptancedata.go b/domain/consensus/database/serialization/acceptancedata.go new file mode 100644 index 0000000..534c256 --- /dev/null +++ b/domain/consensus/database/serialization/acceptancedata.go @@ -0,0 +1,90 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DomainAcceptanceDataToDbAcceptanceData converts model.AcceptanceData to DbAcceptanceData +func DomainAcceptanceDataToDbAcceptanceData(domainAcceptanceData externalapi.AcceptanceData) *DbAcceptanceData { + dbBlockAcceptanceData := make([]*DbBlockAcceptanceData, len(domainAcceptanceData)) + for i, blockAcceptanceData := range domainAcceptanceData { + dbTransactionAcceptanceData := make([]*DbTransactionAcceptanceData, + len(blockAcceptanceData.TransactionAcceptanceData)) + + for j, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData { + dbTransaction := DomainTransactionToDbTransaction(transactionAcceptanceData.Transaction) + + dbTransactionInputUTXOEntries := make([]*DbUtxoEntry, len(transactionAcceptanceData.TransactionInputUTXOEntries)) + for k, transactionInputUTXOEntry := range transactionAcceptanceData.TransactionInputUTXOEntries { + dbTransactionInputUTXOEntries[k] = UTXOEntryToDBUTXOEntry(transactionInputUTXOEntry) + } + + dbTransactionAcceptanceData[j] = &DbTransactionAcceptanceData{ + Transaction: dbTransaction, + Fee: transactionAcceptanceData.Fee, + IsAccepted: transactionAcceptanceData.IsAccepted, + TransactionInputUtxoEntries: dbTransactionInputUTXOEntries, + } + } + + blockHash := DomainHashToDbHash(blockAcceptanceData.BlockHash) + + dbBlockAcceptanceData[i] = &DbBlockAcceptanceData{ + BlockHash: blockHash, + TransactionAcceptanceData: dbTransactionAcceptanceData, + } + } + + return &DbAcceptanceData{ + BlockAcceptanceData: dbBlockAcceptanceData, + } +} + +// DbAcceptanceDataToDomainAcceptanceData 
converts DbAcceptanceData to model.AcceptanceData +func DbAcceptanceDataToDomainAcceptanceData(dbAcceptanceData *DbAcceptanceData) (externalapi.AcceptanceData, error) { + domainAcceptanceData := make(externalapi.AcceptanceData, len(dbAcceptanceData.BlockAcceptanceData)) + for i, dbBlockAcceptanceData := range dbAcceptanceData.BlockAcceptanceData { + domainTransactionAcceptanceData := make([]*externalapi.TransactionAcceptanceData, + len(dbBlockAcceptanceData.TransactionAcceptanceData)) + + for j, dbTransactionAcceptanceData := range dbBlockAcceptanceData.TransactionAcceptanceData { + domainTransaction, err := DbTransactionToDomainTransaction(dbTransactionAcceptanceData.Transaction) + if err != nil { + return nil, err + } + + domainTransactionInputUTXOEntries := make([]externalapi.UTXOEntry, len(dbTransactionAcceptanceData.TransactionInputUtxoEntries)) + for k, transactionInputUTXOEntry := range dbTransactionAcceptanceData.TransactionInputUtxoEntries { + domainTransactionInputUTXOEntry, err := DBUTXOEntryToUTXOEntry(transactionInputUTXOEntry) + if err != nil { + return nil, err + } + domainTransactionInputUTXOEntries[k] = domainTransactionInputUTXOEntry + + // For consistency's sake, we fill up the transaction input's + // UTXOEntry field as well, since that's how the acceptanceData + // must have arrived when it was originally serialized + domainTransaction.Inputs[k].UTXOEntry = domainTransactionInputUTXOEntry + } + + domainTransactionAcceptanceData[j] = &externalapi.TransactionAcceptanceData{ + Transaction: domainTransaction, + Fee: dbTransactionAcceptanceData.Fee, + IsAccepted: dbTransactionAcceptanceData.IsAccepted, + TransactionInputUTXOEntries: domainTransactionInputUTXOEntries, + } + } + + blockHash, err := DbHashToDomainHash(dbBlockAcceptanceData.BlockHash) + if err != nil { + return nil, err + } + + domainAcceptanceData[i] = &externalapi.BlockAcceptanceData{ + BlockHash: blockHash, + TransactionAcceptanceData: domainTransactionAcceptanceData, + } + } + + 
return domainAcceptanceData, nil +} diff --git a/domain/consensus/database/serialization/block.go b/domain/consensus/database/serialization/block.go new file mode 100644 index 0000000..1af4434 --- /dev/null +++ b/domain/consensus/database/serialization/block.go @@ -0,0 +1,40 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DomainBlockToDbBlock converts DomainBlocks to DbBlock +func DomainBlockToDbBlock(domainBlock *externalapi.DomainBlock) *DbBlock { + dbTransactions := make([]*DbTransaction, len(domainBlock.Transactions)) + for i, domainTransaction := range domainBlock.Transactions { + dbTransactions[i] = DomainTransactionToDbTransaction(domainTransaction) + } + + return &DbBlock{ + Header: DomainBlockHeaderToDbBlockHeader(domainBlock.Header), + Transactions: dbTransactions, + } +} + +// DbBlockToDomainBlock converts DbBlock to DomainBlock +func DbBlockToDomainBlock(dbBlock *DbBlock) (*externalapi.DomainBlock, error) { + domainBlockHeader, err := DbBlockHeaderToDomainBlockHeader(dbBlock.Header) + if err != nil { + return nil, err + } + + domainTransactions := make([]*externalapi.DomainTransaction, len(dbBlock.Transactions)) + for i, dbTransaction := range dbBlock.Transactions { + var err error + domainTransactions[i], err = DbTransactionToDomainTransaction(dbTransaction) + if err != nil { + return nil, err + } + } + + return &externalapi.DomainBlock{ + Header: domainBlockHeader, + Transactions: domainTransactions, + }, nil +} diff --git a/domain/consensus/database/serialization/block_ghostdag_data.go b/domain/consensus/database/serialization/block_ghostdag_data.go new file mode 100644 index 0000000..4e2e459 --- /dev/null +++ b/domain/consensus/database/serialization/block_ghostdag_data.go @@ -0,0 +1,60 @@ +package serialization + +import ( + "math/big" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// BlockGHOSTDAGDataToDBBlockGHOSTDAGData converts 
BlockGHOSTDAGData to DbBlockGhostdagData +func BlockGHOSTDAGDataToDBBlockGHOSTDAGData(blockGHOSTDAGData *externalapi.BlockGHOSTDAGData) *DbBlockGhostdagData { + var selectedParent *DbHash + if blockGHOSTDAGData.SelectedParent() != nil { + selectedParent = DomainHashToDbHash(blockGHOSTDAGData.SelectedParent()) + } + + return &DbBlockGhostdagData{ + BlueScore: blockGHOSTDAGData.BlueScore(), + BlueWork: blockGHOSTDAGData.BlueWork().Bytes(), + SelectedParent: selectedParent, + MergeSetBlues: DomainHashesToDbHashes(blockGHOSTDAGData.MergeSetBlues()), + MergeSetReds: DomainHashesToDbHashes(blockGHOSTDAGData.MergeSetReds()), + BluesAnticoneSizes: bluesAnticoneSizesToDBBluesAnticoneSizes(blockGHOSTDAGData.BluesAnticoneSizes()), + } +} + +// DBBlockGHOSTDAGDataToBlockGHOSTDAGData converts DbBlockGhostdagData to BlockGHOSTDAGData +func DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData *DbBlockGhostdagData) (*externalapi.BlockGHOSTDAGData, error) { + var selectedParent *externalapi.DomainHash + if dbBlockGHOSTDAGData.SelectedParent != nil { + var err error + selectedParent, err = DbHashToDomainHash(dbBlockGHOSTDAGData.SelectedParent) + if err != nil { + return nil, err + } + } + + mergetSetBlues, err := DbHashesToDomainHashes(dbBlockGHOSTDAGData.MergeSetBlues) + if err != nil { + return nil, err + } + + mergetSetReds, err := DbHashesToDomainHashes(dbBlockGHOSTDAGData.MergeSetReds) + if err != nil { + return nil, err + } + + bluesAnticoneSizes, err := dbBluesAnticoneSizesToBluesAnticoneSizes(dbBlockGHOSTDAGData.BluesAnticoneSizes) + if err != nil { + return nil, err + } + + return externalapi.NewBlockGHOSTDAGData( + dbBlockGHOSTDAGData.BlueScore, + new(big.Int).SetBytes(dbBlockGHOSTDAGData.BlueWork), + selectedParent, + mergetSetBlues, + mergetSetReds, + bluesAnticoneSizes, + ), nil +} diff --git a/domain/consensus/database/serialization/blockheader.go b/domain/consensus/database/serialization/blockheader.go new file mode 100644 index 0000000..456eeb5 --- /dev/null 
+++ b/domain/consensus/database/serialization/blockheader.go @@ -0,0 +1,71 @@ +package serialization + +import ( + "math" + "math/big" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" +) + +// DomainBlockHeaderToDbBlockHeader converts BlockHeader to DbBlockHeader +func DomainBlockHeaderToDbBlockHeader(domainBlockHeader externalapi.BlockHeader) *DbBlockHeader { + return &DbBlockHeader{ + Version: uint32(domainBlockHeader.Version()), + Parents: DomainParentsToDbParents(domainBlockHeader.Parents()), + HashMerkleRoot: DomainHashToDbHash(domainBlockHeader.HashMerkleRoot()), + AcceptedIDMerkleRoot: DomainHashToDbHash(domainBlockHeader.AcceptedIDMerkleRoot()), + UtxoCommitment: DomainHashToDbHash(domainBlockHeader.UTXOCommitment()), + TimeInMilliseconds: domainBlockHeader.TimeInMilliseconds(), + Bits: domainBlockHeader.Bits(), + Nonce: domainBlockHeader.Nonce(), + DaaScore: domainBlockHeader.DAAScore(), + BlueScore: domainBlockHeader.BlueScore(), + BlueWork: domainBlockHeader.BlueWork().Bytes(), + PruningPoint: DomainHashToDbHash(domainBlockHeader.PruningPoint()), + } +} + +// DbBlockHeaderToDomainBlockHeader converts DbBlockHeader to BlockHeader +func DbBlockHeaderToDomainBlockHeader(dbBlockHeader *DbBlockHeader) (externalapi.BlockHeader, error) { + parents, err := DbParentsToDomainParents(dbBlockHeader.Parents) + if err != nil { + return nil, err + } + hashMerkleRoot, err := DbHashToDomainHash(dbBlockHeader.HashMerkleRoot) + if err != nil { + return nil, err + } + acceptedIDMerkleRoot, err := DbHashToDomainHash(dbBlockHeader.AcceptedIDMerkleRoot) + if err != nil { + return nil, err + } + utxoCommitment, err := DbHashToDomainHash(dbBlockHeader.UtxoCommitment) + if err != nil { + return nil, err + } + if dbBlockHeader.Version > math.MaxUint16 { + return nil, errors.Errorf("Invalid header version - bigger then uint16") + } + + pruningPoint, err 
:= DbHashToDomainHash(dbBlockHeader.PruningPoint) + if err != nil { + return nil, err + } + + return blockheader.NewImmutableBlockHeader( + uint16(dbBlockHeader.Version), + parents, + hashMerkleRoot, + acceptedIDMerkleRoot, + utxoCommitment, + dbBlockHeader.TimeInMilliseconds, + dbBlockHeader.Bits, + dbBlockHeader.Nonce, + dbBlockHeader.DaaScore, + dbBlockHeader.BlueScore, + new(big.Int).SetBytes(dbBlockHeader.BlueWork), + pruningPoint, + ), nil +} diff --git a/domain/consensus/database/serialization/blocklevelparents.go b/domain/consensus/database/serialization/blocklevelparents.go new file mode 100644 index 0000000..e954174 --- /dev/null +++ b/domain/consensus/database/serialization/blocklevelparents.go @@ -0,0 +1,49 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DbBlockLevelParentsToDomainBlockLevelParents converts a DbBlockLevelParents to a BlockLevelParents +func DbBlockLevelParentsToDomainBlockLevelParents(dbBlockLevelParents *DbBlockLevelParents) (externalapi.BlockLevelParents, error) { + domainBlockLevelParents := make(externalapi.BlockLevelParents, len(dbBlockLevelParents.ParentHashes)) + for i, parentHash := range dbBlockLevelParents.ParentHashes { + var err error + domainBlockLevelParents[i], err = externalapi.NewDomainHashFromByteSlice(parentHash.Hash) + if err != nil { + return nil, err + } + } + return domainBlockLevelParents, nil +} + +// DomainBlockLevelParentsToDbBlockLevelParents converts a BlockLevelParents to a DbBlockLevelParents +func DomainBlockLevelParentsToDbBlockLevelParents(domainBlockLevelParents externalapi.BlockLevelParents) *DbBlockLevelParents { + parentHashes := make([]*DbHash, len(domainBlockLevelParents)) + for i, parentHash := range domainBlockLevelParents { + parentHashes[i] = &DbHash{Hash: parentHash.ByteSlice()} + } + return &DbBlockLevelParents{ParentHashes: parentHashes} +} + +// DomainParentsToDbParents converts a slice of BlockLevelParents to a 
slice of DbBlockLevelParents +func DomainParentsToDbParents(domainParents []externalapi.BlockLevelParents) []*DbBlockLevelParents { + dbParents := make([]*DbBlockLevelParents, len(domainParents)) + for i, domainBlockLevelParents := range domainParents { + dbParents[i] = DomainBlockLevelParentsToDbBlockLevelParents(domainBlockLevelParents) + } + return dbParents +} + +// DbParentsToDomainParents converts a slice of DbBlockLevelParents to a slice of BlockLevelParents +func DbParentsToDomainParents(dbParents []*DbBlockLevelParents) ([]externalapi.BlockLevelParents, error) { + domainParents := make([]externalapi.BlockLevelParents, len(dbParents)) + for i, domainBlockLevelParents := range dbParents { + var err error + domainParents[i], err = DbBlockLevelParentsToDomainBlockLevelParents(domainBlockLevelParents) + if err != nil { + return nil, err + } + } + return domainParents, nil +} diff --git a/domain/consensus/database/serialization/blockrelations.go b/domain/consensus/database/serialization/blockrelations.go new file mode 100644 index 0000000..c7c72b8 --- /dev/null +++ b/domain/consensus/database/serialization/blockrelations.go @@ -0,0 +1,30 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" +) + +// DomainBlockRelationsToDbBlockRelations converts model.BlockRelations to DbBlockRelations +func DomainBlockRelationsToDbBlockRelations(domainBlockRelations *model.BlockRelations) *DbBlockRelations { + return &DbBlockRelations{ + Parents: DomainHashesToDbHashes(domainBlockRelations.Parents), + Children: DomainHashesToDbHashes(domainBlockRelations.Children), + } +} + +// DbBlockRelationsToDomainBlockRelations converts DbBlockRelations to model.BlockRelations +func DbBlockRelationsToDomainBlockRelations(dbBlockRelations *DbBlockRelations) (*model.BlockRelations, error) { + domainParentHashes, err := DbHashesToDomainHashes(dbBlockRelations.Parents) + if err != nil { + return nil, err + } + domainChildHashes, err := 
DbHashesToDomainHashes(dbBlockRelations.Children) + if err != nil { + return nil, err + } + + return &model.BlockRelations{ + Parents: domainParentHashes, + Children: domainChildHashes, + }, nil +} diff --git a/domain/consensus/database/serialization/blockstatus.go b/domain/consensus/database/serialization/blockstatus.go new file mode 100644 index 0000000..db97cac --- /dev/null +++ b/domain/consensus/database/serialization/blockstatus.go @@ -0,0 +1,17 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DomainBlockStatusToDbBlockStatus converts model.BlockStatus to DbBlockStatus +func DomainBlockStatusToDbBlockStatus(domainBlockStatus externalapi.BlockStatus) *DbBlockStatus { + return &DbBlockStatus{ + Status: uint32(domainBlockStatus), + } +} + +// DbBlockStatusToDomainBlockStatus converts DbBlockStatus to model.BlockStatus +func DbBlockStatusToDomainBlockStatus(dbBlockStatus *DbBlockStatus) externalapi.BlockStatus { + return externalapi.BlockStatus(dbBlockStatus.Status) +} diff --git a/domain/consensus/database/serialization/blues_anticone_sizes.go b/domain/consensus/database/serialization/blues_anticone_sizes.go new file mode 100644 index 0000000..10a457a --- /dev/null +++ b/domain/consensus/database/serialization/blues_anticone_sizes.go @@ -0,0 +1,47 @@ +package serialization + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func bluesAnticoneSizesToDBBluesAnticoneSizes(bluesAnticoneSizes map[externalapi.DomainHash]externalapi.KType) []*DbBluesAnticoneSizes { + dbBluesAnticoneSizes := make([]*DbBluesAnticoneSizes, len(bluesAnticoneSizes)) + i := 0 + for hash, anticoneSize := range bluesAnticoneSizes { + hashCopy := hash + dbBluesAnticoneSizes[i] = &DbBluesAnticoneSizes{ + BlueHash: DomainHashToDbHash(&hashCopy), + AnticoneSize: uint32(anticoneSize), + } + i++ + } + + return dbBluesAnticoneSizes +} + +func 
dbBluesAnticoneSizesToBluesAnticoneSizes(dbBluesAnticoneSizes []*DbBluesAnticoneSizes) (map[externalapi.DomainHash]externalapi.KType, error) { + bluesAnticoneSizes := make(map[externalapi.DomainHash]externalapi.KType, len(dbBluesAnticoneSizes)) + + for _, data := range dbBluesAnticoneSizes { + hash, err := DbHashToDomainHash(data.BlueHash) + if err != nil { + return nil, err + } + + bluesAnticoneSizes[*hash], err = uint32ToKType(data.AnticoneSize) + if err != nil { + return nil, err + } + } + + return bluesAnticoneSizes, nil +} + +func uint32ToKType(n uint32) (externalapi.KType, error) { + convertedN := externalapi.KType(n) + if uint32(convertedN) != n { + return 0, errors.Errorf("cannot convert %d to KType without losing data", n) + } + return convertedN, nil +} diff --git a/domain/consensus/database/serialization/blues_anticone_sizes_test.go b/domain/consensus/database/serialization/blues_anticone_sizes_test.go new file mode 100644 index 0000000..6b14ac6 --- /dev/null +++ b/domain/consensus/database/serialization/blues_anticone_sizes_test.go @@ -0,0 +1,28 @@ +package serialization + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestBlueAnticoneSizesSize tests that no data can be lost when converting externalapi.KType to the corresponding type in +// DbBluesAnticoneSizes +func TestKType(t *testing.T) { + k := externalapi.KType(0) + k-- + + if k < externalapi.KType(0) { + t.Fatalf("KType must be unsigned") + } + + // Setting maxKType to maximum value of KType. + // As we verify above that KType is unsigned we can be sure that maxKType is indeed the maximum value of KType. 
+ maxKType := ^externalapi.KType(0) + dbBluesAnticoneSizes := DbBluesAnticoneSizes{ + AnticoneSize: uint32(maxKType), + } + if externalapi.KType(dbBluesAnticoneSizes.AnticoneSize) != maxKType { + t.Fatalf("convert from uint32 to KType loses data") + } +} diff --git a/domain/consensus/database/serialization/daa_block.go b/domain/consensus/database/serialization/daa_block.go new file mode 100644 index 0000000..663cfc5 --- /dev/null +++ b/domain/consensus/database/serialization/daa_block.go @@ -0,0 +1,31 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// BlockGHOSTDAGDataHashPairToDbBlockGhostdagDataHashPair converts *externalapi.BlockGHOSTDAGDataHashPair to *DbBlockGHOSTDAGDataHashPair +func BlockGHOSTDAGDataHashPairToDbBlockGhostdagDataHashPair(pair *externalapi.BlockGHOSTDAGDataHashPair) *DbBlockGHOSTDAGDataHashPair { + return &DbBlockGHOSTDAGDataHashPair{ + Hash: DomainHashToDbHash(pair.Hash), + GhostdagData: BlockGHOSTDAGDataToDBBlockGHOSTDAGData(pair.GHOSTDAGData), + } +} + +// DbBlockGHOSTDAGDataHashPairToBlockGHOSTDAGDataHashPair converts *DbBlockGHOSTDAGDataHashPair to *externalapi.BlockGHOSTDAGDataHashPair +func DbBlockGHOSTDAGDataHashPairToBlockGHOSTDAGDataHashPair(dbPair *DbBlockGHOSTDAGDataHashPair) (*externalapi.BlockGHOSTDAGDataHashPair, error) { + hash, err := DbHashToDomainHash(dbPair.Hash) + if err != nil { + return nil, err + } + + ghostdagData, err := DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbPair.GhostdagData) + if err != nil { + return nil, err + } + + return &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: hash, + GHOSTDAGData: ghostdagData, + }, nil +} diff --git a/domain/consensus/database/serialization/dbobjects.pb.go b/domain/consensus/database/serialization/dbobjects.pb.go new file mode 100644 index 0000000..0cd13d9 --- /dev/null +++ b/domain/consensus/database/serialization/dbobjects.pb.go @@ -0,0 +1,2473 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: dbobjects.proto + +package serialization + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DbBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *DbBlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Transactions []*DbTransaction `protobuf:"bytes,2,rep,name=transactions,proto3" json:"transactions,omitempty"` +} + +func (x *DbBlock) Reset() { + *x = DbBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlock) ProtoMessage() {} + +func (x *DbBlock) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlock.ProtoReflect.Descriptor instead. 
+func (*DbBlock) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{0} +} + +func (x *DbBlock) GetHeader() *DbBlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *DbBlock) GetTransactions() []*DbTransaction { + if x != nil { + return x.Transactions + } + return nil +} + +type DbBlockHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Parents []*DbBlockLevelParents `protobuf:"bytes,2,rep,name=parents,proto3" json:"parents,omitempty"` + HashMerkleRoot *DbHash `protobuf:"bytes,3,opt,name=hashMerkleRoot,proto3" json:"hashMerkleRoot,omitempty"` + AcceptedIDMerkleRoot *DbHash `protobuf:"bytes,4,opt,name=acceptedIDMerkleRoot,proto3" json:"acceptedIDMerkleRoot,omitempty"` + UtxoCommitment *DbHash `protobuf:"bytes,5,opt,name=utxoCommitment,proto3" json:"utxoCommitment,omitempty"` + TimeInMilliseconds int64 `protobuf:"varint,6,opt,name=timeInMilliseconds,proto3" json:"timeInMilliseconds,omitempty"` + Bits uint32 `protobuf:"varint,7,opt,name=bits,proto3" json:"bits,omitempty"` + Nonce uint64 `protobuf:"varint,8,opt,name=nonce,proto3" json:"nonce,omitempty"` + DaaScore uint64 `protobuf:"varint,9,opt,name=daaScore,proto3" json:"daaScore,omitempty"` + BlueWork []byte `protobuf:"bytes,10,opt,name=blueWork,proto3" json:"blueWork,omitempty"` + PruningPoint *DbHash `protobuf:"bytes,12,opt,name=pruningPoint,proto3" json:"pruningPoint,omitempty"` + BlueScore uint64 `protobuf:"varint,13,opt,name=blueScore,proto3" json:"blueScore,omitempty"` +} + +func (x *DbBlockHeader) Reset() { + *x = DbBlockHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockHeader) 
ProtoMessage() {} + +func (x *DbBlockHeader) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockHeader.ProtoReflect.Descriptor instead. +func (*DbBlockHeader) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{1} +} + +func (x *DbBlockHeader) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *DbBlockHeader) GetParents() []*DbBlockLevelParents { + if x != nil { + return x.Parents + } + return nil +} + +func (x *DbBlockHeader) GetHashMerkleRoot() *DbHash { + if x != nil { + return x.HashMerkleRoot + } + return nil +} + +func (x *DbBlockHeader) GetAcceptedIDMerkleRoot() *DbHash { + if x != nil { + return x.AcceptedIDMerkleRoot + } + return nil +} + +func (x *DbBlockHeader) GetUtxoCommitment() *DbHash { + if x != nil { + return x.UtxoCommitment + } + return nil +} + +func (x *DbBlockHeader) GetTimeInMilliseconds() int64 { + if x != nil { + return x.TimeInMilliseconds + } + return 0 +} + +func (x *DbBlockHeader) GetBits() uint32 { + if x != nil { + return x.Bits + } + return 0 +} + +func (x *DbBlockHeader) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +func (x *DbBlockHeader) GetDaaScore() uint64 { + if x != nil { + return x.DaaScore + } + return 0 +} + +func (x *DbBlockHeader) GetBlueWork() []byte { + if x != nil { + return x.BlueWork + } + return nil +} + +func (x *DbBlockHeader) GetPruningPoint() *DbHash { + if x != nil { + return x.PruningPoint + } + return nil +} + +func (x *DbBlockHeader) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +type DbBlockLevelParents struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + 
ParentHashes []*DbHash `protobuf:"bytes,1,rep,name=parentHashes,proto3" json:"parentHashes,omitempty"` +} + +func (x *DbBlockLevelParents) Reset() { + *x = DbBlockLevelParents{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockLevelParents) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockLevelParents) ProtoMessage() {} + +func (x *DbBlockLevelParents) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockLevelParents.ProtoReflect.Descriptor instead. +func (*DbBlockLevelParents) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{2} +} + +func (x *DbBlockLevelParents) GetParentHashes() []*DbHash { + if x != nil { + return x.ParentHashes + } + return nil +} + +type DbHash struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (x *DbHash) Reset() { + *x = DbHash{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbHash) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbHash) ProtoMessage() {} + +func (x *DbHash) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
DbHash.ProtoReflect.Descriptor instead. +func (*DbHash) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{3} +} + +func (x *DbHash) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +type DbTransaction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Inputs []*DbTransactionInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*DbTransactionOutput `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + LockTime uint64 `protobuf:"varint,4,opt,name=lockTime,proto3" json:"lockTime,omitempty"` + SubnetworkID *DbSubnetworkId `protobuf:"bytes,5,opt,name=subnetworkID,proto3" json:"subnetworkID,omitempty"` + Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"` + Payload []byte `protobuf:"bytes,8,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *DbTransaction) Reset() { + *x = DbTransaction{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbTransaction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbTransaction) ProtoMessage() {} + +func (x *DbTransaction) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbTransaction.ProtoReflect.Descriptor instead. 
+func (*DbTransaction) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{4} +} + +func (x *DbTransaction) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *DbTransaction) GetInputs() []*DbTransactionInput { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *DbTransaction) GetOutputs() []*DbTransactionOutput { + if x != nil { + return x.Outputs + } + return nil +} + +func (x *DbTransaction) GetLockTime() uint64 { + if x != nil { + return x.LockTime + } + return 0 +} + +func (x *DbTransaction) GetSubnetworkID() *DbSubnetworkId { + if x != nil { + return x.SubnetworkID + } + return nil +} + +func (x *DbTransaction) GetGas() uint64 { + if x != nil { + return x.Gas + } + return 0 +} + +func (x *DbTransaction) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type DbTransactionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousOutpoint *DbOutpoint `protobuf:"bytes,1,opt,name=previousOutpoint,proto3" json:"previousOutpoint,omitempty"` + SignatureScript []byte `protobuf:"bytes,2,opt,name=signatureScript,proto3" json:"signatureScript,omitempty"` + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` + SigOpCount uint32 `protobuf:"varint,4,opt,name=sigOpCount,proto3" json:"sigOpCount,omitempty"` +} + +func (x *DbTransactionInput) Reset() { + *x = DbTransactionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbTransactionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbTransactionInput) ProtoMessage() {} + +func (x *DbTransactionInput) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbTransactionInput.ProtoReflect.Descriptor instead. +func (*DbTransactionInput) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{5} +} + +func (x *DbTransactionInput) GetPreviousOutpoint() *DbOutpoint { + if x != nil { + return x.PreviousOutpoint + } + return nil +} + +func (x *DbTransactionInput) GetSignatureScript() []byte { + if x != nil { + return x.SignatureScript + } + return nil +} + +func (x *DbTransactionInput) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *DbTransactionInput) GetSigOpCount() uint32 { + if x != nil { + return x.SigOpCount + } + return 0 +} + +type DbOutpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionID *DbTransactionId `protobuf:"bytes,1,opt,name=transactionID,proto3" json:"transactionID,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` +} + +func (x *DbOutpoint) Reset() { + *x = DbOutpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbOutpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbOutpoint) ProtoMessage() {} + +func (x *DbOutpoint) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbOutpoint.ProtoReflect.Descriptor instead. 
+func (*DbOutpoint) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{6} +} + +func (x *DbOutpoint) GetTransactionID() *DbTransactionId { + if x != nil { + return x.TransactionID + } + return nil +} + +func (x *DbOutpoint) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +type DbTransactionId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionId []byte `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` +} + +func (x *DbTransactionId) Reset() { + *x = DbTransactionId{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbTransactionId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbTransactionId) ProtoMessage() {} + +func (x *DbTransactionId) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbTransactionId.ProtoReflect.Descriptor instead. 
+func (*DbTransactionId) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{7} +} + +func (x *DbTransactionId) GetTransactionId() []byte { + if x != nil { + return x.TransactionId + } + return nil +} + +type DbTransactionOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + ScriptPublicKey *DbScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` +} + +func (x *DbTransactionOutput) Reset() { + *x = DbTransactionOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbTransactionOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbTransactionOutput) ProtoMessage() {} + +func (x *DbTransactionOutput) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbTransactionOutput.ProtoReflect.Descriptor instead. 
+func (*DbTransactionOutput) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{8} +} + +func (x *DbTransactionOutput) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *DbTransactionOutput) GetScriptPublicKey() *DbScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +type DbSubnetworkId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubnetworkId []byte `protobuf:"bytes,1,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` +} + +func (x *DbSubnetworkId) Reset() { + *x = DbSubnetworkId{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbSubnetworkId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbSubnetworkId) ProtoMessage() {} + +func (x *DbSubnetworkId) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbSubnetworkId.ProtoReflect.Descriptor instead. 
+func (*DbSubnetworkId) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{9} +} + +func (x *DbSubnetworkId) GetSubnetworkId() []byte { + if x != nil { + return x.SubnetworkId + } + return nil +} + +type DbAcceptanceData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockAcceptanceData []*DbBlockAcceptanceData `protobuf:"bytes,1,rep,name=blockAcceptanceData,proto3" json:"blockAcceptanceData,omitempty"` +} + +func (x *DbAcceptanceData) Reset() { + *x = DbAcceptanceData{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbAcceptanceData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbAcceptanceData) ProtoMessage() {} + +func (x *DbAcceptanceData) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbAcceptanceData.ProtoReflect.Descriptor instead. 
+func (*DbAcceptanceData) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{10} +} + +func (x *DbAcceptanceData) GetBlockAcceptanceData() []*DbBlockAcceptanceData { + if x != nil { + return x.BlockAcceptanceData + } + return nil +} + +type DbBlockAcceptanceData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionAcceptanceData []*DbTransactionAcceptanceData `protobuf:"bytes,1,rep,name=transactionAcceptanceData,proto3" json:"transactionAcceptanceData,omitempty"` + BlockHash *DbHash `protobuf:"bytes,2,opt,name=blockHash,proto3" json:"blockHash,omitempty"` +} + +func (x *DbBlockAcceptanceData) Reset() { + *x = DbBlockAcceptanceData{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockAcceptanceData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockAcceptanceData) ProtoMessage() {} + +func (x *DbBlockAcceptanceData) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockAcceptanceData.ProtoReflect.Descriptor instead. 
+func (*DbBlockAcceptanceData) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{11} +} + +func (x *DbBlockAcceptanceData) GetTransactionAcceptanceData() []*DbTransactionAcceptanceData { + if x != nil { + return x.TransactionAcceptanceData + } + return nil +} + +func (x *DbBlockAcceptanceData) GetBlockHash() *DbHash { + if x != nil { + return x.BlockHash + } + return nil +} + +type DbTransactionAcceptanceData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Transaction *DbTransaction `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` + Fee uint64 `protobuf:"varint,2,opt,name=fee,proto3" json:"fee,omitempty"` + IsAccepted bool `protobuf:"varint,3,opt,name=isAccepted,proto3" json:"isAccepted,omitempty"` + TransactionInputUtxoEntries []*DbUtxoEntry `protobuf:"bytes,4,rep,name=transactionInputUtxoEntries,proto3" json:"transactionInputUtxoEntries,omitempty"` +} + +func (x *DbTransactionAcceptanceData) Reset() { + *x = DbTransactionAcceptanceData{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbTransactionAcceptanceData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbTransactionAcceptanceData) ProtoMessage() {} + +func (x *DbTransactionAcceptanceData) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbTransactionAcceptanceData.ProtoReflect.Descriptor instead. 
+func (*DbTransactionAcceptanceData) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{12} +} + +func (x *DbTransactionAcceptanceData) GetTransaction() *DbTransaction { + if x != nil { + return x.Transaction + } + return nil +} + +func (x *DbTransactionAcceptanceData) GetFee() uint64 { + if x != nil { + return x.Fee + } + return 0 +} + +func (x *DbTransactionAcceptanceData) GetIsAccepted() bool { + if x != nil { + return x.IsAccepted + } + return false +} + +func (x *DbTransactionAcceptanceData) GetTransactionInputUtxoEntries() []*DbUtxoEntry { + if x != nil { + return x.TransactionInputUtxoEntries + } + return nil +} + +type DbBlockRelations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Parents []*DbHash `protobuf:"bytes,1,rep,name=parents,proto3" json:"parents,omitempty"` + Children []*DbHash `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"` +} + +func (x *DbBlockRelations) Reset() { + *x = DbBlockRelations{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockRelations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockRelations) ProtoMessage() {} + +func (x *DbBlockRelations) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockRelations.ProtoReflect.Descriptor instead. 
+func (*DbBlockRelations) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{13} +} + +func (x *DbBlockRelations) GetParents() []*DbHash { + if x != nil { + return x.Parents + } + return nil +} + +func (x *DbBlockRelations) GetChildren() []*DbHash { + if x != nil { + return x.Children + } + return nil +} + +type DbBlockStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status uint32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *DbBlockStatus) Reset() { + *x = DbBlockStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockStatus) ProtoMessage() {} + +func (x *DbBlockStatus) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockStatus.ProtoReflect.Descriptor instead. 
+func (*DbBlockStatus) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{14} +} + +func (x *DbBlockStatus) GetStatus() uint32 { + if x != nil { + return x.Status + } + return 0 +} + +type DbBlockGhostdagData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlueScore uint64 `protobuf:"varint,1,opt,name=blueScore,proto3" json:"blueScore,omitempty"` + BlueWork []byte `protobuf:"bytes,2,opt,name=blueWork,proto3" json:"blueWork,omitempty"` + SelectedParent *DbHash `protobuf:"bytes,3,opt,name=selectedParent,proto3" json:"selectedParent,omitempty"` + MergeSetBlues []*DbHash `protobuf:"bytes,4,rep,name=mergeSetBlues,proto3" json:"mergeSetBlues,omitempty"` + MergeSetReds []*DbHash `protobuf:"bytes,5,rep,name=mergeSetReds,proto3" json:"mergeSetReds,omitempty"` + BluesAnticoneSizes []*DbBluesAnticoneSizes `protobuf:"bytes,6,rep,name=bluesAnticoneSizes,proto3" json:"bluesAnticoneSizes,omitempty"` +} + +func (x *DbBlockGhostdagData) Reset() { + *x = DbBlockGhostdagData{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockGhostdagData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockGhostdagData) ProtoMessage() {} + +func (x *DbBlockGhostdagData) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockGhostdagData.ProtoReflect.Descriptor instead. 
+func (*DbBlockGhostdagData) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{15} +} + +func (x *DbBlockGhostdagData) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +func (x *DbBlockGhostdagData) GetBlueWork() []byte { + if x != nil { + return x.BlueWork + } + return nil +} + +func (x *DbBlockGhostdagData) GetSelectedParent() *DbHash { + if x != nil { + return x.SelectedParent + } + return nil +} + +func (x *DbBlockGhostdagData) GetMergeSetBlues() []*DbHash { + if x != nil { + return x.MergeSetBlues + } + return nil +} + +func (x *DbBlockGhostdagData) GetMergeSetReds() []*DbHash { + if x != nil { + return x.MergeSetReds + } + return nil +} + +func (x *DbBlockGhostdagData) GetBluesAnticoneSizes() []*DbBluesAnticoneSizes { + if x != nil { + return x.BluesAnticoneSizes + } + return nil +} + +type DbBluesAnticoneSizes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlueHash *DbHash `protobuf:"bytes,1,opt,name=blueHash,proto3" json:"blueHash,omitempty"` + AnticoneSize uint32 `protobuf:"varint,2,opt,name=anticoneSize,proto3" json:"anticoneSize,omitempty"` +} + +func (x *DbBluesAnticoneSizes) Reset() { + *x = DbBluesAnticoneSizes{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBluesAnticoneSizes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBluesAnticoneSizes) ProtoMessage() {} + +func (x *DbBluesAnticoneSizes) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBluesAnticoneSizes.ProtoReflect.Descriptor instead. 
+func (*DbBluesAnticoneSizes) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{16} +} + +func (x *DbBluesAnticoneSizes) GetBlueHash() *DbHash { + if x != nil { + return x.BlueHash + } + return nil +} + +func (x *DbBluesAnticoneSizes) GetAnticoneSize() uint32 { + if x != nil { + return x.AnticoneSize + } + return 0 +} + +type DbMultiset struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Multiset []byte `protobuf:"bytes,1,opt,name=multiset,proto3" json:"multiset,omitempty"` +} + +func (x *DbMultiset) Reset() { + *x = DbMultiset{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbMultiset) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbMultiset) ProtoMessage() {} + +func (x *DbMultiset) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbMultiset.ProtoReflect.Descriptor instead. 
+func (*DbMultiset) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{17} +} + +func (x *DbMultiset) GetMultiset() []byte { + if x != nil { + return x.Multiset + } + return nil +} + +type DbUtxoSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Items []*DbUtxoCollectionItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` +} + +func (x *DbUtxoSet) Reset() { + *x = DbUtxoSet{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbUtxoSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbUtxoSet) ProtoMessage() {} + +func (x *DbUtxoSet) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbUtxoSet.ProtoReflect.Descriptor instead. 
+func (*DbUtxoSet) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{18} +} + +func (x *DbUtxoSet) GetItems() []*DbUtxoCollectionItem { + if x != nil { + return x.Items + } + return nil +} + +type DbUtxoCollectionItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Outpoint *DbOutpoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` + UtxoEntry *DbUtxoEntry `protobuf:"bytes,2,opt,name=utxoEntry,proto3" json:"utxoEntry,omitempty"` +} + +func (x *DbUtxoCollectionItem) Reset() { + *x = DbUtxoCollectionItem{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbUtxoCollectionItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbUtxoCollectionItem) ProtoMessage() {} + +func (x *DbUtxoCollectionItem) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbUtxoCollectionItem.ProtoReflect.Descriptor instead. 
+func (*DbUtxoCollectionItem) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{19} +} + +func (x *DbUtxoCollectionItem) GetOutpoint() *DbOutpoint { + if x != nil { + return x.Outpoint + } + return nil +} + +func (x *DbUtxoCollectionItem) GetUtxoEntry() *DbUtxoEntry { + if x != nil { + return x.UtxoEntry + } + return nil +} + +type DbScriptPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Script []byte `protobuf:"bytes,1,opt,name=script,proto3" json:"script,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *DbScriptPublicKey) Reset() { + *x = DbScriptPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbScriptPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbScriptPublicKey) ProtoMessage() {} + +func (x *DbScriptPublicKey) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbScriptPublicKey.ProtoReflect.Descriptor instead. 
+func (*DbScriptPublicKey) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{20} +} + +func (x *DbScriptPublicKey) GetScript() []byte { + if x != nil { + return x.Script + } + return nil +} + +func (x *DbScriptPublicKey) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +type DbUtxoEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + ScriptPublicKey *DbScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` + BlockDaaScore uint64 `protobuf:"varint,3,opt,name=blockDaaScore,proto3" json:"blockDaaScore,omitempty"` + IsCoinbase bool `protobuf:"varint,4,opt,name=isCoinbase,proto3" json:"isCoinbase,omitempty"` +} + +func (x *DbUtxoEntry) Reset() { + *x = DbUtxoEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbUtxoEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbUtxoEntry) ProtoMessage() {} + +func (x *DbUtxoEntry) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbUtxoEntry.ProtoReflect.Descriptor instead. 
+func (*DbUtxoEntry) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{21} +} + +func (x *DbUtxoEntry) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *DbUtxoEntry) GetScriptPublicKey() *DbScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +func (x *DbUtxoEntry) GetBlockDaaScore() uint64 { + if x != nil { + return x.BlockDaaScore + } + return 0 +} + +func (x *DbUtxoEntry) GetIsCoinbase() bool { + if x != nil { + return x.IsCoinbase + } + return false +} + +type DbReachabilityData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Children []*DbHash `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty"` + Parent *DbHash `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` + Interval *DbReachabilityInterval `protobuf:"bytes,3,opt,name=interval,proto3" json:"interval,omitempty"` + FutureCoveringSet []*DbHash `protobuf:"bytes,4,rep,name=futureCoveringSet,proto3" json:"futureCoveringSet,omitempty"` +} + +func (x *DbReachabilityData) Reset() { + *x = DbReachabilityData{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbReachabilityData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbReachabilityData) ProtoMessage() {} + +func (x *DbReachabilityData) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbReachabilityData.ProtoReflect.Descriptor instead. 
+func (*DbReachabilityData) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{22} +} + +func (x *DbReachabilityData) GetChildren() []*DbHash { + if x != nil { + return x.Children + } + return nil +} + +func (x *DbReachabilityData) GetParent() *DbHash { + if x != nil { + return x.Parent + } + return nil +} + +func (x *DbReachabilityData) GetInterval() *DbReachabilityInterval { + if x != nil { + return x.Interval + } + return nil +} + +func (x *DbReachabilityData) GetFutureCoveringSet() []*DbHash { + if x != nil { + return x.FutureCoveringSet + } + return nil +} + +type DbReachabilityInterval struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + End uint64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (x *DbReachabilityInterval) Reset() { + *x = DbReachabilityInterval{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbReachabilityInterval) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbReachabilityInterval) ProtoMessage() {} + +func (x *DbReachabilityInterval) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbReachabilityInterval.ProtoReflect.Descriptor instead. 
+func (*DbReachabilityInterval) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{23} +} + +func (x *DbReachabilityInterval) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *DbReachabilityInterval) GetEnd() uint64 { + if x != nil { + return x.End + } + return 0 +} + +type DbUtxoDiff struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ToAdd []*DbUtxoCollectionItem `protobuf:"bytes,1,rep,name=toAdd,proto3" json:"toAdd,omitempty"` + ToRemove []*DbUtxoCollectionItem `protobuf:"bytes,2,rep,name=toRemove,proto3" json:"toRemove,omitempty"` +} + +func (x *DbUtxoDiff) Reset() { + *x = DbUtxoDiff{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbUtxoDiff) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbUtxoDiff) ProtoMessage() {} + +func (x *DbUtxoDiff) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbUtxoDiff.ProtoReflect.Descriptor instead. 
+func (*DbUtxoDiff) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{24} +} + +func (x *DbUtxoDiff) GetToAdd() []*DbUtxoCollectionItem { + if x != nil { + return x.ToAdd + } + return nil +} + +func (x *DbUtxoDiff) GetToRemove() []*DbUtxoCollectionItem { + if x != nil { + return x.ToRemove + } + return nil +} + +type DbTips struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tips []*DbHash `protobuf:"bytes,1,rep,name=tips,proto3" json:"tips,omitempty"` +} + +func (x *DbTips) Reset() { + *x = DbTips{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbTips) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbTips) ProtoMessage() {} + +func (x *DbTips) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbTips.ProtoReflect.Descriptor instead. 
+func (*DbTips) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{25} +} + +func (x *DbTips) GetTips() []*DbHash { + if x != nil { + return x.Tips + } + return nil +} + +type DbBlockCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *DbBlockCount) Reset() { + *x = DbBlockCount{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockCount) ProtoMessage() {} + +func (x *DbBlockCount) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockCount.ProtoReflect.Descriptor instead. 
+func (*DbBlockCount) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{26} +} + +func (x *DbBlockCount) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +type DbBlockHeaderCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *DbBlockHeaderCount) Reset() { + *x = DbBlockHeaderCount{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockHeaderCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockHeaderCount) ProtoMessage() {} + +func (x *DbBlockHeaderCount) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockHeaderCount.ProtoReflect.Descriptor instead. 
+func (*DbBlockHeaderCount) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{27} +} + +func (x *DbBlockHeaderCount) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +type DbBlockGHOSTDAGDataHashPair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash *DbHash `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + GhostdagData *DbBlockGhostdagData `protobuf:"bytes,2,opt,name=GhostdagData,proto3" json:"GhostdagData,omitempty"` +} + +func (x *DbBlockGHOSTDAGDataHashPair) Reset() { + *x = DbBlockGHOSTDAGDataHashPair{} + if protoimpl.UnsafeEnabled { + mi := &file_dbobjects_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DbBlockGHOSTDAGDataHashPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DbBlockGHOSTDAGDataHashPair) ProtoMessage() {} + +func (x *DbBlockGHOSTDAGDataHashPair) ProtoReflect() protoreflect.Message { + mi := &file_dbobjects_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DbBlockGHOSTDAGDataHashPair.ProtoReflect.Descriptor instead. 
+func (*DbBlockGHOSTDAGDataHashPair) Descriptor() ([]byte, []int) { + return file_dbobjects_proto_rawDescGZIP(), []int{28} +} + +func (x *DbBlockGHOSTDAGDataHashPair) GetHash() *DbHash { + if x != nil { + return x.Hash + } + return nil +} + +func (x *DbBlockGHOSTDAGDataHashPair) GetGhostdagData() *DbBlockGhostdagData { + if x != nil { + return x.GhostdagData + } + return nil +} + +var File_dbobjects_proto protoreflect.FileDescriptor + +var file_dbobjects_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x64, 0x62, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x81, 0x01, 0x0a, 0x07, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x34, 0x0a, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x9b, 0x04, 0x0a, 0x0d, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x3c, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x62, 0x42, 0x6c, 0x6f, 
0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x50, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3d, + 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0e, 0x68, + 0x61, 0x73, 0x68, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x49, 0x0a, + 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x44, 0x4d, 0x65, 0x72, 0x6b, 0x6c, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x44, 0x4d, 0x65, + 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x3d, 0x0a, 0x0e, 0x75, 0x74, 0x78, 0x6f, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0e, 0x75, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x12, 0x74, 0x69, 0x6d, 0x65, 0x49, + 0x6e, 0x4d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x12, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x4d, 0x69, 0x6c, 0x6c, 0x69, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x69, 0x74, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x62, 0x69, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, + 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 
0x18, 0x09, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x39, 0x0a, 0x0c, 0x70, 0x72, 0x75, + 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, + 0x72, 0x65, 0x22, 0x50, 0x0a, 0x13, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x22, 0x1c, 0x0a, 0x06, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, + 0x73, 0x68, 0x22, 0xad, 0x02, 0x0a, 0x0d, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, + 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, + 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, + 0x62, 0x54, 0x72, 
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x07, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, + 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x53, 0x75, 0x62, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xc1, 0x01, 0x0a, 0x12, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x10, + 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 
0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, + 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, + 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x4f, 0x70, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x4f, + 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x68, 0x0a, 0x0a, 0x44, 0x62, 0x4f, 0x75, 0x74, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x0d, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x22, 0x37, 0x0a, 0x0f, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x77, 0x0a, 0x13, 0x44, 0x62, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, + 0x20, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x44, 0x62, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x22, 0x34, 0x0a, 0x0e, 0x44, 0x62, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x22, 0x6a, 0x0a, 0x10, 0x44, 0x62, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x56, 0x0a, 0x13, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x13, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x22, 0xb6, 0x01, 0x0a, 0x15, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x68, + 0x0a, 0x19, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x19, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0xed, 0x01, + 0x0a, 0x1b, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x3e, 0x0a, + 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, + 0x03, 0x66, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x66, 0x65, 0x65, 0x12, + 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, + 0x5c, 0x0a, 0x1b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x1b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x76, 0x0a, + 0x10, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6c, 
0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0d, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xdb, + 0x02, 0x0a, 0x13, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, + 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, + 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, + 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, + 0x12, 0x3d, 0x0a, 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x3b, 0x0a, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0d, 0x6d, + 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0c, + 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x12, 0x53, 0x0a, 0x12, 0x62, 0x6c, 0x75, 0x65, 0x73, + 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, + 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x52, 0x12, 0x62, 0x6c, 0x75, 0x65, 0x73, 0x41, + 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x22, 0x6d, 0x0a, 0x14, + 0x44, 0x62, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x48, 0x61, 0x73, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x62, + 0x6c, 0x75, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x28, 0x0a, 0x0a, 0x44, + 0x62, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x73, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x75, 0x6c, + 0x74, 0x69, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6d, 0x75, 0x6c, + 0x74, 0x69, 0x73, 0x65, 0x74, 0x22, 0x46, 
0x0a, 0x09, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x53, + 0x65, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x87, 0x01, + 0x0a, 0x14, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x35, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x4f, 0x75, 0x74, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x38, 0x0a, + 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x75, 0x74, + 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x45, 0x0a, 0x11, 0x44, 0x62, 0x53, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb7, + 0x01, 0x0a, 0x0b, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, + 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4a, 0x0a, 0x0f, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x44, 0x62, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, + 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, + 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, + 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x44, 0x62, 0x52, + 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x31, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, + 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x41, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x52, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 
0x6c, 0x12, 0x43, 0x0a, 0x11, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x43, 0x6f, + 0x76, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x44, 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x11, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x43, 0x6f, + 0x76, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x74, 0x22, 0x40, 0x0a, 0x16, 0x44, 0x62, 0x52, + 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x0a, + 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x44, 0x69, 0x66, 0x66, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, + 0x41, 0x64, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, + 0x74, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x3f, 0x0a, 0x08, 0x74, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x55, 0x74, 0x78, 0x6f, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x08, 0x74, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x33, 0x0a, 0x06, 0x44, 0x62, 0x54, 0x69, 0x70, 0x73, + 0x12, 0x29, 0x0a, 0x04, 0x74, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, + 0x62, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x74, 0x69, 
0x70, 0x73, 0x22, 0x24, 0x0a, 0x0c, 0x44, + 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x2a, 0x0a, 0x12, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x90, 0x01, + 0x0a, 0x1b, 0x44, 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x48, 0x4f, 0x53, 0x54, 0x44, 0x41, + 0x47, 0x44, 0x61, 0x74, 0x61, 0x48, 0x61, 0x73, 0x68, 0x50, 0x61, 0x69, 0x72, 0x12, 0x29, 0x0a, + 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x62, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x46, 0x0a, 0x0c, 0x47, 0x68, 0x6f, 0x73, + 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, + 0x62, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x0c, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, + 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, + 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x73, + 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x2f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_dbobjects_proto_rawDescOnce sync.Once + file_dbobjects_proto_rawDescData = file_dbobjects_proto_rawDesc +) + +func file_dbobjects_proto_rawDescGZIP() []byte { + 
file_dbobjects_proto_rawDescOnce.Do(func() { + file_dbobjects_proto_rawDescData = protoimpl.X.CompressGZIP(file_dbobjects_proto_rawDescData) + }) + return file_dbobjects_proto_rawDescData +} + +var file_dbobjects_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_dbobjects_proto_goTypes = []interface{}{ + (*DbBlock)(nil), // 0: serialization.DbBlock + (*DbBlockHeader)(nil), // 1: serialization.DbBlockHeader + (*DbBlockLevelParents)(nil), // 2: serialization.DbBlockLevelParents + (*DbHash)(nil), // 3: serialization.DbHash + (*DbTransaction)(nil), // 4: serialization.DbTransaction + (*DbTransactionInput)(nil), // 5: serialization.DbTransactionInput + (*DbOutpoint)(nil), // 6: serialization.DbOutpoint + (*DbTransactionId)(nil), // 7: serialization.DbTransactionId + (*DbTransactionOutput)(nil), // 8: serialization.DbTransactionOutput + (*DbSubnetworkId)(nil), // 9: serialization.DbSubnetworkId + (*DbAcceptanceData)(nil), // 10: serialization.DbAcceptanceData + (*DbBlockAcceptanceData)(nil), // 11: serialization.DbBlockAcceptanceData + (*DbTransactionAcceptanceData)(nil), // 12: serialization.DbTransactionAcceptanceData + (*DbBlockRelations)(nil), // 13: serialization.DbBlockRelations + (*DbBlockStatus)(nil), // 14: serialization.DbBlockStatus + (*DbBlockGhostdagData)(nil), // 15: serialization.DbBlockGhostdagData + (*DbBluesAnticoneSizes)(nil), // 16: serialization.DbBluesAnticoneSizes + (*DbMultiset)(nil), // 17: serialization.DbMultiset + (*DbUtxoSet)(nil), // 18: serialization.DbUtxoSet + (*DbUtxoCollectionItem)(nil), // 19: serialization.DbUtxoCollectionItem + (*DbScriptPublicKey)(nil), // 20: serialization.DbScriptPublicKey + (*DbUtxoEntry)(nil), // 21: serialization.DbUtxoEntry + (*DbReachabilityData)(nil), // 22: serialization.DbReachabilityData + (*DbReachabilityInterval)(nil), // 23: serialization.DbReachabilityInterval + (*DbUtxoDiff)(nil), // 24: serialization.DbUtxoDiff + (*DbTips)(nil), // 25: serialization.DbTips + (*DbBlockCount)(nil), // 26: 
serialization.DbBlockCount + (*DbBlockHeaderCount)(nil), // 27: serialization.DbBlockHeaderCount + (*DbBlockGHOSTDAGDataHashPair)(nil), // 28: serialization.DbBlockGHOSTDAGDataHashPair +} +var file_dbobjects_proto_depIdxs = []int32{ + 1, // 0: serialization.DbBlock.header:type_name -> serialization.DbBlockHeader + 4, // 1: serialization.DbBlock.transactions:type_name -> serialization.DbTransaction + 2, // 2: serialization.DbBlockHeader.parents:type_name -> serialization.DbBlockLevelParents + 3, // 3: serialization.DbBlockHeader.hashMerkleRoot:type_name -> serialization.DbHash + 3, // 4: serialization.DbBlockHeader.acceptedIDMerkleRoot:type_name -> serialization.DbHash + 3, // 5: serialization.DbBlockHeader.utxoCommitment:type_name -> serialization.DbHash + 3, // 6: serialization.DbBlockHeader.pruningPoint:type_name -> serialization.DbHash + 3, // 7: serialization.DbBlockLevelParents.parentHashes:type_name -> serialization.DbHash + 5, // 8: serialization.DbTransaction.inputs:type_name -> serialization.DbTransactionInput + 8, // 9: serialization.DbTransaction.outputs:type_name -> serialization.DbTransactionOutput + 9, // 10: serialization.DbTransaction.subnetworkID:type_name -> serialization.DbSubnetworkId + 6, // 11: serialization.DbTransactionInput.previousOutpoint:type_name -> serialization.DbOutpoint + 7, // 12: serialization.DbOutpoint.transactionID:type_name -> serialization.DbTransactionId + 20, // 13: serialization.DbTransactionOutput.scriptPublicKey:type_name -> serialization.DbScriptPublicKey + 11, // 14: serialization.DbAcceptanceData.blockAcceptanceData:type_name -> serialization.DbBlockAcceptanceData + 12, // 15: serialization.DbBlockAcceptanceData.transactionAcceptanceData:type_name -> serialization.DbTransactionAcceptanceData + 3, // 16: serialization.DbBlockAcceptanceData.blockHash:type_name -> serialization.DbHash + 4, // 17: serialization.DbTransactionAcceptanceData.transaction:type_name -> serialization.DbTransaction + 21, // 18: 
serialization.DbTransactionAcceptanceData.transactionInputUtxoEntries:type_name -> serialization.DbUtxoEntry + 3, // 19: serialization.DbBlockRelations.parents:type_name -> serialization.DbHash + 3, // 20: serialization.DbBlockRelations.children:type_name -> serialization.DbHash + 3, // 21: serialization.DbBlockGhostdagData.selectedParent:type_name -> serialization.DbHash + 3, // 22: serialization.DbBlockGhostdagData.mergeSetBlues:type_name -> serialization.DbHash + 3, // 23: serialization.DbBlockGhostdagData.mergeSetReds:type_name -> serialization.DbHash + 16, // 24: serialization.DbBlockGhostdagData.bluesAnticoneSizes:type_name -> serialization.DbBluesAnticoneSizes + 3, // 25: serialization.DbBluesAnticoneSizes.blueHash:type_name -> serialization.DbHash + 19, // 26: serialization.DbUtxoSet.items:type_name -> serialization.DbUtxoCollectionItem + 6, // 27: serialization.DbUtxoCollectionItem.outpoint:type_name -> serialization.DbOutpoint + 21, // 28: serialization.DbUtxoCollectionItem.utxoEntry:type_name -> serialization.DbUtxoEntry + 20, // 29: serialization.DbUtxoEntry.scriptPublicKey:type_name -> serialization.DbScriptPublicKey + 3, // 30: serialization.DbReachabilityData.children:type_name -> serialization.DbHash + 3, // 31: serialization.DbReachabilityData.parent:type_name -> serialization.DbHash + 23, // 32: serialization.DbReachabilityData.interval:type_name -> serialization.DbReachabilityInterval + 3, // 33: serialization.DbReachabilityData.futureCoveringSet:type_name -> serialization.DbHash + 19, // 34: serialization.DbUtxoDiff.toAdd:type_name -> serialization.DbUtxoCollectionItem + 19, // 35: serialization.DbUtxoDiff.toRemove:type_name -> serialization.DbUtxoCollectionItem + 3, // 36: serialization.DbTips.tips:type_name -> serialization.DbHash + 3, // 37: serialization.DbBlockGHOSTDAGDataHashPair.hash:type_name -> serialization.DbHash + 15, // 38: serialization.DbBlockGHOSTDAGDataHashPair.GhostdagData:type_name -> serialization.DbBlockGhostdagData + 39, // 
[39:39] is the sub-list for method output_type + 39, // [39:39] is the sub-list for method input_type + 39, // [39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_dbobjects_proto_init() } +func file_dbobjects_proto_init() { + if File_dbobjects_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_dbobjects_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockLevelParents); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbHash); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbTransaction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbTransactionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[6].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*DbOutpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbTransactionId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbTransactionOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbSubnetworkId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbAcceptanceData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockAcceptanceData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbTransactionAcceptanceData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockRelations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_dbobjects_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockGhostdagData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBluesAnticoneSizes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbMultiset); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbUtxoSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbUtxoCollectionItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbScriptPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbUtxoEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbReachabilityData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbReachabilityInterval); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbUtxoDiff); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbTips); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockHeaderCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_dbobjects_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DbBlockGHOSTDAGDataHashPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 
+ RawDescriptor: file_dbobjects_proto_rawDesc, + NumEnums: 0, + NumMessages: 29, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_dbobjects_proto_goTypes, + DependencyIndexes: file_dbobjects_proto_depIdxs, + MessageInfos: file_dbobjects_proto_msgTypes, + }.Build() + File_dbobjects_proto = out.File + file_dbobjects_proto_rawDesc = nil + file_dbobjects_proto_goTypes = nil + file_dbobjects_proto_depIdxs = nil +} diff --git a/domain/consensus/database/serialization/dbobjects.proto b/domain/consensus/database/serialization/dbobjects.proto new file mode 100644 index 0000000..e553e97 --- /dev/null +++ b/domain/consensus/database/serialization/dbobjects.proto @@ -0,0 +1,165 @@ +syntax = "proto3"; +package serialization; + +option go_package = "github.com/spectre-project/spectred/serialization"; + +message DbBlock { + DbBlockHeader header = 1; + repeated DbTransaction transactions = 2; +} + +message DbBlockHeader { + uint32 version = 1; + repeated DbBlockLevelParents parents = 2; + DbHash hashMerkleRoot = 3; + DbHash acceptedIDMerkleRoot = 4; + DbHash utxoCommitment = 5; + int64 timeInMilliseconds = 6; + uint32 bits = 7; + uint64 nonce = 8; + uint64 daaScore = 9; + bytes blueWork = 10; + DbHash pruningPoint = 12; + uint64 blueScore = 13; +} + +message DbBlockLevelParents { + repeated DbHash parentHashes = 1; +} + +message DbHash { + bytes hash = 1; +} + +message DbTransaction { + uint32 version = 1; + repeated DbTransactionInput inputs = 2; + repeated DbTransactionOutput outputs = 3; + uint64 lockTime = 4; + DbSubnetworkId subnetworkID = 5; + uint64 gas = 6; + bytes payload = 8; +} + +message DbTransactionInput { + DbOutpoint previousOutpoint = 1; + bytes signatureScript = 2; + uint64 sequence = 3; + uint32 sigOpCount = 4; +} + +message DbOutpoint { + DbTransactionId transactionID = 1; + uint32 index = 2; +} + +message DbTransactionId { + bytes transactionId = 1; +} + +message DbTransactionOutput { + uint64 value = 1; + DbScriptPublicKey scriptPublicKey = 2; +} + 
+message DbSubnetworkId { + bytes subnetworkId = 1; +} + +message DbAcceptanceData { + repeated DbBlockAcceptanceData blockAcceptanceData = 1; +} + +message DbBlockAcceptanceData { + repeated DbTransactionAcceptanceData transactionAcceptanceData = 1; + DbHash blockHash = 2; +} + +message DbTransactionAcceptanceData { + DbTransaction transaction = 1; + uint64 fee = 2; + bool isAccepted = 3; + repeated DbUtxoEntry transactionInputUtxoEntries = 4; +} + +message DbBlockRelations { + repeated DbHash parents = 1; + repeated DbHash children = 2; +} + +message DbBlockStatus { + uint32 status = 1; +} + +message DbBlockGhostdagData { + uint64 blueScore = 1; + bytes blueWork = 2; + DbHash selectedParent = 3; + repeated DbHash mergeSetBlues = 4; + repeated DbHash mergeSetReds = 5; + repeated DbBluesAnticoneSizes bluesAnticoneSizes = 6; +} + +message DbBluesAnticoneSizes { + DbHash blueHash = 1; + uint32 anticoneSize = 2; +} + +message DbMultiset { + bytes multiset = 1; +} + +message DbUtxoSet { + repeated DbUtxoCollectionItem items = 1; +} + +message DbUtxoCollectionItem { + DbOutpoint outpoint = 1; + DbUtxoEntry utxoEntry = 2; +} + +message DbScriptPublicKey { + bytes script = 1; + uint32 version = 2; +} + +message DbUtxoEntry { + uint64 amount = 1; + DbScriptPublicKey scriptPublicKey = 2; + uint64 blockDaaScore = 3; + bool isCoinbase = 4; +} + +message DbReachabilityData { + repeated DbHash children = 1; + DbHash parent = 2; + DbReachabilityInterval interval = 3; + repeated DbHash futureCoveringSet = 4; +} + +message DbReachabilityInterval { + uint64 start = 1; + uint64 end = 2; +} + +message DbUtxoDiff { + repeated DbUtxoCollectionItem toAdd = 1; + repeated DbUtxoCollectionItem toRemove = 2; +} + +message DbTips { + repeated DbHash tips = 1; +} + +message DbBlockCount { + uint64 count = 1; +} + +message DbBlockHeaderCount { + uint64 count = 1; +} + +message DbBlockGHOSTDAGDataHashPair { + DbHash hash = 1; + DbBlockGhostdagData GhostdagData = 2; +} diff --git 
a/domain/consensus/database/serialization/generate.go b/domain/consensus/database/serialization/generate.go new file mode 100644 index 0000000..3427735 --- /dev/null +++ b/domain/consensus/database/serialization/generate.go @@ -0,0 +1,3 @@ +//go:generate protoc --go_out=. --go-grpc_out=. --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative dbobjects.proto + +package serialization diff --git a/domain/consensus/database/serialization/hash.go b/domain/consensus/database/serialization/hash.go new file mode 100644 index 0000000..e0b8684 --- /dev/null +++ b/domain/consensus/database/serialization/hash.go @@ -0,0 +1,37 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DbHashToDomainHash converts a DbHash to a DomainHash +func DbHashToDomainHash(dbHash *DbHash) (*externalapi.DomainHash, error) { + return externalapi.NewDomainHashFromByteSlice(dbHash.Hash) +} + +// DomainHashToDbHash converts a DomainHash to a DbHash +func DomainHashToDbHash(domainHash *externalapi.DomainHash) *DbHash { + return &DbHash{Hash: domainHash.ByteSlice()} +} + +// DomainHashesToDbHashes converts a slice of DomainHash to a slice of DbHash +func DomainHashesToDbHashes(domainHashes []*externalapi.DomainHash) []*DbHash { + dbHashes := make([]*DbHash, len(domainHashes)) + for i, domainHash := range domainHashes { + dbHashes[i] = DomainHashToDbHash(domainHash) + } + return dbHashes +} + +// DbHashesToDomainHashes converts a slice of DbHash to a slice of DomainHash +func DbHashesToDomainHashes(dbHashes []*DbHash) ([]*externalapi.DomainHash, error) { + domainHashes := make([]*externalapi.DomainHash, len(dbHashes)) + for i, domainHash := range dbHashes { + var err error + domainHashes[i], err = DbHashToDomainHash(domainHash) + if err != nil { + return nil, err + } + } + return domainHashes, nil +} diff --git a/domain/consensus/database/serialization/multiset.go b/domain/consensus/database/serialization/multiset.go new 
file mode 100644 index 0000000..9487516 --- /dev/null +++ b/domain/consensus/database/serialization/multiset.go @@ -0,0 +1,18 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/utils/multiset" +) + +// MultisetToDBMultiset converts Multiset to DbMultiset +func MultisetToDBMultiset(multiset model.Multiset) *DbMultiset { + return &DbMultiset{ + Multiset: multiset.Serialize(), + } +} + +// DBMultisetToMultiset converts DbMultiset to Multiset +func DBMultisetToMultiset(dbMultiset *DbMultiset) (model.Multiset, error) { + return multiset.FromBytes(dbMultiset.Multiset) +} diff --git a/domain/consensus/database/serialization/outpoint.go b/domain/consensus/database/serialization/outpoint.go new file mode 100644 index 0000000..1dd5ea8 --- /dev/null +++ b/domain/consensus/database/serialization/outpoint.go @@ -0,0 +1,26 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DomainOutpointToDbOutpoint converts DomainOutpoint to DbOutpoint +func DomainOutpointToDbOutpoint(domainOutpoint *externalapi.DomainOutpoint) *DbOutpoint { + return &DbOutpoint{ + TransactionID: DomainTransactionIDToDbTransactionID(&domainOutpoint.TransactionID), + Index: domainOutpoint.Index, + } +} + +// DbOutpointToDomainOutpoint converts DbOutpoint to DomainOutpoint +func DbOutpointToDomainOutpoint(dbOutpoint *DbOutpoint) (*externalapi.DomainOutpoint, error) { + domainTransactionID, err := DbTransactionIDToDomainTransactionID(dbOutpoint.TransactionID) + if err != nil { + return nil, err + } + + return &externalapi.DomainOutpoint{ + TransactionID: *domainTransactionID, + Index: dbOutpoint.Index, + }, nil +} diff --git a/domain/consensus/database/serialization/reachability_data.go b/domain/consensus/database/serialization/reachability_data.go new file mode 100644 index 0000000..63b2799 --- /dev/null +++ 
b/domain/consensus/database/serialization/reachability_data.go @@ -0,0 +1,49 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/reachabilitydata" +) + +// ReachablityDataToDBReachablityData converts ReachabilityData to DbReachabilityData +func ReachablityDataToDBReachablityData(reachabilityData model.ReachabilityData) *DbReachabilityData { + parent := reachabilityData.Parent() + var dbParent *DbHash + if parent != nil { + dbParent = DomainHashToDbHash(parent) + } + + return &DbReachabilityData{ + Children: DomainHashesToDbHashes(reachabilityData.Children()), + Parent: dbParent, + Interval: reachablityIntervalToDBReachablityInterval(reachabilityData.Interval()), + FutureCoveringSet: DomainHashesToDbHashes(reachabilityData.FutureCoveringSet()), + } +} + +// DBReachablityDataToReachablityData converts DbReachabilityData to ReachabilityData +func DBReachablityDataToReachablityData(dbReachabilityData *DbReachabilityData) (model.ReachabilityData, error) { + children, err := DbHashesToDomainHashes(dbReachabilityData.Children) + if err != nil { + return nil, err + } + + var parent *externalapi.DomainHash + if dbReachabilityData.Parent != nil { + var err error + parent, err = DbHashToDomainHash(dbReachabilityData.Parent) + if err != nil { + return nil, err + } + } + + interval := dbReachablityIntervalToReachablityInterval(dbReachabilityData.Interval) + + futureCoveringSet, err := DbHashesToDomainHashes(dbReachabilityData.FutureCoveringSet) + if err != nil { + return nil, err + } + + return reachabilitydata.New(children, parent, interval, futureCoveringSet), nil +} diff --git a/domain/consensus/database/serialization/reachablity_interval.go b/domain/consensus/database/serialization/reachablity_interval.go new file mode 100644 index 0000000..58606d0 --- /dev/null +++ 
b/domain/consensus/database/serialization/reachablity_interval.go @@ -0,0 +1,19 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" +) + +func reachablityIntervalToDBReachablityInterval(reachabilityInterval *model.ReachabilityInterval) *DbReachabilityInterval { + return &DbReachabilityInterval{ + Start: reachabilityInterval.Start, + End: reachabilityInterval.End, + } +} + +func dbReachablityIntervalToReachablityInterval(dbReachabilityInterval *DbReachabilityInterval) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: dbReachabilityInterval.Start, + End: dbReachabilityInterval.End, + } +} diff --git a/domain/consensus/database/serialization/subnetworkid.go b/domain/consensus/database/serialization/subnetworkid.go new file mode 100644 index 0000000..91131bc --- /dev/null +++ b/domain/consensus/database/serialization/subnetworkid.go @@ -0,0 +1,16 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" +) + +// DbSubnetworkIDToDomainSubnetworkID converts DbSubnetworkId to DomainSubnetworkID +func DbSubnetworkIDToDomainSubnetworkID(dbSubnetworkID *DbSubnetworkId) (*externalapi.DomainSubnetworkID, error) { + return subnetworks.FromBytes(dbSubnetworkID.SubnetworkId) +} + +// DomainSubnetworkIDToDbSubnetworkID converts DomainSubnetworkID to DbSubnetworkId +func DomainSubnetworkIDToDbSubnetworkID(domainSubnetworkID *externalapi.DomainSubnetworkID) *DbSubnetworkId { + return &DbSubnetworkId{SubnetworkId: domainSubnetworkID[:]} +} diff --git a/domain/consensus/database/serialization/tips.go b/domain/consensus/database/serialization/tips.go new file mode 100644 index 0000000..6459853 --- /dev/null +++ b/domain/consensus/database/serialization/tips.go @@ -0,0 +1,17 @@ +package serialization + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TipsToDBTips converts a slice of hashes to DbTips +func TipsToDBTips(tips []*externalapi.DomainHash) *DbTips { + return &DbTips{ + Tips: DomainHashesToDbHashes(tips), + } +} + +// DBTipsToTips converts DbTips to a slice of hashes +func DBTipsToTips(dbTips *DbTips) ([]*externalapi.DomainHash, error) { + return DbHashesToDomainHashes(dbTips.Tips) +} diff --git a/domain/consensus/database/serialization/transaction.go b/domain/consensus/database/serialization/transaction.go new file mode 100644 index 0000000..1b81f5e --- /dev/null +++ b/domain/consensus/database/serialization/transaction.go @@ -0,0 +1,87 @@ +package serialization + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DomainTransactionToDbTransaction converts DomainTransaction to DbTransaction +func DomainTransactionToDbTransaction(domainTransaction *externalapi.DomainTransaction) *DbTransaction { + dbInputs := make([]*DbTransactionInput, len(domainTransaction.Inputs)) + for i, domainTransactionInput := range domainTransaction.Inputs { + dbInputs[i] = &DbTransactionInput{ + PreviousOutpoint: DomainOutpointToDbOutpoint(&domainTransactionInput.PreviousOutpoint), + SignatureScript: domainTransactionInput.SignatureScript, + Sequence: domainTransactionInput.Sequence, + SigOpCount: uint32(domainTransactionInput.SigOpCount), + } + } + + dbOutputs := make([]*DbTransactionOutput, len(domainTransaction.Outputs)) + for i, domainTransactionOutput := range domainTransaction.Outputs { + dbScriptPublicKey := ScriptPublicKeyToDBScriptPublicKey(domainTransactionOutput.ScriptPublicKey) + dbOutputs[i] = &DbTransactionOutput{ + Value: domainTransactionOutput.Value, + ScriptPublicKey: dbScriptPublicKey, + } + } + + return &DbTransaction{ + Version: uint32(domainTransaction.Version), + Inputs: dbInputs, + Outputs: dbOutputs, + LockTime: domainTransaction.LockTime, + 
SubnetworkID: DomainSubnetworkIDToDbSubnetworkID(&domainTransaction.SubnetworkID), + Gas: domainTransaction.Gas, + Payload: domainTransaction.Payload, + } +} + +// DbTransactionToDomainTransaction converts DbTransaction to DomainTransaction +func DbTransactionToDomainTransaction(dbTransaction *DbTransaction) (*externalapi.DomainTransaction, error) { + domainSubnetworkID, err := DbSubnetworkIDToDomainSubnetworkID(dbTransaction.SubnetworkID) + if err != nil { + return nil, err + } + + domainInputs := make([]*externalapi.DomainTransactionInput, len(dbTransaction.Inputs)) + for i, dbTransactionInput := range dbTransaction.Inputs { + domainPreviousOutpoint, err := DbOutpointToDomainOutpoint(dbTransactionInput.PreviousOutpoint) + if err != nil { + return nil, err + } + domainInputs[i] = &externalapi.DomainTransactionInput{ + PreviousOutpoint: *domainPreviousOutpoint, + SignatureScript: dbTransactionInput.SignatureScript, + Sequence: dbTransactionInput.Sequence, + SigOpCount: byte(dbTransactionInput.SigOpCount), + } + } + + domainOutputs := make([]*externalapi.DomainTransactionOutput, len(dbTransaction.Outputs)) + for i, dbTransactionOutput := range dbTransaction.Outputs { + scriptPublicKey, err := DBScriptPublicKeyToScriptPublicKey(dbTransactionOutput.ScriptPublicKey) + if err != nil { + return nil, err + } + domainOutputs[i] = &externalapi.DomainTransactionOutput{ + Value: dbTransactionOutput.Value, + ScriptPublicKey: scriptPublicKey, + } + } + + if dbTransaction.Version > math.MaxUint16 { + return nil, errors.Errorf("The transaction version is bigger then uint16.") + } + return &externalapi.DomainTransaction{ + Version: uint16(dbTransaction.Version), + Inputs: domainInputs, + Outputs: domainOutputs, + LockTime: dbTransaction.LockTime, + SubnetworkID: *domainSubnetworkID, + Gas: dbTransaction.Gas, + Payload: dbTransaction.Payload, + }, nil +} diff --git a/domain/consensus/database/serialization/transactionid.go b/domain/consensus/database/serialization/transactionid.go 
new file mode 100644 index 0000000..37c4156 --- /dev/null +++ b/domain/consensus/database/serialization/transactionid.go @@ -0,0 +1,16 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" +) + +// DbTransactionIDToDomainTransactionID converts DbTransactionId to DomainTransactionID +func DbTransactionIDToDomainTransactionID(dbTransactionID *DbTransactionId) (*externalapi.DomainTransactionID, error) { + return transactionid.FromBytes(dbTransactionID.TransactionId) +} + +// DomainTransactionIDToDbTransactionID converts DomainTransactionID to DbTransactionId +func DomainTransactionIDToDbTransactionID(domainTransactionID *externalapi.DomainTransactionID) *DbTransactionId { + return &DbTransactionId{TransactionId: domainTransactionID.ByteSlice()} +} diff --git a/domain/consensus/database/serialization/utxo_collection.go b/domain/consensus/database/serialization/utxo_collection.go new file mode 100644 index 0000000..152889b --- /dev/null +++ b/domain/consensus/database/serialization/utxo_collection.go @@ -0,0 +1,43 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +func utxoCollectionToDBUTXOCollection(utxoCollection externalapi.UTXOCollection) ([]*DbUtxoCollectionItem, error) { + items := make([]*DbUtxoCollectionItem, utxoCollection.Len()) + i := 0 + utxoIterator := utxoCollection.Iterator() + defer utxoIterator.Close() + for ok := utxoIterator.First(); ok; ok = utxoIterator.Next() { + outpoint, entry, err := utxoIterator.Get() + if err != nil { + return nil, err + } + + items[i] = &DbUtxoCollectionItem{ + Outpoint: DomainOutpointToDbOutpoint(outpoint), + UtxoEntry: UTXOEntryToDBUTXOEntry(entry), + } + i++ + } + + return items, nil +} + +func dbUTXOCollectionToUTXOCollection(items 
[]*DbUtxoCollectionItem) (externalapi.UTXOCollection, error) { + utxoMap := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, len(items)) + for _, item := range items { + outpoint, err := DbOutpointToDomainOutpoint(item.Outpoint) + if err != nil { + return nil, err + } + utxoEntry, err := DBUTXOEntryToUTXOEntry(item.UtxoEntry) + if err != nil { + return nil, err + } + utxoMap[*outpoint] = utxoEntry + } + return utxo.NewUTXOCollection(utxoMap), nil +} diff --git a/domain/consensus/database/serialization/utxo_diff.go b/domain/consensus/database/serialization/utxo_diff.go new file mode 100644 index 0000000..ccff135 --- /dev/null +++ b/domain/consensus/database/serialization/utxo_diff.go @@ -0,0 +1,39 @@ +package serialization + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +// UTXODiffToDBUTXODiff converts UTXODiff to DbUtxoDiff +func UTXODiffToDBUTXODiff(diff externalapi.UTXODiff) (*DbUtxoDiff, error) { + toAdd, err := utxoCollectionToDBUTXOCollection(diff.ToAdd()) + if err != nil { + return nil, err + } + + toRemove, err := utxoCollectionToDBUTXOCollection(diff.ToRemove()) + if err != nil { + return nil, err + } + + return &DbUtxoDiff{ + ToAdd: toAdd, + ToRemove: toRemove, + }, nil +} + +// DBUTXODiffToUTXODiff converts DbUtxoDiff to UTXODiff +func DBUTXODiffToUTXODiff(diff *DbUtxoDiff) (externalapi.UTXODiff, error) { + toAdd, err := dbUTXOCollectionToUTXOCollection(diff.ToAdd) + if err != nil { + return nil, err + } + + toRemove, err := dbUTXOCollectionToUTXOCollection(diff.ToRemove) + if err != nil { + return nil, err + } + + return utxo.NewUTXODiffFromCollections(toAdd, toRemove) +} diff --git a/domain/consensus/database/serialization/utxo_entry.go b/domain/consensus/database/serialization/utxo_entry.go new file mode 100644 index 0000000..400a9b5 --- /dev/null +++ b/domain/consensus/database/serialization/utxo_entry.go @@ -0,0 +1,42 @@ +package 
serialization + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +// ScriptPublicKeyToDBScriptPublicKey converts ScriptPublicKey to DBScriptPublicKey +func ScriptPublicKeyToDBScriptPublicKey(scriptPublicKey *externalapi.ScriptPublicKey) *DbScriptPublicKey { + return &DbScriptPublicKey{Script: scriptPublicKey.Script, Version: uint32(scriptPublicKey.Version)} +} + +// DBScriptPublicKeyToScriptPublicKey convert DbScriptPublicKey ro ScriptPublicKey +func DBScriptPublicKeyToScriptPublicKey(dbScriptPublicKey *DbScriptPublicKey) (*externalapi.ScriptPublicKey, error) { + if dbScriptPublicKey.Version > math.MaxUint16 { + return nil, errors.Errorf("The version on ScriptPublicKey is bigger then uint16.") + } + return &externalapi.ScriptPublicKey{Script: dbScriptPublicKey.Script, Version: uint16(dbScriptPublicKey.Version)}, nil +} + +// UTXOEntryToDBUTXOEntry converts UTXOEntry to DbUtxoEntry +func UTXOEntryToDBUTXOEntry(utxoEntry externalapi.UTXOEntry) *DbUtxoEntry { + dbScriptPublicKey := ScriptPublicKeyToDBScriptPublicKey(utxoEntry.ScriptPublicKey()) + return &DbUtxoEntry{ + Amount: utxoEntry.Amount(), + ScriptPublicKey: dbScriptPublicKey, + BlockDaaScore: utxoEntry.BlockDAAScore(), + IsCoinbase: utxoEntry.IsCoinbase(), + } +} + +// DBUTXOEntryToUTXOEntry convert DbUtxoEntry ro UTXOEntry +func DBUTXOEntryToUTXOEntry(dbUtxoEntry *DbUtxoEntry) (externalapi.UTXOEntry, error) { + scriptPublicKey, err := DBScriptPublicKeyToScriptPublicKey(dbUtxoEntry.ScriptPublicKey) + if err != nil { + return nil, err + } + return utxo.NewUTXOEntry(dbUtxoEntry.Amount, scriptPublicKey, dbUtxoEntry.IsCoinbase, dbUtxoEntry.BlockDaaScore), nil +} diff --git a/domain/consensus/database/transaction.go b/domain/consensus/database/transaction.go new file mode 100644 index 0000000..848ba6e --- /dev/null +++ b/domain/consensus/database/transaction.go @@ -0,0 
+1,50 @@ +package database + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +type dbTransaction struct { + transaction database.Transaction +} + +func (d *dbTransaction) Get(key model.DBKey) ([]byte, error) { + return d.transaction.Get(dbKeyToDatabaseKey(key)) +} + +func (d *dbTransaction) Has(key model.DBKey) (bool, error) { + return d.transaction.Has(dbKeyToDatabaseKey(key)) +} + +func (d *dbTransaction) Cursor(bucket model.DBBucket) (model.DBCursor, error) { + cursor, err := d.transaction.Cursor(dbBucketToDatabaseBucket(bucket)) + if err != nil { + return nil, err + } + return newDBCursor(cursor), nil +} + +func (d *dbTransaction) Put(key model.DBKey, value []byte) error { + return d.transaction.Put(dbKeyToDatabaseKey(key), value) +} + +func (d *dbTransaction) Delete(key model.DBKey) error { + return d.transaction.Delete(dbKeyToDatabaseKey(key)) +} + +func (d *dbTransaction) Rollback() error { + return d.transaction.Rollback() +} + +func (d *dbTransaction) Commit() error { + return d.transaction.Commit() +} + +func (d *dbTransaction) RollbackUnlessClosed() error { + return d.transaction.RollbackUnlessClosed() +} + +func newDBTransaction(transaction database.Transaction) model.DBTransaction { + return &dbTransaction{transaction: transaction} +} diff --git a/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go new file mode 100644 index 0000000..5e7bd0b --- /dev/null +++ b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go @@ -0,0 +1,50 @@ +package acceptancedatastore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type acceptanceDataStagingShard struct { + store *acceptanceDataStore + toAdd 
map[externalapi.DomainHash]externalapi.AcceptanceData + toDelete map[externalapi.DomainHash]struct{} +} + +func (ads *acceptanceDataStore) stagingShard(stagingArea *model.StagingArea) *acceptanceDataStagingShard { + return stagingArea.GetOrCreateShard(ads.shardID, func() model.StagingShard { + return &acceptanceDataStagingShard{ + store: ads, + toAdd: make(map[externalapi.DomainHash]externalapi.AcceptanceData), + toDelete: make(map[externalapi.DomainHash]struct{}), + } + }).(*acceptanceDataStagingShard) +} + +func (adss *acceptanceDataStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, acceptanceData := range adss.toAdd { + acceptanceDataBytes, err := adss.store.serializeAcceptanceData(acceptanceData) + if err != nil { + return err + } + err = dbTx.Put(adss.store.hashAsKey(&hash), acceptanceDataBytes) + if err != nil { + return err + } + adss.store.cache.Add(&hash, acceptanceData) + } + + for hash := range adss.toDelete { + err := dbTx.Delete(adss.store.hashAsKey(&hash)) + if err != nil { + return err + } + adss.store.cache.Remove(&hash) + } + + return nil +} + +func (adss *acceptanceDataStagingShard) isStaged() bool { + return len(adss.toAdd) != 0 || len(adss.toDelete) != 0 +} diff --git a/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go new file mode 100644 index 0000000..014064e --- /dev/null +++ b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go @@ -0,0 +1,92 @@ +package acceptancedatastore + +import ( + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" + "google.golang.org/protobuf/proto" +) + +var bucketName = 
[]byte("acceptance-data") + +// acceptanceDataStore represents a store of AcceptanceData +type acceptanceDataStore struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket +} + +// New instantiates a new AcceptanceDataStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.AcceptanceDataStore { + return &acceptanceDataStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + } +} + +// Stage stages the given acceptanceData for the given blockHash +func (ads *acceptanceDataStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData) { + stagingShard := ads.stagingShard(stagingArea) + stagingShard.toAdd[*blockHash] = acceptanceData.Clone() +} + +func (ads *acceptanceDataStore) IsStaged(stagingArea *model.StagingArea) bool { + return ads.stagingShard(stagingArea).isStaged() +} + +// Get gets the acceptanceData associated with the given blockHash +func (ads *acceptanceDataStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) { + stagingShard := ads.stagingShard(stagingArea) + + if acceptanceData, ok := stagingShard.toAdd[*blockHash]; ok { + return acceptanceData.Clone(), nil + } + + if acceptanceData, ok := ads.cache.Get(blockHash); ok { + return acceptanceData.(externalapi.AcceptanceData).Clone(), nil + } + + acceptanceDataBytes, err := dbContext.Get(ads.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + + acceptanceData, err := ads.deserializeAcceptanceData(acceptanceDataBytes) + if err != nil { + return nil, err + } + ads.cache.Add(blockHash, acceptanceData) + return acceptanceData.Clone(), nil +} + +// Delete deletes the acceptanceData associated with the given blockHash +func (ads *acceptanceDataStore) Delete(stagingArea *model.StagingArea, blockHash 
*externalapi.DomainHash) { + stagingShard := ads.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + delete(stagingShard.toAdd, *blockHash) + return + } + stagingShard.toDelete[*blockHash] = struct{}{} +} + +func (ads *acceptanceDataStore) serializeAcceptanceData(acceptanceData externalapi.AcceptanceData) ([]byte, error) { + dbAcceptanceData := serialization.DomainAcceptanceDataToDbAcceptanceData(acceptanceData) + return proto.Marshal(dbAcceptanceData) +} + +func (ads *acceptanceDataStore) deserializeAcceptanceData(acceptanceDataBytes []byte) (externalapi.AcceptanceData, error) { + dbAcceptanceData := &serialization.DbAcceptanceData{} + err := proto.Unmarshal(acceptanceDataBytes, dbAcceptanceData) + if err != nil { + return nil, err + } + return serialization.DbAcceptanceDataToDomainAcceptanceData(dbAcceptanceData) +} + +func (ads *acceptanceDataStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return ads.bucket.Key(hash.ByteSlice()) +} diff --git a/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go b/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go new file mode 100644 index 0000000..b7f1657 --- /dev/null +++ b/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go @@ -0,0 +1,69 @@ +package blockheaderstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type blockHeaderStagingShard struct { + store *blockHeaderStore + toAdd map[externalapi.DomainHash]externalapi.BlockHeader + toDelete map[externalapi.DomainHash]struct{} +} + +func (bhs *blockHeaderStore) stagingShard(stagingArea *model.StagingArea) *blockHeaderStagingShard { + return stagingArea.GetOrCreateShard(bhs.shardID, func() model.StagingShard { + return &blockHeaderStagingShard{ + store: bhs, + toAdd: make(map[externalapi.DomainHash]externalapi.BlockHeader), + toDelete: 
make(map[externalapi.DomainHash]struct{}), + } + }).(*blockHeaderStagingShard) +} + +func (bhss *blockHeaderStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, header := range bhss.toAdd { + headerBytes, err := bhss.store.serializeHeader(header) + if err != nil { + return err + } + err = dbTx.Put(bhss.store.hashAsKey(&hash), headerBytes) + if err != nil { + return err + } + bhss.store.cache.Add(&hash, header) + } + + for hash := range bhss.toDelete { + err := dbTx.Delete(bhss.store.hashAsKey(&hash)) + if err != nil { + return err + } + bhss.store.cache.Remove(&hash) + } + + err := bhss.commitCount(dbTx) + if err != nil { + return err + } + + return nil +} + +func (bhss *blockHeaderStagingShard) commitCount(dbTx model.DBTransaction) error { + count := bhss.store.count(bhss) + countBytes, err := bhss.store.serializeHeaderCount(count) + if err != nil { + return err + } + err = dbTx.Put(bhss.store.countKey, countBytes) + if err != nil { + return err + } + bhss.store.countCached = count + return nil +} + +func (bhss *blockHeaderStagingShard) isStaged() bool { + return len(bhss.toAdd) != 0 || len(bhss.toDelete) != 0 +} diff --git a/domain/consensus/datastructures/blockheaderstore/block_header_store.go b/domain/consensus/datastructures/blockheaderstore/block_header_store.go new file mode 100644 index 0000000..6d6501d --- /dev/null +++ b/domain/consensus/datastructures/blockheaderstore/block_header_store.go @@ -0,0 +1,192 @@ +package blockheaderstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var bucketName = []byte("block-headers") +var countKeyName = []byte("block-headers-count") + +// blockHeaderStore 
represents a store of blocks +type blockHeaderStore struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + countCached uint64 + bucket model.DBBucket + countKey model.DBKey +} + +// New instantiates a new BlockHeaderStore +func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) { + blockHeaderStore := &blockHeaderStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + countKey: prefixBucket.Key(countKeyName), + } + + err := blockHeaderStore.initializeCount(dbContext) + if err != nil { + return nil, err + } + + return blockHeaderStore, nil +} + +func (bhs *blockHeaderStore) initializeCount(dbContext model.DBReader) error { + count := uint64(0) + hasCountBytes, err := dbContext.Has(bhs.countKey) + if err != nil { + return err + } + if hasCountBytes { + countBytes, err := dbContext.Get(bhs.countKey) + if err != nil { + return err + } + count, err = bhs.deserializeHeaderCount(countBytes) + if err != nil { + return err + } + } + bhs.countCached = count + return nil +} + +// Stage stages the given block header for the given blockHash +func (bhs *blockHeaderStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockHeader externalapi.BlockHeader) { + stagingShard := bhs.stagingShard(stagingArea) + stagingShard.toAdd[*blockHash] = blockHeader +} + +func (bhs *blockHeaderStore) IsStaged(stagingArea *model.StagingArea) bool { + return bhs.stagingShard(stagingArea).isStaged() +} + +// BlockHeader gets the block header associated with the given blockHash +func (bhs *blockHeaderStore) BlockHeader(dbContext model.DBReader, stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) { + + stagingShard := bhs.stagingShard(stagingArea) + + return bhs.blockHeader(dbContext, stagingShard, blockHash) +} + +func (bhs *blockHeaderStore) 
blockHeader(dbContext model.DBReader, stagingShard *blockHeaderStagingShard, + blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) { + + if header, ok := stagingShard.toAdd[*blockHash]; ok { + return header, nil + } + + if header, ok := bhs.cache.Get(blockHash); ok { + return header.(externalapi.BlockHeader), nil + } + + headerBytes, err := dbContext.Get(bhs.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + + header, err := bhs.deserializeHeader(headerBytes) + if err != nil { + return nil, err + } + bhs.cache.Add(blockHash, header) + return header, nil +} + +// HasBlock returns whether a block header with a given hash exists in the store. +func (bhs *blockHeaderStore) HasBlockHeader(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + stagingShard := bhs.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + return true, nil + } + + if bhs.cache.Has(blockHash) { + return true, nil + } + + exists, err := dbContext.Has(bhs.hashAsKey(blockHash)) + if err != nil { + return false, err + } + + return exists, nil +} + +// BlockHeaders gets the block headers associated with the given blockHashes +func (bhs *blockHeaderStore) BlockHeaders(dbContext model.DBReader, stagingArea *model.StagingArea, + blockHashes []*externalapi.DomainHash) ([]externalapi.BlockHeader, error) { + + stagingShard := bhs.stagingShard(stagingArea) + + headers := make([]externalapi.BlockHeader, len(blockHashes)) + for i, hash := range blockHashes { + var err error + headers[i], err = bhs.blockHeader(dbContext, stagingShard, hash) + if err != nil { + return nil, err + } + } + return headers, nil +} + +// Delete deletes the block associated with the given blockHash +func (bhs *blockHeaderStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) { + stagingShard := bhs.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + 
delete(stagingShard.toAdd, *blockHash) + return + } + stagingShard.toDelete[*blockHash] = struct{}{} +} + +func (bhs *blockHeaderStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return bhs.bucket.Key(hash.ByteSlice()) +} + +func (bhs *blockHeaderStore) serializeHeader(header externalapi.BlockHeader) ([]byte, error) { + dbBlockHeader := serialization.DomainBlockHeaderToDbBlockHeader(header) + return proto.Marshal(dbBlockHeader) +} + +func (bhs *blockHeaderStore) deserializeHeader(headerBytes []byte) (externalapi.BlockHeader, error) { + dbBlockHeader := &serialization.DbBlockHeader{} + err := proto.Unmarshal(headerBytes, dbBlockHeader) + if err != nil { + return nil, err + } + return serialization.DbBlockHeaderToDomainBlockHeader(dbBlockHeader) +} + +func (bhs *blockHeaderStore) Count(stagingArea *model.StagingArea) uint64 { + stagingShard := bhs.stagingShard(stagingArea) + + return bhs.count(stagingShard) +} + +func (bhs *blockHeaderStore) count(stagingShard *blockHeaderStagingShard) uint64 { + return bhs.countCached + uint64(len(stagingShard.toAdd)) - uint64(len(stagingShard.toDelete)) +} + +func (bhs *blockHeaderStore) deserializeHeaderCount(countBytes []byte) (uint64, error) { + dbBlockHeaderCount := &serialization.DbBlockHeaderCount{} + err := proto.Unmarshal(countBytes, dbBlockHeaderCount) + if err != nil { + return 0, err + } + return dbBlockHeaderCount.Count, nil +} + +func (bhs *blockHeaderStore) serializeHeaderCount(count uint64) ([]byte, error) { + dbBlockHeaderCount := &serialization.DbBlockHeaderCount{Count: count} + return proto.Marshal(dbBlockHeaderCount) +} diff --git a/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go b/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go new file mode 100644 index 0000000..e6342b7 --- /dev/null +++ b/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go @@ -0,0 +1,40 @@ +package blockrelationstore + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type blockRelationStagingShard struct { + store *blockRelationStore + toAdd map[externalapi.DomainHash]*model.BlockRelations +} + +func (brs *blockRelationStore) stagingShard(stagingArea *model.StagingArea) *blockRelationStagingShard { + return stagingArea.GetOrCreateShard(brs.shardID, func() model.StagingShard { + return &blockRelationStagingShard{ + store: brs, + toAdd: make(map[externalapi.DomainHash]*model.BlockRelations), + } + }).(*blockRelationStagingShard) +} + +func (brss *blockRelationStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, blockRelations := range brss.toAdd { + blockRelationBytes, err := brss.store.serializeBlockRelations(blockRelations) + if err != nil { + return err + } + err = dbTx.Put(brss.store.hashAsKey(&hash), blockRelationBytes) + if err != nil { + return err + } + brss.store.cache.Add(&hash, blockRelations) + } + + return nil +} + +func (brss *blockRelationStagingShard) isStaged() bool { + return len(brss.toAdd) != 0 +} diff --git a/domain/consensus/datastructures/blockrelationstore/block_relation_store.go b/domain/consensus/datastructures/blockrelationstore/block_relation_store.go new file mode 100644 index 0000000..764365e --- /dev/null +++ b/domain/consensus/datastructures/blockrelationstore/block_relation_store.go @@ -0,0 +1,99 @@ +package blockrelationstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var bucketName = []byte("block-relations") + +// blockRelationStore represents a store of BlockRelations +type blockRelationStore 
struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket +} + +// New instantiates a new BlockRelationStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockRelationStore { + return &blockRelationStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + } +} + +func (brs *blockRelationStore) StageBlockRelation(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockRelations *model.BlockRelations) { + stagingShard := brs.stagingShard(stagingArea) + + stagingShard.toAdd[*blockHash] = blockRelations.Clone() +} + +func (brs *blockRelationStore) IsStaged(stagingArea *model.StagingArea) bool { + return brs.stagingShard(stagingArea).isStaged() +} + +func (brs *blockRelationStore) BlockRelation(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*model.BlockRelations, error) { + stagingShard := brs.stagingShard(stagingArea) + + if blockRelations, ok := stagingShard.toAdd[*blockHash]; ok { + return blockRelations.Clone(), nil + } + + if blockRelations, ok := brs.cache.Get(blockHash); ok { + return blockRelations.(*model.BlockRelations).Clone(), nil + } + + blockRelationsBytes, err := dbContext.Get(brs.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + + blockRelations, err := brs.deserializeBlockRelations(blockRelationsBytes) + if err != nil { + return nil, err + } + brs.cache.Add(blockHash, blockRelations) + return blockRelations.Clone(), nil +} + +func (brs *blockRelationStore) Has(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + stagingShard := brs.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + return true, nil + } + + if brs.cache.Has(blockHash) { + return true, nil + } + + return dbContext.Has(brs.hashAsKey(blockHash)) +} + +func (brs *blockRelationStore) 
UnstageAll(stagingArea *model.StagingArea) { + stagingShard := brs.stagingShard(stagingArea) + stagingShard.toAdd = make(map[externalapi.DomainHash]*model.BlockRelations) +} + +func (brs *blockRelationStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return brs.bucket.Key(hash.ByteSlice()) +} + +func (brs *blockRelationStore) serializeBlockRelations(blockRelations *model.BlockRelations) ([]byte, error) { + dbBlockRelations := serialization.DomainBlockRelationsToDbBlockRelations(blockRelations) + return proto.Marshal(dbBlockRelations) +} + +func (brs *blockRelationStore) deserializeBlockRelations(blockRelationsBytes []byte) (*model.BlockRelations, error) { + dbBlockRelations := &serialization.DbBlockRelations{} + err := proto.Unmarshal(blockRelationsBytes, dbBlockRelations) + if err != nil { + return nil, err + } + return serialization.DbBlockRelationsToDomainBlockRelations(dbBlockRelations) +} diff --git a/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go b/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go new file mode 100644 index 0000000..d354749 --- /dev/null +++ b/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go @@ -0,0 +1,40 @@ +package blockstatusstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type blockStatusStagingShard struct { + store *blockStatusStore + toAdd map[externalapi.DomainHash]externalapi.BlockStatus +} + +func (bss *blockStatusStore) stagingShard(stagingArea *model.StagingArea) *blockStatusStagingShard { + return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard { + return &blockStatusStagingShard{ + store: bss, + toAdd: make(map[externalapi.DomainHash]externalapi.BlockStatus), + } + }).(*blockStatusStagingShard) +} + +func (bsss *blockStatusStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, status 
:= range bsss.toAdd { + blockStatusBytes, err := bsss.store.serializeBlockStatus(status) + if err != nil { + return err + } + err = dbTx.Put(bsss.store.hashAsKey(&hash), blockStatusBytes) + if err != nil { + return err + } + bsss.store.cache.Add(&hash, status) + } + + return nil +} + +func (bsss *blockStatusStagingShard) isStaged() bool { + return len(bsss.toAdd) != 0 +} diff --git a/domain/consensus/datastructures/blockstatusstore/block_status_store.go b/domain/consensus/datastructures/blockstatusstore/block_status_store.go new file mode 100644 index 0000000..d61c84a --- /dev/null +++ b/domain/consensus/datastructures/blockstatusstore/block_status_store.go @@ -0,0 +1,101 @@ +package blockstatusstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var bucketName = []byte("block-statuses") + +// blockStatusStore represents a store of BlockStatuses +type blockStatusStore struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket +} + +// New instantiates a new BlockStatusStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockStatusStore { + return &blockStatusStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + } +} + +// Stage stages the given blockStatus for the given blockHash +func (bss *blockStatusStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockStatus externalapi.BlockStatus) { + stagingShard := bss.stagingShard(stagingArea) + stagingShard.toAdd[*blockHash] = blockStatus.Clone() +} + +func (bss *blockStatusStore) 
IsStaged(stagingArea *model.StagingArea) bool { + return bss.stagingShard(stagingArea).isStaged() +} + +// Get gets the blockStatus associated with the given blockHash +func (bss *blockStatusStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.BlockStatus, error) { + stagingShard := bss.stagingShard(stagingArea) + + if status, ok := stagingShard.toAdd[*blockHash]; ok { + return status, nil + } + + if status, ok := bss.cache.Get(blockHash); ok { + return status.(externalapi.BlockStatus), nil + } + + statusBytes, err := dbContext.Get(bss.hashAsKey(blockHash)) + if err != nil { + return 0, err + } + + status, err := bss.deserializeBlockStatus(statusBytes) + if err != nil { + return 0, err + } + bss.cache.Add(blockHash, status) + return status, nil +} + +// Exists returns true if the blockStatus for the given blockHash exists +func (bss *blockStatusStore) Exists(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + stagingShard := bss.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + return true, nil + } + + if bss.cache.Has(blockHash) { + return true, nil + } + + exists, err := dbContext.Has(bss.hashAsKey(blockHash)) + if err != nil { + return false, err + } + + return exists, nil +} + +func (bss *blockStatusStore) serializeBlockStatus(status externalapi.BlockStatus) ([]byte, error) { + dbBlockStatus := serialization.DomainBlockStatusToDbBlockStatus(status) + return proto.Marshal(dbBlockStatus) +} + +func (bss *blockStatusStore) deserializeBlockStatus(statusBytes []byte) (externalapi.BlockStatus, error) { + dbBlockStatus := &serialization.DbBlockStatus{} + err := proto.Unmarshal(statusBytes, dbBlockStatus) + if err != nil { + return 0, err + } + return serialization.DbBlockStatusToDomainBlockStatus(dbBlockStatus), nil +} + +func (bss *blockStatusStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return 
bss.bucket.Key(hash.ByteSlice()) +} diff --git a/domain/consensus/datastructures/blockstore/block_staging_shard.go b/domain/consensus/datastructures/blockstore/block_staging_shard.go new file mode 100644 index 0000000..ca0ad70 --- /dev/null +++ b/domain/consensus/datastructures/blockstore/block_staging_shard.go @@ -0,0 +1,69 @@ +package blockstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type blockStagingShard struct { + store *blockStore + toAdd map[externalapi.DomainHash]*externalapi.DomainBlock + toDelete map[externalapi.DomainHash]struct{} +} + +func (bs *blockStore) stagingShard(stagingArea *model.StagingArea) *blockStagingShard { + return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard { + return &blockStagingShard{ + store: bs, + toAdd: make(map[externalapi.DomainHash]*externalapi.DomainBlock), + toDelete: make(map[externalapi.DomainHash]struct{}), + } + }).(*blockStagingShard) +} + +func (bss *blockStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, block := range bss.toAdd { + blockBytes, err := bss.store.serializeBlock(block) + if err != nil { + return err + } + err = dbTx.Put(bss.store.hashAsKey(&hash), blockBytes) + if err != nil { + return err + } + bss.store.cache.Add(&hash, block) + } + + for hash := range bss.toDelete { + err := dbTx.Delete(bss.store.hashAsKey(&hash)) + if err != nil { + return err + } + bss.store.cache.Remove(&hash) + } + + err := bss.commitCount(dbTx) + if err != nil { + return err + } + + return nil +} + +func (bss *blockStagingShard) commitCount(dbTx model.DBTransaction) error { + count := bss.store.count(bss) + countBytes, err := bss.store.serializeBlockCount(count) + if err != nil { + return err + } + err = dbTx.Put(bss.store.countKey, countBytes) + if err != nil { + return err + } + bss.store.countCached = count + return nil +} + +func (bss *blockStagingShard) isStaged() bool { + 
return len(bss.toAdd) != 0 || len(bss.toDelete) != 0 +} diff --git a/domain/consensus/datastructures/blockstore/block_store.go b/domain/consensus/datastructures/blockstore/block_store.go new file mode 100644 index 0000000..f5a30f0 --- /dev/null +++ b/domain/consensus/datastructures/blockstore/block_store.go @@ -0,0 +1,239 @@ +package blockstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var bucketName = []byte("blocks") + +// blockStore represents a store of blocks +type blockStore struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + countCached uint64 + bucket model.DBBucket + countKey model.DBKey +} + +// New instantiates a new BlockStore +func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockStore, error) { + blockStore := &blockStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + countKey: prefixBucket.Key([]byte("blocks-count")), + } + + err := blockStore.initializeCount(dbContext) + if err != nil { + return nil, err + } + + return blockStore, nil +} + +func (bs *blockStore) initializeCount(dbContext model.DBReader) error { + count := uint64(0) + hasCountBytes, err := dbContext.Has(bs.countKey) + if err != nil { + return err + } + if hasCountBytes { + countBytes, err := dbContext.Get(bs.countKey) + if err != nil { + return err + } + count, err = bs.deserializeBlockCount(countBytes) + if err != nil { + return err + } + } + bs.countCached = count + return nil +} + +// Stage stages the given block for the given blockHash +func (bs 
*blockStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) { + stagingShard := bs.stagingShard(stagingArea) + stagingShard.toAdd[*blockHash] = block.Clone() +} + +func (bs *blockStore) IsStaged(stagingArea *model.StagingArea) bool { + return bs.stagingShard(stagingArea).isStaged() +} + +// Block gets the block associated with the given blockHash +func (bs *blockStore) Block(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) { + stagingShard := bs.stagingShard(stagingArea) + + return bs.block(dbContext, stagingShard, blockHash) +} + +func (bs *blockStore) block(dbContext model.DBReader, stagingShard *blockStagingShard, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) { + if block, ok := stagingShard.toAdd[*blockHash]; ok { + return block.Clone(), nil + } + + if block, ok := bs.cache.Get(blockHash); ok { + return block.(*externalapi.DomainBlock).Clone(), nil + } + + blockBytes, err := dbContext.Get(bs.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + + block, err := bs.deserializeBlock(blockBytes) + if err != nil { + return nil, err + } + bs.cache.Add(blockHash, block) + return block.Clone(), nil +} + +// HasBlock returns whether a block with a given hash exists in the store. 
+func (bs *blockStore) HasBlock(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + stagingShard := bs.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + return true, nil + } + + if bs.cache.Has(blockHash) { + return true, nil + } + + exists, err := dbContext.Has(bs.hashAsKey(blockHash)) + if err != nil { + return false, err + } + + return exists, nil +} + +// Blocks gets the blocks associated with the given blockHashes +func (bs *blockStore) Blocks(dbContext model.DBReader, stagingArea *model.StagingArea, blockHashes []*externalapi.DomainHash) ([]*externalapi.DomainBlock, error) { + stagingShard := bs.stagingShard(stagingArea) + + blocks := make([]*externalapi.DomainBlock, len(blockHashes)) + for i, hash := range blockHashes { + var err error + blocks[i], err = bs.block(dbContext, stagingShard, hash) + if err != nil { + return nil, err + } + } + return blocks, nil +} + +// Delete deletes the block associated with the given blockHash +func (bs *blockStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) { + stagingShard := bs.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + delete(stagingShard.toAdd, *blockHash) + return + } + stagingShard.toDelete[*blockHash] = struct{}{} +} + +func (bs *blockStore) serializeBlock(block *externalapi.DomainBlock) ([]byte, error) { + dbBlock := serialization.DomainBlockToDbBlock(block) + return proto.Marshal(dbBlock) +} + +func (bs *blockStore) deserializeBlock(blockBytes []byte) (*externalapi.DomainBlock, error) { + dbBlock := &serialization.DbBlock{} + err := proto.Unmarshal(blockBytes, dbBlock) + if err != nil { + return nil, err + } + return serialization.DbBlockToDomainBlock(dbBlock) +} + +func (bs *blockStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return bs.bucket.Key(hash.ByteSlice()) +} + +func (bs *blockStore) Count(stagingArea *model.StagingArea) uint64 { + 
stagingShard := bs.stagingShard(stagingArea) + return bs.count(stagingShard) +} + +func (bs *blockStore) count(stagingShard *blockStagingShard) uint64 { + return bs.countCached + uint64(len(stagingShard.toAdd)) - uint64(len(stagingShard.toDelete)) +} + +func (bs *blockStore) deserializeBlockCount(countBytes []byte) (uint64, error) { + dbBlockCount := &serialization.DbBlockCount{} + err := proto.Unmarshal(countBytes, dbBlockCount) + if err != nil { + return 0, err + } + return dbBlockCount.Count, nil +} + +func (bs *blockStore) serializeBlockCount(count uint64) ([]byte, error) { + dbBlockCount := &serialization.DbBlockCount{Count: count} + return proto.Marshal(dbBlockCount) +} + +type allBlockHashesIterator struct { + cursor model.DBCursor + isClosed bool +} + +func (a allBlockHashesIterator) First() bool { + if a.isClosed { + panic("Tried using a closed AllBlockHashesIterator") + } + return a.cursor.First() +} + +func (a allBlockHashesIterator) Next() bool { + if a.isClosed { + panic("Tried using a closed AllBlockHashesIterator") + } + return a.cursor.Next() +} + +func (a allBlockHashesIterator) Get() (*externalapi.DomainHash, error) { + if a.isClosed { + return nil, errors.New("Tried using a closed AllBlockHashesIterator") + } + key, err := a.cursor.Key() + if err != nil { + return nil, err + } + + blockHashBytes := key.Suffix() + return externalapi.NewDomainHashFromByteSlice(blockHashBytes) +} + +func (a allBlockHashesIterator) Close() error { + if a.isClosed { + return errors.New("Tried using a closed AllBlockHashesIterator") + } + a.isClosed = true + err := a.cursor.Close() + if err != nil { + return err + } + a.cursor = nil + return nil +} + +func (bs *blockStore) AllBlockHashesIterator(dbContext model.DBReader) (model.BlockIterator, error) { + cursor, err := dbContext.Cursor(bs.bucket) + if err != nil { + return nil, err + } + + return &allBlockHashesIterator{cursor: cursor}, nil +} diff --git 
a/domain/consensus/datastructures/blockwindowheapslicestore/block_window_heap_slice_staging_shard.go b/domain/consensus/datastructures/blockwindowheapslicestore/block_window_heap_slice_staging_shard.go new file mode 100644 index 0000000..3b45804 --- /dev/null +++ b/domain/consensus/datastructures/blockwindowheapslicestore/block_window_heap_slice_staging_shard.go @@ -0,0 +1,44 @@ +package blockwindowheapslicestore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type shardKey struct { + hash externalapi.DomainHash + windowSize int +} + +type blockWindowHeapSliceStagingShard struct { + store *blockWindowHeapSliceStore + toAdd map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair +} + +func (bss *blockWindowHeapSliceStore) stagingShard(stagingArea *model.StagingArea) *blockWindowHeapSliceStagingShard { + return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard { + return &blockWindowHeapSliceStagingShard{ + store: bss, + toAdd: make(map[shardKey][]*externalapi.BlockGHOSTDAGDataHashPair), + } + }).(*blockWindowHeapSliceStagingShard) +} + +func (bsss *blockWindowHeapSliceStagingShard) Commit(_ model.DBTransaction) error { + for key, heapSlice := range bsss.toAdd { + bsss.store.cache.Add(&key.hash, key.windowSize, heapSlice) + } + + return nil +} + +func (bsss *blockWindowHeapSliceStagingShard) isStaged() bool { + return len(bsss.toAdd) != 0 +} + +func newShardKey(hash *externalapi.DomainHash, windowSize int) shardKey { + return shardKey{ + hash: *hash, + windowSize: windowSize, + } +} diff --git a/domain/consensus/datastructures/blockwindowheapslicestore/block_window_heap_slice_store.go b/domain/consensus/datastructures/blockwindowheapslicestore/block_window_heap_slice_store.go new file mode 100644 index 0000000..1c16103 --- /dev/null +++ b/domain/consensus/datastructures/blockwindowheapslicestore/block_window_heap_slice_store.go @@ -0,0 +1,47 @@ 
+package blockwindowheapslicestore + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/util/staging" +) + +type blockWindowHeapSliceStore struct { + shardID model.StagingShardID + cache *lrucachehashandwindowsizetoblockghostdagdatahashpairs.LRUCache +} + +// New instantiates a new WindowHeapSliceStore +func New(cacheSize int, preallocate bool) model.WindowHeapSliceStore { + return &blockWindowHeapSliceStore{ + shardID: staging.GenerateShardingID(), + cache: lrucachehashandwindowsizetoblockghostdagdatahashpairs.New(cacheSize, preallocate), + } +} + +// Stage stages the given blockStatus for the given blockHash +func (bss *blockWindowHeapSliceStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int, heapSlice []*externalapi.BlockGHOSTDAGDataHashPair) { + stagingShard := bss.stagingShard(stagingArea) + stagingShard.toAdd[newShardKey(blockHash, windowSize)] = heapSlice +} + +func (bss *blockWindowHeapSliceStore) IsStaged(stagingArea *model.StagingArea) bool { + return bss.stagingShard(stagingArea).isStaged() +} + +func (bss *blockWindowHeapSliceStore) Get(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) { + stagingShard := bss.stagingShard(stagingArea) + + if heapSlice, ok := stagingShard.toAdd[newShardKey(blockHash, windowSize)]; ok { + return heapSlice, nil + } + + if heapSlice, ok := bss.cache.Get(blockHash, windowSize); ok { + return heapSlice, nil + } + + return nil, errors.Wrap(database.ErrNotFound, "Window heap slice not found") +} diff --git 
a/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go b/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go new file mode 100644 index 0000000..d07f4f6 --- /dev/null +++ b/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go @@ -0,0 +1,40 @@ +package consensusstatestore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type consensusStateStagingShard struct { + store *consensusStateStore + tipsStaging []*externalapi.DomainHash + virtualUTXODiffStaging externalapi.UTXODiff +} + +func (bs *consensusStateStore) stagingShard(stagingArea *model.StagingArea) *consensusStateStagingShard { + return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard { + return &consensusStateStagingShard{ + store: bs, + tipsStaging: nil, + virtualUTXODiffStaging: nil, + } + }).(*consensusStateStagingShard) +} + +func (csss *consensusStateStagingShard) Commit(dbTx model.DBTransaction) error { + err := csss.commitTips(dbTx) + if err != nil { + return err + } + + err = csss.commitVirtualUTXODiff(dbTx) + if err != nil { + return err + } + + return nil +} + +func (csss *consensusStateStagingShard) isStaged() bool { + return csss.tipsStaging != nil || csss.virtualUTXODiffStaging != nil +} diff --git a/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go b/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go new file mode 100644 index 0000000..ea4fc69 --- /dev/null +++ b/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go @@ -0,0 +1,35 @@ +package consensusstatestore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxolrucache" + 
"github.com/spectre-project/spectred/util/staging" +) + +var importingPruningPointUTXOSetKeyName = []byte("importing-pruning-point-utxo-set") + +// consensusStateStore represents a store for the current consensus state +type consensusStateStore struct { + shardID model.StagingShardID + virtualUTXOSetCache *utxolrucache.LRUCache + tipsCache []*externalapi.DomainHash + tipsKey model.DBKey + utxoSetBucket model.DBBucket + importingPruningPointUTXOSetKey model.DBKey +} + +// New instantiates a new ConsensusStateStore +func New(prefixBucket model.DBBucket, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore { + return &consensusStateStore{ + shardID: staging.GenerateShardingID(), + virtualUTXOSetCache: utxolrucache.New(utxoSetCacheSize, preallocate), + tipsKey: prefixBucket.Key(tipsKeyName), + importingPruningPointUTXOSetKey: prefixBucket.Key(importingPruningPointUTXOSetKeyName), + utxoSetBucket: prefixBucket.Bucket(utxoSetBucketName), + } +} + +func (css *consensusStateStore) IsStaged(stagingArea *model.StagingArea) bool { + return css.stagingShard(stagingArea).isStaged() +} diff --git a/domain/consensus/datastructures/consensusstatestore/tips.go b/domain/consensus/datastructures/consensusstatestore/tips.go new file mode 100644 index 0000000..230c13f --- /dev/null +++ b/domain/consensus/datastructures/consensusstatestore/tips.go @@ -0,0 +1,75 @@ +package consensusstatestore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +var tipsKeyName = []byte("tips") + +func (css *consensusStateStore) Tips(stagingArea *model.StagingArea, dbContext model.DBReader) ([]*externalapi.DomainHash, error) { + stagingShard := css.stagingShard(stagingArea) + + if stagingShard.tipsStaging != nil { + return externalapi.CloneHashes(stagingShard.tipsStaging), 
nil + } + + if css.tipsCache != nil { + return externalapi.CloneHashes(css.tipsCache), nil + } + + tipsBytes, err := dbContext.Get(css.tipsKey) + if err != nil { + return nil, err + } + + tips, err := css.deserializeTips(tipsBytes) + if err != nil { + return nil, err + } + css.tipsCache = tips + return externalapi.CloneHashes(tips), nil +} + +func (css *consensusStateStore) StageTips(stagingArea *model.StagingArea, tipHashes []*externalapi.DomainHash) { + stagingShard := css.stagingShard(stagingArea) + + stagingShard.tipsStaging = externalapi.CloneHashes(tipHashes) +} + +func (css *consensusStateStore) serializeTips(tips []*externalapi.DomainHash) ([]byte, error) { + dbTips := serialization.TipsToDBTips(tips) + return proto.Marshal(dbTips) +} + +func (css *consensusStateStore) deserializeTips(tipsBytes []byte) ([]*externalapi.DomainHash, + error) { + + dbTips := &serialization.DbTips{} + err := proto.Unmarshal(tipsBytes, dbTips) + if err != nil { + return nil, err + } + + return serialization.DBTipsToTips(dbTips) +} + +func (csss *consensusStateStagingShard) commitTips(dbTx model.DBTransaction) error { + if csss.tipsStaging == nil { + return nil + } + + tipsBytes, err := csss.store.serializeTips(csss.tipsStaging) + if err != nil { + return err + } + err = dbTx.Put(csss.store.tipsKey, tipsBytes) + if err != nil { + return err + } + csss.store.tipsCache = csss.tipsStaging + + return nil +} diff --git a/domain/consensus/datastructures/consensusstatestore/utxo.go b/domain/consensus/datastructures/consensusstatestore/utxo.go new file mode 100644 index 0000000..e3502fc --- /dev/null +++ b/domain/consensus/datastructures/consensusstatestore/utxo.go @@ -0,0 +1,269 @@ +package consensusstatestore + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +var utxoSetBucketName = 
[]byte("virtual-utxo-set") + +func (css *consensusStateStore) utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) { + serializedOutpoint, err := serializeOutpoint(outpoint) + if err != nil { + return nil, err + } + + return css.utxoSetBucket.Key(serializedOutpoint), nil +} + +func (css *consensusStateStore) StageVirtualUTXODiff(stagingArea *model.StagingArea, virtualUTXODiff externalapi.UTXODiff) { + stagingShard := css.stagingShard(stagingArea) + + stagingShard.virtualUTXODiffStaging = virtualUTXODiff +} + +func (csss *consensusStateStagingShard) commitVirtualUTXODiff(dbTx model.DBTransaction) error { + if csss.virtualUTXODiffStaging == nil { + return nil + } + + toRemoveIterator := csss.virtualUTXODiffStaging.ToRemove().Iterator() + defer toRemoveIterator.Close() + for ok := toRemoveIterator.First(); ok; ok = toRemoveIterator.Next() { + toRemoveOutpoint, _, err := toRemoveIterator.Get() + if err != nil { + return err + } + + csss.store.virtualUTXOSetCache.Remove(toRemoveOutpoint) + + dbKey, err := csss.store.utxoKey(toRemoveOutpoint) + if err != nil { + return err + } + err = dbTx.Delete(dbKey) + if err != nil { + return err + } + } + + toAddIterator := csss.virtualUTXODiffStaging.ToAdd().Iterator() + defer toAddIterator.Close() + for ok := toAddIterator.First(); ok; ok = toAddIterator.Next() { + toAddOutpoint, toAddEntry, err := toAddIterator.Get() + if err != nil { + return err + } + + csss.store.virtualUTXOSetCache.Add(toAddOutpoint, toAddEntry) + + dbKey, err := csss.store.utxoKey(toAddOutpoint) + if err != nil { + return err + } + serializedEntry, err := serializeUTXOEntry(toAddEntry) + if err != nil { + return err + } + err = dbTx.Put(dbKey, serializedEntry) + if err != nil { + return err + } + } + + // Note: we don't discard the staging here since that's + // being done at the end of Commit() + return nil +} + +func (css *consensusStateStore) UTXOByOutpoint(dbContext model.DBReader, stagingArea *model.StagingArea, + outpoint 
*externalapi.DomainOutpoint) (externalapi.UTXOEntry, error) { + + stagingShard := css.stagingShard(stagingArea) + + return css.utxoByOutpointFromStagedVirtualUTXODiff(dbContext, stagingShard, outpoint) +} + +func (css *consensusStateStore) utxoByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader, + stagingShard *consensusStateStagingShard, outpoint *externalapi.DomainOutpoint) (externalapi.UTXOEntry, error) { + + if stagingShard.virtualUTXODiffStaging != nil { + if stagingShard.virtualUTXODiffStaging.ToRemove().Contains(outpoint) { + return nil, errors.Errorf("outpoint was not found") + } + if utxoEntry, ok := stagingShard.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok { + return utxoEntry, nil + } + } + + if entry, ok := css.virtualUTXOSetCache.Get(outpoint); ok { + return entry, nil + } + + key, err := css.utxoKey(outpoint) + if err != nil { + return nil, err + } + + serializedUTXOEntry, err := dbContext.Get(key) + if err != nil { + return nil, err + } + + entry, err := deserializeUTXOEntry(serializedUTXOEntry) + if err != nil { + return nil, err + } + + css.virtualUTXOSetCache.Add(outpoint, entry) + return entry, nil +} + +func (css *consensusStateStore) HasUTXOByOutpoint(dbContext model.DBReader, stagingArea *model.StagingArea, + outpoint *externalapi.DomainOutpoint) (bool, error) { + + stagingShard := css.stagingShard(stagingArea) + + return css.hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext, stagingShard, outpoint) +} + +func (css *consensusStateStore) hasUTXOByOutpointFromStagedVirtualUTXODiff(dbContext model.DBReader, + stagingShard *consensusStateStagingShard, outpoint *externalapi.DomainOutpoint) (bool, error) { + + if stagingShard.virtualUTXODiffStaging != nil { + if stagingShard.virtualUTXODiffStaging.ToRemove().Contains(outpoint) { + return false, nil + } + if _, ok := stagingShard.virtualUTXODiffStaging.ToAdd().Get(outpoint); ok { + return true, nil + } + } + + key, err := css.utxoKey(outpoint) + if err != nil { + return false, err + } 
+
+	return dbContext.Has(key)
+}
+
+func (css *consensusStateStore) VirtualUTXOs(dbContext model.DBReader, fromOutpoint *externalapi.DomainOutpoint, limit int) (
+	[]*externalapi.OutpointAndUTXOEntryPair, error) {
+
+	cursor, err := dbContext.Cursor(css.utxoSetBucket)
+	if err != nil {
+		return nil, err
+	}
+	defer cursor.Close()
+
+	if fromOutpoint != nil {
+		serializedFromOutpoint, err := serializeOutpoint(fromOutpoint)
+		if err != nil {
+			return nil, err
+		}
+		seekKey := css.utxoSetBucket.Key(serializedFromOutpoint)
+		err = cursor.Seek(seekKey)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	iterator := newCursorUTXOSetIterator(cursor)
+	defer iterator.Close()
+
+	outpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, 0, limit)
+	for len(outpointAndUTXOEntryPairs) < limit && iterator.Next() {
+		outpoint, utxoEntry, err := iterator.Get()
+		if err != nil {
+			return nil, err
+		}
+		outpointAndUTXOEntryPairs = append(outpointAndUTXOEntryPairs, &externalapi.OutpointAndUTXOEntryPair{
+			Outpoint:  outpoint,
+			UTXOEntry: utxoEntry,
+		})
+	}
+	return outpointAndUTXOEntryPairs, nil
+}
+
+func (css *consensusStateStore) VirtualUTXOSetIterator(dbContext model.DBReader, stagingArea *model.StagingArea) (
+	externalapi.ReadOnlyUTXOSetIterator, error) {
+
+	stagingShard := css.stagingShard(stagingArea)
+
+	cursor, err := dbContext.Cursor(css.utxoSetBucket)
+	if err != nil {
+		return nil, err
+	}
+
+	mainIterator := newCursorUTXOSetIterator(cursor)
+	if stagingShard.virtualUTXODiffStaging != nil {
+		return utxo.IteratorWithDiff(mainIterator, stagingShard.virtualUTXODiffStaging)
+	}
+
+	return mainIterator, nil
+}
+
+type utxoSetIterator struct {
+	cursor   model.DBCursor
+	isClosed bool
+}
+
+func newCursorUTXOSetIterator(cursor model.DBCursor) externalapi.ReadOnlyUTXOSetIterator {
+	return &utxoSetIterator{cursor: cursor}
+}
+
+// NOTE: receivers must be pointers — with value receivers, Close's writes to
+// isClosed/cursor mutate a copy and the closed-iterator guards can never fire.
+func (u *utxoSetIterator) First() bool {
+	if u.isClosed {
+		panic("Tried using a closed utxoSetIterator")
+	}
+	return u.cursor.First()
+}
+
+func (u *utxoSetIterator) Next() bool {
+	if u.isClosed {
+		panic("Tried using a closed utxoSetIterator")
+	}
+	return u.cursor.Next()
+}
+
+func (u *utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) {
+	if u.isClosed {
+		return nil, nil, errors.New("Tried using a closed utxoSetIterator")
+	}
+	key, err := u.cursor.Key()
+	if err != nil {
+		panic(err)
+	}
+
+	utxoEntryBytes, err := u.cursor.Value()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	outpoint, err = deserializeOutpoint(key.Suffix())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	utxoEntry, err = deserializeUTXOEntry(utxoEntryBytes)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return outpoint, utxoEntry, nil
+}
+
+func (u *utxoSetIterator) Close() error {
+	if u.isClosed {
+		return errors.New("Tried using a closed utxoSetIterator")
+	}
+	u.isClosed = true
+	err := u.cursor.Close()
+	if err != nil {
+		return err
+	}
+	u.cursor = nil
+	return nil
+}
diff --git a/domain/consensus/datastructures/consensusstatestore/utxo_serialization.go b/domain/consensus/datastructures/consensusstatestore/utxo_serialization.go
new file mode 100644
index 0000000..777b4f9
--- /dev/null
+++ b/domain/consensus/datastructures/consensusstatestore/utxo_serialization.go
@@ -0,0 +1,34 @@
+package consensusstatestore
+
+import (
+	"github.com/golang/protobuf/proto"
+	"github.com/spectre-project/spectred/domain/consensus/database/serialization"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+func serializeOutpoint(outpoint *externalapi.DomainOutpoint) ([]byte, error) {
+	return proto.Marshal(serialization.DomainOutpointToDbOutpoint(outpoint))
+}
+
+func serializeUTXOEntry(entry externalapi.UTXOEntry) ([]byte, error) {
+	return proto.Marshal(serialization.UTXOEntryToDBUTXOEntry(entry))
+}
+
+func deserializeOutpoint(outpointBytes []byte) (*externalapi.DomainOutpoint, error) {
+	dbOutpoint := &serialization.DbOutpoint{}
+	err :=
proto.Unmarshal(outpointBytes, dbOutpoint) + if err != nil { + return nil, err + } + + return serialization.DbOutpointToDomainOutpoint(dbOutpoint) +} + +func deserializeUTXOEntry(entryBytes []byte) (externalapi.UTXOEntry, error) { + dbEntry := &serialization.DbUtxoEntry{} + err := proto.Unmarshal(entryBytes, dbEntry) + if err != nil { + return nil, err + } + return serialization.DBUTXOEntryToUTXOEntry(dbEntry) +} diff --git a/domain/consensus/datastructures/consensusstatestore/virtual_utxo_set.go b/domain/consensus/datastructures/consensusstatestore/virtual_utxo_set.go new file mode 100644 index 0000000..a993b99 --- /dev/null +++ b/domain/consensus/datastructures/consensusstatestore/virtual_utxo_set.go @@ -0,0 +1,76 @@ +package consensusstatestore + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (css *consensusStateStore) StartImportingPruningPointUTXOSet(dbContext model.DBWriter) error { + return dbContext.Put(css.importingPruningPointUTXOSetKey, []byte{0}) +} + +func (css *consensusStateStore) HadStartedImportingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) { + return dbContext.Has(css.importingPruningPointUTXOSetKey) +} + +func (css *consensusStateStore) FinishImportingPruningPointUTXOSet(dbContext model.DBWriter) error { + return dbContext.Delete(css.importingPruningPointUTXOSetKey) +} + +func (css *consensusStateStore) ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbContext model.DBWriter, + pruningPointUTXOSetIterator externalapi.ReadOnlyUTXOSetIterator) error { + + hadStartedImportingPruningPointUTXOSet, err := css.HadStartedImportingPruningPointUTXOSet(dbContext) + if err != nil { + return err + } + if !hadStartedImportingPruningPointUTXOSet { + return errors.New("cannot import pruning point UTXO set " + + "without calling StartImportingPruningPointUTXOSet first") + } + + // Clear the cache + 
css.virtualUTXOSetCache.Clear() + + // Delete all the old UTXOs from the database + deleteCursor, err := dbContext.Cursor(css.utxoSetBucket) + if err != nil { + return err + } + defer deleteCursor.Close() + for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() { + key, err := deleteCursor.Key() + if err != nil { + return err + } + err = dbContext.Delete(key) + if err != nil { + return err + } + } + + // Insert all the new UTXOs into the database + for ok := pruningPointUTXOSetIterator.First(); ok; ok = pruningPointUTXOSetIterator.Next() { + outpoint, entry, err := pruningPointUTXOSetIterator.Get() + if err != nil { + return err + } + + key, err := css.utxoKey(outpoint) + if err != nil { + return err + } + serializedUTXOEntry, err := serializeUTXOEntry(entry) + if err != nil { + return err + } + + err = dbContext.Put(key, serializedUTXOEntry) + if err != nil { + return err + } + } + + return nil +} diff --git a/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go b/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go new file mode 100644 index 0000000..e05a830 --- /dev/null +++ b/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go @@ -0,0 +1,72 @@ +package daablocksstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/database/binaryserialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type daaBlocksStagingShard struct { + store *daaBlocksStore + daaScoreToAdd map[externalapi.DomainHash]uint64 + daaAddedBlocksToAdd map[externalapi.DomainHash][]*externalapi.DomainHash + daaScoreToDelete map[externalapi.DomainHash]struct{} + daaAddedBlocksToDelete map[externalapi.DomainHash]struct{} +} + +func (daas *daaBlocksStore) stagingShard(stagingArea *model.StagingArea) *daaBlocksStagingShard { + return stagingArea.GetOrCreateShard(daas.shardID, func() model.StagingShard { + return 
&daaBlocksStagingShard{ + store: daas, + daaScoreToAdd: make(map[externalapi.DomainHash]uint64), + daaAddedBlocksToAdd: make(map[externalapi.DomainHash][]*externalapi.DomainHash), + daaScoreToDelete: make(map[externalapi.DomainHash]struct{}), + daaAddedBlocksToDelete: make(map[externalapi.DomainHash]struct{}), + } + }).(*daaBlocksStagingShard) +} + +func (daass *daaBlocksStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, daaScore := range daass.daaScoreToAdd { + daaScoreBytes := binaryserialization.SerializeUint64(daaScore) + err := dbTx.Put(daass.store.daaScoreHashAsKey(&hash), daaScoreBytes) + if err != nil { + return err + } + daass.store.daaScoreLRUCache.Add(&hash, daaScore) + } + + for hash, addedBlocks := range daass.daaAddedBlocksToAdd { + addedBlocksBytes := binaryserialization.SerializeHashes(addedBlocks) + err := dbTx.Put(daass.store.daaAddedBlocksHashAsKey(&hash), addedBlocksBytes) + if err != nil { + return err + } + daass.store.daaAddedBlocksLRUCache.Add(&hash, addedBlocks) + } + + for hash := range daass.daaScoreToDelete { + err := dbTx.Delete(daass.store.daaScoreHashAsKey(&hash)) + if err != nil { + return err + } + daass.store.daaScoreLRUCache.Remove(&hash) + } + + for hash := range daass.daaAddedBlocksToDelete { + err := dbTx.Delete(daass.store.daaAddedBlocksHashAsKey(&hash)) + if err != nil { + return err + } + daass.store.daaAddedBlocksLRUCache.Remove(&hash) + } + + return nil +} + +func (daass *daaBlocksStagingShard) isStaged() bool { + return len(daass.daaScoreToAdd) != 0 || + len(daass.daaAddedBlocksToAdd) != 0 || + len(daass.daaScoreToDelete) != 0 || + len(daass.daaAddedBlocksToDelete) != 0 +} diff --git a/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go b/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go new file mode 100644 index 0000000..0e6d5db --- /dev/null +++ b/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go @@ -0,0 +1,120 @@ +package daablocksstore + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/database/binaryserialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var daaScoreBucketName = []byte("daa-score") +var daaAddedBlocksBucketName = []byte("daa-added-blocks") + +// daaBlocksStore represents a store of DAABlocksStore +type daaBlocksStore struct { + shardID model.StagingShardID + daaScoreLRUCache *lrucache.LRUCache + daaAddedBlocksLRUCache *lrucache.LRUCache + daaScoreBucket model.DBBucket + daaAddedBlocksBucket model.DBBucket +} + +// New instantiates a new DAABlocksStore +func New(prefixBucket model.DBBucket, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore { + return &daaBlocksStore{ + shardID: staging.GenerateShardingID(), + daaScoreLRUCache: lrucache.New(daaScoreCacheSize, preallocate), + daaAddedBlocksLRUCache: lrucache.New(daaAddedBlocksCacheSize, preallocate), + daaScoreBucket: prefixBucket.Bucket(daaScoreBucketName), + daaAddedBlocksBucket: prefixBucket.Bucket(daaAddedBlocksBucketName), + } +} + +func (daas *daaBlocksStore) StageDAAScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, daaScore uint64) { + stagingShard := daas.stagingShard(stagingArea) + + stagingShard.daaScoreToAdd[*blockHash] = daaScore +} + +func (daas *daaBlocksStore) StageBlockDAAAddedBlocks(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, addedBlocks []*externalapi.DomainHash) { + stagingShard := daas.stagingShard(stagingArea) + + stagingShard.daaAddedBlocksToAdd[*blockHash] = externalapi.CloneHashes(addedBlocks) +} + +func (daas *daaBlocksStore) IsStaged(stagingArea *model.StagingArea) bool { + return daas.stagingShard(stagingArea).isStaged() +} + +func (daas *daaBlocksStore) DAAScore(dbContext model.DBReader, 
stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (uint64, error) { + stagingShard := daas.stagingShard(stagingArea) + + if daaScore, ok := stagingShard.daaScoreToAdd[*blockHash]; ok { + return daaScore, nil + } + + if daaScore, ok := daas.daaScoreLRUCache.Get(blockHash); ok { + return daaScore.(uint64), nil + } + + daaScoreBytes, err := dbContext.Get(daas.daaScoreHashAsKey(blockHash)) + if err != nil { + return 0, err + } + + daaScore, err := binaryserialization.DeserializeUint64(daaScoreBytes) + if err != nil { + return 0, err + } + daas.daaScoreLRUCache.Add(blockHash, daaScore) + return daaScore, nil +} + +func (daas *daaBlocksStore) DAAAddedBlocks(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + stagingShard := daas.stagingShard(stagingArea) + + if addedBlocks, ok := stagingShard.daaAddedBlocksToAdd[*blockHash]; ok { + return externalapi.CloneHashes(addedBlocks), nil + } + + if addedBlocks, ok := daas.daaAddedBlocksLRUCache.Get(blockHash); ok { + return externalapi.CloneHashes(addedBlocks.([]*externalapi.DomainHash)), nil + } + + addedBlocksBytes, err := dbContext.Get(daas.daaAddedBlocksHashAsKey(blockHash)) + if err != nil { + return nil, err + } + + addedBlocks, err := binaryserialization.DeserializeHashes(addedBlocksBytes) + if err != nil { + return nil, err + } + daas.daaAddedBlocksLRUCache.Add(blockHash, addedBlocks) + return externalapi.CloneHashes(addedBlocks), nil +} + +func (daas *daaBlocksStore) daaScoreHashAsKey(hash *externalapi.DomainHash) model.DBKey { + return daas.daaScoreBucket.Key(hash.ByteSlice()) +} + +func (daas *daaBlocksStore) daaAddedBlocksHashAsKey(hash *externalapi.DomainHash) model.DBKey { + return daas.daaAddedBlocksBucket.Key(hash.ByteSlice()) +} + +func (daas *daaBlocksStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) { + stagingShard := daas.stagingShard(stagingArea) + + if _, ok := 
stagingShard.daaScoreToAdd[*blockHash]; ok {
+		delete(stagingShard.daaScoreToAdd, *blockHash)
+	} else {
+		stagingShard.daaScoreToDelete[*blockHash] = struct{}{}
+	}
+
+	if _, ok := stagingShard.daaAddedBlocksToAdd[*blockHash]; ok {
+		delete(stagingShard.daaAddedBlocksToAdd, *blockHash)
+	} else {
+		stagingShard.daaAddedBlocksToDelete[*blockHash] = struct{}{}
+	}
+}
diff --git a/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go b/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go
new file mode 100644
index 0000000..f392a2c
--- /dev/null
+++ b/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go
@@ -0,0 +1,59 @@
+package daawindowstore
+
+import (
+	"github.com/golang/protobuf/proto"
+	"github.com/spectre-project/spectred/domain/consensus/database/serialization"
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+type dbKey struct {
+	blockHash externalapi.DomainHash
+	index     uint64
+}
+
+func newDBKey(blockHash *externalapi.DomainHash, index uint64) dbKey {
+	return dbKey{
+		blockHash: *blockHash,
+		index:     index,
+	}
+}
+
+type daaWindowStagingShard struct {
+	store *daaWindowStore
+	toAdd map[dbKey]*externalapi.BlockGHOSTDAGDataHashPair
+}
+
+func (daaws *daaWindowStore) stagingShard(stagingArea *model.StagingArea) *daaWindowStagingShard {
+	return stagingArea.GetOrCreateShard(daaws.shardID, func() model.StagingShard {
+		return &daaWindowStagingShard{
+			store: daaws,
+			toAdd: make(map[dbKey]*externalapi.BlockGHOSTDAGDataHashPair),
+		}
+	}).(*daaWindowStagingShard)
+}
+
+func (daawss *daaWindowStagingShard) Commit(dbTx model.DBTransaction) error {
+	for key, pair := range daawss.toAdd {
+		pairBytes, err := serializePair(pair)
+		if err != nil {
+			return err
+		}
+
+		err = dbTx.Put(daawss.store.key(key), pairBytes)
+		if err != nil {
+			return err
+		}
+		daawss.store.cache.Add(&key.blockHash, key.index, pair)
+	}
+
+	return nil
+}
+
+func serializePair(pair *externalapi.BlockGHOSTDAGDataHashPair) ([]byte, error) {
+	return proto.Marshal(serialization.BlockGHOSTDAGDataHashPairToDbBlockGhostdagDataHashPair(pair))
+}
+
+func (daawss *daaWindowStagingShard) isStaged() bool {
+	return len(daawss.toAdd) != 0
+}
diff --git a/domain/consensus/datastructures/daawindowstore/daa_window_store.go b/domain/consensus/datastructures/daawindowstore/daa_window_store.go
new file mode 100644
index 0000000..089eaa8
--- /dev/null
+++ b/domain/consensus/datastructures/daawindowstore/daa_window_store.go
@@ -0,0 +1,96 @@
+package daawindowstore
+
+import (
+	"encoding/binary"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus/database/serialization"
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair"
+	"github.com/spectre-project/spectred/infrastructure/db/database"
+	"github.com/spectre-project/spectred/util/staging"
+)
+
+var bucketName = []byte("daa-window")
+
+type daaWindowStore struct {
+	shardID model.StagingShardID
+	cache   *lrucachehashpairtoblockghostdagdatahashpair.LRUCache
+	bucket  model.DBBucket
+}
+
+// New instantiates a new BlocksWithTrustedDataDAAWindowStore
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore {
+	return &daaWindowStore{
+		shardID: staging.GenerateShardingID(),
+		cache:   lrucachehashpairtoblockghostdagdatahashpair.New(cacheSize, preallocate),
+		bucket:  prefixBucket.Bucket(bucketName),
+	}
+}
+
+func (daaws *daaWindowStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, index uint64, pair *externalapi.BlockGHOSTDAGDataHashPair) {
+	stagingShard := daaws.stagingShard(stagingArea)
+
+	key := newDBKey(blockHash,
index) + if _, ok := stagingShard.toAdd[key]; !ok { + stagingShard.toAdd[key] = pair + } + +} + +var errDAAWindowBlockNotFound = errors.Wrap(database.ErrNotFound, "DAA window block not found") + +func (daaws *daaWindowStore) DAAWindowBlock(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, index uint64) (*externalapi.BlockGHOSTDAGDataHashPair, error) { + stagingShard := daaws.stagingShard(stagingArea) + + dbKey := newDBKey(blockHash, index) + if pair, ok := stagingShard.toAdd[dbKey]; ok { + return pair, nil + } + + if pair, ok := daaws.cache.Get(blockHash, index); ok { + if pair == nil { + return nil, errDAAWindowBlockNotFound + } + + return pair, nil + } + + pairBytes, err := dbContext.Get(daaws.key(dbKey)) + if database.IsNotFoundError(err) { + daaws.cache.Add(blockHash, index, nil) + } + if err != nil { + return nil, err + } + + pair, err := deserializePairBytes(pairBytes) + if err != nil { + return nil, err + } + + daaws.cache.Add(blockHash, index, pair) + return pair, nil +} + +func deserializePairBytes(pairBytes []byte) (*externalapi.BlockGHOSTDAGDataHashPair, error) { + dbPair := &serialization.DbBlockGHOSTDAGDataHashPair{} + err := proto.Unmarshal(pairBytes, dbPair) + if err != nil { + return nil, err + } + + return serialization.DbBlockGHOSTDAGDataHashPairToBlockGHOSTDAGDataHashPair(dbPair) +} + +func (daaws *daaWindowStore) IsStaged(stagingArea *model.StagingArea) bool { + return daaws.stagingShard(stagingArea).isStaged() +} + +func (daaws *daaWindowStore) key(key dbKey) model.DBKey { + keyIndexBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(keyIndexBytes, key.index) + return daaws.bucket.Bucket(key.blockHash.ByteSlice()).Key(keyIndexBytes) +} diff --git a/domain/consensus/datastructures/finalitystore/finality_staging_shard.go b/domain/consensus/datastructures/finalitystore/finality_staging_shard.go new file mode 100644 index 0000000..1d851db --- /dev/null +++ 
b/domain/consensus/datastructures/finalitystore/finality_staging_shard.go
@@ -0,0 +1,36 @@
+package finalitystore
+
+import (
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+type finalityStagingShard struct {
+	store *finalityStore
+	toAdd map[externalapi.DomainHash]*externalapi.DomainHash
+}
+
+func (fs *finalityStore) stagingShard(stagingArea *model.StagingArea) *finalityStagingShard {
+	return stagingArea.GetOrCreateShard(fs.shardID, func() model.StagingShard {
+		return &finalityStagingShard{
+			store: fs,
+			toAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash),
+		}
+	}).(*finalityStagingShard)
+}
+
+func (fss *finalityStagingShard) Commit(dbTx model.DBTransaction) error {
+	for hash, finalityPointHash := range fss.toAdd {
+		err := dbTx.Put(fss.store.hashAsKey(&hash), finalityPointHash.ByteSlice())
+		if err != nil {
+			return err
+		}
+		fss.store.cache.Add(&hash, finalityPointHash)
+	}
+
+	return nil
+}
+
+func (fss *finalityStagingShard) isStaged() bool {
+	return len(fss.toAdd) != 0
+}
diff --git a/domain/consensus/datastructures/finalitystore/finality_store.go b/domain/consensus/datastructures/finalitystore/finality_store.go
new file mode 100644
index 0000000..4c3a6dd
--- /dev/null
+++ b/domain/consensus/datastructures/finalitystore/finality_store.go
@@ -0,0 +1,63 @@
+package finalitystore
+
+import (
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/utils/lrucache"
+	"github.com/spectre-project/spectred/util/staging"
+)
+
+var bucketName = []byte("finality-points")
+
+type finalityStore struct {
+	shardID model.StagingShardID
+	cache   *lrucache.LRUCache
+	bucket  model.DBBucket
+}
+
+// New instantiates a new FinalityStore
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.FinalityStore {
return &finalityStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + } +} + +func (fs *finalityStore) StageFinalityPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, finalityPointHash *externalapi.DomainHash) { + stagingShard := fs.stagingShard(stagingArea) + + stagingShard.toAdd[*blockHash] = finalityPointHash +} + +func (fs *finalityStore) FinalityPoint(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + stagingShard := fs.stagingShard(stagingArea) + + if finalityPointHash, ok := stagingShard.toAdd[*blockHash]; ok { + return finalityPointHash, nil + } + + if finalityPointHash, ok := fs.cache.Get(blockHash); ok { + return finalityPointHash.(*externalapi.DomainHash), nil + } + + finalityPointHashBytes, err := dbContext.Get(fs.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + finalityPointHash, err := externalapi.NewDomainHashFromByteSlice(finalityPointHashBytes) + if err != nil { + return nil, err + } + + fs.cache.Add(blockHash, finalityPointHash) + return finalityPointHash, nil +} + +func (fs *finalityStore) IsStaged(stagingArea *model.StagingArea) bool { + return fs.stagingShard(stagingArea).isStaged() +} + +func (fs *finalityStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return fs.bucket.Key(hash.ByteSlice()) +} diff --git a/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go b/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go new file mode 100644 index 0000000..06e3f76 --- /dev/null +++ b/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go @@ -0,0 +1,52 @@ +package ghostdagdatastore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type key struct { + 
hash externalapi.DomainHash + isTrustedData bool +} + +func newKey(hash *externalapi.DomainHash, isTrustedData bool) key { + return key{ + hash: *hash, + isTrustedData: isTrustedData, + } +} + +type ghostdagDataStagingShard struct { + store *ghostdagDataStore + toAdd map[key]*externalapi.BlockGHOSTDAGData +} + +func (gds *ghostdagDataStore) stagingShard(stagingArea *model.StagingArea) *ghostdagDataStagingShard { + return stagingArea.GetOrCreateShard(gds.shardID, func() model.StagingShard { + return &ghostdagDataStagingShard{ + store: gds, + toAdd: make(map[key]*externalapi.BlockGHOSTDAGData), + } + }).(*ghostdagDataStagingShard) +} + +func (gdss *ghostdagDataStagingShard) Commit(dbTx model.DBTransaction) error { + for key, blockGHOSTDAGData := range gdss.toAdd { + blockGhostdagDataBytes, err := gdss.store.serializeBlockGHOSTDAGData(blockGHOSTDAGData) + if err != nil { + return err + } + err = dbTx.Put(gdss.store.serializeKey(key), blockGhostdagDataBytes) + if err != nil { + return err + } + gdss.store.cache.Add(&key.hash, key.isTrustedData, blockGHOSTDAGData) + } + + return nil +} + +func (gdss *ghostdagDataStagingShard) isStaged() bool { + return len(gdss.toAdd) != 0 +} diff --git a/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go b/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go new file mode 100644 index 0000000..f05c08f --- /dev/null +++ b/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go @@ -0,0 +1,97 @@ +package ghostdagdatastore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucacheghostdagdata" + "github.com/spectre-project/spectred/util/staging" +) + +var ghostdagDataBucketName = 
[]byte("block-ghostdag-data") +var trustedDataBucketName = []byte("block-with-trusted-data-ghostdag-data") + +// ghostdagDataStore represents a store of BlockGHOSTDAGData +type ghostdagDataStore struct { + shardID model.StagingShardID + cache *lrucacheghostdagdata.LRUCache + ghostdagDataBucket model.DBBucket + trustedDataBucket model.DBBucket +} + +// New instantiates a new GHOSTDAGDataStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.GHOSTDAGDataStore { + return &ghostdagDataStore{ + shardID: staging.GenerateShardingID(), + cache: lrucacheghostdagdata.New(cacheSize, preallocate), + ghostdagDataBucket: prefixBucket.Bucket(ghostdagDataBucketName), + trustedDataBucket: prefixBucket.Bucket(trustedDataBucketName), + } +} + +// Stage stages the given blockGHOSTDAGData for the given blockHash +func (gds *ghostdagDataStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + blockGHOSTDAGData *externalapi.BlockGHOSTDAGData, isTrustedData bool) { + + stagingShard := gds.stagingShard(stagingArea) + + stagingShard.toAdd[newKey(blockHash, isTrustedData)] = blockGHOSTDAGData +} + +func (gds *ghostdagDataStore) IsStaged(stagingArea *model.StagingArea) bool { + return gds.stagingShard(stagingArea).isStaged() +} + +// Get gets the blockGHOSTDAGData associated with the given blockHash +func (gds *ghostdagDataStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isTrustedData bool) (*externalapi.BlockGHOSTDAGData, error) { + stagingShard := gds.stagingShard(stagingArea) + + key := newKey(blockHash, isTrustedData) + if blockGHOSTDAGData, ok := stagingShard.toAdd[key]; ok { + return blockGHOSTDAGData, nil + } + + if blockGHOSTDAGData, ok := gds.cache.Get(blockHash, isTrustedData); ok { + return blockGHOSTDAGData, nil + } + + blockGHOSTDAGDataBytes, err := dbContext.Get(gds.serializeKey(key)) + if err != nil { + return nil, err + } + + blockGHOSTDAGData, err := 
gds.deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes) + if err != nil { + return nil, err + } + gds.cache.Add(blockHash, isTrustedData, blockGHOSTDAGData) + return blockGHOSTDAGData, nil +} + +func (gds *ghostdagDataStore) UnstageAll(stagingArea *model.StagingArea) { + stagingShard := gds.stagingShard(stagingArea) + + stagingShard.toAdd = make(map[key]*externalapi.BlockGHOSTDAGData) +} + +func (gds *ghostdagDataStore) serializeKey(k key) model.DBKey { + if k.isTrustedData { + return gds.trustedDataBucket.Key(k.hash.ByteSlice()) + } + return gds.ghostdagDataBucket.Key(k.hash.ByteSlice()) +} + +func (gds *ghostdagDataStore) serializeBlockGHOSTDAGData(blockGHOSTDAGData *externalapi.BlockGHOSTDAGData) ([]byte, error) { + return proto.Marshal(serialization.BlockGHOSTDAGDataToDBBlockGHOSTDAGData(blockGHOSTDAGData)) +} + +func (gds *ghostdagDataStore) deserializeBlockGHOSTDAGData(blockGHOSTDAGDataBytes []byte) (*externalapi.BlockGHOSTDAGData, error) { + dbBlockGHOSTDAGData := &serialization.DbBlockGhostdagData{} + err := proto.Unmarshal(blockGHOSTDAGDataBytes, dbBlockGHOSTDAGData) + if err != nil { + return nil, err + } + + return serialization.DBBlockGHOSTDAGDataToBlockGHOSTDAGData(dbBlockGHOSTDAGData) +} diff --git a/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go new file mode 100644 index 0000000..a05d64f --- /dev/null +++ b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go @@ -0,0 +1,87 @@ +package headersselectedchainstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/database/binaryserialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type headersSelectedChainStagingShard struct { + store *headersSelectedChainStore + addedByHash 
map[externalapi.DomainHash]uint64 + removedByHash map[externalapi.DomainHash]struct{} + addedByIndex map[uint64]*externalapi.DomainHash + removedByIndex map[uint64]struct{} +} + +func (hscs *headersSelectedChainStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedChainStagingShard { + return stagingArea.GetOrCreateShard(hscs.shardID, func() model.StagingShard { + return &headersSelectedChainStagingShard{ + store: hscs, + addedByHash: make(map[externalapi.DomainHash]uint64), + removedByHash: make(map[externalapi.DomainHash]struct{}), + addedByIndex: make(map[uint64]*externalapi.DomainHash), + removedByIndex: make(map[uint64]struct{}), + } + }).(*headersSelectedChainStagingShard) +} + +func (hscss *headersSelectedChainStagingShard) Commit(dbTx model.DBTransaction) error { + if !hscss.isStaged() { + return nil + } + + for hash := range hscss.removedByHash { + hashCopy := hash + err := dbTx.Delete(hscss.store.hashAsKey(&hashCopy)) + if err != nil { + return err + } + hscss.store.cacheByHash.Remove(&hashCopy) + } + + for index := range hscss.removedByIndex { + err := dbTx.Delete(hscss.store.indexAsKey(index)) + if err != nil { + return err + } + hscss.store.cacheByIndex.Remove(index) + } + + highestIndex := uint64(0) + for hash, index := range hscss.addedByHash { + hashCopy := hash + err := dbTx.Put(hscss.store.hashAsKey(&hashCopy), hscss.store.serializeIndex(index)) + if err != nil { + return err + } + + err = dbTx.Put(hscss.store.indexAsKey(index), binaryserialization.SerializeHash(&hashCopy)) + if err != nil { + return err + } + + hscss.store.cacheByHash.Add(&hashCopy, index) + hscss.store.cacheByIndex.Add(index, &hashCopy) + + if index > highestIndex { + highestIndex = index + } + } + + err := dbTx.Put(hscss.store.highestChainBlockIndexKey, hscss.store.serializeIndex(highestIndex)) + if err != nil { + return err + } + + hscss.store.cacheHighestChainBlockIndex = highestIndex + + return nil +} + +func (hscss *headersSelectedChainStagingShard) isStaged() 
bool { + return len(hscss.addedByHash) != 0 || + len(hscss.removedByHash) != 0 || + len(hscss.addedByIndex) != 0 || + len(hscss.addedByIndex) != 0 +} diff --git a/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go new file mode 100644 index 0000000..b0302f2 --- /dev/null +++ b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go @@ -0,0 +1,180 @@ +package headersselectedchainstore + +import ( + "encoding/binary" + + "github.com/spectre-project/spectred/util/staging" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/database/binaryserialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucacheuint64tohash" +) + +var bucketChainBlockHashByIndexName = []byte("chain-block-hash-by-index") +var bucketChainBlockIndexByHashName = []byte("chain-block-index-by-hash") +var highestChainBlockIndexKeyName = []byte("highest-chain-block-index") + +type headersSelectedChainStore struct { + shardID model.StagingShardID + cacheByIndex *lrucacheuint64tohash.LRUCache + cacheByHash *lrucache.LRUCache + cacheHighestChainBlockIndex uint64 + bucketChainBlockHashByIndex model.DBBucket + bucketChainBlockIndexByHash model.DBBucket + highestChainBlockIndexKey model.DBKey +} + +// New instantiates a new HeadersSelectedChainStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.HeadersSelectedChainStore { + return &headersSelectedChainStore{ + shardID: staging.GenerateShardingID(), + cacheByIndex: lrucacheuint64tohash.New(cacheSize, preallocate), + cacheByHash: 
lrucache.New(cacheSize, preallocate), + bucketChainBlockHashByIndex: prefixBucket.Bucket(bucketChainBlockHashByIndexName), + bucketChainBlockIndexByHash: prefixBucket.Bucket(bucketChainBlockIndexByHashName), + highestChainBlockIndexKey: prefixBucket.Key(highestChainBlockIndexKeyName), + } +} + +// Stage stages the given chain changes +func (hscs *headersSelectedChainStore) Stage(dbContext model.DBReader, stagingArea *model.StagingArea, chainChanges *externalapi.SelectedChainPath) error { + stagingShard := hscs.stagingShard(stagingArea) + + if hscs.IsStaged(stagingArea) { + return errors.Errorf("can't stage when there's already staged data") + } + + for _, blockHash := range chainChanges.Removed { + index, err := hscs.GetIndexByHash(dbContext, stagingArea, blockHash) + if err != nil { + return err + } + + stagingShard.removedByIndex[index] = struct{}{} + stagingShard.removedByHash[*blockHash] = struct{}{} + } + + currentIndex := uint64(0) + highestChainBlockIndex, exists, err := hscs.highestChainBlockIndex(dbContext) + if err != nil { + return err + } + + if exists { + currentIndex = highestChainBlockIndex - uint64(len(chainChanges.Removed)) + 1 + } + + for _, blockHash := range chainChanges.Added { + stagingShard.addedByIndex[currentIndex] = blockHash + stagingShard.addedByHash[*blockHash] = currentIndex + currentIndex++ + } + + return nil +} + +func (hscs *headersSelectedChainStore) IsStaged(stagingArea *model.StagingArea) bool { + return hscs.stagingShard(stagingArea).isStaged() +} + +// Get gets the chain block index for the given blockHash +func (hscs *headersSelectedChainStore) GetIndexByHash(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (uint64, error) { + stagingShard := hscs.stagingShard(stagingArea) + + if index, ok := stagingShard.addedByHash[*blockHash]; ok { + return index, nil + } + + if _, ok := stagingShard.removedByHash[*blockHash]; ok { + return 0, errors.Wrapf(database.ErrNotFound, "couldn't find 
block %s", blockHash) + } + + if index, ok := hscs.cacheByHash.Get(blockHash); ok { + return index.(uint64), nil + } + + indexBytes, err := dbContext.Get(hscs.hashAsKey(blockHash)) + if err != nil { + return 0, err + } + + index, err := hscs.deserializeIndex(indexBytes) + if err != nil { + return 0, err + } + + hscs.cacheByHash.Add(blockHash, index) + return index, nil +} + +func (hscs *headersSelectedChainStore) GetHashByIndex(dbContext model.DBReader, stagingArea *model.StagingArea, index uint64) (*externalapi.DomainHash, error) { + stagingShard := hscs.stagingShard(stagingArea) + + if blockHash, ok := stagingShard.addedByIndex[index]; ok { + return blockHash, nil + } + + if _, ok := stagingShard.removedByIndex[index]; ok { + return nil, errors.Wrapf(database.ErrNotFound, "couldn't find chain block with index %d", index) + } + + if blockHash, ok := hscs.cacheByIndex.Get(index); ok { + return blockHash, nil + } + + hashBytes, err := dbContext.Get(hscs.indexAsKey(index)) + if err != nil { + return nil, err + } + + blockHash, err := binaryserialization.DeserializeHash(hashBytes) + if err != nil { + return nil, err + } + hscs.cacheByIndex.Add(index, blockHash) + return blockHash, nil +} + +func (hscs *headersSelectedChainStore) serializeIndex(index uint64) []byte { + return binaryserialization.SerializeUint64(index) +} + +func (hscs *headersSelectedChainStore) deserializeIndex(indexBytes []byte) (uint64, error) { + return binaryserialization.DeserializeUint64(indexBytes) +} + +func (hscs *headersSelectedChainStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return hscs.bucketChainBlockIndexByHash.Key(hash.ByteSlice()) +} + +func (hscs *headersSelectedChainStore) indexAsKey(index uint64) model.DBKey { + var keyBytes [8]byte + binary.BigEndian.PutUint64(keyBytes[:], index) + return hscs.bucketChainBlockHashByIndex.Key(keyBytes[:]) +} + +func (hscs *headersSelectedChainStore) highestChainBlockIndex(dbContext model.DBReader) (uint64, bool, error) { + if 
hscs.cacheHighestChainBlockIndex != 0 { + return hscs.cacheHighestChainBlockIndex, true, nil + } + + indexBytes, err := dbContext.Get(hscs.highestChainBlockIndexKey) + if err != nil { + if errors.Is(err, database.ErrNotFound) { + return 0, false, nil + } + return 0, false, err + } + + index, err := hscs.deserializeIndex(indexBytes) + if err != nil { + return 0, false, err + } + + hscs.cacheHighestChainBlockIndex = index + return index, true, nil +} diff --git a/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go new file mode 100644 index 0000000..fd7370d --- /dev/null +++ b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go @@ -0,0 +1,42 @@ +package headersselectedtipstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type headersSelectedTipStagingShard struct { + store *headerSelectedTipStore + newSelectedTip *externalapi.DomainHash +} + +func (hsts *headerSelectedTipStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedTipStagingShard { + return stagingArea.GetOrCreateShard(hsts.shardID, func() model.StagingShard { + return &headersSelectedTipStagingShard{ + store: hsts, + newSelectedTip: nil, + } + }).(*headersSelectedTipStagingShard) +} + +func (hstss *headersSelectedTipStagingShard) Commit(dbTx model.DBTransaction) error { + if hstss.newSelectedTip == nil { + return nil + } + + selectedTipBytes, err := hstss.store.serializeHeadersSelectedTip(hstss.newSelectedTip) + if err != nil { + return err + } + err = dbTx.Put(hstss.store.key, selectedTipBytes) + if err != nil { + return err + } + hstss.store.cache = hstss.newSelectedTip + + return nil +} + +func (hstss *headersSelectedTipStagingShard) isStaged() bool { + return hstss.newSelectedTip != nil +} diff --git 
a/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go new file mode 100644 index 0000000..6284643 --- /dev/null +++ b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go @@ -0,0 +1,88 @@ +package headersselectedtipstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/staging" +) + +var keyName = []byte("headers-selected-tip") + +type headerSelectedTipStore struct { + shardID model.StagingShardID + cache *externalapi.DomainHash + key model.DBKey +} + +// New instantiates a new HeaderSelectedTipStore +func New(prefixBucket model.DBBucket) model.HeaderSelectedTipStore { + return &headerSelectedTipStore{ + shardID: staging.GenerateShardingID(), + key: prefixBucket.Key(keyName), + } +} + +func (hsts *headerSelectedTipStore) Has(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) { + stagingShard := hsts.stagingShard(stagingArea) + + if stagingShard.newSelectedTip != nil { + return true, nil + } + + if hsts.cache != nil { + return true, nil + } + + return dbContext.Has(hsts.key) +} + +func (hsts *headerSelectedTipStore) Stage(stagingArea *model.StagingArea, selectedTip *externalapi.DomainHash) { + stagingShard := hsts.stagingShard(stagingArea) + stagingShard.newSelectedTip = selectedTip +} + +func (hsts *headerSelectedTipStore) IsStaged(stagingArea *model.StagingArea) bool { + return hsts.stagingShard(stagingArea).isStaged() +} + +func (hsts *headerSelectedTipStore) HeadersSelectedTip(dbContext model.DBReader, stagingArea *model.StagingArea) ( + *externalapi.DomainHash, error) { + + stagingShard := hsts.stagingShard(stagingArea) + + if 
stagingShard.newSelectedTip != nil { + return stagingShard.newSelectedTip, nil + } + + if hsts.cache != nil { + return hsts.cache, nil + } + + selectedTipBytes, err := dbContext.Get(hsts.key) + if err != nil { + return nil, err + } + + selectedTip, err := hsts.deserializeHeadersSelectedTip(selectedTipBytes) + if err != nil { + return nil, err + } + hsts.cache = selectedTip + return hsts.cache, nil +} + +func (hsts *headerSelectedTipStore) serializeHeadersSelectedTip(selectedTip *externalapi.DomainHash) ([]byte, error) { + return proto.Marshal(serialization.DomainHashToDbHash(selectedTip)) +} + +func (hsts *headerSelectedTipStore) deserializeHeadersSelectedTip(selectedTipBytes []byte) (*externalapi.DomainHash, error) { + dbHash := &serialization.DbHash{} + err := proto.Unmarshal(selectedTipBytes, dbHash) + if err != nil { + return nil, err + } + + return serialization.DbHashToDomainHash(dbHash) +} diff --git a/domain/consensus/datastructures/mergedepthrootstore/merge_depth_root_staging_shard.go b/domain/consensus/datastructures/mergedepthrootstore/merge_depth_root_staging_shard.go new file mode 100644 index 0000000..92dd0d0 --- /dev/null +++ b/domain/consensus/datastructures/mergedepthrootstore/merge_depth_root_staging_shard.go @@ -0,0 +1,36 @@ +package mergedepthrootstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type mergeDepthRootStagingShard struct { + store *mergeDepthRootStore + toAdd map[externalapi.DomainHash]*externalapi.DomainHash +} + +func (mdrs *mergeDepthRootStore) stagingShard(stagingArea *model.StagingArea) *mergeDepthRootStagingShard { + return stagingArea.GetOrCreateShard(mdrs.shardID, func() model.StagingShard { + return &mergeDepthRootStagingShard{ + store: mdrs, + toAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash), + } + }).(*mergeDepthRootStagingShard) +} + +func (mdrss *mergeDepthRootStagingShard) Commit(dbTx 
model.DBTransaction) error { + for hash, mergeDepthRoot := range mdrss.toAdd { + err := dbTx.Put(mdrss.store.hashAsKey(&hash), mergeDepthRoot.ByteSlice()) + if err != nil { + return err + } + mdrss.store.cache.Add(&hash, mergeDepthRoot) + } + + return nil +} + +func (mdrss *mergeDepthRootStagingShard) isStaged() bool { + return len(mdrss.toAdd) == 0 +} diff --git a/domain/consensus/datastructures/mergedepthrootstore/merge_depth_root_store.go b/domain/consensus/datastructures/mergedepthrootstore/merge_depth_root_store.go new file mode 100644 index 0000000..d8020a2 --- /dev/null +++ b/domain/consensus/datastructures/mergedepthrootstore/merge_depth_root_store.go @@ -0,0 +1,63 @@ +package mergedepthrootstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var bucketName = []byte("merge-depth-roots") + +type mergeDepthRootStore struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket +} + +// New instantiates a new MergeDepthRootStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.MergeDepthRootStore { + return &mergeDepthRootStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + } +} + +func (mdrs *mergeDepthRootStore) StageMergeDepthRoot(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, root *externalapi.DomainHash) { + stagingShard := mdrs.stagingShard(stagingArea) + + stagingShard.toAdd[*blockHash] = root +} + +func (mdrs *mergeDepthRootStore) MergeDepthRoot(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + stagingShard := mdrs.stagingShard(stagingArea) + + if root, ok := 
stagingShard.toAdd[*blockHash]; ok { + return root, nil + } + + if root, ok := mdrs.cache.Get(blockHash); ok { + return root.(*externalapi.DomainHash), nil + } + + rootBytes, err := dbContext.Get(mdrs.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + root, err := externalapi.NewDomainHashFromByteSlice(rootBytes) + if err != nil { + return nil, err + } + + mdrs.cache.Add(blockHash, root) + return root, nil +} + +func (mdrs *mergeDepthRootStore) IsStaged(stagingArea *model.StagingArea) bool { + return mdrs.stagingShard(stagingArea).isStaged() +} + +func (mdrs *mergeDepthRootStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return mdrs.bucket.Key(hash.ByteSlice()) +} diff --git a/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go b/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go new file mode 100644 index 0000000..afd4f72 --- /dev/null +++ b/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go @@ -0,0 +1,50 @@ +package multisetstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type multisetStagingShard struct { + store *multisetStore + toAdd map[externalapi.DomainHash]model.Multiset + toDelete map[externalapi.DomainHash]struct{} +} + +func (ms *multisetStore) stagingShard(stagingArea *model.StagingArea) *multisetStagingShard { + return stagingArea.GetOrCreateShard(ms.shardID, func() model.StagingShard { + return &multisetStagingShard{ + store: ms, + toAdd: make(map[externalapi.DomainHash]model.Multiset), + toDelete: make(map[externalapi.DomainHash]struct{}), + } + }).(*multisetStagingShard) +} + +func (mss *multisetStagingShard) Commit(dbTx model.DBTransaction) error { + for hash, multiset := range mss.toAdd { + multisetBytes, err := mss.store.serializeMultiset(multiset) + if err != nil { + return err + } + err = dbTx.Put(mss.store.hashAsKey(&hash), multisetBytes) + if 
err != nil { + return err + } + mss.store.cache.Add(&hash, multiset) + } + + for hash := range mss.toDelete { + err := dbTx.Delete(mss.store.hashAsKey(&hash)) + if err != nil { + return err + } + mss.store.cache.Remove(&hash) + } + + return nil +} + +func (mss *multisetStagingShard) isStaged() bool { + return len(mss.toAdd) != 0 || len(mss.toDelete) != 0 +} diff --git a/domain/consensus/datastructures/multisetstore/multiset_store.go b/domain/consensus/datastructures/multisetstore/multiset_store.go new file mode 100644 index 0000000..8cd48e9 --- /dev/null +++ b/domain/consensus/datastructures/multisetstore/multiset_store.go @@ -0,0 +1,93 @@ +package multisetstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucache" + "github.com/spectre-project/spectred/util/staging" +) + +var bucketName = []byte("multisets") + +// multisetStore represents a store of Multisets +type multisetStore struct { + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket +} + +// New instantiates a new MultisetStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.MultisetStore { + return &multisetStore{ + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), + } +} + +// Stage stages the given multiset for the given blockHash +func (ms *multisetStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, multiset model.Multiset) { + stagingShard := ms.stagingShard(stagingArea) + + stagingShard.toAdd[*blockHash] = multiset.Clone() +} + +func (ms *multisetStore) IsStaged(stagingArea *model.StagingArea) bool { + return ms.stagingShard(stagingArea).isStaged() +} + +// 
Get gets the multiset associated with the given blockHash +func (ms *multisetStore) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.Multiset, error) { + stagingShard := ms.stagingShard(stagingArea) + + if multiset, ok := stagingShard.toAdd[*blockHash]; ok { + return multiset.Clone(), nil + } + + if multiset, ok := ms.cache.Get(blockHash); ok { + return multiset.(model.Multiset).Clone(), nil + } + + multisetBytes, err := dbContext.Get(ms.hashAsKey(blockHash)) + if err != nil { + return nil, err + } + + multiset, err := ms.deserializeMultiset(multisetBytes) + if err != nil { + return nil, err + } + ms.cache.Add(blockHash, multiset) + return multiset.Clone(), nil +} + +// Delete deletes the multiset associated with the given blockHash +func (ms *multisetStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) { + stagingShard := ms.stagingShard(stagingArea) + + if _, ok := stagingShard.toAdd[*blockHash]; ok { + delete(stagingShard.toAdd, *blockHash) + return + } + stagingShard.toDelete[*blockHash] = struct{}{} +} + +func (ms *multisetStore) hashAsKey(hash *externalapi.DomainHash) model.DBKey { + return ms.bucket.Key(hash.ByteSlice()) +} + +func (ms *multisetStore) serializeMultiset(multiset model.Multiset) ([]byte, error) { + return proto.Marshal(serialization.MultisetToDBMultiset(multiset)) +} + +func (ms *multisetStore) deserializeMultiset(multisetBytes []byte) (model.Multiset, error) { + dbMultiset := &serialization.DbMultiset{} + err := proto.Unmarshal(multisetBytes, dbMultiset) + if err != nil { + return nil, err + } + + return serialization.DBMultisetToMultiset(dbMultiset) +} diff --git a/domain/consensus/datastructures/pruningstore/imported_pruning_point.go b/domain/consensus/datastructures/pruningstore/imported_pruning_point.go new file mode 100644 index 0000000..b211e84 --- /dev/null +++ b/domain/consensus/datastructures/pruningstore/imported_pruning_point.go @@ -0,0 +1,239 @@ 
+package pruningstore + +import ( + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +var importedPruningPointUTXOsBucketName = []byte("imported-pruning-point-utxos") +var importedPruningPointMultisetKeyName = []byte("imported-pruning-point-multiset") + +func (ps *pruningStore) ClearImportedPruningPointUTXOs(dbContext model.DBWriter) error { + cursor, err := dbContext.Cursor(ps.importedPruningPointUTXOsBucket) + if err != nil { + return err + } + defer cursor.Close() + + for ok := cursor.First(); ok; ok = cursor.Next() { + key, err := cursor.Key() + if err != nil { + return err + } + err = dbContext.Delete(key) + if err != nil { + return err + } + } + return nil +} + +func (ps *pruningStore) AppendImportedPruningPointUTXOs(dbTx model.DBTransaction, + outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error { + + for _, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs { + key, err := ps.importedPruningPointUTXOKey(outpointAndUTXOEntryPair.Outpoint) + if err != nil { + return err + } + serializedUTXOEntry, err := serializeUTXOEntry(outpointAndUTXOEntryPair.UTXOEntry) + if err != nil { + return err + } + err = dbTx.Put(key, serializedUTXOEntry) + if err != nil { + return err + } + } + + return nil +} + +func (ps *pruningStore) ImportedPruningPointUTXOIterator(dbContext model.DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) { + cursor, err := dbContext.Cursor(ps.importedPruningPointUTXOsBucket) + if err != nil { + return nil, err + } + return ps.newCursorUTXOSetIterator(cursor), nil +} + +type utxoSetIterator struct { + cursor model.DBCursor + isClosed bool +} + +func (ps *pruningStore) newCursorUTXOSetIterator(cursor model.DBCursor) externalapi.ReadOnlyUTXOSetIterator { + return &utxoSetIterator{cursor: 
cursor} +} + +func (u *utxoSetIterator) First() bool { + if u.isClosed { + panic("Tried using a closed utxoSetIterator") + } + return u.cursor.First() +} + +func (u *utxoSetIterator) Next() bool { + if u.isClosed { + panic("Tried using a closed utxoSetIterator") + } + return u.cursor.Next() +} + +func (u *utxoSetIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) { + if u.isClosed { + return nil, nil, errors.New("Tried using a closed utxoSetIterator") + } + key, err := u.cursor.Key() + if err != nil { + panic(err) + } + + utxoEntryBytes, err := u.cursor.Value() + if err != nil { + return nil, nil, err + } + + outpoint, err = deserializeOutpoint(key.Suffix()) + if err != nil { + return nil, nil, err + } + + utxoEntry, err = deserializeUTXOEntry(utxoEntryBytes) + if err != nil { + return nil, nil, err + } + + return outpoint, utxoEntry, nil +} + +func (u *utxoSetIterator) Close() error { + if u.isClosed { + return errors.New("Tried using a closed utxoSetIterator") + } + u.isClosed = true + err := u.cursor.Close() + if err != nil { + return err + } + u.cursor = nil + return nil +} + +func (ps *pruningStore) importedPruningPointUTXOKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) { + serializedOutpoint, err := serializeOutpoint(outpoint) + if err != nil { + return nil, err + } + + return ps.importedPruningPointUTXOsBucket.Key(serializedOutpoint), nil +} + +func serializeOutpoint(outpoint *externalapi.DomainOutpoint) ([]byte, error) { + return proto.Marshal(serialization.DomainOutpointToDbOutpoint(outpoint)) +} + +func serializeUTXOEntry(entry externalapi.UTXOEntry) ([]byte, error) { + return proto.Marshal(serialization.UTXOEntryToDBUTXOEntry(entry)) +} + +func deserializeOutpoint(outpointBytes []byte) (*externalapi.DomainOutpoint, error) { + dbOutpoint := &serialization.DbOutpoint{} + err := proto.Unmarshal(outpointBytes, dbOutpoint) + if err != nil { + return nil, err + } + + return 
serialization.DbOutpointToDomainOutpoint(dbOutpoint) +} + +func deserializeUTXOEntry(entryBytes []byte) (externalapi.UTXOEntry, error) { + dbEntry := &serialization.DbUtxoEntry{} + err := proto.Unmarshal(entryBytes, dbEntry) + if err != nil { + return nil, err + } + return serialization.DBUTXOEntryToUTXOEntry(dbEntry) +} + +func (ps *pruningStore) ClearImportedPruningPointMultiset(dbContext model.DBWriter) error { + return dbContext.Delete(ps.importedPruningPointMultisetKey) +} + +func (ps *pruningStore) ImportedPruningPointMultiset(dbContext model.DBReader) (model.Multiset, error) { + multisetBytes, err := dbContext.Get(ps.importedPruningPointMultisetKey) + if err != nil { + return nil, err + } + return ps.deserializeMultiset(multisetBytes) +} + +func (ps *pruningStore) UpdateImportedPruningPointMultiset(dbTx model.DBTransaction, multiset model.Multiset) error { + multisetBytes, err := ps.serializeMultiset(multiset) + if err != nil { + return err + } + return dbTx.Put(ps.importedPruningPointMultisetKey, multisetBytes) +} + +func (ps *pruningStore) serializeMultiset(multiset model.Multiset) ([]byte, error) { + return proto.Marshal(serialization.MultisetToDBMultiset(multiset)) +} + +func (ps *pruningStore) deserializeMultiset(multisetBytes []byte) (model.Multiset, error) { + dbMultiset := &serialization.DbMultiset{} + err := proto.Unmarshal(multisetBytes, dbMultiset) + if err != nil { + return nil, err + } + + return serialization.DBMultisetToMultiset(dbMultiset) +} + +func (ps *pruningStore) CommitImportedPruningPointUTXOSet(dbContext model.DBWriter) error { + // Delete all the old UTXOs from the database + deleteCursor, err := dbContext.Cursor(ps.pruningPointUTXOSetBucket) + if err != nil { + return err + } + defer deleteCursor.Close() + for ok := deleteCursor.First(); ok; ok = deleteCursor.Next() { + key, err := deleteCursor.Key() + if err != nil { + return err + } + err = dbContext.Delete(key) + if err != nil { + return err + } + } + + // Insert all the new 
UTXOs into the database + insertCursor, err := dbContext.Cursor(ps.importedPruningPointUTXOsBucket) + if err != nil { + return err + } + defer insertCursor.Close() + for ok := insertCursor.First(); ok; ok = insertCursor.Next() { + importedPruningPointUTXOSetKey, err := insertCursor.Key() + if err != nil { + return err + } + pruningPointUTXOSetKey := ps.pruningPointUTXOSetBucket.Key(importedPruningPointUTXOSetKey.Suffix()) + + serializedUTXOEntry, err := insertCursor.Value() + if err != nil { + return err + } + + err = dbContext.Put(pruningPointUTXOSetKey, serializedUTXOEntry) + if err != nil { + return err + } + } + + return nil +} diff --git a/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go b/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go new file mode 100644 index 0000000..a3a71dd --- /dev/null +++ b/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go @@ -0,0 +1,81 @@ +package pruningstore + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type pruningStagingShard struct { + store *pruningStore + + pruningPointByIndex map[uint64]*externalapi.DomainHash + currentPruningPointIndex *uint64 + newPruningPointCandidate *externalapi.DomainHash + startUpdatingPruningPointUTXOSet bool +} + +func (ps *pruningStore) stagingShard(stagingArea *model.StagingArea) *pruningStagingShard { + return stagingArea.GetOrCreateShard(ps.shardID, func() model.StagingShard { + return &pruningStagingShard{ + store: ps, + pruningPointByIndex: map[uint64]*externalapi.DomainHash{}, + newPruningPointCandidate: nil, + startUpdatingPruningPointUTXOSet: false, + } + }).(*pruningStagingShard) +} + +func (mss *pruningStagingShard) Commit(dbTx model.DBTransaction) error { + for index, hash := range mss.pruningPointByIndex { + hashCopy := hash + hashBytes, err := mss.store.serializeHash(hash) + if err != nil { + return err + } + err = 
dbTx.Put(mss.store.indexAsKey(index), hashBytes) + if err != nil { + return err + } + mss.store.pruningPointByIndexCache.Add(index, hashCopy) + } + + if mss.currentPruningPointIndex != nil { + indexBytes := mss.store.serializeIndex(*mss.currentPruningPointIndex) + err := dbTx.Put(mss.store.currentPruningPointIndexKey, indexBytes) + if err != nil { + return err + } + + if mss.store.currentPruningPointIndexCache == nil { + var zero uint64 + mss.store.currentPruningPointIndexCache = &zero + } + + *mss.store.currentPruningPointIndexCache = *mss.currentPruningPointIndex + } + + if mss.newPruningPointCandidate != nil { + candidateBytes, err := mss.store.serializeHash(mss.newPruningPointCandidate) + if err != nil { + return err + } + err = dbTx.Put(mss.store.candidatePruningPointHashKey, candidateBytes) + if err != nil { + return err + } + mss.store.pruningPointCandidateCache = mss.newPruningPointCandidate + } + + if mss.startUpdatingPruningPointUTXOSet { + err := dbTx.Put(mss.store.updatingPruningPointUTXOSetKey, []byte{0}) + if err != nil { + return err + } + } + + return nil +} + +func (mss *pruningStagingShard) isStaged() bool { + return len(mss.pruningPointByIndex) > 0 || mss.newPruningPointCandidate != nil || mss.startUpdatingPruningPointUTXOSet +} diff --git a/domain/consensus/datastructures/pruningstore/pruning_store.go b/domain/consensus/datastructures/pruningstore/pruning_store.go new file mode 100644 index 0000000..5869a63 --- /dev/null +++ b/domain/consensus/datastructures/pruningstore/pruning_store.go @@ -0,0 +1,357 @@ +package pruningstore + +import ( + "encoding/binary" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/database/binaryserialization" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/lrucacheuint64tohash" + "github.com/spectre-project/spectred/util/staging" +) + +var currentPruningPointIndexKeyName = []byte("pruning-block-index") +var candidatePruningPointHashKeyName = []byte("candidate-pruning-point-hash") +var pruningPointUTXOSetBucketName = []byte("pruning-point-utxo-set") +var updatingPruningPointUTXOSetKeyName = []byte("updating-pruning-point-utxo-set") +var pruningPointByIndexBucketName = []byte("pruning-point-by-index") + +// pruningStore represents a store for the current pruning state +type pruningStore struct { + shardID model.StagingShardID + pruningPointByIndexCache *lrucacheuint64tohash.LRUCache + currentPruningPointIndexCache *uint64 + pruningPointCandidateCache *externalapi.DomainHash + + currentPruningPointIndexKey model.DBKey + candidatePruningPointHashKey model.DBKey + pruningPointUTXOSetBucket model.DBBucket + updatingPruningPointUTXOSetKey model.DBKey + importedPruningPointUTXOsBucket model.DBBucket + importedPruningPointMultisetKey model.DBKey + pruningPointByIndexBucket model.DBBucket +} + +// New instantiates a new PruningStore +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.PruningStore { + return &pruningStore{ + shardID: staging.GenerateShardingID(), + pruningPointByIndexCache: lrucacheuint64tohash.New(cacheSize, preallocate), + currentPruningPointIndexKey: prefixBucket.Key(currentPruningPointIndexKeyName), + candidatePruningPointHashKey: prefixBucket.Key(candidatePruningPointHashKeyName), + pruningPointUTXOSetBucket: prefixBucket.Bucket(pruningPointUTXOSetBucketName), + importedPruningPointUTXOsBucket: prefixBucket.Bucket(importedPruningPointUTXOsBucketName), + updatingPruningPointUTXOSetKey: prefixBucket.Key(updatingPruningPointUTXOSetKeyName), + importedPruningPointMultisetKey: prefixBucket.Key(importedPruningPointMultisetKeyName), + 
pruningPointByIndexBucket: prefixBucket.Bucket(pruningPointByIndexBucketName),
+	}
+}
+
+// StagePruningPointCandidate stages the given hash as the new pruning point candidate.
+func (ps *pruningStore) StagePruningPointCandidate(stagingArea *model.StagingArea, candidate *externalapi.DomainHash) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	stagingShard.newPruningPointCandidate = candidate
+}
+
+// PruningPointCandidate returns the pruning point candidate, preferring (in order)
+// the staged value, the in-memory cache, and finally the database (which also
+// populates the cache on a successful read).
+func (ps *pruningStore) PruningPointCandidate(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	if stagingShard.newPruningPointCandidate != nil {
+		return stagingShard.newPruningPointCandidate, nil
+	}
+
+	if ps.pruningPointCandidateCache != nil {
+		return ps.pruningPointCandidateCache, nil
+	}
+
+	candidateBytes, err := dbContext.Get(ps.candidatePruningPointHashKey)
+	if err != nil {
+		return nil, err
+	}
+
+	candidate, err := ps.deserializePruningPoint(candidateBytes)
+	if err != nil {
+		return nil, err
+	}
+	ps.pruningPointCandidateCache = candidate
+	return candidate, nil
+}
+
+// HasPruningPointCandidate reports whether a candidate exists in the staging
+// shard, the cache, or the database — without deserializing it.
+func (ps *pruningStore) HasPruningPointCandidate(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	if stagingShard.newPruningPointCandidate != nil {
+		return true, nil
+	}
+
+	if ps.pruningPointCandidateCache != nil {
+		return true, nil
+	}
+
+	return dbContext.Has(ps.candidatePruningPointHashKey)
+}
+
+// StagePruningPoint stages the pruning state
+// The new pruning point is stored under index current+1; when no current index
+// exists yet (first pruning point ever), index 0 is used.
+func (ps *pruningStore) StagePruningPoint(dbContext model.DBWriter, stagingArea *model.StagingArea, pruningPointBlockHash *externalapi.DomainHash) error {
+	newPruningPointIndex := uint64(0)
+	pruningPointIndex, err := ps.CurrentPruningPointIndex(dbContext, stagingArea)
+	if database.IsNotFoundError(err) {
+		newPruningPointIndex = 0
+	} else if err != nil {
+		return err
+	} else {
+		newPruningPointIndex = pruningPointIndex + 1
+	}
+
+	err = ps.StagePruningPointByIndex(dbContext, stagingArea, pruningPointBlockHash, newPruningPointIndex)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// IsStaged reports whether this store has any pending (uncommitted) changes
+// in the given staging area.
+func (ps *pruningStore) IsStaged(stagingArea *model.StagingArea) bool {
+	return ps.stagingShard(stagingArea).isStaged()
+}
+
+// UpdatePruningPointUTXOSet applies the given diff directly to the on-disk
+// pruning point UTXO set. NOTE(review): this bypasses the staging area and
+// writes/deletes via dbContext immediately — presumably intentional since the
+// UTXO set is too large to stage; confirm against callers.
+func (ps *pruningStore) UpdatePruningPointUTXOSet(dbContext model.DBWriter, diff externalapi.UTXODiff) error {
+	toRemoveIterator := diff.ToRemove().Iterator()
+	defer toRemoveIterator.Close()
+	for ok := toRemoveIterator.First(); ok; ok = toRemoveIterator.Next() {
+		toRemoveOutpoint, _, err := toRemoveIterator.Get()
+		if err != nil {
+			return err
+		}
+		serializedOutpoint, err := serializeOutpoint(toRemoveOutpoint)
+		if err != nil {
+			return err
+		}
+		err = dbContext.Delete(ps.pruningPointUTXOSetBucket.Key(serializedOutpoint))
+		if err != nil {
+			return err
+		}
+	}
+
+	toAddIterator := diff.ToAdd().Iterator()
+	defer toAddIterator.Close()
+	for ok := toAddIterator.First(); ok; ok = toAddIterator.Next() {
+		toAddOutpoint, entry, err := toAddIterator.Get()
+		if err != nil {
+			return err
+		}
+		serializedOutpoint, err := serializeOutpoint(toAddOutpoint)
+		if err != nil {
+			return err
+		}
+		serializedUTXOEntry, err := serializeUTXOEntry(entry)
+		if err != nil {
+			return err
+		}
+		err = dbContext.Put(ps.pruningPointUTXOSetBucket.Key(serializedOutpoint), serializedUTXOEntry)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PruningPoint gets the current pruning point
+// i.e. the pruning point stored under the current (highest) index.
+func (ps *pruningStore) PruningPoint(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
+	pruningPointIndex, err := ps.CurrentPruningPointIndex(dbContext, stagingArea)
+	if err != nil {
+		return nil, err
+	}
+
+	return ps.PruningPointByIndex(dbContext, stagingArea, pruningPointIndex)
+}
+
+// PruningPointByIndex returns the pruning point stored under the given index,
+// checking the staging shard first, then the LRU cache, then the database.
+func (ps *pruningStore) PruningPointByIndex(dbContext model.DBReader, stagingArea *model.StagingArea, index uint64) (*externalapi.DomainHash, error) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	if hash, exists := stagingShard.pruningPointByIndex[index]; exists {
+		return hash, nil
+	}
+
+	if hash, exists := 
ps.pruningPointByIndexCache.Get(index); exists {
+		return hash, nil
+	}
+
+	pruningPointBytes, err := dbContext.Get(ps.indexAsKey(index))
+	if err != nil {
+		return nil, err
+	}
+
+	pruningPoint, err := ps.deserializePruningPoint(pruningPointBytes)
+	if err != nil {
+		return nil, err
+	}
+	ps.pruningPointByIndexCache.Add(index, pruningPoint)
+	return pruningPoint, nil
+}
+
+// serializeHash proto-serializes a domain hash for database storage.
+func (ps *pruningStore) serializeHash(hash *externalapi.DomainHash) ([]byte, error) {
+	return proto.Marshal(serialization.DomainHashToDbHash(hash))
+}
+
+// deserializePruningPoint decodes a proto-serialized DbHash back into a domain hash.
+func (ps *pruningStore) deserializePruningPoint(pruningPointBytes []byte) (*externalapi.DomainHash, error) {
+	dbHash := &serialization.DbHash{}
+	err := proto.Unmarshal(pruningPointBytes, dbHash)
+	if err != nil {
+		return nil, err
+	}
+
+	return serialization.DbHashToDomainHash(dbHash)
+}
+
+// deserializeIndex decodes a uint64 index from its binary form.
+func (ps *pruningStore) deserializeIndex(indexBytes []byte) (uint64, error) {
+	return binaryserialization.DeserializeUint64(indexBytes)
+}
+
+// serializeIndex encodes a uint64 index into its binary form.
+func (ps *pruningStore) serializeIndex(index uint64) []byte {
+	return binaryserialization.SerializeUint64(index)
+}
+
+// HasPruningPoint reports whether a current pruning point index exists in the
+// staging shard, the cache, or the database.
+func (ps *pruningStore) HasPruningPoint(dbContext model.DBReader, stagingArea *model.StagingArea) (bool, error) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	if stagingShard.currentPruningPointIndex != nil {
+		return true, nil
+	}
+
+	if ps.currentPruningPointIndexCache != nil {
+		return true, nil
+	}
+
+	return dbContext.Has(ps.currentPruningPointIndexKey)
+}
+
+// PruningPointUTXOIterator returns an iterator over the full on-disk pruning
+// point UTXO set. The caller owns the underlying cursor via the iterator.
+func (ps *pruningStore) PruningPointUTXOIterator(dbContext model.DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) {
+	cursor, err := dbContext.Cursor(ps.pruningPointUTXOSetBucket)
+	if err != nil {
+		return nil, err
+	}
+	return ps.newCursorUTXOSetIterator(cursor), nil
+}
+
+// PruningPointUTXOs returns up to `limit` UTXO pairs from the pruning point
+// UTXO set, starting after the optional fromOutpoint (used for paging).
+func (ps *pruningStore) PruningPointUTXOs(dbContext model.DBReader,
+	fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) {
+
+	cursor, err := dbContext.Cursor(ps.pruningPointUTXOSetBucket)
+	if err != nil {
+		return nil, err
+	}
+	defer cursor.Close()
+
+	if fromOutpoint != nil {
+		serializedFromOutpoint, err := serializeOutpoint(fromOutpoint)
+		if err != nil {
+			return nil, err
+		}
+		seekKey := ps.pruningPointUTXOSetBucket.Key(serializedFromOutpoint)
+		err = cursor.Seek(seekKey)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	pruningPointUTXOIterator := ps.newCursorUTXOSetIterator(cursor)
+	defer pruningPointUTXOIterator.Close()
+
+	outpointAndUTXOEntryPairs := make([]*externalapi.OutpointAndUTXOEntryPair, 0, limit)
+	for len(outpointAndUTXOEntryPairs) < limit && pruningPointUTXOIterator.Next() {
+		outpoint, utxoEntry, err := pruningPointUTXOIterator.Get()
+		if err != nil {
+			return nil, err
+		}
+		outpointAndUTXOEntryPairs = append(outpointAndUTXOEntryPairs, &externalapi.OutpointAndUTXOEntryPair{
+			Outpoint:  outpoint,
+			UTXOEntry: utxoEntry,
+		})
+	}
+	return outpointAndUTXOEntryPairs, nil
+}
+
+// StageStartUpdatingPruningPointUTXOSet stages a marker flag indicating a
+// pruning point UTXO set update is in progress (used for crash recovery).
+func (ps *pruningStore) StageStartUpdatingPruningPointUTXOSet(stagingArea *model.StagingArea) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	stagingShard.startUpdatingPruningPointUTXOSet = true
+}
+
+// HadStartedUpdatingPruningPointUTXOSet reports whether the in-progress marker
+// exists in the database (i.e. a previous update may have been interrupted).
+func (ps *pruningStore) HadStartedUpdatingPruningPointUTXOSet(dbContext model.DBWriter) (bool, error) {
+	return dbContext.Has(ps.updatingPruningPointUTXOSetKey)
+}
+
+// FinishUpdatingPruningPointUTXOSet removes the in-progress marker.
+func (ps *pruningStore) FinishUpdatingPruningPointUTXOSet(dbContext model.DBWriter) error {
+	return dbContext.Delete(ps.updatingPruningPointUTXOSetKey)
+}
+
+// indexAsKey converts a pruning point index into a big-endian database key,
+// so keys sort in ascending index order.
+func (ps *pruningStore) indexAsKey(index uint64) model.DBKey {
+	var keyBytes [8]byte
+	binary.BigEndian.PutUint64(keyBytes[:], index)
+	return ps.pruningPointByIndexBucket.Key(keyBytes[:])
+}
+
+// StagePruningPointByIndex stages the given pruning point under the given
+// index, and bumps the staged current index if this index is the new highest
+// (or if no current index exists yet).
+func (ps *pruningStore) StagePruningPointByIndex(dbContext model.DBReader, stagingArea *model.StagingArea,
+	pruningPointBlockHash *externalapi.DomainHash, index uint64) error {
+
+	stagingShard := ps.stagingShard(stagingArea)
+	_, exists := stagingShard.pruningPointByIndex[index]
+	if exists {
+		return errors.Errorf("%s is already staged for pruning point with index %d", stagingShard.pruningPointByIndex[index], index)
+	}
+
+	stagingShard.pruningPointByIndex[index] = pruningPointBlockHash
+
+	pruningPointIndex, err := ps.CurrentPruningPointIndex(dbContext, stagingArea)
+	isNotFoundError := database.IsNotFoundError(err)
+	if !isNotFoundError && err != nil {
+		// Roll back the map entry staged above so the shard stays consistent.
+		delete(stagingShard.pruningPointByIndex, index)
+		return err
+	}
+
+	// NOTE(review): when currentPruningPointIndex is nil AND the condition
+	// below is false (index <= pruningPointIndex with an existing index in
+	// cache/db), this leaves a staged current index of 0, which would be
+	// committed and regress the persisted value — confirm callers never stage
+	// a non-highest index, or allocate the pointer only inside the branch.
+	if stagingShard.currentPruningPointIndex == nil {
+		var zero uint64
+		stagingShard.currentPruningPointIndex = &zero
+	}
+
+	if isNotFoundError || index > pruningPointIndex {
+		*stagingShard.currentPruningPointIndex = index
+	}
+
+	return nil
+}
+
+// CurrentPruningPointIndex returns the index of the current pruning point,
+// preferring the staging shard, then the cache, then the database (which also
+// populates the cache).
+func (ps *pruningStore) CurrentPruningPointIndex(dbContext model.DBReader, stagingArea *model.StagingArea) (uint64, error) {
+	stagingShard := ps.stagingShard(stagingArea)
+
+	if stagingShard.currentPruningPointIndex != nil {
+		return *stagingShard.currentPruningPointIndex, nil
+	}
+
+	if ps.currentPruningPointIndexCache != nil {
+		return *ps.currentPruningPointIndexCache, nil
+	}
+
+	pruningPointIndexBytes, err := dbContext.Get(ps.currentPruningPointIndexKey)
+	if err != nil {
+		return 0, err
+	}
+
+	index, err := ps.deserializeIndex(pruningPointIndexBytes)
+	if err != nil {
+		return 0, err
+	}
+
+	if ps.currentPruningPointIndexCache == nil {
+		var zero uint64
+		ps.currentPruningPointIndexCache = &zero
+	}
+
+	*ps.currentPruningPointIndexCache = index
+	return index, nil
+}
diff --git a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go
new file mode 100644
index 0000000..17192d3
--- /dev/null
+++ b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go
@@ -0,0 +1,53 @@
+package reachabilitydatastore
+
+import (
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+type 
reachabilityDataStagingShard struct {
+	store *reachabilityDataStore
+	// reachabilityData holds per-block data staged but not yet committed.
+	reachabilityData map[externalapi.DomainHash]model.ReachabilityData
+	// reachabilityReindexRoot is the staged reindex root; nil means unchanged.
+	reachabilityReindexRoot *externalapi.DomainHash
+}
+
+// stagingShard returns (creating on first use) this store's shard inside the
+// given staging area.
+func (rds *reachabilityDataStore) stagingShard(stagingArea *model.StagingArea) *reachabilityDataStagingShard {
+	return stagingArea.GetOrCreateShard(rds.shardID, func() model.StagingShard {
+		return &reachabilityDataStagingShard{
+			store:                   rds,
+			reachabilityData:        make(map[externalapi.DomainHash]model.ReachabilityData),
+			reachabilityReindexRoot: nil,
+		}
+	}).(*reachabilityDataStagingShard)
+}
+
+// Commit writes all staged reachability data and the staged reindex root (if
+// any) into the database transaction, and mirrors each write into the store's
+// in-memory caches.
+func (rdss *reachabilityDataStagingShard) Commit(dbTx model.DBTransaction) error {
+	if rdss.reachabilityReindexRoot != nil {
+		reachabilityReindexRootBytes, err := rdss.store.serializeReachabilityReindexRoot(rdss.reachabilityReindexRoot)
+		if err != nil {
+			return err
+		}
+		err = dbTx.Put(rdss.store.reachabilityReindexRootKey, reachabilityReindexRootBytes)
+		if err != nil {
+			return err
+		}
+		rdss.store.reachabilityReindexRootCache = rdss.reachabilityReindexRoot
+	}
+	for hash, reachabilityData := range rdss.reachabilityData {
+		reachabilityDataBytes, err := rdss.store.serializeReachabilityData(reachabilityData)
+		if err != nil {
+			return err
+		}
+		// NOTE(review): &hash takes the address of the range variable; assumes
+		// the key builder and cache copy the value rather than retain the
+		// pointer — TODO confirm (pre-Go-1.22 the variable is reused per
+		// iteration).
+		err = dbTx.Put(rdss.store.reachabilityDataBlockHashAsKey(&hash), reachabilityDataBytes)
+		if err != nil {
+			return err
+		}
+		rdss.store.reachabilityDataCache.Add(&hash, reachabilityData)
+	}
+
+	return nil
+}
+
+// isStaged reports whether this shard holds any uncommitted changes.
+func (rdss *reachabilityDataStagingShard) isStaged() bool {
+	return len(rdss.reachabilityData) != 0 || rdss.reachabilityReindexRoot != nil
+}
diff --git a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go
new file mode 100644
index 0000000..0209412
--- /dev/null
+++ b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go
@@ -0,0 +1,176 @@
+package reachabilitydatastore
+
+import (
+	
"github.com/golang/protobuf/proto"
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus/database/serialization"
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/utils/lrucache"
+	"github.com/spectre-project/spectred/infrastructure/db/database"
+	"github.com/spectre-project/spectred/util/staging"
+)
+
+var reachabilityDataBucketName = []byte("reachability-data")
+var reachabilityReindexRootKeyName = []byte("reachability-reindex-root")
+
+// reachabilityDataStore represents a store of ReachabilityData
+type reachabilityDataStore struct {
+	shardID                      model.StagingShardID
+	reachabilityDataCache        *lrucache.LRUCache
+	reachabilityReindexRootCache *externalapi.DomainHash
+
+	reachabilityDataBucket     model.DBBucket
+	reachabilityReindexRootKey model.DBKey
+}
+
+// New instantiates a new ReachabilityDataStore
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.ReachabilityDataStore {
+	return &reachabilityDataStore{
+		shardID:                    staging.GenerateShardingID(),
+		reachabilityDataCache:      lrucache.New(cacheSize, preallocate),
+		reachabilityDataBucket:     prefixBucket.Bucket(reachabilityDataBucketName),
+		reachabilityReindexRootKey: prefixBucket.Key(reachabilityReindexRootKeyName),
+	}
+}
+
+// StageReachabilityData stages the given reachabilityData for the given blockHash
+func (rds *reachabilityDataStore) StageReachabilityData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, reachabilityData model.ReachabilityData) {
+	stagingShard := rds.stagingShard(stagingArea)
+
+	stagingShard.reachabilityData[*blockHash] = reachabilityData
+}
+
+// Delete removes every entry in the reachability data bucket and the reindex
+// root key directly from the database (no staging).
+func (rds *reachabilityDataStore) Delete(dbContext model.DBWriter) error {
+	cursor, err := dbContext.Cursor(rds.reachabilityDataBucket)
+	if err != nil {
+		return err
+	}
+
+	// NOTE(review): the cursor is never closed here — TODO confirm whether
+	// model.DBCursor requires Close (other methods in this diff defer it).
+	for ok := cursor.First(); ok; ok = cursor.Next() {
+		key, err := cursor.Key()
+		if err != nil {
+			return err
+		}
+
+		err = dbContext.Delete(key)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dbContext.Delete(rds.reachabilityReindexRootKey)
+}
+
+// StageReachabilityReindexRoot stages the given reachabilityReindexRoot
+func (rds *reachabilityDataStore) StageReachabilityReindexRoot(stagingArea *model.StagingArea, reachabilityReindexRoot *externalapi.DomainHash) {
+	stagingShard := rds.stagingShard(stagingArea)
+
+	stagingShard.reachabilityReindexRoot = reachabilityReindexRoot
+}
+
+// IsStaged reports whether this store has any pending changes in the given
+// staging area.
+func (rds *reachabilityDataStore) IsStaged(stagingArea *model.StagingArea) bool {
+	return rds.stagingShard(stagingArea).isStaged()
+}
+
+// errNotFound is returned for negatively-cached lookups; it wraps
+// database.ErrNotFound so database.IsNotFoundError still matches it.
+var errNotFound = errors.Wrap(database.ErrNotFound, "reachability data not found")
+
+// ReachabilityData returns the reachabilityData associated with the given blockHash
+// Lookup order: staging shard, LRU cache (where a stored nil is a negative-cache
+// hit meaning "known absent"), then the database. A database miss is negatively
+// cached before the not-found error is returned.
+func (rds *reachabilityDataStore) ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) {
+	stagingShard := rds.stagingShard(stagingArea)
+
+	if reachabilityData, ok := stagingShard.reachabilityData[*blockHash]; ok {
+		return reachabilityData, nil
+	}
+
+	if reachabilityData, ok := rds.reachabilityDataCache.Get(blockHash); ok {
+		if reachabilityData == nil {
+			return nil, errNotFound
+		}
+		return reachabilityData.(model.ReachabilityData), nil
+	}
+
+	reachabilityDataBytes, err := dbContext.Get(rds.reachabilityDataBlockHashAsKey(blockHash))
+	if database.IsNotFoundError(err) {
+		// Negative-cache the miss so repeated lookups skip the database.
+		rds.reachabilityDataCache.Add(blockHash, nil)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	reachabilityData, err := rds.deserializeReachabilityData(reachabilityDataBytes)
+	if err != nil {
+		return nil, err
+	}
+	rds.reachabilityDataCache.Add(blockHash, reachabilityData)
+	return reachabilityData, nil
+}
+
+// HasReachabilityData reports whether reachability data exists for blockHash,
+// implemented by attempting a full read and mapping not-found to false.
+func (rds *reachabilityDataStore) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
+	_, err := rds.ReachabilityData(dbContext, stagingArea, blockHash)
+	if database.IsNotFoundError(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// ReachabilityReindexRoot returns the current reachability reindex root
+// preferring the staging shard, then the cache, then the database (which also
+// populates the cache).
+func (rds *reachabilityDataStore) ReachabilityReindexRoot(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) {
+	stagingShard := rds.stagingShard(stagingArea)
+
+	if stagingShard.reachabilityReindexRoot != nil {
+		return stagingShard.reachabilityReindexRoot, nil
+	}
+
+	if rds.reachabilityReindexRootCache != nil {
+		return rds.reachabilityReindexRootCache, nil
+	}
+
+	reachabilityReindexRootBytes, err := dbContext.Get(rds.reachabilityReindexRootKey)
+	if err != nil {
+		return nil, err
+	}
+
+	reachabilityReindexRoot, err := rds.deserializeReachabilityReindexRoot(reachabilityReindexRootBytes)
+	if err != nil {
+		return nil, err
+	}
+	rds.reachabilityReindexRootCache = reachabilityReindexRoot
+	return reachabilityReindexRoot, nil
+}
+
+// reachabilityDataBlockHashAsKey converts a block hash into its database key
+// within the reachability data bucket.
+func (rds *reachabilityDataStore) reachabilityDataBlockHashAsKey(hash *externalapi.DomainHash) model.DBKey {
+	return rds.reachabilityDataBucket.Key(hash.ByteSlice())
+}
+
+// serializeReachabilityData proto-serializes reachability data for storage.
+func (rds *reachabilityDataStore) serializeReachabilityData(reachabilityData model.ReachabilityData) ([]byte, error) {
+	return proto.Marshal(serialization.ReachablityDataToDBReachablityData(reachabilityData))
+}
+
+// deserializeReachabilityData decodes proto-serialized reachability data.
+func (rds *reachabilityDataStore) deserializeReachabilityData(reachabilityDataBytes []byte) (model.ReachabilityData, error) {
+	dbReachabilityData := &serialization.DbReachabilityData{}
+	err := proto.Unmarshal(reachabilityDataBytes, dbReachabilityData)
+	if err != nil {
+		return nil, err
+	}
+
+	return serialization.DBReachablityDataToReachablityData(dbReachabilityData)
+}
+
+// serializeReachabilityReindexRoot proto-serializes the reindex root hash.
+func (rds *reachabilityDataStore) serializeReachabilityReindexRoot(reachabilityReindexRoot *externalapi.DomainHash) ([]byte, error) {
+	return proto.Marshal(serialization.DomainHashToDbHash(reachabilityReindexRoot))
+}
+
+// deserializeReachabilityReindexRoot decodes a proto-serialized DbHash back
+// into the reindex root domain hash.
+func (rds *reachabilityDataStore) deserializeReachabilityReindexRoot(reachabilityReindexRootBytes []byte) (*externalapi.DomainHash, error) {
+	dbHash := &serialization.DbHash{}
+	err := proto.Unmarshal(reachabilityReindexRootBytes, dbHash)
+	if err != nil {
+		return nil, err
+	}
+
+	return serialization.DbHashToDomainHash(dbHash)
+}
diff --git a/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go b/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go
new file mode 100644
index 0000000..4b1b928
--- /dev/null
+++ b/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go
@@ -0,0 +1,74 @@
+package utxodiffstore
+
+import (
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+type utxoDiffStagingShard struct {
+	store              *utxoDiffStore
+	utxoDiffToAdd      map[externalapi.DomainHash]externalapi.UTXODiff
+	utxoDiffChildToAdd map[externalapi.DomainHash]*externalapi.DomainHash
+	toDelete           map[externalapi.DomainHash]struct{}
+}
+
+// stagingShard returns (creating on first use) this store's shard inside the
+// given staging area.
+func (uds *utxoDiffStore) stagingShard(stagingArea *model.StagingArea) *utxoDiffStagingShard {
+	return stagingArea.GetOrCreateShard(uds.shardID, func() model.StagingShard {
+		return &utxoDiffStagingShard{
+			store:              uds,
+			utxoDiffToAdd:      make(map[externalapi.DomainHash]externalapi.UTXODiff),
+			utxoDiffChildToAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash),
+			toDelete:           make(map[externalapi.DomainHash]struct{}),
+		}
+	}).(*utxoDiffStagingShard)
+}
+
+// Commit writes staged diffs, staged diff-children, and staged deletions into
+// the database transaction, mirroring each change into the store's caches.
+func (udss *utxoDiffStagingShard) Commit(dbTx model.DBTransaction) error {
+	for hash, utxoDiff := range udss.utxoDiffToAdd {
+		utxoDiffBytes, err := udss.store.serializeUTXODiff(utxoDiff)
+		if err != nil {
+			return err
+		}
+		err = dbTx.Put(udss.store.utxoDiffHashAsKey(&hash), utxoDiffBytes)
+		if err != nil {
+			return err
+		}
+		udss.store.utxoDiffCache.Add(&hash, utxoDiff)
+	
}
+
+	for hash, utxoDiffChild := range udss.utxoDiffChildToAdd {
+		// nil children are tolerated in the map but never persisted.
+		if utxoDiffChild == nil {
+			continue
+		}
+
+		utxoDiffChildBytes, err := udss.store.serializeUTXODiffChild(utxoDiffChild)
+		if err != nil {
+			return err
+		}
+		err = dbTx.Put(udss.store.utxoDiffChildHashAsKey(&hash), utxoDiffChildBytes)
+		if err != nil {
+			return err
+		}
+		udss.store.utxoDiffChildCache.Add(&hash, utxoDiffChild)
+	}
+
+	for hash := range udss.toDelete {
+		err := dbTx.Delete(udss.store.utxoDiffHashAsKey(&hash))
+		if err != nil {
+			return err
+		}
+		udss.store.utxoDiffCache.Remove(&hash)
+
+		err = dbTx.Delete(udss.store.utxoDiffChildHashAsKey(&hash))
+		if err != nil {
+			return err
+		}
+		udss.store.utxoDiffChildCache.Remove(&hash)
+	}
+
+	return nil
+}
+
+// isStaged reports whether this shard holds any uncommitted changes.
+func (udss *utxoDiffStagingShard) isStaged() bool {
+	return len(udss.utxoDiffToAdd) != 0 || len(udss.utxoDiffChildToAdd) != 0 || len(udss.toDelete) != 0
+}
diff --git a/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go b/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go
new file mode 100644
index 0000000..f9447bd
--- /dev/null
+++ b/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go
@@ -0,0 +1,190 @@
+package utxodiffstore
+
+import (
+	"github.com/golang/protobuf/proto"
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus/database/serialization"
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/utils/lrucache"
+	"github.com/spectre-project/spectred/util/staging"
+)
+
+var utxoDiffBucketName = []byte("utxo-diffs")
+var utxoDiffChildBucketName = []byte("utxo-diff-children")
+
+// utxoDiffStore represents a store of UTXODiffs
+type utxoDiffStore struct {
+	shardID             model.StagingShardID
+	utxoDiffCache       *lrucache.LRUCache
+	utxoDiffChildCache  *lrucache.LRUCache
+	utxoDiffBucket      model.DBBucket
+	utxoDiffChildBucket model.DBBucket
+}
+
+// New instantiates a new UTXODiffStore
+func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.UTXODiffStore {
+	return &utxoDiffStore{
+		shardID:             staging.GenerateShardingID(),
+		utxoDiffCache:       lrucache.New(cacheSize, preallocate),
+		utxoDiffChildCache:  lrucache.New(cacheSize, preallocate),
+		utxoDiffBucket:      prefixBucket.Bucket(utxoDiffBucketName),
+		utxoDiffChildBucket: prefixBucket.Bucket(utxoDiffChildBucketName),
+	}
+}
+
+// Stage stages the given utxoDiff for the given blockHash
+// A nil utxoDiffChild is allowed and simply leaves the child unstaged.
+func (uds *utxoDiffStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
+	utxoDiff externalapi.UTXODiff, utxoDiffChild *externalapi.DomainHash) {
+
+	stagingShard := uds.stagingShard(stagingArea)
+
+	stagingShard.utxoDiffToAdd[*blockHash] = utxoDiff
+
+	if utxoDiffChild != nil {
+		stagingShard.utxoDiffChildToAdd[*blockHash] = utxoDiffChild
+	}
+}
+
+// IsStaged reports whether this store has any pending changes in the given
+// staging area.
+func (uds *utxoDiffStore) IsStaged(stagingArea *model.StagingArea) bool {
+	return uds.stagingShard(stagingArea).isStaged()
+}
+
+// isBlockHashStaged reports whether either a diff or a diff-child is staged
+// for the given blockHash.
+func (uds *utxoDiffStore) isBlockHashStaged(stagingShard *utxoDiffStagingShard, blockHash *externalapi.DomainHash) bool {
+	if _, ok := stagingShard.utxoDiffToAdd[*blockHash]; ok {
+		return true
+	}
+	_, ok := stagingShard.utxoDiffChildToAdd[*blockHash]
+	return ok
+}
+
+// UTXODiff gets the utxoDiff associated with the given blockHash
+// Lookup order: staging shard, LRU cache, then the database (which also
+// populates the cache).
+func (uds *utxoDiffStore) UTXODiff(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.UTXODiff, error) {
+	stagingShard := uds.stagingShard(stagingArea)
+
+	if utxoDiff, ok := stagingShard.utxoDiffToAdd[*blockHash]; ok {
+		return utxoDiff, nil
+	}
+
+	if utxoDiff, ok := uds.utxoDiffCache.Get(blockHash); ok {
+		return utxoDiff.(externalapi.UTXODiff), nil
+	}
+
+	utxoDiffBytes, err := dbContext.Get(uds.utxoDiffHashAsKey(blockHash))
+	if err != nil {
+		return nil, err
+	}
+
+	utxoDiff, err := uds.deserializeUTXODiff(utxoDiffBytes)
+	if err != nil {
+		return nil, err
+	}
+	uds.utxoDiffCache.Add(blockHash, utxoDiff)
+	return utxoDiff, nil
+}
+
+// UTXODiffChild gets the utxoDiff child associated with the given blockHash
+// Lookup order: staging shard, LRU cache, then the database (which also
+// populates the cache).
+func (uds *utxoDiffStore) UTXODiffChild(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
+	stagingShard := uds.stagingShard(stagingArea)
+
+	if utxoDiffChild, ok := stagingShard.utxoDiffChildToAdd[*blockHash]; ok {
+		return utxoDiffChild, nil
+	}
+
+	if utxoDiffChild, ok := uds.utxoDiffChildCache.Get(blockHash); ok {
+		return utxoDiffChild.(*externalapi.DomainHash), nil
+	}
+
+	utxoDiffChildBytes, err := dbContext.Get(uds.utxoDiffChildHashAsKey(blockHash))
+	if err != nil {
+		return nil, err
+	}
+
+	utxoDiffChild, err := uds.deserializeUTXODiffChild(utxoDiffChildBytes)
+	if err != nil {
+		return nil, err
+	}
+	uds.utxoDiffChildCache.Add(blockHash, utxoDiffChild)
+	return utxoDiffChild, nil
+}
+
+// HasUTXODiffChild returns true if the given blockHash has a UTXODiffChild
+func (uds *utxoDiffStore) HasUTXODiffChild(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
+	stagingShard := uds.stagingShard(stagingArea)
+
+	if _, ok := stagingShard.utxoDiffChildToAdd[*blockHash]; ok {
+		return true, nil
+	}
+
+	if uds.utxoDiffChildCache.Has(blockHash) {
+		return true, nil
+	}
+
+	return dbContext.Has(uds.utxoDiffChildHashAsKey(blockHash))
+}
+
+// Delete deletes the utxoDiff associated with the given blockHash
+// If the hash is currently staged, only the staged entries are dropped;
+// otherwise a database deletion is staged via toDelete.
+func (uds *utxoDiffStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) {
+	stagingShard := uds.stagingShard(stagingArea)
+
+	if uds.isBlockHashStaged(stagingShard, blockHash) {
+		if _, ok := stagingShard.utxoDiffToAdd[*blockHash]; ok {
+			delete(stagingShard.utxoDiffToAdd, *blockHash)
+		}
+		if _, ok := stagingShard.utxoDiffChildToAdd[*blockHash]; ok {
+			delete(stagingShard.utxoDiffChildToAdd, *blockHash)
+		}
+		return
+	}
+	stagingShard.toDelete[*blockHash] = struct{}{}
+}
+
+// utxoDiffHashAsKey converts a block hash into its key in the diff bucket.
+func (uds *utxoDiffStore) utxoDiffHashAsKey(hash *externalapi.DomainHash) model.DBKey {
+	return uds.utxoDiffBucket.Key(hash.ByteSlice())
+}
+
+// utxoDiffChildHashAsKey converts a block hash into its key in the child bucket.
+func (uds *utxoDiffStore) utxoDiffChildHashAsKey(hash *externalapi.DomainHash) model.DBKey {
+	return uds.utxoDiffChildBucket.Key(hash.ByteSlice())
+}
+
+// serializeUTXODiff proto-serializes a UTXO diff for storage.
+func (uds *utxoDiffStore) serializeUTXODiff(utxoDiff externalapi.UTXODiff) ([]byte, error) {
+	dbUtxoDiff, err := serialization.UTXODiffToDBUTXODiff(utxoDiff)
+	if err != nil {
+		return nil, err
+	}
+
+	bytes, err := proto.Marshal(dbUtxoDiff)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return bytes, nil
+}
+
+// deserializeUTXODiff decodes a proto-serialized UTXO diff.
+func (uds *utxoDiffStore) deserializeUTXODiff(utxoDiffBytes []byte) (externalapi.UTXODiff, error) {
+	dbUTXODiff := &serialization.DbUtxoDiff{}
+	err := proto.Unmarshal(utxoDiffBytes, dbUTXODiff)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return serialization.DBUTXODiffToUTXODiff(dbUTXODiff)
+}
+
+// serializeUTXODiffChild proto-serializes a diff-child hash for storage.
+func (uds *utxoDiffStore) serializeUTXODiffChild(utxoDiffChild *externalapi.DomainHash) ([]byte, error) {
+	bytes, err := proto.Marshal(serialization.DomainHashToDbHash(utxoDiffChild))
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return bytes, nil
+}
+
+// deserializeUTXODiffChild decodes a proto-serialized diff-child hash.
+func (uds *utxoDiffStore) deserializeUTXODiffChild(utxoDiffChildBytes []byte) (*externalapi.DomainHash, error) {
+	dbHash := &serialization.DbHash{}
+	err := proto.Unmarshal(utxoDiffChildBytes, dbHash)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return serialization.DbHashToDomainHash(dbHash)
+}
diff --git a/domain/consensus/factory.go b/domain/consensus/factory.go
new file mode 100644
index 0000000..9e34367
--- /dev/null
+++ b/domain/consensus/factory.go
@@ -0,0 +1,742 @@
+package consensus
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus/datastructures/blockwindowheapslicestore"
+	
"github.com/spectre-project/spectred/domain/consensus/datastructures/daawindowstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/mergedepthrootstore" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/processes/blockparentbuilder" + parentssanager "github.com/spectre-project/spectred/domain/consensus/processes/parentsmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/pruningproofmanager" + "github.com/spectre-project/spectred/util/staging" + + "github.com/spectre-project/spectred/domain/prefixmanager/prefix" + "github.com/spectre-project/spectred/util/txmass" + + consensusdatabase "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/datastructures/acceptancedatastore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/blockheaderstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/blockrelationstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/blockstatusstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/blockstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/consensusstatestore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/daablocksstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/finalitystore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/ghostdagdatastore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/headersselectedchainstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/headersselectedtipstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/multisetstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/pruningstore" + 
"github.com/spectre-project/spectred/domain/consensus/datastructures/reachabilitydatastore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/utxodiffstore" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/processes/blockbuilder" + "github.com/spectre-project/spectred/domain/consensus/processes/blockprocessor" + "github.com/spectre-project/spectred/domain/consensus/processes/blockvalidator" + "github.com/spectre-project/spectred/domain/consensus/processes/coinbasemanager" + "github.com/spectre-project/spectred/domain/consensus/processes/consensusstatemanager" + "github.com/spectre-project/spectred/domain/consensus/processes/dagtopologymanager" + "github.com/spectre-project/spectred/domain/consensus/processes/dagtraversalmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/difficultymanager" + "github.com/spectre-project/spectred/domain/consensus/processes/finalitymanager" + "github.com/spectre-project/spectred/domain/consensus/processes/ghostdagmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/headersselectedtipmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/mergedepthmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/pastmediantimemanager" + "github.com/spectre-project/spectred/domain/consensus/processes/pruningmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/reachabilitymanager" + "github.com/spectre-project/spectred/domain/consensus/processes/syncmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/transactionvalidator" + "github.com/spectre-project/spectred/domain/dagconfig" + infrastructuredatabase "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" +) + +const ( 
+	defaultTestLeveldbCacheSizeMiB = 8
+	defaultPreallocateCaches       = true
+	defaultTestPreallocateCaches   = false
+)
+
+// Config is the full config required to run consensus
+type Config struct {
+	dagconfig.Params
+	// IsArchival tells the consensus if it should not prune old blocks
+	IsArchival bool
+	// EnableSanityCheckPruningUTXOSet checks the full pruning point utxo set against the commitment at every pruning movement
+	EnableSanityCheckPruningUTXOSet bool
+
+	// SkipAddingGenesis, when set, skips inserting the genesis block on startup.
+	SkipAddingGenesis bool
+}
+
+// Factory instantiates new Consensuses
+// The SetTest* methods configure test-only overrides used by NewTestConsensus.
+type Factory interface {
+	NewConsensus(config *Config, db infrastructuredatabase.Database, dbPrefix *prefix.Prefix,
+		consensusEventsChan chan externalapi.ConsensusEvent) (
+		externalapi.Consensus, bool, error)
+	NewTestConsensus(config *Config, testName string) (
+		tc testapi.TestConsensus, teardown func(keepDataDir bool), err error)
+
+	SetTestDataDir(dataDir string)
+	SetTestGHOSTDAGManager(ghostdagConstructor GHOSTDAGManagerConstructor)
+	SetTestLevelDBCacheSize(cacheSizeMiB int)
+	SetTestPreAllocateCache(preallocateCaches bool)
+	SetTestPastMedianTimeManager(medianTimeConstructor PastMedianTimeManagerConstructor)
+	SetTestDifficultyManager(difficultyConstructor DifficultyManagerConstructor)
+}
+
+// factory is the concrete Factory; nil pointer fields mean "use the default".
+// NOTE(review): "pastMedianTimeConsructor" is misspelled (Consructor) — private
+// field, so harmless, but worth fixing upstream.
+type factory struct {
+	dataDir                  string
+	ghostdagConstructor      GHOSTDAGManagerConstructor
+	pastMedianTimeConsructor PastMedianTimeManagerConstructor
+	difficultyConstructor    DifficultyManagerConstructor
+	cacheSizeMiB             *int
+	preallocateCaches        *bool
+}
+
+// NewFactory creates a new Consensus factory
+// with the production constructors for GHOSTDAG, past-median-time and difficulty.
+func NewFactory() Factory {
+	return &factory{
+		ghostdagConstructor:      ghostdagmanager.New,
+		pastMedianTimeConsructor: pastmediantimemanager.New,
+		difficultyConstructor:    difficultymanager.New,
+	}
+}
+
+// NewConsensus instantiates a new Consensus
+func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Database, dbPrefix *prefix.Prefix,
+	consensusEventsChan chan externalapi.ConsensusEvent) (
+	consensusInstance 
externalapi.Consensus, shouldMigrate bool, err error) { + + dbManager := consensusdatabase.New(db) + prefixBucket := consensusdatabase.MakeBucket(dbPrefix.Serialize()) + + pruningWindowSizeForCaches := int(config.PruningDepth()) + + var preallocateCaches bool + if f.preallocateCaches != nil { + preallocateCaches = *f.preallocateCaches + } else { + preallocateCaches = defaultPreallocateCaches + } + + // This is used for caches that are used as part of deletePastBlocks that need to traverse until + // the previous pruning point. + pruningWindowSizePlusFinalityDepthForCache := int(config.PruningDepth() + config.FinalityDepth()) + + // Data Structures + mergeDepthRootStore := mergedepthrootstore.New(prefixBucket, 200, preallocateCaches) + daaWindowStore := daawindowstore.New(prefixBucket, 10_000, preallocateCaches) + acceptanceDataStore := acceptancedatastore.New(prefixBucket, 200, preallocateCaches) + blockStore, err := blockstore.New(dbManager, prefixBucket, 200, preallocateCaches) + if err != nil { + return nil, false, err + } + blockHeaderStore, err := blockheaderstore.New(dbManager, prefixBucket, 10_000, preallocateCaches) + if err != nil { + return nil, false, err + } + + blockStatusStore := blockstatusstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) + multisetStore := multisetstore.New(prefixBucket, 200, preallocateCaches) + pruningStore := pruningstore.New(prefixBucket, 2, preallocateCaches) + utxoDiffStore := utxodiffstore.New(prefixBucket, 200, preallocateCaches) + consensusStateStore := consensusstatestore.New(prefixBucket, 10_000, preallocateCaches) + + headersSelectedTipStore := headersselectedtipstore.New(prefixBucket) + finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches) + headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches) + daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, 
int(config.FinalityDepth()), preallocateCaches) + windowHeapSliceStore := blockwindowheapslicestore.New(2000, preallocateCaches) + + newReachabilityDataStore := reachabilitydatastore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache*2, preallocateCaches) + blockRelationStores, reachabilityDataStores, ghostdagDataStores := dagStores(config, prefixBucket, pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches, preallocateCaches) + oldReachabilityManager := reachabilitymanager.New( + dbManager, + ghostdagDataStores[0], + reachabilityDataStores[0]) + isOldReachabilityInitialized, err := reachabilityDataStores[0].HasReachabilityData(dbManager, model.NewStagingArea(), model.VirtualGenesisBlockHash) + if err != nil { + return nil, false, err + } + + newReachabilityManager := reachabilitymanager.New( + dbManager, + ghostdagDataStores[0], + newReachabilityDataStore) + reachabilityManager := newReachabilityManager + if isOldReachabilityInitialized { + reachabilityManager = oldReachabilityManager + } else { + for i := range reachabilityDataStores { + reachabilityDataStores[i] = newReachabilityDataStore + } + } + reachabilityDataStore := reachabilityDataStores[0] + + dagTopologyManagers, ghostdagManagers, dagTraversalManagers := f.dagProcesses(config, dbManager, blockHeaderStore, daaWindowStore, windowHeapSliceStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores, isOldReachabilityInitialized) + + blockRelationStore := blockRelationStores[0] + + ghostdagDataStore := ghostdagDataStores[0] + + dagTopologyManager := dagTopologyManagers[0] + ghostdagManager := ghostdagManagers[0] + dagTraversalManager := dagTraversalManagers[0] + + // Processes + parentsManager := parentssanager.New(config.GenesisHash, config.MaxBlockLevel) + blockParentBuilder := blockparentbuilder.New( + dbManager, + blockHeaderStore, + dagTopologyManager, + parentsManager, + reachabilityDataStore, + pruningStore, + + config.GenesisHash, + config.MaxBlockLevel, + ) + + 
txMassCalculator := txmass.NewCalculator(config.MassPerTxByte, config.MassPerScriptPubKeyByte, config.MassPerSigOp) + + pastMedianTimeManager := f.pastMedianTimeConsructor( + config.TimestampDeviationTolerance, + dbManager, + dagTraversalManager, + blockHeaderStore, + ghostdagDataStore, + config.GenesisHash) + transactionValidator := transactionvalidator.New(config.BlockCoinbaseMaturity, + config.EnableNonNativeSubnetworks, + config.MaxCoinbasePayloadLength, + config.K, + config.CoinbasePayloadScriptPublicKeyMaxLength, + dbManager, + pastMedianTimeManager, + ghostdagDataStore, + daaBlocksStore, + txMassCalculator) + difficultyManager := f.difficultyConstructor( + dbManager, + ghostdagManager, + ghostdagDataStore, + blockHeaderStore, + daaBlocksStore, + dagTopologyManager, + dagTraversalManager, + config.PowMax, + config.DifficultyAdjustmentWindowSize, + config.DisableDifficultyAdjustment, + config.TargetTimePerBlock, + config.GenesisHash, + config.GenesisBlock.Header.Bits()) + coinbaseManager := coinbasemanager.New( + dbManager, + + config.SubsidyGenesisReward, + config.PreDeflationaryPhaseBaseSubsidy, + config.CoinbasePayloadScriptPublicKeyMaxLength, + config.GenesisHash, + config.DeflationaryPhaseDaaScore, + config.DeflationaryPhaseBaseSubsidy, + + dagTraversalManager, + ghostdagDataStore, + acceptanceDataStore, + daaBlocksStore, + blockStore, + pruningStore, + blockHeaderStore) + headerTipsManager := headersselectedtipmanager.New(dbManager, dagTopologyManager, dagTraversalManager, + ghostdagManager, headersSelectedTipStore, headersSelectedChainStore) + genesisHash := config.GenesisHash + finalityManager := finalitymanager.New( + dbManager, + dagTopologyManager, + finalityStore, + ghostdagDataStore, + pruningStore, + genesisHash, + config.FinalityDepth()) + mergeDepthManager := mergedepthmanager.New( + dbManager, + dagTopologyManager, + dagTraversalManager, + finalityManager, + genesisHash, + config.MergeDepth, + ghostdagDataStore, + mergeDepthRootStore, + 
daaBlocksStore, + pruningStore, + finalityStore) + consensusStateManager, err := consensusstatemanager.New( + dbManager, + config.MaxBlockParents, + config.MergeSetSizeLimit, + genesisHash, + + ghostdagManager, + dagTopologyManager, + dagTraversalManager, + pastMedianTimeManager, + transactionValidator, + coinbaseManager, + mergeDepthManager, + finalityManager, + difficultyManager, + + blockStatusStore, + ghostdagDataStore, + consensusStateStore, + multisetStore, + blockStore, + utxoDiffStore, + blockRelationStore, + acceptanceDataStore, + blockHeaderStore, + headersSelectedTipStore, + pruningStore, + daaBlocksStore) + if err != nil { + return nil, false, err + } + + pruningManager := pruningmanager.New( + dbManager, + dagTraversalManager, + dagTopologyManager, + consensusStateManager, + finalityManager, + + consensusStateStore, + ghostdagDataStore, + pruningStore, + blockStatusStore, + headersSelectedTipStore, + multisetStore, + acceptanceDataStore, + blockStore, + blockHeaderStore, + utxoDiffStore, + daaBlocksStore, + reachabilityDataStore, + daaWindowStore, + + config.IsArchival, + genesisHash, + config.FinalityDepth(), + config.PruningDepth(), + config.EnableSanityCheckPruningUTXOSet, + config.K, + config.DifficultyAdjustmentWindowSize, + ) + + blockValidator := blockvalidator.New( + config.PowMax, + config.SkipProofOfWork, + genesisHash, + config.EnableNonNativeSubnetworks, + config.MaxBlockMass, + config.MergeSetSizeLimit, + config.MaxBlockParents, + config.TimestampDeviationTolerance, + config.TargetTimePerBlock, + config.MaxBlockLevel, + + dbManager, + difficultyManager, + pastMedianTimeManager, + transactionValidator, + ghostdagManagers, + dagTopologyManagers, + dagTraversalManager, + coinbaseManager, + mergeDepthManager, + reachabilityManager, + finalityManager, + blockParentBuilder, + pruningManager, + parentsManager, + + pruningStore, + blockStore, + ghostdagDataStores, + blockHeaderStore, + blockStatusStore, + reachabilityDataStore, + 
consensusStateStore, + daaBlocksStore, + + txMassCalculator, + ) + + syncManager := syncmanager.New( + dbManager, + genesisHash, + config.MergeSetSizeLimit, + dagTraversalManager, + dagTopologyManager, + ghostdagManager, + pruningManager, + + ghostdagDataStore, + blockStatusStore, + blockHeaderStore, + blockStore, + pruningStore, + headersSelectedChainStore) + + blockBuilder := blockbuilder.New( + dbManager, + genesisHash, + + difficultyManager, + pastMedianTimeManager, + coinbaseManager, + consensusStateManager, + ghostdagManager, + transactionValidator, + finalityManager, + blockParentBuilder, + pruningManager, + + acceptanceDataStore, + blockRelationStore, + multisetStore, + ghostdagDataStore, + daaBlocksStore, + ) + + blockProcessor := blockprocessor.New( + genesisHash, + config.TargetTimePerBlock, + config.MaxBlockLevel, + dbManager, + consensusStateManager, + pruningManager, + blockValidator, + dagTopologyManager, + reachabilityManager, + difficultyManager, + pastMedianTimeManager, + coinbaseManager, + headerTipsManager, + syncManager, + + acceptanceDataStore, + blockStore, + blockStatusStore, + blockRelationStore, + multisetStore, + ghostdagDataStore, + consensusStateStore, + pruningStore, + reachabilityDataStore, + utxoDiffStore, + blockHeaderStore, + headersSelectedTipStore, + finalityStore, + headersSelectedChainStore, + daaBlocksStore, + daaWindowStore) + + pruningProofManager := pruningproofmanager.New( + dbManager, + dagTopologyManagers, + ghostdagManagers, + reachabilityManager, + dagTraversalManagers, + parentsManager, + pruningManager, + + ghostdagDataStores, + pruningStore, + blockHeaderStore, + blockStatusStore, + finalityStore, + consensusStateStore, + blockRelationStore, + reachabilityDataStore, + + genesisHash, + config.K, + config.PruningProofM, + config.MaxBlockLevel, + ) + + c := &consensus{ + lock: &sync.Mutex{}, + databaseContext: dbManager, + + genesisBlock: config.GenesisBlock, + genesisHash: config.GenesisHash, + + 
expectedDAAWindowDurationInMilliseconds: config.TargetTimePerBlock.Milliseconds() * + int64(config.DifficultyAdjustmentWindowSize), + + blockProcessor: blockProcessor, + blockBuilder: blockBuilder, + consensusStateManager: consensusStateManager, + transactionValidator: transactionValidator, + syncManager: syncManager, + pastMedianTimeManager: pastMedianTimeManager, + blockValidator: blockValidator, + coinbaseManager: coinbaseManager, + dagTopologyManagers: dagTopologyManagers, + dagTraversalManager: dagTraversalManager, + difficultyManager: difficultyManager, + ghostdagManagers: ghostdagManagers, + headerTipsManager: headerTipsManager, + mergeDepthManager: mergeDepthManager, + pruningManager: pruningManager, + reachabilityManager: reachabilityManager, + finalityManager: finalityManager, + pruningProofManager: pruningProofManager, + + acceptanceDataStore: acceptanceDataStore, + blockStore: blockStore, + blockHeaderStore: blockHeaderStore, + pruningStore: pruningStore, + ghostdagDataStores: ghostdagDataStores, + blockStatusStore: blockStatusStore, + blockRelationStores: blockRelationStores, + consensusStateStore: consensusStateStore, + headersSelectedTipStore: headersSelectedTipStore, + multisetStore: multisetStore, + reachabilityDataStore: reachabilityDataStore, + utxoDiffStore: utxoDiffStore, + finalityStore: finalityStore, + headersSelectedChainStore: headersSelectedChainStore, + daaBlocksStore: daaBlocksStore, + blocksWithTrustedDataDAAWindowStore: daaWindowStore, + + consensusEventsChan: consensusEventsChan, + virtualNotUpdated: true, + } + + if isOldReachabilityInitialized { + return c, true, nil + } + + err = c.Init(config.SkipAddingGenesis) + if err != nil { + return nil, false, err + } + + err = consensusStateManager.RecoverUTXOIfRequired() + if err != nil { + return nil, false, err + } + err = pruningManager.ClearImportedPruningPointData() + if err != nil { + return nil, false, err + } + err = pruningManager.UpdatePruningPointIfRequired() + if err != nil { 
+ return nil, false, err + } + + // If the virtual moved before shutdown but the pruning point hasn't, we + // move it if needed. + stagingArea := model.NewStagingArea() + err = pruningManager.UpdatePruningPointByVirtual(stagingArea) + if err != nil { + return nil, false, err + } + + err = staging.CommitAllChanges(dbManager, stagingArea) + if err != nil { + return nil, false, err + } + + err = pruningManager.UpdatePruningPointIfRequired() + if err != nil { + return nil, false, err + } + + return c, false, nil +} + +func (f *factory) NewTestConsensus(config *Config, testName string) ( + tc testapi.TestConsensus, teardown func(keepDataDir bool), err error) { + datadir := f.dataDir + if datadir == "" { + datadir, err = ioutil.TempDir("", testName) + if err != nil { + return nil, nil, err + } + } + var cacheSizeMiB int + if f.cacheSizeMiB != nil { + cacheSizeMiB = *f.cacheSizeMiB + } else { + cacheSizeMiB = defaultTestLeveldbCacheSizeMiB + } + if f.preallocateCaches == nil { + f.SetTestPreAllocateCache(defaultTestPreallocateCaches) + } + db, err := ldb.NewLevelDB(datadir, cacheSizeMiB) + if err != nil { + return nil, nil, err + } + + testConsensusDBPrefix := &prefix.Prefix{} + consensusAsInterface, shouldMigrate, err := f.NewConsensus(config, db, testConsensusDBPrefix, nil) + if err != nil { + return nil, nil, err + } + + if shouldMigrate { + return nil, nil, errors.Errorf("A fresh consensus should never return shouldMigrate=true") + } + + consensusAsImplementation := consensusAsInterface.(*consensus) + testConsensusStateManager := consensusstatemanager.NewTestConsensusStateManager(consensusAsImplementation.consensusStateManager) + testTransactionValidator := transactionvalidator.NewTestTransactionValidator(consensusAsImplementation.transactionValidator) + + tstConsensus := &testConsensus{ + dagParams: &config.Params, + consensus: consensusAsImplementation, + database: db, + testConsensusStateManager: testConsensusStateManager, + testReachabilityManager: 
reachabilitymanager.NewTestReachabilityManager(consensusAsImplementation. + reachabilityManager), + testTransactionValidator: testTransactionValidator, + } + tstConsensus.testBlockBuilder = blockbuilder.NewTestBlockBuilder(consensusAsImplementation.blockBuilder, tstConsensus) + teardown = func(keepDataDir bool) { + db.Close() + if !keepDataDir { + err := os.RemoveAll(f.dataDir) + if err != nil { + log.Errorf("Error removing data directory for test consensus: %s", err) + } + } + } + return tstConsensus, teardown, nil +} + +func (f *factory) SetTestDataDir(dataDir string) { + f.dataDir = dataDir +} + +func (f *factory) SetTestGHOSTDAGManager(ghostdagConstructor GHOSTDAGManagerConstructor) { + f.ghostdagConstructor = ghostdagConstructor +} + +func (f *factory) SetTestPastMedianTimeManager(medianTimeConstructor PastMedianTimeManagerConstructor) { + f.pastMedianTimeConsructor = medianTimeConstructor +} + +// SetTestDifficultyManager is a setter for the difficultyManager field on the factory. 
+func (f *factory) SetTestDifficultyManager(difficultyConstructor DifficultyManagerConstructor) { + f.difficultyConstructor = difficultyConstructor +} + +func (f *factory) SetTestLevelDBCacheSize(cacheSizeMiB int) { + f.cacheSizeMiB = &cacheSizeMiB +} +func (f *factory) SetTestPreAllocateCache(preallocateCaches bool) { + f.preallocateCaches = &preallocateCaches +} + +func dagStores(config *Config, + prefixBucket model.DBBucket, + pruningWindowSizePlusFinalityDepthForCache, pruningWindowSizeForCaches int, + preallocateCaches bool) ([]model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore) { + + blockRelationStores := make([]model.BlockRelationStore, config.MaxBlockLevel+1) + reachabilityDataStores := make([]model.ReachabilityDataStore, config.MaxBlockLevel+1) + ghostdagDataStores := make([]model.GHOSTDAGDataStore, config.MaxBlockLevel+1) + + ghostdagDataCacheSize := pruningWindowSizeForCaches * 2 + if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize { + ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize + } + + for i := 0; i <= config.MaxBlockLevel; i++ { + prefixBucket := prefixBucket.Bucket([]byte{byte(i)}) + if i == 0 { + blockRelationStores[i] = blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) + reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache*2, preallocateCaches) + ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches) + } else { + blockRelationStores[i] = blockrelationstore.New(prefixBucket, 200, false) + reachabilityDataStores[i] = reachabilitydatastore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, false) + ghostdagDataStores[i] = ghostdagdatastore.New(prefixBucket, 200, false) + } + } + + return blockRelationStores, reachabilityDataStores, ghostdagDataStores +} + +func (f *factory) dagProcesses(config *Config, + dbManager 
model.DBManager, + blockHeaderStore model.BlockHeaderStore, + daaWindowStore model.BlocksWithTrustedDataDAAWindowStore, + windowHeapSliceStore model.WindowHeapSliceStore, + blockRelationStores []model.BlockRelationStore, + reachabilityDataStores []model.ReachabilityDataStore, + ghostdagDataStores []model.GHOSTDAGDataStore, + isOldReachabilityInitialized bool) ( + []model.DAGTopologyManager, + []model.GHOSTDAGManager, + []model.DAGTraversalManager, +) { + + reachabilityManagers := make([]model.ReachabilityManager, config.MaxBlockLevel+1) + dagTopologyManagers := make([]model.DAGTopologyManager, config.MaxBlockLevel+1) + ghostdagManagers := make([]model.GHOSTDAGManager, config.MaxBlockLevel+1) + dagTraversalManagers := make([]model.DAGTraversalManager, config.MaxBlockLevel+1) + + newReachabilityManager := reachabilitymanager.New( + dbManager, + ghostdagDataStores[0], + reachabilityDataStores[0]) + + for i := 0; i <= config.MaxBlockLevel; i++ { + if isOldReachabilityInitialized { + reachabilityManagers[i] = reachabilitymanager.New( + dbManager, + ghostdagDataStores[i], + reachabilityDataStores[i]) + } else { + reachabilityManagers[i] = newReachabilityManager + } + + dagTopologyManagers[i] = dagtopologymanager.New( + dbManager, + reachabilityManagers[i], + blockRelationStores[i], + ghostdagDataStores[i]) + + ghostdagManagers[i] = f.ghostdagConstructor( + dbManager, + dagTopologyManagers[i], + ghostdagDataStores[i], + blockHeaderStore, + config.K, + config.GenesisHash) + + dagTraversalManagers[i] = dagtraversalmanager.New( + dbManager, + dagTopologyManagers[i], + ghostdagDataStores[i], + reachabilityManagers[i], + ghostdagManagers[i], + daaWindowStore, + windowHeapSliceStore, + config.GenesisHash, + config.DifficultyAdjustmentWindowSize) + } + + return dagTopologyManagers, ghostdagManagers, dagTraversalManagers +} diff --git a/domain/consensus/factory_test.go b/domain/consensus/factory_test.go new file mode 100644 index 0000000..e81592c --- /dev/null +++ 
b/domain/consensus/factory_test.go @@ -0,0 +1,36 @@ +package consensus + +import ( + "io/ioutil" + "testing" + + "github.com/spectre-project/spectred/domain/prefixmanager/prefix" + + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" +) + +func TestNewConsensus(t *testing.T) { + f := NewFactory() + + config := &Config{Params: dagconfig.DevnetParams} + + tmpDir, err := ioutil.TempDir("", "TestNewConsensus") + if err != nil { + return + } + + db, err := ldb.NewLevelDB(tmpDir, 8) + if err != nil { + t.Fatalf("error in NewLevelDB: %s", err) + } + + _, shouldMigrate, err := f.NewConsensus(config, db, &prefix.Prefix{}, nil) + if err != nil { + t.Fatalf("error in NewConsensus: %+v", err) + } + + if shouldMigrate { + t.Fatalf("A fresh consensus should never return shouldMigrate=true") + } +} diff --git a/domain/consensus/finality_test.go b/domain/consensus/finality_test.go new file mode 100644 index 0000000..a6bcaeb --- /dev/null +++ b/domain/consensus/finality_test.go @@ -0,0 +1,616 @@ +package consensus_test + +import ( + "fmt" + "math" + "math/rand" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestFinality(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + // Set finalityInterval to 20 blocks, so that test runs quickly + consensusConfig.FinalityDuration = 20 * consensusConfig.TargetTimePerBlock + + factory := consensus.NewFactory() + consensus, teardown, err 
:= factory.NewTestConsensus(consensusConfig, "TestFinality") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + buildAndInsertBlock := func(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) { + block, _, err := consensus.BuildBlockWithParents(parentHashes, nil, nil) + if err != nil { + return nil, err + } + + err = consensus.ValidateAndInsertBlock(block, true) + if err != nil { + return nil, err + } + return block, nil + } + + // Build a chain of `finalityInterval - 1` blocks + finalityInterval := consensusConfig.FinalityDepth() + var mainChainTip *externalapi.DomainBlock + mainChainTipHash := consensusConfig.GenesisHash + + for i := uint64(0); i < finalityInterval-1; i++ { + mainChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{mainChainTipHash}) + if err != nil { + t.Fatalf("TestFinality: Failed to process Block #%d: %+v", i, err) + } + mainChainTipHash = consensushashing.BlockHash(mainChainTip) + + blockInfo, err := consensus.GetBlockInfo(mainChainTipHash) + if err != nil { + t.Fatalf("TestFinality: Block #%d failed to get info: %+v", i, err) + } + if blockInfo.BlockStatus != externalapi.StatusUTXOValid { + t.Fatalf("Block #%d in main chain expected to have status '%s', but got '%s'", + i, externalapi.StatusUTXOValid, blockInfo.BlockStatus) + } + } + + // Mine another chain of `finalityInterval - 2` blocks + var sideChainTip *externalapi.DomainBlock + sideChainTipHash := consensusConfig.GenesisHash + for i := uint64(0); i < finalityInterval-2; i++ { + sideChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{sideChainTipHash}) + if err != nil { + t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %+v", i, err) + } + sideChainTipHash = consensushashing.BlockHash(sideChainTip) + + blockInfo, err := consensus.GetBlockInfo(sideChainTipHash) + if err != nil { + t.Fatalf("TestFinality: Block #%d failed to get info: %v", i, err) + } else if !blockInfo.Exists { + 
t.Fatalf("TestFinality: Failed getting block info, doesn't exists") + } + if blockInfo.BlockStatus != externalapi.StatusUTXOPendingVerification { + t.Fatalf("Block #%d in side chain expected to have status '%s', but got '%s'", + i, externalapi.StatusUTXOPendingVerification, blockInfo.BlockStatus) + } + } + + stagingArea := model.NewStagingArea() + + // Add two more blocks in the side-chain until it becomes the selected chain + for i := uint64(0); i < 2; i++ { + sideChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{sideChainTipHash}) + if err != nil { + t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err) + } + sideChainTipHash = consensushashing.BlockHash(sideChainTip) + } + + // Make sure that now the sideChainTip is valid and selectedTip + blockInfo, err := consensus.GetBlockInfo(sideChainTipHash) + if err != nil { + t.Fatalf("TestFinality: Failed to get block info: %v", err) + } else if !blockInfo.Exists { + t.Fatalf("TestFinality: Failed getting block info, doesn't exists") + } + if blockInfo.BlockStatus != externalapi.StatusUTXOValid { + t.Fatalf("TestFinality: Overtaking block in side-chain expected to have status '%s', but got '%s'", + externalapi.StatusUTXOValid, blockInfo.BlockStatus) + } + selectedTip, err := consensus.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("TestFinality: Failed getting virtual selectedParent: %v", err) + } + if !selectedTip.Equal(sideChainTipHash) { + t.Fatalf("Overtaking block in side-chain is not selectedTip") + } + + // Add two more blocks to main chain, to move finality point to first non-genesis block in mainChain + for i := uint64(0); i < 2; i++ { + mainChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{mainChainTipHash}) + if err != nil { + t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err) + } + mainChainTipHash = consensushashing.BlockHash(mainChainTip) + } + + virtualFinality, err := 
consensus.FinalityManager().VirtualFinalityPoint(stagingArea) + if err != nil { + t.Fatalf("TestFinality: Failed getting the virtual's finality point: %v", err) + } + + if virtualFinality.Equal(consensusConfig.GenesisHash) { + t.Fatalf("virtual's finalityPoint is still genesis after adding finalityInterval + 1 blocks to the main chain") + } + + // TODO: Make sure that a finality conflict notification is sent + // Add two more blocks to the side chain, so that it violates finality and gets status UTXOPendingVerification even + // though it is the block with the highest blue score. + for i := uint64(0); i < 2; i++ { + sideChainTip, err = buildAndInsertBlock([]*externalapi.DomainHash{sideChainTipHash}) + if err != nil { + t.Fatalf("TestFinality: Failed to process sidechain Block #%d: %v", i, err) + } + sideChainTipHash = consensushashing.BlockHash(sideChainTip) + } + + // Check that sideChainTip has higher blue score than the selected parent + selectedTip, err = consensus.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("TestFinality: Failed getting virtual selectedParent: %v", err) + } + selectedTipGhostDagData, err := + consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), stagingArea, selectedTip, false) + if err != nil { + t.Fatalf("TestFinality: Failed getting the ghost dag data of the selected tip: %v", err) + } + + sideChainTipGhostDagData, err := + consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), stagingArea, sideChainTipHash, false) + if err != nil { + t.Fatalf("TestFinality: Failed getting the ghost dag data of the sidechain tip: %v", err) + } + + if selectedTipGhostDagData.BlueWork().Cmp(sideChainTipGhostDagData.BlueWork()) == 1 { + t.Fatalf("sideChainTip is not the bluest tip when it is expected to be") + } + + // Blocks violating finality should have a UTXOPendingVerification status + blockInfo, err = consensus.GetBlockInfo(sideChainTipHash) + if err != nil { + t.Fatalf("TestFinality: Failed to get block info: %v", err) + } 
else if !blockInfo.Exists { + t.Fatalf("TestFinality: Failed getting block info, doesn't exists") + } + if blockInfo.BlockStatus != externalapi.StatusUTXOPendingVerification { + t.Fatalf("TestFinality: Finality violating block expected to have status '%s', but got '%s'", + externalapi.StatusUTXOPendingVerification, blockInfo.BlockStatus) + } + }) +} + +func TestBoundedMergeDepth(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + rd := rand.New(rand.NewSource(0)) + // Set finalityInterval to 20 blocks, so that test runs quickly + consensusConfig.K = 5 + consensusConfig.MergeDepth = 7 + consensusConfig.FinalityDuration = 20 * consensusConfig.TargetTimePerBlock + + if uint64(consensusConfig.K) >= consensusConfig.FinalityDepth() { + t.Fatal("K must be smaller than finality duration for this test to run") + } + + if uint64(consensusConfig.K) >= consensusConfig.MergeDepth { + t.Fatal("K must be smaller than merge depth for this test to run") + } + + checkViolatingMergeDepth := func(consensus testapi.TestConsensus, parents []*externalapi.DomainHash) (*externalapi.DomainBlock, bool) { + block, _, err := consensus.BuildBlockWithParents(parents, nil, nil) + if err != nil { + t.Fatalf("TestBoundedMergeDepth: BuildBlockWithParents failed: %+v", err) + return nil, false // for some reason Go doesn't recognize that t.Fatalf never returns + } + + err = consensus.ValidateAndInsertBlock(block, true) + if err == nil { + return block, false + } else if errors.Is(err, ruleerrors.ErrViolatingBoundedMergeDepth) { + return block, true + } else { + t.Fatalf("TestBoundedMergeDepth: expected err: %v, found err: %v", ruleerrors.ErrViolatingBoundedMergeDepth, err) + return nil, false // for some reason Go doesn't recognize that t.Fatalf never returns + } + } + + processBlock := func(consensus testapi.TestConsensus, block *externalapi.DomainBlock, name string) { + err := consensus.ValidateAndInsertBlock(block, true) + if err != nil { + 
t.Fatalf("TestBoundedMergeDepth: %s got unexpected error from ProcessBlock: %+v", name, err) + + } + } + + buildAndInsertBlock := func(consensus testapi.TestConsensus, parentHashes []*externalapi.DomainHash) *externalapi.DomainBlock { + block, _, err := consensus.BuildBlockWithParents(parentHashes, nil, nil) + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed building block: %+v", err) + } + err = consensus.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed Inserting block to consensus: %v", err) + } + return block + } + + getStatus := func(consensus testapi.TestConsensus, block *externalapi.DomainBlock) externalapi.BlockStatus { + blockInfo, err := consensus.GetBlockInfo(consensushashing.BlockHash(block)) + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed to get block info: %v", err) + } else if !blockInfo.Exists { + t.Fatalf("TestBoundedMergeDepth: Failed to get block info, block doesn't exists") + } + return blockInfo.BlockStatus + } + + syncConsensuses := func(tcSyncer, tcSyncee testapi.TestConsensus) { + syncerVirtualSelectedParent, err := tcSyncer.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("GetVirtualSelectedParent: %+v", err) + } + + missingHeaderHashes, _, err := tcSyncer.GetHashesBetween(consensusConfig.GenesisHash, syncerVirtualSelectedParent, math.MaxUint64) + if err != nil { + t.Fatalf("GetHashesBetween: %+v", err) + } + + for i, blocksHash := range missingHeaderHashes { + blockInfo, err := tcSyncee.GetBlockInfo(blocksHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if blockInfo.Exists { + continue + } + + block, _, err := tcSyncer.GetBlock(blocksHash) + if err != nil { + t.Fatalf("GetBlockHeader: %+v", err) + } + + err = tcSyncee.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err) + } + } + + synceeVirtualSelectedParent, err := tcSyncee.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("Tips: %+v", 
err) + } + + if !syncerVirtualSelectedParent.Equal(synceeVirtualSelectedParent) { + t.Fatalf("Syncee's selected tip is %s while syncer's is %s", synceeVirtualSelectedParent, syncerVirtualSelectedParent) + } + } + + factory := consensus.NewFactory() + consensusReal, teardownFunc2, err := factory.NewTestConsensus(consensusConfig, "TestBoundedMergeTestReal") + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err) + } + defer teardownFunc2(false) + + test := func(depth uint64, root *externalapi.DomainHash, checkVirtual, isRealDepth bool) { + consensusBuild, teardownFunc1, err := factory.NewTestConsensus(consensusConfig, "TestBoundedMergeTestBuild") + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Error setting up consensus: %+v", err) + } + defer teardownFunc1(false) + consensusBuild.BlockBuilder().SetNonceCounter(rd.Uint64()) + + syncConsensuses(consensusReal, consensusBuild) + // Create a block on top on genesis + block1 := buildAndInsertBlock(consensusBuild, []*externalapi.DomainHash{root}) + + // Create a chain + selectedChain := make([]*externalapi.DomainBlock, 0, depth+1) + parent := consensushashing.BlockHash(block1) + // Make sure this is always bigger than `blocksChain2` so it will stay the selected chain + for i := uint64(0); i < depth+2; i++ { + block := buildAndInsertBlock(consensusBuild, []*externalapi.DomainHash{parent}) + selectedChain = append(selectedChain, block) + parent = consensushashing.BlockHash(block) + } + + // Create another chain + blocksChain2 := make([]*externalapi.DomainBlock, 0, depth+1) + parent = consensushashing.BlockHash(block1) + for i := uint64(0); i < depth+1; i++ { + block := buildAndInsertBlock(consensusBuild, []*externalapi.DomainHash{parent}) + blocksChain2 = append(blocksChain2, block) + parent = consensushashing.BlockHash(block) + } + + // Now test against the real DAG + // submit block1 + processBlock(consensusReal, block1, "block1") + + // submit chain1 + for i, block := range 
selectedChain { + processBlock(consensusReal, block, fmt.Sprintf("selectedChain block No %d", i)) + } + + // submit chain2 + for i, block := range blocksChain2 { + processBlock(consensusReal, block, fmt.Sprintf("blocksChain2 block No %d", i)) + } + + // submit a block pointing at tip(chain1) and on first block in chain2 directly + mergeDepthViolatingBlockBottom, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(blocksChain2[0]), consensushashing.BlockHash(selectedChain[len(selectedChain)-1])}) + if isViolatingMergeDepth != isRealDepth { + t.Fatalf("TestBoundedMergeDepth: Expects isViolatingMergeDepth to be %t", isRealDepth) + } + + // submit a block pointing at tip(chain1) and tip(chain2) should also obviously violate merge depth (this points at first block in chain2 indirectly) + mergeDepthViolatingTop, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(blocksChain2[len(blocksChain2)-1]), consensushashing.BlockHash(selectedChain[len(selectedChain)-1])}) + if isViolatingMergeDepth != isRealDepth { + t.Fatalf("TestBoundedMergeDepth: Expects isViolatingMergeDepth to be %t", isRealDepth) + } + + // the location of the parents in the slices need to be both `-X` so the `selectedChain` one will have higher blueScore (it's a chain longer by 1) + kosherizingBlock, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(blocksChain2[len(blocksChain2)-3]), consensushashing.BlockHash(selectedChain[len(selectedChain)-3])}) + kosherizingBlockHash := consensushashing.BlockHash(kosherizingBlock) + if isViolatingMergeDepth { + t.Fatalf("TestBoundedMergeDepth: Expected blueKosherizingBlock to not violate merge depth") + } + + if checkVirtual { + stagingArea := model.NewStagingArea() + virtualGhotDagData, err := consensusReal.GHOSTDAGDataStore().Get(consensusReal.DatabaseContext(), + 
stagingArea, model.VirtualBlockHash, false) + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed getting the ghostdag data of the virtual: %v", err) + } + // Make sure it's actually blue + found := false + for _, blue := range virtualGhotDagData.MergeSetBlues() { + if blue.Equal(kosherizingBlockHash) { + found = true + break + } + } + if !found { + t.Fatalf("TestBoundedMergeDepth: Expected kosherizingBlock to be blue by the virtual") + } + } + + pointAtBlueKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{kosherizingBlockHash, consensushashing.BlockHash(selectedChain[len(selectedChain)-1])}) + if isViolatingMergeDepth { + t.Fatalf("TestBoundedMergeDepth: Expected selectedTip to not violate merge depth") + } + + if checkVirtual { + virtualSelectedParent, err := consensusReal.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed getting the virtual selected parent %v", err) + } + + if !virtualSelectedParent.Equal(consensushashing.BlockHash(pointAtBlueKosherizing)) { + t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", consensushashing.BlockHash(pointAtBlueKosherizing), virtualSelectedParent) + } + } + + // Now let's make the kosherizing block red and try to merge again + tip := consensushashing.BlockHash(selectedChain[len(selectedChain)-1]) + // we use k-1 because `kosherizingBlock` points at tip-2, so 2+k-1 = k+1 anticone. 
+ for i := 0; i < int(consensusConfig.K)-1; i++ { + block := buildAndInsertBlock(consensusReal, []*externalapi.DomainHash{tip}) + tip = consensushashing.BlockHash(block) + } + + if checkVirtual { + virtualSelectedParent, err := consensusReal.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed getting the virtual selected parent %v", err) + } + + if !virtualSelectedParent.Equal(tip) { + t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", tip, virtualSelectedParent) + } + + virtualGhotDagData, err := consensusReal.GHOSTDAGDataStore().Get( + consensusReal.DatabaseContext(), model.NewStagingArea(), model.VirtualBlockHash, false) + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed getting the ghostdag data of the virtual: %v", err) + } + // Make sure it's actually blue + found := false + for _, blue := range virtualGhotDagData.MergeSetBlues() { + if blue.Equal(kosherizingBlockHash) { + found = true + break + } + } + if found { + t.Fatalf("expected kosherizingBlock to be red by the virtual") + } + } + + pointAtRedKosherizing, isViolatingMergeDepth := checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{kosherizingBlockHash, tip}) + if isViolatingMergeDepth != isRealDepth { + t.Fatalf("TestBoundedMergeDepth: Expects isViolatingMergeDepth to be %t", isRealDepth) + } + + // Now `pointAtBlueKosherizing` itself is actually still blue, so we can still point at that even though we can't point at kosherizing directly anymore + transitiveBlueKosherizing, isViolatingMergeDepth := + checkViolatingMergeDepth(consensusReal, []*externalapi.DomainHash{consensushashing.BlockHash(pointAtBlueKosherizing), tip}) + if isViolatingMergeDepth { + t.Fatalf("TestBoundedMergeDepth: Expected transitiveBlueKosherizing to not violate merge depth") + } + + if checkVirtual { + virtualSelectedParent, err := consensusReal.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("TestBoundedMergeDepth: Failed 
getting the virtual selected parent %v", err) + } + + if !virtualSelectedParent.Equal(consensushashing.BlockHash(transitiveBlueKosherizing)) { + t.Fatalf("TestBoundedMergeDepth: Expected %s to be the selectedTip but found %s instead", consensushashing.BlockHash(transitiveBlueKosherizing), virtualSelectedParent) + } + + // Lets validate the status of all the interesting blocks + if getStatus(consensusReal, pointAtBlueKosherizing) != externalapi.StatusUTXOValid { + t.Fatalf("TestBoundedMergeDepth: pointAtBlueKosherizing expected status '%s' but got '%s'", externalapi.StatusUTXOValid, getStatus(consensusReal, pointAtBlueKosherizing)) + } + if getStatus(consensusReal, pointAtRedKosherizing) != externalapi.StatusInvalid { + t.Fatalf("TestBoundedMergeDepth: pointAtRedKosherizing expected status '%s' but got '%s'", externalapi.StatusInvalid, getStatus(consensusReal, pointAtRedKosherizing)) + } + if getStatus(consensusReal, transitiveBlueKosherizing) != externalapi.StatusUTXOValid { + t.Fatalf("TestBoundedMergeDepth: transitiveBlueKosherizing expected status '%s' but got '%s'", externalapi.StatusUTXOValid, getStatus(consensusReal, transitiveBlueKosherizing)) + } + if getStatus(consensusReal, mergeDepthViolatingBlockBottom) != externalapi.StatusInvalid { + t.Fatalf("TestBoundedMergeDepth: mergeDepthViolatingBlockBottom expected status '%s' but got '%s'", externalapi.StatusInvalid, getStatus(consensusReal, mergeDepthViolatingBlockBottom)) + } + if getStatus(consensusReal, mergeDepthViolatingTop) != externalapi.StatusInvalid { + t.Fatalf("TestBoundedMergeDepth: mergeDepthViolatingTop expected status '%s' but got '%s'", externalapi.StatusInvalid, getStatus(consensusReal, mergeDepthViolatingTop)) + } + if getStatus(consensusReal, kosherizingBlock) != externalapi.StatusUTXOPendingVerification { + t.Fatalf("kosherizingBlock expected status '%s' but got '%s'", externalapi.StatusUTXOPendingVerification, getStatus(consensusReal, kosherizingBlock)) + } + + for i, b := range 
blocksChain2 { + if getStatus(consensusReal, b) != externalapi.StatusUTXOPendingVerification { + t.Fatalf("blocksChain2[%d] expected status '%s' but got '%s'", i, externalapi.StatusUTXOPendingVerification, getStatus(consensusReal, b)) + } + } + for i, b := range selectedChain { + if getStatus(consensusReal, b) != externalapi.StatusUTXOValid { + t.Fatalf("selectedChain[%d] expected status '%s' but got '%s'", i, externalapi.StatusUTXOValid, getStatus(consensusReal, b)) + } + } + } + } + + test(consensusConfig.MergeDepth, consensusConfig.GenesisHash, true, true) + }) +} + +func TestFinalityResolveVirtual(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + // Set finalityInterval to 20 blocks, so that test runs quickly + consensusConfig.FinalityDuration = 20 * consensusConfig.TargetTimePerBlock + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestFinalityResolveVirtual") + if err != nil { + panic(err) + } + defer teardown(false) + + tip := consensusConfig.GenesisHash + for { + tip, _, err = tc.AddBlock([]*externalapi.DomainHash{tip}, nil, nil) + if err != nil { + t.Fatal(err) + } + + virtualFinalityPoint, err := tc.FinalityManager().VirtualFinalityPoint(model.NewStagingArea()) + if err != nil { + t.Fatal(err) + } + + if !virtualFinalityPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + + tcAttacker, teardownAttacker, err := factory.NewTestConsensus(consensusConfig, "TestFinalityResolveVirtual_attacker") + if err != nil { + panic(err) + } + defer teardownAttacker(false) + + virtualSelectedParent, err := tc.GetVirtualSelectedParent() + if err != nil { + panic(err) + } + + stagingArea := model.NewStagingArea() + virtualSelectedParentGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, virtualSelectedParent, false) + if err != nil { + panic(err) + } + + t.Logf("Selected tip blue score %d", 
virtualSelectedParentGHOSTDAGData.BlueScore()) + + sideChain := make([]*externalapi.DomainBlock, 0) + + for i := uint64(0); ; i++ { + tips, err := tcAttacker.Tips() + if err != nil { + panic(err) + } + + block, _, err := tcAttacker.BuildBlockWithParents(tips, nil, nil) + if err != nil { + panic(err) + } + + // We change the nonce of the first block so its hash won't be similar to any of the + // honest DAG blocks. As a result the rest of the side chain should have unique hashes + // as well. + if i == 0 { + mutableHeader := block.Header.ToMutable() + mutableHeader.SetNonce(uint64(rand.NewSource(84147).Int63())) + block.Header = mutableHeader.ToImmutable() + } + + err = tcAttacker.ValidateAndInsertBlock(block, true) + if err != nil { + panic(err) + } + + sideChain = append(sideChain, block) + + blockHash := consensushashing.BlockHash(block) + ghostdagData, err := tcAttacker.GHOSTDAGDataStore().Get(tcAttacker.DatabaseContext(), stagingArea, blockHash, false) + if err != nil { + panic(err) + } + + if virtualSelectedParentGHOSTDAGData.BlueWork().Cmp(ghostdagData.BlueWork()) == -1 { + break + } + } + + sideChainTipHash := consensushashing.BlockHash(sideChain[len(sideChain)-1]) + sideChainTipGHOSTDAGData, err := tcAttacker.GHOSTDAGDataStore().Get(tcAttacker.DatabaseContext(), stagingArea, sideChainTipHash, false) + if err != nil { + panic(err) + } + + t.Logf("Side chain tip (%s) blue score %d", sideChainTipHash, sideChainTipGHOSTDAGData.BlueScore()) + + for _, block := range sideChain { + err := tc.ValidateAndInsertBlock(block, false) + if err != nil { + panic(err) + } + } + + err = tc.ResolveVirtual(nil) + if err != nil { + panic(err) + } + + t.Log("Resolved virtual") + + sideChainTipGHOSTDAGData, err = tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, sideChainTipHash, false) + if err != nil { + panic(err) + } + + t.Logf("Side chain tip (%s) blue score %d", sideChainTipHash, sideChainTipGHOSTDAGData.BlueScore()) + + newVirtualSelectedParent, err := 
tc.GetVirtualSelectedParent() + if err != nil { + panic(err) + } + + if !newVirtualSelectedParent.Equal(virtualSelectedParent) { + t.Fatalf("A finality reorg has happened") + } + }) +} diff --git a/domain/consensus/log.go b/domain/consensus/log.go new file mode 100644 index 0000000..887353c --- /dev/null +++ b/domain/consensus/log.go @@ -0,0 +1,9 @@ +package consensus + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("BDAG") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/domain/consensus/model/acceptancedata_equal_clone_test.go b/domain/consensus/model/acceptancedata_equal_clone_test.go new file mode 100644 index 0000000..be8ceb0 --- /dev/null +++ b/domain/consensus/model/acceptancedata_equal_clone_test.go @@ -0,0 +1,826 @@ +package model_test + +import ( + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +func initTestTransactionAcceptanceDataForClone() []*externalapi.TransactionAcceptanceData { + tests := []*externalapi.TransactionAcceptanceData{ + { + &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: 
externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }, + } + return tests +} + +type testTransactionAcceptanceDataToCompare struct { + transactionAcceptanceData *externalapi.TransactionAcceptanceData + expectedResult bool +} + +type testTransactionAcceptanceDataStruct struct { + baseTransactionAcceptanceData *externalapi.TransactionAcceptanceData + transactionAcceptanceDataToCompareTo []testTransactionAcceptanceDataToCompare +} + +func initTransactionAcceptanceDataForEqual() []testTransactionAcceptanceDataStruct { + var testTransactionAcceptanceDataBase = externalapi.TransactionAcceptanceData{ + + &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), 
+ }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + } + + var testTransactionAcceptanceData1 = externalapi.TransactionAcceptanceData{ + &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + } + // test 2: different transactions + var testTransactionAcceptanceData2 = externalapi.TransactionAcceptanceData{ + &externalapi.DomainTransaction{ + Version: 2, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 
2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + } + //test 3: different Fee + var testTransactionAcceptanceData3 = externalapi.TransactionAcceptanceData{ + &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 2, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + } + //test 4: different isAccepted + var 
testTransactionAcceptanceData4 = externalapi.TransactionAcceptanceData{ + &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + false, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + } + + //test 5: different TransactionInputUTXOEntries + var testTransactionAcceptanceData5 = externalapi.TransactionAcceptanceData{ + &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: 
externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + false, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + } + + tests := []testTransactionAcceptanceDataStruct{ + { + baseTransactionAcceptanceData: &testTransactionAcceptanceDataBase, + transactionAcceptanceDataToCompareTo: []testTransactionAcceptanceDataToCompare{ + { + transactionAcceptanceData: &testTransactionAcceptanceData1, + expectedResult: true, + }, { + transactionAcceptanceData: &testTransactionAcceptanceData2, + expectedResult: false, + }, { + transactionAcceptanceData: &testTransactionAcceptanceData3, + expectedResult: false, + }, { + transactionAcceptanceData: &testTransactionAcceptanceData4, + expectedResult: false, + }, { + transactionAcceptanceData: &testTransactionAcceptanceData5, + expectedResult: false, + }, { + transactionAcceptanceData: nil, + expectedResult: false, + }, + }, + }, { + baseTransactionAcceptanceData: nil, + transactionAcceptanceDataToCompareTo: []testTransactionAcceptanceDataToCompare{ + { + transactionAcceptanceData: &testTransactionAcceptanceData1, + expectedResult: false, + }, { + transactionAcceptanceData: nil, + expectedResult: true, + }, + }, + }, + } + return tests +} + +func TestTransactionAcceptanceData_Equal(t *testing.T) { + acceptanceData := initTransactionAcceptanceDataForEqual() + for i, test := range acceptanceData { + for j, subTest := range test.transactionAcceptanceDataToCompareTo { + result1 := test.baseTransactionAcceptanceData.Equal(subTest.transactionAcceptanceData) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected 
%t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.transactionAcceptanceData.Equal(test.baseTransactionAcceptanceData) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestTransactionAcceptanceData_Clone(t *testing.T) { + + testTransactionAcceptanceData := initTestTransactionAcceptanceDataForClone() + for i, transactionAcceptanceData := range testTransactionAcceptanceData { + transactionAcceptanceDataClone := transactionAcceptanceData.Clone() + if !transactionAcceptanceDataClone.Equal(transactionAcceptanceData) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(transactionAcceptanceData, transactionAcceptanceDataClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} + +func initTestBlockAcceptanceDataForClone() []*externalapi.BlockAcceptanceData { + + tests := []*externalapi.BlockAcceptanceData{{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{ + { + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{ + {externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), + 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}, + }, + } + return tests +} + +type testBlockAcceptanceDataToCompare struct { + blockAcceptanceData *externalapi.BlockAcceptanceData + expectedResult bool +} + +type testBlockAcceptanceDataStruct struct { + baseBlockAcceptanceData *externalapi.BlockAcceptanceData + blockAcceptanceDataToCompareTo []testBlockAcceptanceDataToCompare +} + +func iniBlockAcceptanceDataForEqual() []testBlockAcceptanceDataStruct { + var testBlockAcceptanceDataBase = externalapi.BlockAcceptanceData{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}} + //test 1: structs are equal + var 
testBlockAcceptanceData1 = externalapi.BlockAcceptanceData{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}} + // test 2: different size + var testBlockAcceptanceData2 = externalapi.BlockAcceptanceData{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + 
{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }, {}}} + //test 3: different transactions, same size + var testBlockAcceptanceData3 = externalapi.BlockAcceptanceData{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + false, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}} + + // test 4 - different block hash + var 
testBlockAcceptanceData4 = externalapi.BlockAcceptanceData{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}} + + tests := []testBlockAcceptanceDataStruct{ + { + baseBlockAcceptanceData: &testBlockAcceptanceDataBase, + blockAcceptanceDataToCompareTo: []testBlockAcceptanceDataToCompare{ + { + blockAcceptanceData: &testBlockAcceptanceData1, + expectedResult: true, + }, { + blockAcceptanceData: &testBlockAcceptanceData2, + expectedResult: false, + }, { + blockAcceptanceData: &testBlockAcceptanceData3, + expectedResult: false, + }, { + blockAcceptanceData: nil, + expectedResult: false, + }, + { + blockAcceptanceData: &testBlockAcceptanceData4, + expectedResult: false, + }, + }, + }, { + baseBlockAcceptanceData: nil, + blockAcceptanceDataToCompareTo: []testBlockAcceptanceDataToCompare{ + { + blockAcceptanceData: 
&testBlockAcceptanceData1, + expectedResult: false, + }, { + blockAcceptanceData: nil, + expectedResult: true, + }, + }, + }, + } + return tests +} + +func TestBlockAcceptanceData_Equal(t *testing.T) { + + blockAcceptances := iniBlockAcceptanceDataForEqual() + for i, test := range blockAcceptances { + for j, subTest := range test.blockAcceptanceDataToCompareTo { + result1 := test.baseBlockAcceptanceData.Equal(subTest.blockAcceptanceData) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.blockAcceptanceData.Equal(test.baseBlockAcceptanceData) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestBlockAcceptanceData_Clone(t *testing.T) { + + testBlockAcceptanceData := initTestBlockAcceptanceDataForClone() + for i, blockAcceptanceData := range testBlockAcceptanceData { + blockAcceptanceDataClone := blockAcceptanceData.Clone() + if !blockAcceptanceDataClone.Equal(blockAcceptanceData) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(blockAcceptanceData, blockAcceptanceDataClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} + +func initTestAcceptanceDataForClone() []externalapi.AcceptanceData { + + test1 := []*externalapi.BlockAcceptanceData{{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{ + { + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + 
[]*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}, + }, + } + tests := []externalapi.AcceptanceData{test1, test1} + return tests +} + +type testAcceptanceDataToCompare struct { + acceptanceData externalapi.AcceptanceData + expectedResult bool +} + +type testAcceptanceDataStruct struct { + baseAcceptanceData externalapi.AcceptanceData + acceptanceDataToCompareTo []testAcceptanceDataToCompare +} + +func initAcceptanceDataForEqual() []testAcceptanceDataStruct { + var testAcceptanceDataBase = []*externalapi.BlockAcceptanceData{ + { + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + 
externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}}} + //test 1: structs are equal + var testAcceptanceData1 = []*externalapi.BlockAcceptanceData{ + {externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}}} + // test 2: different size + var testAcceptanceData2 = []*externalapi.BlockAcceptanceData{ + {externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + 
&externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}}, {}} + //test 3: different transactions, same size + var testAcceptanceData3 = []*externalapi.BlockAcceptanceData{ + {externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + []*externalapi.TransactionAcceptanceData{{ + &externalapi.DomainTransaction{ + 2, + []*externalapi.DomainTransactionInput{{ + externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + 
externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + 1, + true, + []externalapi.UTXOEntry{utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }}}} + + tests := []testAcceptanceDataStruct{ + { + baseAcceptanceData: testAcceptanceDataBase, + acceptanceDataToCompareTo: []testAcceptanceDataToCompare{ + { + acceptanceData: testAcceptanceData1, + expectedResult: true, + }, { + acceptanceData: testAcceptanceData2, + expectedResult: false, + }, { + acceptanceData: testAcceptanceData3, + expectedResult: false, + }, + }, + }, + } + return tests +} + +func TestAcceptanceData_Equal(t *testing.T) { + + acceptances := initAcceptanceDataForEqual() + for i, test := range acceptances { + for j, subTest := range test.acceptanceDataToCompareTo { + result1 := test.baseAcceptanceData.Equal(subTest.acceptanceData) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.acceptanceData.Equal(test.baseAcceptanceData) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestAcceptanceData_Clone(t *testing.T) { + + testAcceptanceData := initTestAcceptanceDataForClone() + for i, acceptanceData := range testAcceptanceData { + acceptanceDataClone := acceptanceData.Clone() + if !acceptanceDataClone.Equal(acceptanceData) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(acceptanceData, acceptanceDataClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/block_heap.go 
b/domain/consensus/model/block_heap.go new file mode 100644 index 0000000..780692e --- /dev/null +++ b/domain/consensus/model/block_heap.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockHeap represents a heap of block hashes, providing a priority-queue functionality +type BlockHeap interface { + Push(blockHash *externalapi.DomainHash) error + PushSlice(blockHash []*externalapi.DomainHash) error + Pop() *externalapi.DomainHash + Len() int + ToSlice() []*externalapi.DomainHash +} diff --git a/domain/consensus/model/blockiterator.go b/domain/consensus/model/blockiterator.go new file mode 100644 index 0000000..dde5244 --- /dev/null +++ b/domain/consensus/model/blockiterator.go @@ -0,0 +1,11 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockIterator is an iterator over blocks according to some order. +type BlockIterator interface { + First() bool + Next() bool + Get() (*externalapi.DomainHash, error) + Close() error +} diff --git a/domain/consensus/model/blockrelations.go b/domain/consensus/model/blockrelations.go new file mode 100644 index 0000000..1448469 --- /dev/null +++ b/domain/consensus/model/blockrelations.go @@ -0,0 +1,38 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockRelations represents a block's parent/child relations +type BlockRelations struct { + Parents []*externalapi.DomainHash + Children []*externalapi.DomainHash +} + +// Clone returns a clone of BlockRelations +func (br *BlockRelations) Clone() *BlockRelations { + return &BlockRelations{ + Parents: externalapi.CloneHashes(br.Parents), + Children: externalapi.CloneHashes(br.Children), + } +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
+var _ = &BlockRelations{[]*externalapi.DomainHash{}, []*externalapi.DomainHash{}} + +// Equal returns whether br equals to other +func (br *BlockRelations) Equal(other *BlockRelations) bool { + if br == nil || other == nil { + return br == other + } + + if !externalapi.HashesEqual(br.Parents, other.Parents) { + return false + } + + if !externalapi.HashesEqual(br.Children, other.Children) { + return false + } + + return true +} diff --git a/domain/consensus/model/blockrelations_equal_clone_test.go b/domain/consensus/model/blockrelations_equal_clone_test.go new file mode 100644 index 0000000..4994fdb --- /dev/null +++ b/domain/consensus/model/blockrelations_equal_clone_test.go @@ -0,0 +1,146 @@ +package model + +import ( + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func initTestBlockRelationsForClone() []*BlockRelations { + + tests := []*BlockRelations{ + { + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }, + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + }, + }, + } + return tests +} + +type testBlockRelationsToCompare struct { + blockRelations *BlockRelations + expectedResult bool +} + +type testBlockRelationsStruct struct { + baseBlockRelations *BlockRelations + blockRelationsToCompareTo []testBlockRelationsToCompare +} + +func initTestBlockRelationsForEqual() []testBlockRelationsStruct { + + var testBlockRelationsBase = BlockRelations{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }, + []*externalapi.DomainHash{ + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + }, + } + //First test: structs are equal + var testBlockRelations1 = BlockRelations{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }, + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + }, + } + //Second test: children changed + var testBlockRelations2 = BlockRelations{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }, + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{5}), + }, + } + //Third test: parents changed + var testBlockRelations3 = BlockRelations{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{6}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }, + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + }, + } + + tests := []testBlockRelationsStruct{ + { + baseBlockRelations: &testBlockRelationsBase, + blockRelationsToCompareTo: []testBlockRelationsToCompare{ + { + blockRelations: &testBlockRelations1, + expectedResult: true, + }, { + blockRelations: &testBlockRelations2, + expectedResult: false, + }, { + blockRelations: &testBlockRelations3, + expectedResult: false, + }, { + blockRelations: nil, + expectedResult: false, + }, 
+ }, + }, { + baseBlockRelations: nil, + blockRelationsToCompareTo: []testBlockRelationsToCompare{ + { + blockRelations: &testBlockRelations1, + expectedResult: false, + }, { + blockRelations: nil, + expectedResult: true, + }, + }, + }, + } + return tests +} + +func TestBlockRelationsData_Equal(t *testing.T) { + + blockRelationss := initTestBlockRelationsForEqual() + for i, test := range blockRelationss { + for j, subTest := range test.blockRelationsToCompareTo { + result1 := test.baseBlockRelations.Equal(subTest.blockRelations) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.blockRelations.Equal(test.baseBlockRelations) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestBlockRelations_Clone(t *testing.T) { + + testBlockRelations := initTestBlockRelationsForClone() + for i, blockRelations := range testBlockRelations { + blockRelationsClone := blockRelations.Clone() + if !blockRelationsClone.Equal(blockRelations) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(blockRelations, blockRelationsClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/database.go b/domain/consensus/model/database.go new file mode 100644 index 0000000..7911de9 --- /dev/null +++ b/domain/consensus/model/database.go @@ -0,0 +1,99 @@ +package model + +// DBCursor iterates over database entries given some bucket. +type DBCursor interface { + // Next moves the iterator to the next key/value pair. It returns whether the + // iterator is exhausted. Panics if the cursor is closed. + Next() bool + + // First moves the iterator to the first key/value pair. It returns false if + // such a pair does not exist. Panics if the cursor is closed. 
+ First() bool + + // Seek moves the iterator to the first key/value pair whose key is greater + // than or equal to the given key. It returns ErrNotFound if such pair does not + // exist. + Seek(key DBKey) error + + // Key returns the key of the current key/value pair, or ErrNotFound if done. + // The caller should not modify the contents of the returned key, and + // its contents may change on the next call to Next. + Key() (DBKey, error) + + // Value returns the value of the current key/value pair, or ErrNotFound if done. + // The caller should not modify the contents of the returned slice, and its + // contents may change on the next call to Next. + Value() ([]byte, error) + + // Close releases associated resources. + Close() error +} + +// DBReader defines a proxy over domain data access +type DBReader interface { + // Get gets the value for the given key. It returns + // ErrNotFound if the given key does not exist. + Get(key DBKey) ([]byte, error) + + // Has returns true if the database does contains the + // given key. + Has(key DBKey) (bool, error) + + // Cursor begins a new cursor over the given bucket. + Cursor(bucket DBBucket) (DBCursor, error) +} + +// DBWriter is an interface to write to the database +type DBWriter interface { + DBReader + + // Put sets the value for the given key. It overwrites + // any previous value for that key. + Put(key DBKey, value []byte) error + + // Delete deletes the value for the given key. Will not + // return an error if the key doesn't exist. + Delete(key DBKey) error +} + +// DBTransaction is a proxy over domain data +// access that requires an open database transaction +type DBTransaction interface { + DBWriter + + // Rollback rolls back whatever changes were made to the + // database within this transaction. + Rollback() error + + // Commit commits whatever changes were made to the database + // within this transaction. 
+ Commit() error + + // RollbackUnlessClosed rolls back changes that were made to + // the database within the transaction, unless the transaction + // had already been closed using either Rollback or Commit. + RollbackUnlessClosed() error +} + +// DBManager defines the interface of a database that can begin +// transactions and read data. +type DBManager interface { + DBWriter + + // Begin begins a new database transaction. + Begin() (DBTransaction, error) +} + +// DBKey is an interface for a database key +type DBKey interface { + Bytes() []byte + Bucket() DBBucket + Suffix() []byte +} + +// DBBucket is an interface for a database bucket +type DBBucket interface { + Bucket(bucketBytes []byte) DBBucket + Key(suffix []byte) DBKey + Path() []byte +} diff --git a/domain/consensus/model/errors.go b/domain/consensus/model/errors.go new file mode 100644 index 0000000..1c48ab5 --- /dev/null +++ b/domain/consensus/model/errors.go @@ -0,0 +1,11 @@ +package model + +import "github.com/pkg/errors" + +// ErrBlockNotInSelectedParentChain is returned from CreateHeadersSelectedChainBlockLocator if one of the parameters +// passed to it are not in the headers selected parent chain +var ErrBlockNotInSelectedParentChain = errors.New("Block is not in selected parent chain") + +// ErrReachedMaxTraversalAllowed is returned from AnticoneFromBlocks if `maxTraversalAllowed` was specified +// and the traversal passed it +var ErrReachedMaxTraversalAllowed = errors.New("Traversal searching for anticone passed the maxTraversalAllowed limit") diff --git a/domain/consensus/model/externalapi/acceptancedata.go b/domain/consensus/model/externalapi/acceptancedata.go new file mode 100644 index 0000000..4b85741 --- /dev/null +++ b/domain/consensus/model/externalapi/acceptancedata.go @@ -0,0 +1,145 @@ +package externalapi + +// AcceptanceData stores data about which transactions were accepted by a block. +// It's ordered in the same way as the block merge set blues. 
+type AcceptanceData []*BlockAcceptanceData + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. +var _ AcceptanceData = []*BlockAcceptanceData{} + +// Equal returns whether ad equals to other +func (ad AcceptanceData) Equal(other AcceptanceData) bool { + if len(ad) != len(other) { + return false + } + + for i, blockAcceptanceData := range ad { + if !blockAcceptanceData.Equal(other[i]) { + return false + } + } + + return true +} + +// Clone clones the AcceptanceData +func (ad AcceptanceData) Clone() AcceptanceData { + clone := make(AcceptanceData, len(ad)) + for i, blockAcceptanceData := range ad { + clone[i] = blockAcceptanceData.Clone() + } + + return clone +} + +// BlockAcceptanceData stores all transactions in a block with an indication +// if they were accepted or not by some other block +type BlockAcceptanceData struct { + BlockHash *DomainHash + TransactionAcceptanceData []*TransactionAcceptanceData +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
+var _ = &BlockAcceptanceData{&DomainHash{}, []*TransactionAcceptanceData{}} + +// Equal returns whether bad equals to other +func (bad *BlockAcceptanceData) Equal(other *BlockAcceptanceData) bool { + if bad == nil || other == nil { + return bad == other + } + + if !bad.BlockHash.Equal(other.BlockHash) { + return false + } + + if len(bad.TransactionAcceptanceData) != len(other.TransactionAcceptanceData) { + return false + } + + for i, acceptanceData := range bad.TransactionAcceptanceData { + if !acceptanceData.Equal(other.TransactionAcceptanceData[i]) { + return false + } + } + + return true +} + +// Clone returns a clone of BlockAcceptanceData +func (bad *BlockAcceptanceData) Clone() *BlockAcceptanceData { + if bad == nil { + return nil + } + + clone := &BlockAcceptanceData{ + BlockHash: bad.BlockHash, + TransactionAcceptanceData: make([]*TransactionAcceptanceData, len(bad.TransactionAcceptanceData)), + } + for i, acceptanceData := range bad.TransactionAcceptanceData { + clone.TransactionAcceptanceData[i] = acceptanceData.Clone() + } + + return clone +} + +// TransactionAcceptanceData stores a transaction together with an indication +// if it was accepted or not by some block +type TransactionAcceptanceData struct { + Transaction *DomainTransaction + Fee uint64 + IsAccepted bool + TransactionInputUTXOEntries []UTXOEntry +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
+var _ = &TransactionAcceptanceData{&DomainTransaction{}, 0, false, []UTXOEntry{}} + +// Equal returns whether tad equals to other +func (tad *TransactionAcceptanceData) Equal(other *TransactionAcceptanceData) bool { + if tad == nil || other == nil { + return tad == other + } + + if !tad.Transaction.Equal(other.Transaction) { + return false + } + + if tad.Fee != other.Fee { + return false + } + + if tad.IsAccepted != other.IsAccepted { + return false + } + + if len(tad.TransactionInputUTXOEntries) != len(other.TransactionInputUTXOEntries) { + return false + } + + for i, thisUTXOEntry := range tad.TransactionInputUTXOEntries { + otherUTXOEntry := other.TransactionInputUTXOEntries[i] + if !thisUTXOEntry.Equal(otherUTXOEntry) { + return false + } + } + + return true +} + +// Clone returns a clone of TransactionAcceptanceData +func (tad *TransactionAcceptanceData) Clone() *TransactionAcceptanceData { + cloneTransactionInputUTXOEntries := make([]UTXOEntry, len(tad.TransactionInputUTXOEntries)) + for i, utxoEntry := range tad.TransactionInputUTXOEntries { + cloneTransactionInputUTXOEntries[i] = utxoEntry + } + + return &TransactionAcceptanceData{ + Transaction: tad.Transaction.Clone(), + Fee: tad.Fee, + IsAccepted: tad.IsAccepted, + TransactionInputUTXOEntries: cloneTransactionInputUTXOEntries, + } +} diff --git a/domain/consensus/model/externalapi/block.go b/domain/consensus/model/externalapi/block.go new file mode 100644 index 0000000..c7e084e --- /dev/null +++ b/domain/consensus/model/externalapi/block.go @@ -0,0 +1,84 @@ +package externalapi + +import "math/big" + +// DomainBlock represents a Spectre block +type DomainBlock struct { + Header BlockHeader + Transactions []*DomainTransaction +} + +// Clone returns a clone of DomainBlock +func (block *DomainBlock) Clone() *DomainBlock { + transactionClone := make([]*DomainTransaction, len(block.Transactions)) + for i, tx := range block.Transactions { + transactionClone[i] = tx.Clone() + } + + return &DomainBlock{ + 
Header: block.Header, + Transactions: transactionClone, + } +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. +var _ = DomainBlock{nil, []*DomainTransaction{}} + +// Equal returns whether block equals to other +func (block *DomainBlock) Equal(other *DomainBlock) bool { + if block == nil || other == nil { + return block == other + } + + if len(block.Transactions) != len(other.Transactions) { + return false + } + + if !block.Header.Equal(other.Header) { + return false + } + + for i, tx := range block.Transactions { + if !tx.Equal(other.Transactions[i]) { + return false + } + } + + return true +} + +// BlockHeader represents an immutable block header. +type BlockHeader interface { + BaseBlockHeader + ToMutable() MutableBlockHeader +} + +// BaseBlockHeader represents the header part of a Spectre block +type BaseBlockHeader interface { + Version() uint16 + Parents() []BlockLevelParents + DirectParents() BlockLevelParents + HashMerkleRoot() *DomainHash + AcceptedIDMerkleRoot() *DomainHash + UTXOCommitment() *DomainHash + TimeInMilliseconds() int64 + Bits() uint32 + Nonce() uint64 + DAAScore() uint64 + BlueScore() uint64 + BlueWork() *big.Int + PruningPoint() *DomainHash + BlockLevel(maxBlockLevel int) int + Equal(other BaseBlockHeader) bool +} + +// MutableBlockHeader represents a block header that can be mutated, but only +// the fields that are relevant to mining (Nonce and TimeInMilliseconds). 
+type MutableBlockHeader interface { + BaseBlockHeader + ToImmutable() BlockHeader + SetNonce(nonce uint64) + SetTimeInMilliseconds(timeInMilliseconds int64) + SetHashMerkleRoot(hashMerkleRoot *DomainHash) +} diff --git a/domain/consensus/model/externalapi/block_equal_clone_test.go b/domain/consensus/model/externalapi/block_equal_clone_test.go new file mode 100644 index 0000000..7e3e407 --- /dev/null +++ b/domain/consensus/model/externalapi/block_equal_clone_test.go @@ -0,0 +1,499 @@ +package externalapi_test + +import ( + "math/big" + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" +) + +type blockToCompare struct { + block *externalapi.DomainBlock + expectedResult bool +} + +type TestBlockStruct struct { + baseBlock *externalapi.DomainBlock + blocksToCompareTo []blockToCompare +} + +func initTestBaseTransactions() []*externalapi.DomainTransaction { + + testTx := []*externalapi.DomainTransaction{{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: []*externalapi.DomainTransactionOutput{}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }} + return testTx +} + +func initTestAnotherTransactions() []*externalapi.DomainTransaction { + + testTx := []*externalapi.DomainTransaction{{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: []*externalapi.DomainTransactionOutput{}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x02}, + Fee: 0, + Mass: 1, + ID: 
externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), + }} + return testTx +} + +func initTestTwoTransactions() []*externalapi.DomainTransaction { + + testTx := []*externalapi.DomainTransaction{{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: []*externalapi.DomainTransactionOutput{}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), + }, { + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: []*externalapi.DomainTransactionOutput{}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), + }} + return testTx +} + +func initTestBlockStructsForClone() []*externalapi.DomainBlock { + tests := []*externalapi.DomainBlock{ + { + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + 4, + 5, + 6, + 7, + 8, + big.NewInt(9), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}), + ), + initTestBaseTransactions(), + }, { + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + 4, + 5, + 6, + 7, + 8, + big.NewInt(9), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}), + ), + initTestBaseTransactions(), + }, + } + + return tests +} + +func initTestBlockStructsForEqual() *[]TestBlockStruct { + tests := []TestBlockStruct{ + { + baseBlock: nil, + blocksToCompareTo: []blockToCompare{ + { + block: nil, + expectedResult: true, + }, + { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + 4, + 5, + 6, + 7, + 8, + big.NewInt(9), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}), + ), + initTestBaseTransactions()}, + expectedResult: false, + }, + }, + }, { + baseBlock: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + blocksToCompareTo: []blockToCompare{ + { + block: nil, + expectedResult: false, + }, + { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestAnotherTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: true, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100})}}, // Changed + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestTwoTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), // Changed + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + 
[]externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), // Changed + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), // Changed + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 100, // Changed + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 
0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 100, // Changed + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 100, // Changed + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 100, // Changed + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + 
blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 100, // Changed + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(100), // Changed + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, { + block: &externalapi.DomainBlock{ + blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), // Changed + ), + initTestBaseTransactions(), + }, + expectedResult: false, + }, + }, + }, + } + + return &tests +} + 
+func TestDomainBlock_Equal(t *testing.T) {
+
+	blockTests := initTestBlockStructsForEqual()
+	for i, test := range *blockTests {
+		for j, subTest := range test.blocksToCompareTo {
+			result1 := test.baseBlock.Equal(subTest.block)
+			if result1 != subTest.expectedResult {
+				t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1)
+			}
+			result2 := subTest.block.Equal(test.baseBlock)
+			if result2 != subTest.expectedResult {
+				t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2)
+			}
+		}
+	}
+
+}
+
+func TestDomainBlock_Clone(t *testing.T) {
+
+	blocks := initTestBlockStructsForClone()
+	for i, block := range blocks {
+		blockClone := block.Clone()
+		if !blockClone.Equal(block) {
+			t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i)
+		}
+		if !reflect.DeepEqual(block, blockClone) {
+			t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i)
+		}
+	}
+}
diff --git a/domain/consensus/model/externalapi/block_with_trusted_data.go b/domain/consensus/model/externalapi/block_with_trusted_data.go
new file mode 100644
index 0000000..48ab0fb
--- /dev/null
+++ b/domain/consensus/model/externalapi/block_with_trusted_data.go
@@ -0,0 +1,23 @@
+package externalapi
+
+// BlockWithTrustedData is a block with pre-filled data
+// that is not validated by the consensus.
+// This is used when bringing the pruning point and its
+// anticone on a pruned-headers node.
+type BlockWithTrustedData struct { + Block *DomainBlock + DAAWindow []*TrustedDataDataDAAHeader + GHOSTDAGData []*BlockGHOSTDAGDataHashPair +} + +// TrustedDataDataDAAHeader is a block that belongs to BlockWithTrustedData.DAAWindow +type TrustedDataDataDAAHeader struct { + Header BlockHeader + GHOSTDAGData *BlockGHOSTDAGData +} + +// BlockGHOSTDAGDataHashPair is a pair of a block hash and its ghostdag data +type BlockGHOSTDAGDataHashPair struct { + Hash *DomainHash + GHOSTDAGData *BlockGHOSTDAGData +} diff --git a/domain/consensus/model/externalapi/blockinfo.go b/domain/consensus/model/externalapi/blockinfo.go new file mode 100644 index 0000000..43c914f --- /dev/null +++ b/domain/consensus/model/externalapi/blockinfo.go @@ -0,0 +1,37 @@ +package externalapi + +import "math/big" + +// BlockInfo contains various information about a specific block +type BlockInfo struct { + Exists bool + BlockStatus BlockStatus + BlueScore uint64 + BlueWork *big.Int + SelectedParent *DomainHash + MergeSetBlues []*DomainHash + MergeSetReds []*DomainHash +} + +// HasHeader returns whether the block exists and has a valid header +func (bi *BlockInfo) HasHeader() bool { + return bi.Exists && bi.BlockStatus != StatusInvalid +} + +// HasBody returns whether the block exists and has a valid body +func (bi *BlockInfo) HasBody() bool { + return bi.Exists && bi.BlockStatus != StatusInvalid && bi.BlockStatus != StatusHeaderOnly +} + +// Clone returns a clone of BlockInfo +func (bi *BlockInfo) Clone() *BlockInfo { + return &BlockInfo{ + Exists: bi.Exists, + BlockStatus: bi.BlockStatus.Clone(), + BlueScore: bi.BlueScore, + BlueWork: new(big.Int).Set(bi.BlueWork), + SelectedParent: bi.SelectedParent, + MergeSetBlues: CloneHashes(bi.MergeSetBlues), + MergeSetReds: CloneHashes(bi.MergeSetReds), + } +} diff --git a/domain/consensus/model/externalapi/blockinfo_clone_test.go b/domain/consensus/model/externalapi/blockinfo_clone_test.go new file mode 100644 index 0000000..003d25e --- /dev/null +++ 
b/domain/consensus/model/externalapi/blockinfo_clone_test.go @@ -0,0 +1,108 @@ +package externalapi + +import ( + "math/big" + "reflect" + "testing" +) + +func initTestBlockInfoStructsForClone() []*BlockInfo { + + tests := []*BlockInfo{ + { + true, + BlockStatus(0x01), + 0, + big.NewInt(0), + nil, + []*DomainHash{}, + []*DomainHash{}, + }, { + true, + BlockStatus(0x02), + 0, + big.NewInt(0), + nil, + []*DomainHash{}, + []*DomainHash{}, + }, { + true, + 1, + 1, + big.NewInt(0), + nil, + []*DomainHash{}, + []*DomainHash{}, + }, { + true, + 255, + 2, + big.NewInt(0), + nil, + []*DomainHash{}, + []*DomainHash{}, + }, { + true, + 0, + 3, + big.NewInt(0), + nil, + []*DomainHash{}, + []*DomainHash{}, + }, { + true, + BlockStatus(0x01), + 0, + big.NewInt(1), + nil, + []*DomainHash{}, + []*DomainHash{}, + }, { + false, + BlockStatus(0x01), + 0, + big.NewInt(1), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), + []*DomainHash{ + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + }, + []*DomainHash{ + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}), + }, + }, + } + return tests +} + +func TestBlockInfo_Clone(t *testing.T) { + + blockInfos := initTestBlockInfoStructsForClone() + for i, blockInfo := range blockInfos { + blockInfoClone := blockInfo.Clone() + if !reflect.DeepEqual(blockInfo, blockInfoClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/externalapi/blocklevelparents.go b/domain/consensus/model/externalapi/blocklevelparents.go new file mode 100644 index 0000000..80c52a5 --- /dev/null +++ b/domain/consensus/model/externalapi/blocklevelparents.go @@ -0,0 +1,63 @@ +package externalapi + +// BlockLevelParents represent the parents within a single super-block level +// See https://github.com/spectre-project/research/issues/3 for further details +type BlockLevelParents []*DomainHash + +// Equal returns true if this BlockLevelParents is equal to `other` +func (sl BlockLevelParents) Equal(other BlockLevelParents) bool { + if len(sl) != len(other) { + return false + } + for _, thisHash := range sl { + found := false + for _, otherHash := range other { + if thisHash.Equal(otherHash) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Clone creates a clone of this BlockLevelParents +func (sl BlockLevelParents) Clone() BlockLevelParents { + return CloneHashes(sl) +} + +// Contains returns true if this BlockLevelParents contains the given blockHash +func (sl BlockLevelParents) Contains(blockHash *DomainHash) bool { + for _, blockLevelParent := range sl { + if blockLevelParent.Equal(blockHash) { + return true + } + } + return false +} + +// ParentsEqual returns true if all the BlockLevelParents in `a` and `b` are +// equal pairwise +func ParentsEqual(a, b []BlockLevelParents) bool { + if len(a) != len(b) { + return false + 
} + for i, blockLevelParents := range a { + if !blockLevelParents.Equal(b[i]) { + return false + } + } + return true +} + +// CloneParents creates a clone of the given BlockLevelParents slice +func CloneParents(parents []BlockLevelParents) []BlockLevelParents { + clone := make([]BlockLevelParents, len(parents)) + for i, blockLevelParents := range parents { + clone[i] = blockLevelParents.Clone() + } + return clone +} diff --git a/domain/consensus/model/externalapi/blocklocator.go b/domain/consensus/model/externalapi/blocklocator.go new file mode 100644 index 0000000..b6dd1ca --- /dev/null +++ b/domain/consensus/model/externalapi/blocklocator.go @@ -0,0 +1,24 @@ +package externalapi + +// BlockLocator is used to help locate a specific block. The algorithm for +// building the block locator is to add block hashes in reverse order on the +// block's selected parent chain until the desired stop block is reached. +// In order to keep the list of locator hashes to a reasonable number of entries, +// the step between each entry is doubled each loop iteration to exponentially +// decrease the number of hashes as a function of the distance from the block +// being located. +// +// For example, assume a selected parent chain with IDs as depicted below, and the +// stop block is genesis: +// +// genesis -> 1 -> 2 -> ... 
-> 15 -> 16 -> 17 -> 18 +// +// The block locator for block 17 would be the hashes of blocks: +// +// [17 16 14 11 7 2 genesis] +type BlockLocator []*DomainHash + +// Clone returns a clone of BlockLocator +func (locator BlockLocator) Clone() BlockLocator { + return CloneHashes(locator) +} diff --git a/domain/consensus/model/externalapi/blocklocator_clone_test.go b/domain/consensus/model/externalapi/blocklocator_clone_test.go new file mode 100644 index 0000000..28e9076 --- /dev/null +++ b/domain/consensus/model/externalapi/blocklocator_clone_test.go @@ -0,0 +1,76 @@ +package externalapi + +import ( + "reflect" + "testing" +) + +func initTestBlockLocatorForClone() []*BlockLocator { + + tests := []*BlockLocator{{ + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF}), + }, { + 
NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 2}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1, 1}), + NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 2, 1}), + }, + } + return tests +} + +func TestBlockLocator_Clone(t *testing.T) { + + testBlockLocator := initTestBlockLocatorForClone() + for i, blockLocator := range testBlockLocator { + blockLocatorClone := blockLocator.Clone() + if !reflect.DeepEqual(blockLocator, &blockLocatorClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/externalapi/blockstatus.go b/domain/consensus/model/externalapi/blockstatus.go new file mode 100644 index 0000000..7358baf --- /dev/null +++ b/domain/consensus/model/externalapi/blockstatus.go @@ -0,0 +1,49 @@ +package externalapi + +// BlockStatus represents the validation state of the block. 
+type BlockStatus byte + +// Clone returns a clone of BlockStatus +func (bs BlockStatus) Clone() BlockStatus { + return bs +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. +var _ BlockStatus = 0 + +// Equal returns whether bs equals to other +func (bs BlockStatus) Equal(other BlockStatus) bool { + return bs == other +} + +const ( + // StatusInvalid indicates that the block is invalid. + StatusInvalid BlockStatus = iota + + // StatusUTXOValid indicates the block is valid from any UTXO related aspects and has passed all the other validations as well. + StatusUTXOValid + + // StatusUTXOPendingVerification indicates that the block is pending verification against its past UTXO-Set, either + // because it was not yet verified since the block was never in the selected parent chain, or if the + // block violates finality. + StatusUTXOPendingVerification + + // StatusDisqualifiedFromChain indicates that the block is not eligible to be a selected parent. 
+ StatusDisqualifiedFromChain + + // StatusHeaderOnly indicates that the block transactions are not held (pruned or wasn't added yet) + StatusHeaderOnly +) + +var blockStatusStrings = map[BlockStatus]string{ + StatusInvalid: "Invalid", + StatusUTXOValid: "Valid", + StatusUTXOPendingVerification: "UTXOPendingVerification", + StatusDisqualifiedFromChain: "DisqualifiedFromChain", + StatusHeaderOnly: "HeaderOnly", +} + +func (bs BlockStatus) String() string { + return blockStatusStrings[bs] +} diff --git a/domain/consensus/model/externalapi/blockstatus_equal_clone_test.go b/domain/consensus/model/externalapi/blockstatus_equal_clone_test.go new file mode 100644 index 0000000..7737296 --- /dev/null +++ b/domain/consensus/model/externalapi/blockstatus_equal_clone_test.go @@ -0,0 +1,87 @@ +package externalapi + +import ( + "reflect" + "testing" +) + +func initTestBlockStatusForClone() []BlockStatus { + + tests := []BlockStatus{1, 2, 0xFF, 0} + + return tests +} + +type TestBlockStatusToCompare struct { + blockStatus BlockStatus + expectedResult bool +} + +type TestBlockStatusStruct struct { + baseBlockStatus BlockStatus + blockStatusesToCompareTo []TestBlockStatusToCompare +} + +func initTestBlockStatusForEqual() []TestBlockStatusStruct { + tests := []TestBlockStatusStruct{ + { + baseBlockStatus: 0, + blockStatusesToCompareTo: []TestBlockStatusToCompare{ + { + blockStatus: 1, + expectedResult: false, + }, + { + blockStatus: 0, + expectedResult: true, + }, + }, + }, { + baseBlockStatus: 255, + blockStatusesToCompareTo: []TestBlockStatusToCompare{ + { + blockStatus: 1, + expectedResult: false, + }, + { + blockStatus: 255, + expectedResult: true, + }, + }, + }, + } + return tests +} + +func TestBlockStatus_Equal(t *testing.T) { + + testBlockStatus := initTestBlockStatusForEqual() + + for i, test := range testBlockStatus { + for j, subTest := range test.blockStatusesToCompareTo { + result1 := test.baseBlockStatus.Equal(subTest.blockStatus) + if result1 != 
subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + + result2 := subTest.blockStatus.Equal(test.baseBlockStatus) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestBlockStatus_Clone(t *testing.T) { + + testBlockStatus := initTestBlockStatusForClone() + for i, blockStatus := range testBlockStatus { + blockStatusClone := blockStatus.Clone() + if !blockStatusClone.Equal(blockStatus) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(blockStatus, blockStatusClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/externalapi/blocktemplate.go b/domain/consensus/model/externalapi/blocktemplate.go new file mode 100644 index 0000000..5456ef2 --- /dev/null +++ b/domain/consensus/model/externalapi/blocktemplate.go @@ -0,0 +1,19 @@ +package externalapi + +// DomainBlockTemplate contains a Block plus metadata related to its generation +type DomainBlockTemplate struct { + Block *DomainBlock + CoinbaseData *DomainCoinbaseData + CoinbaseHasRedReward bool + IsNearlySynced bool +} + +// Clone returns a clone of DomainBlockTemplate +func (bt *DomainBlockTemplate) Clone() *DomainBlockTemplate { + return &DomainBlockTemplate{ + Block: bt.Block.Clone(), + CoinbaseData: bt.CoinbaseData.Clone(), + CoinbaseHasRedReward: bt.CoinbaseHasRedReward, + IsNearlySynced: bt.IsNearlySynced, + } +} diff --git a/domain/consensus/model/externalapi/coinbase.go b/domain/consensus/model/externalapi/coinbase.go new file mode 100644 index 0000000..39dbfb9 --- /dev/null +++ b/domain/consensus/model/externalapi/coinbase.go @@ -0,0 +1,38 @@ +package externalapi + +import "bytes" + +// DomainCoinbaseData contains data by which a coinbase transaction +// is built +type DomainCoinbaseData struct { + ScriptPublicKey 
*ScriptPublicKey + ExtraData []byte +} + +// Clone returns a clone of DomainCoinbaseData +func (dcd *DomainCoinbaseData) Clone() *DomainCoinbaseData { + + scriptPubKeyClone := make([]byte, len(dcd.ScriptPublicKey.Script)) + copy(scriptPubKeyClone, dcd.ScriptPublicKey.Script) + + extraDataClone := make([]byte, len(dcd.ExtraData)) + copy(extraDataClone, dcd.ExtraData) + + return &DomainCoinbaseData{ + ScriptPublicKey: &ScriptPublicKey{Script: scriptPubKeyClone, Version: dcd.ScriptPublicKey.Version}, + ExtraData: extraDataClone, + } +} + +// Equal returns whether dcd equals to other +func (dcd *DomainCoinbaseData) Equal(other *DomainCoinbaseData) bool { + if dcd == nil || other == nil { + return dcd == other + } + + if !bytes.Equal(dcd.ExtraData, other.ExtraData) { + return false + } + + return dcd.ScriptPublicKey.Equal(other.ScriptPublicKey) +} diff --git a/domain/consensus/model/externalapi/coinbase_clone_test.go b/domain/consensus/model/externalapi/coinbase_clone_test.go new file mode 100644 index 0000000..df465d2 --- /dev/null +++ b/domain/consensus/model/externalapi/coinbase_clone_test.go @@ -0,0 +1,59 @@ +package externalapi + +import ( + "reflect" + "testing" +) + +func initTestCoinbaseDataStructsForClone() []*DomainCoinbaseData { + + tests := []*DomainCoinbaseData{ + { + &ScriptPublicKey{Script: []byte{1, 2, 3, 4, 5, 6}, Version: 0}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, + }, { + &ScriptPublicKey{Script: []byte{0, 0, 0, 0, 55}, Version: 0}, + []byte{0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF}, + }, + } + return tests +} + +func TestDomainCoinbaseData_Clone(t *testing.T) { + + coinbaseData := initTestCoinbaseDataStructsForClone() + for i, coinbase := range coinbaseData { + coinbaseClone := coinbase.Clone() + if !reflect.DeepEqual(coinbase, coinbaseClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/externalapi/consensus.go b/domain/consensus/model/externalapi/consensus.go new file mode 100644 index 0000000..da065ad --- /dev/null +++ b/domain/consensus/model/externalapi/consensus.go @@ -0,0 +1,59 @@ +package externalapi + +// Consensus maintains the current core state of the node +type Consensus interface { + Init(skipAddingGenesis bool) error + BuildBlock(coinbaseData *DomainCoinbaseData, transactions []*DomainTransaction) (*DomainBlock, error) + BuildBlockTemplate(coinbaseData *DomainCoinbaseData, transactions []*DomainTransaction) (*DomainBlockTemplate, error) + ValidateAndInsertBlock(block *DomainBlock, updateVirtual bool) error + ValidateAndInsertBlockWithTrustedData(block *BlockWithTrustedData, validateUTXO bool) error + ValidateTransactionAndPopulateWithConsensusData(transaction *DomainTransaction) error + ImportPruningPoints(pruningPoints []BlockHeader) error + BuildPruningPointProof() (*PruningPointProof, error) + ValidatePruningPointProof(pruningPointProof *PruningPointProof) error + ApplyPruningPointProof(pruningPointProof 
*PruningPointProof) error + + GetBlock(blockHash *DomainHash) (*DomainBlock, bool, error) + GetBlockEvenIfHeaderOnly(blockHash *DomainHash) (*DomainBlock, error) + GetBlockHeader(blockHash *DomainHash) (BlockHeader, error) + GetBlockInfo(blockHash *DomainHash) (*BlockInfo, error) + GetBlockRelations(blockHash *DomainHash) (parents []*DomainHash, children []*DomainHash, err error) + GetBlockAcceptanceData(blockHash *DomainHash) (AcceptanceData, error) + GetBlocksAcceptanceData(blockHashes []*DomainHash) ([]AcceptanceData, error) + + GetHashesBetween(lowHash, highHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, actualHighHash *DomainHash, err error) + GetAnticone(blockHash, contextHash *DomainHash, maxBlocks uint64) (hashes []*DomainHash, err error) + GetMissingBlockBodyHashes(highHash *DomainHash) ([]*DomainHash, error) + GetPruningPointUTXOs(expectedPruningPointHash *DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error) + GetVirtualUTXOs(expectedVirtualParents []*DomainHash, fromOutpoint *DomainOutpoint, limit int) ([]*OutpointAndUTXOEntryPair, error) + PruningPoint() (*DomainHash, error) + PruningPointHeaders() ([]BlockHeader, error) + PruningPointAndItsAnticone() ([]*DomainHash, error) + ClearImportedPruningPointData() error + AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*OutpointAndUTXOEntryPair) error + ValidateAndInsertImportedPruningPoint(newPruningPoint *DomainHash) error + GetVirtualSelectedParent() (*DomainHash, error) + CreateBlockLocatorFromPruningPoint(highHash *DomainHash, limit uint32) (BlockLocator, error) + CreateHeadersSelectedChainBlockLocator(lowHash, highHash *DomainHash) (BlockLocator, error) + CreateFullHeadersSelectedChainBlockLocator() (BlockLocator, error) + GetSyncInfo() (*SyncInfo, error) + Tips() ([]*DomainHash, error) + GetVirtualInfo() (*VirtualInfo, error) + GetVirtualDAAScore() (uint64, error) + IsValidPruningPoint(blockHash *DomainHash) (bool, error) + 
ArePruningPointsViolatingFinality(pruningPoints []BlockHeader) (bool, error) + GetVirtualSelectedParentChainFromBlock(blockHash *DomainHash) (*SelectedChainPath, error) + IsInSelectedParentChainOf(blockHashA *DomainHash, blockHashB *DomainHash) (bool, error) + GetHeadersSelectedTip() (*DomainHash, error) + Anticone(blockHash *DomainHash) ([]*DomainHash, error) + EstimateNetworkHashesPerSecond(startHash *DomainHash, windowSize int) (uint64, error) + PopulateMass(transaction *DomainTransaction) + ResolveVirtual(progressReportCallback func(uint64, uint64)) error + BlockDAAWindowHashes(blockHash *DomainHash) ([]*DomainHash, error) + TrustedDataDataDAAHeader(trustedBlockHash, daaBlockHash *DomainHash, daaBlockWindowIndex uint64) (*TrustedDataDataDAAHeader, error) + TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash *DomainHash) ([]*DomainHash, error) + TrustedGHOSTDAGData(blockHash *DomainHash) (*BlockGHOSTDAGData, error) + IsChainBlock(blockHash *DomainHash) (bool, error) + VirtualMergeDepthRoot() (*DomainHash, error) + IsNearlySynced() (bool, error) +} diff --git a/domain/consensus/model/externalapi/consensus_events.go b/domain/consensus/model/externalapi/consensus_events.go new file mode 100644 index 0000000..54c0158 --- /dev/null +++ b/domain/consensus/model/externalapi/consensus_events.go @@ -0,0 +1,30 @@ +package externalapi + +// ConsensusEvent is an interface type that is implemented by all events raised by consensus +type ConsensusEvent interface { + isConsensusEvent() +} + +// BlockAdded is an event raised by consensus when a block was added to the dag +type BlockAdded struct { + Block *DomainBlock +} + +func (*BlockAdded) isConsensusEvent() {} + +// VirtualChangeSet is an event raised by consensus when virtual changes +type VirtualChangeSet struct { + VirtualSelectedParentChainChanges *SelectedChainPath + VirtualUTXODiff UTXODiff + VirtualParents []*DomainHash + VirtualSelectedParentBlueScore uint64 + VirtualDAAScore uint64 +} + +func (*VirtualChangeSet) 
// isConsensusEvent marks VirtualChangeSet as a ConsensusEvent
func (*VirtualChangeSet) isConsensusEvent() {}

// SelectedChainPath is the path of the selected chain between two blocks:
// the chain block hashes that were added and the ones that were removed.
type SelectedChainPath struct {
	Added   []*DomainHash
	Removed []*DomainHash
}

// KType defines the size of GHOSTDAG consensus algorithm K parameter.
type KType byte

// BlockGHOSTDAGData represents GHOSTDAG data for some block.
// All fields are unexported; the type is read-only after construction
// via NewBlockGHOSTDAGData.
type BlockGHOSTDAGData struct {
	blueScore          uint64
	blueWork           *big.Int
	selectedParent     *DomainHash
	mergeSetBlues      []*DomainHash
	mergeSetReds       []*DomainHash
	bluesAnticoneSizes map[DomainHash]KType
}

// NewBlockGHOSTDAGData creates a new instance of BlockGHOSTDAGData.
// Note: the slices and map are stored as-is, not copied.
func NewBlockGHOSTDAGData(
	blueScore uint64,
	blueWork *big.Int,
	selectedParent *DomainHash,
	mergeSetBlues []*DomainHash,
	mergeSetReds []*DomainHash,
	bluesAnticoneSizes map[DomainHash]KType) *BlockGHOSTDAGData {

	return &BlockGHOSTDAGData{
		blueScore:          blueScore,
		blueWork:           blueWork,
		selectedParent:     selectedParent,
		mergeSetBlues:      mergeSetBlues,
		mergeSetReds:       mergeSetReds,
		bluesAnticoneSizes: bluesAnticoneSizes,
	}
}

// BlueScore returns the BlueScore of the block
func (bgd *BlockGHOSTDAGData) BlueScore() uint64 {
	return bgd.blueScore
}

// BlueWork returns the BlueWork of the block
func (bgd *BlockGHOSTDAGData) BlueWork() *big.Int {
	return bgd.blueWork
}

// SelectedParent returns the SelectedParent of the block
func (bgd *BlockGHOSTDAGData) SelectedParent() *DomainHash {
	return bgd.selectedParent
}

// MergeSetBlues returns the MergeSetBlues of the block (not a copy)
func (bgd *BlockGHOSTDAGData) MergeSetBlues() []*DomainHash {
	return bgd.mergeSetBlues
}
(bgd *BlockGHOSTDAGData) MergeSetReds() []*DomainHash { + return bgd.mergeSetReds +} + +// BluesAnticoneSizes returns a map between the blocks in its MergeSetBlues and the size of their anticone +func (bgd *BlockGHOSTDAGData) BluesAnticoneSizes() map[DomainHash]KType { + return bgd.bluesAnticoneSizes +} diff --git a/domain/consensus/model/externalapi/hash.go b/domain/consensus/model/externalapi/hash.go new file mode 100644 index 0000000..1ea8a90 --- /dev/null +++ b/domain/consensus/model/externalapi/hash.go @@ -0,0 +1,123 @@ +package externalapi + +import ( + "bytes" + "encoding/hex" + + "github.com/pkg/errors" +) + +// DomainHashSize of array used to store hashes. +const DomainHashSize = 32 + +// DomainHash is the domain representation of a Hash +type DomainHash struct { + hashArray [DomainHashSize]byte +} + +// NewZeroHash returns a DomainHash that represents the zero value (0x000000...000) +func NewZeroHash() *DomainHash { + return &DomainHash{hashArray: [32]byte{}} +} + +// NewDomainHashFromByteArray constructs a new DomainHash out of a byte array +func NewDomainHashFromByteArray(hashBytes *[DomainHashSize]byte) *DomainHash { + return &DomainHash{ + hashArray: *hashBytes, + } +} + +// NewDomainHashFromByteSlice constructs a new DomainHash out of a byte slice. +// Returns an error if the length of the byte slice is not exactly `DomainHashSize` +func NewDomainHashFromByteSlice(hashBytes []byte) (*DomainHash, error) { + if len(hashBytes) != DomainHashSize { + return nil, errors.Errorf("invalid hash size. Want: %d, got: %d", + DomainHashSize, len(hashBytes)) + } + domainHash := DomainHash{ + hashArray: [DomainHashSize]byte{}, + } + copy(domainHash.hashArray[:], hashBytes) + return &domainHash, nil +} + +// NewDomainHashFromString constructs a new DomainHash out of a hex-encoded string. 
+// Returns an error if the length of the string is not exactly `DomainHashSize * 2` +func NewDomainHashFromString(hashString string) (*DomainHash, error) { + expectedLength := DomainHashSize * 2 + // Return error if hash string is too long. + if len(hashString) != expectedLength { + return nil, errors.Errorf("hash string length is %d, while it should be be %d", + len(hashString), expectedLength) + } + + hashBytes, err := hex.DecodeString(hashString) + if err != nil { + return nil, errors.WithStack(err) + } + + return NewDomainHashFromByteSlice(hashBytes) +} + +// String returns the Hash as the hexadecimal string of the hash. +func (hash DomainHash) String() string { + return hex.EncodeToString(hash.hashArray[:]) +} + +// ByteArray returns the bytes in this hash represented as a byte array. +// The hash bytes are cloned, therefore it is safe to modify the resulting array. +func (hash *DomainHash) ByteArray() *[DomainHashSize]byte { + arrayClone := hash.hashArray + return &arrayClone +} + +// ByteSlice returns the bytes in this hash represented as a byte slice. +// The hash bytes are cloned, therefore it is safe to modify the resulting slice. +func (hash *DomainHash) ByteSlice() []byte { + return hash.ByteArray()[:] +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
+var _ DomainHash = DomainHash{hashArray: [DomainHashSize]byte{}} + +// Equal returns whether hash equals to other +func (hash *DomainHash) Equal(other *DomainHash) bool { + if hash == nil || other == nil { + return hash == other + } + + return hash.hashArray == other.hashArray +} + +// Less returns true if hash is less than other +func (hash *DomainHash) Less(other *DomainHash) bool { + return bytes.Compare(hash.hashArray[:], other.hashArray[:]) < 0 +} + +// LessOrEqual returns true if hash is smaller or equal to other +func (hash *DomainHash) LessOrEqual(other *DomainHash) bool { + return bytes.Compare(hash.hashArray[:], other.hashArray[:]) <= 0 +} + +// CloneHashes returns a clone of the given hashes slice. +// Note: since DomainHash is a read-only type, the clone is shallow +func CloneHashes(hashes []*DomainHash) []*DomainHash { + clone := make([]*DomainHash, len(hashes)) + copy(clone, hashes) + return clone +} + +// HashesEqual returns whether the given hash slices are equal. +func HashesEqual(a, b []*DomainHash) bool { + if len(a) != len(b) { + return false + } + + for i, hash := range a { + if !hash.Equal(b[i]) { + return false + } + } + return true +} diff --git a/domain/consensus/model/externalapi/hash_clone_equal_test.go b/domain/consensus/model/externalapi/hash_clone_equal_test.go new file mode 100644 index 0000000..7e47b03 --- /dev/null +++ b/domain/consensus/model/externalapi/hash_clone_equal_test.go @@ -0,0 +1,79 @@ +package externalapi + +import ( + "testing" +) + +type testHashToCompare struct { + hash *DomainHash + expectedResult bool +} + +type testHashStruct struct { + baseHash *DomainHash + hashesToCompareTo []testHashToCompare +} + +func initTestDomainHashForEqual() []*testHashStruct { + tests := []*testHashStruct{ + { + baseHash: nil, + hashesToCompareTo: []testHashToCompare{ + { + hash: nil, + expectedResult: true, + }, { + hash: NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + expectedResult: false, + }, + }, + }, { + baseHash: NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF}), + hashesToCompareTo: []testHashToCompare{ + { + hash: nil, + expectedResult: false, + }, { + hash: NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + expectedResult: false, + }, { + hash: NewDomainHashFromByteArray(&[DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF}), + expectedResult: true, + }, + }, + }, + } + return tests +} + +func TestDomainHash_Equal(t *testing.T) { + hashTests := initTestDomainHashForEqual() + for i, test := range hashTests { + for j, subTest := range test.hashesToCompareTo { + result1 := test.baseHash.Equal(subTest.hash) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.hash.Equal(test.baseHash) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} diff --git a/domain/consensus/model/externalapi/pruning_point_proof.go b/domain/consensus/model/externalapi/pruning_point_proof.go new file mode 100644 index 0000000..bee119c --- /dev/null +++ b/domain/consensus/model/externalapi/pruning_point_proof.go @@ -0,0 +1,6 @@ +package externalapi + +// 
// DomainSubnetworkIDSize is the size of the array used to store subnetwork IDs.
const DomainSubnetworkIDSize = 20

// DomainSubnetworkID is the domain representation of a Subnetwork ID
type DomainSubnetworkID [DomainSubnetworkIDSize]byte

// String stringifies a subnetwork ID.
func (id DomainSubnetworkID) String() string {
	return hex.EncodeToString(id[:])
}

// Clone returns a clone of DomainSubnetworkID
func (id *DomainSubnetworkID) Clone() *DomainSubnetworkID {
	cloned := *id
	return &cloned
}

// If this doesn't compile, it means the type definition has been changed, so it's
// an indication to update Equal and Clone accordingly.
var _ DomainSubnetworkID = [DomainSubnetworkIDSize]byte{}

// Equal returns whether id equals to other.
// Two nil IDs are considered equal; nil never equals non-nil.
func (id *DomainSubnetworkID) Equal(other *DomainSubnetworkID) bool {
	if id == nil || other == nil {
		return id == other
	}
	return *id == *other
}
// SyncInfo holds info about the current sync state of the consensus
type SyncInfo struct {
	HeaderCount uint64
	BlockCount  uint64
}

// Clone returns a clone of SyncInfo
func (si *SyncInfo) Clone() *SyncInfo {
	cloned := *si
	return &cloned
}

// If this doesn't compile, it means the type definition has been changed, so it's
// an indication to update Equal and Clone accordingly.
var _ = SyncInfo{0, 0}

// Equal returns whether si equals to other.
// Two nil values are considered equal; nil never equals non-nil.
func (si *SyncInfo) Equal(other *SyncInfo) bool {
	if si == nil || other == nil {
		return si == other
	}
	return si.HeaderCount == other.HeaderCount &&
		si.BlockCount == other.BlockCount
}
i, test := range testSyncState { + for j, subTest := range test.syncInfoToCompareTo { + result1 := test.baseSyncInfo.Equal(subTest.syncInfo) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.syncInfo.Equal(test.baseSyncInfo) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestSyncInfo_Clone(t *testing.T) { + + testSyncInfo := initTestSyncInfoForClone() + for i, syncInfo := range testSyncInfo { + syncStateClone := syncInfo.Clone() + if !syncStateClone.Equal(syncInfo) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(syncInfo, syncStateClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/model/externalapi/transaction.go b/domain/consensus/model/externalapi/transaction.go new file mode 100644 index 0000000..6caf7c7 --- /dev/null +++ b/domain/consensus/model/externalapi/transaction.go @@ -0,0 +1,363 @@ +package externalapi + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/pkg/errors" +) + +// DomainTransaction represents a Spectre transaction +type DomainTransaction struct { + Version uint16 + Inputs []*DomainTransactionInput + Outputs []*DomainTransactionOutput + LockTime uint64 + SubnetworkID DomainSubnetworkID + Gas uint64 + Payload []byte + + Fee uint64 + Mass uint64 + + // ID is a field that is used to cache the transaction ID. 
+ // Always use consensushashing.TransactionID instead of accessing this field directly + ID *DomainTransactionID +} + +// Clone returns a clone of DomainTransaction +func (tx *DomainTransaction) Clone() *DomainTransaction { + payloadClone := make([]byte, len(tx.Payload)) + copy(payloadClone, tx.Payload) + + inputsClone := make([]*DomainTransactionInput, len(tx.Inputs)) + for i, input := range tx.Inputs { + inputsClone[i] = input.Clone() + } + + outputsClone := make([]*DomainTransactionOutput, len(tx.Outputs)) + for i, output := range tx.Outputs { + outputsClone[i] = output.Clone() + } + + var idClone *DomainTransactionID + if tx.ID != nil { + idClone = tx.ID.Clone() + } + + return &DomainTransaction{ + Version: tx.Version, + Inputs: inputsClone, + Outputs: outputsClone, + LockTime: tx.LockTime, + SubnetworkID: *tx.SubnetworkID.Clone(), + Gas: tx.Gas, + Payload: payloadClone, + Fee: tx.Fee, + Mass: tx.Mass, + ID: idClone, + } +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
// Compile-time guard: if this doesn't compile, the DomainTransaction field list
// changed, which is an indication to update Equal and Clone accordingly.
var _ = DomainTransaction{0, []*DomainTransactionInput{}, []*DomainTransactionOutput{}, 0,
	DomainSubnetworkID{}, 0, []byte{}, 0, 0,
	&DomainTransactionID{}}

// Equal returns whether tx equals to other.
// Consensus fields are compared field by field. The cached fields (Fee, Mass,
// ID) are skipped when unpopulated (zero/nil) on either side; when both sides
// are populated they must agree — a mismatch between otherwise-identical
// transactions is an internal inconsistency and triggers a panic.
func (tx *DomainTransaction) Equal(other *DomainTransaction) bool {
	if tx == nil || other == nil {
		return tx == other
	}

	if tx.Version != other.Version {
		return false
	}

	if len(tx.Inputs) != len(other.Inputs) {
		return false
	}

	for i, input := range tx.Inputs {
		if !input.Equal(other.Inputs[i]) {
			return false
		}
	}

	if len(tx.Outputs) != len(other.Outputs) {
		return false
	}

	for i, output := range tx.Outputs {
		if !output.Equal(other.Outputs[i]) {
			return false
		}
	}

	if tx.LockTime != other.LockTime {
		return false
	}

	if !tx.SubnetworkID.Equal(&other.SubnetworkID) {
		return false
	}

	if tx.Gas != other.Gas {
		return false
	}

	if !bytes.Equal(tx.Payload, other.Payload) {
		return false
	}

	// Cached-field consistency checks: panic rather than return false, since a
	// disagreement here means the caches were computed incorrectly somewhere.
	if tx.Fee != 0 && other.Fee != 0 && tx.Fee != other.Fee {
		panic(errors.New("identical transactions should always have the same fee"))
	}

	if tx.Mass != 0 && other.Mass != 0 && tx.Mass != other.Mass {
		panic(errors.New("identical transactions should always have the same mass"))
	}

	if tx.ID != nil && other.ID != nil && !tx.ID.Equal(other.ID) {
		panic(errors.New("identical transactions should always have the same ID"))
	}

	return true
}

// DomainTransactionInput represents a Spectre transaction input
type DomainTransactionInput struct {
	PreviousOutpoint DomainOutpoint
	SignatureScript  []byte
	Sequence         uint64
	SigOpCount       byte

	// UTXOEntry is the entry being spent; nil when not yet populated.
	UTXOEntry UTXOEntry
}
// Compile-time guard: if this doesn't compile, the DomainTransactionInput field
// list changed, which is an indication to update Equal and Clone accordingly.
var _ = &DomainTransactionInput{DomainOutpoint{}, []byte{}, 0, 0, nil}

// Equal returns whether input equals to other.
// UTXOEntry is only compared when populated on both sides; a mismatch between
// otherwise-identical inputs is an internal inconsistency and triggers a panic.
func (input *DomainTransactionInput) Equal(other *DomainTransactionInput) bool {
	if input == nil || other == nil {
		return input == other
	}

	if !input.PreviousOutpoint.Equal(&other.PreviousOutpoint) {
		return false
	}

	if !bytes.Equal(input.SignatureScript, other.SignatureScript) {
		return false
	}

	if input.Sequence != other.Sequence {
		return false
	}

	if input.SigOpCount != other.SigOpCount {
		return false
	}

	if input.UTXOEntry != nil && other.UTXOEntry != nil && !input.UTXOEntry.Equal(other.UTXOEntry) {
		panic(errors.New("identical inputs should always have the same UTXO entry"))
	}

	return true
}

// Clone returns a clone of DomainTransactionInput.
// NOTE(review): UTXOEntry is shared by reference, not deep-cloned — presumably
// entries are treated as immutable; confirm before mutating via a cloned input.
func (input *DomainTransactionInput) Clone() *DomainTransactionInput {
	signatureScriptClone := make([]byte, len(input.SignatureScript))
	copy(signatureScriptClone, input.SignatureScript)

	return &DomainTransactionInput{
		PreviousOutpoint: *input.PreviousOutpoint.Clone(),
		SignatureScript:  signatureScriptClone,
		Sequence:         input.Sequence,
		SigOpCount:       input.SigOpCount,
		UTXOEntry:        input.UTXOEntry,
	}
}

// DomainOutpoint represents a Spectre transaction outpoint
type DomainOutpoint struct {
	TransactionID DomainTransactionID
	Index         uint32
}
+var _ = DomainOutpoint{DomainTransactionID{}, 0} + +// Equal returns whether op equals to other +func (op *DomainOutpoint) Equal(other *DomainOutpoint) bool { + if op == nil || other == nil { + return op == other + } + + return *op == *other +} + +// Clone returns a clone of DomainOutpoint +func (op *DomainOutpoint) Clone() *DomainOutpoint { + return &DomainOutpoint{ + TransactionID: *op.TransactionID.Clone(), + Index: op.Index, + } +} + +// String stringifies an outpoint. +func (op DomainOutpoint) String() string { + return fmt.Sprintf("(%s: %d)", op.TransactionID, op.Index) +} + +// NewDomainOutpoint instantiates a new DomainOutpoint with the given id and index +func NewDomainOutpoint(id *DomainTransactionID, index uint32) *DomainOutpoint { + return &DomainOutpoint{ + TransactionID: *id, + Index: index, + } +} + +// ScriptPublicKey represents a Spectred ScriptPublicKey +type ScriptPublicKey struct { + Script []byte + Version uint16 +} + +// Equal returns whether spk equals to other +func (spk *ScriptPublicKey) Equal(other *ScriptPublicKey) bool { + if spk == nil || other == nil { + return spk == other + } + + if spk.Version != other.Version { + return false + } + + return bytes.Equal(spk.Script, other.Script) +} + +// String stringifies a ScriptPublicKey. 
+func (spk *ScriptPublicKey) String() string { + var versionBytes = make([]byte, 2) // uint16 + binary.LittleEndian.PutUint16(versionBytes, spk.Version) + versionString := string(versionBytes) + scriptString := string(spk.Script) + return versionString + scriptString +} + +// NewScriptPublicKeyFromString converts the given string to a scriptPublicKey +func NewScriptPublicKeyFromString(ScriptPublicKeyString string) *ScriptPublicKey { + bytes := []byte(ScriptPublicKeyString) + version := binary.LittleEndian.Uint16(bytes[:2]) + script := bytes[2:] + return &ScriptPublicKey{Script: script, Version: version} +} + +// DomainTransactionOutput represents a Spectred transaction output +type DomainTransactionOutput struct { + Value uint64 + ScriptPublicKey *ScriptPublicKey +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. +var _ = DomainTransactionOutput{0, &ScriptPublicKey{Script: []byte{}, Version: 0}} + +// Equal returns whether output equals to other +func (output *DomainTransactionOutput) Equal(other *DomainTransactionOutput) bool { + if output == nil || other == nil { + return output == other + } + + if output.Value != other.Value { + return false + } + + return output.ScriptPublicKey.Equal(other.ScriptPublicKey) +} + +// Clone returns a clone of DomainTransactionOutput +func (output *DomainTransactionOutput) Clone() *DomainTransactionOutput { + scriptPublicKeyClone := &ScriptPublicKey{ + Script: make([]byte, len(output.ScriptPublicKey.Script)), + Version: output.ScriptPublicKey.Version} + copy(scriptPublicKeyClone.Script, output.ScriptPublicKey.Script) + + return &DomainTransactionOutput{ + Value: output.Value, + ScriptPublicKey: scriptPublicKeyClone, + } +} + +// DomainTransactionID represents the ID of a Spectre transaction +type DomainTransactionID DomainHash + +// NewDomainTransactionIDFromByteArray constructs a new TransactionID out of a byte array +func 
NewDomainTransactionIDFromByteArray(transactionIDBytes *[DomainHashSize]byte) *DomainTransactionID { + return (*DomainTransactionID)(NewDomainHashFromByteArray(transactionIDBytes)) +} + +// NewDomainTransactionIDFromByteSlice constructs a new TransactionID out of a byte slice +// Returns an error if the length of the byte slice is not exactly `DomainHashSize` +func NewDomainTransactionIDFromByteSlice(transactionIDBytes []byte) (*DomainTransactionID, error) { + hash, err := NewDomainHashFromByteSlice(transactionIDBytes) + if err != nil { + return nil, err + } + return (*DomainTransactionID)(hash), nil +} + +// NewDomainTransactionIDFromString constructs a new TransactionID out of a string +// Returns an error if the length of the string is not exactly `DomainHashSize * 2` +func NewDomainTransactionIDFromString(transactionIDString string) (*DomainTransactionID, error) { + hash, err := NewDomainHashFromString(transactionIDString) + if err != nil { + return nil, err + } + return (*DomainTransactionID)(hash), nil +} + +// String stringifies a transaction ID. +func (id DomainTransactionID) String() string { + return DomainHash(id).String() +} + +// Clone returns a clone of DomainTransactionID +func (id *DomainTransactionID) Clone() *DomainTransactionID { + idClone := *id + return &idClone +} + +// Equal returns whether id equals to other +func (id *DomainTransactionID) Equal(other *DomainTransactionID) bool { + return (*DomainHash)(id).Equal((*DomainHash)(other)) +} + +// Less returns true if id is less than other +func (id *DomainTransactionID) Less(other *DomainTransactionID) bool { + return (*DomainHash)(id).Less((*DomainHash)(other)) +} + +// LessOrEqual returns true if id is smaller or equal to other +func (id *DomainTransactionID) LessOrEqual(other *DomainTransactionID) bool { + return (*DomainHash)(id).LessOrEqual((*DomainHash)(other)) +} + +// ByteArray returns the bytes in this transactionID represented as a byte array. 
+// The transactionID bytes are cloned, therefore it is safe to modify the resulting array. +func (id *DomainTransactionID) ByteArray() *[DomainHashSize]byte { + return (*DomainHash)(id).ByteArray() +} + +// ByteSlice returns the bytes in this transactionID represented as a byte slice. +// The transactionID bytes are cloned, therefore it is safe to modify the resulting slice. +func (id *DomainTransactionID) ByteSlice() []byte { + return (*DomainHash)(id).ByteSlice() +} diff --git a/domain/consensus/model/externalapi/transaction_equal_clone_test.go b/domain/consensus/model/externalapi/transaction_equal_clone_test.go new file mode 100644 index 0000000..5f61d56 --- /dev/null +++ b/domain/consensus/model/externalapi/transaction_equal_clone_test.go @@ -0,0 +1,1107 @@ +package externalapi_test + +import ( + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +// Changed fields of a test struct compared to a base test struct marked as "changed" and +// pointing in some cases name changed struct field + +type transactionToCompare struct { + tx *externalapi.DomainTransaction + expectedResult bool + expectsPanic bool +} + +type testDomainTransactionStruct struct { + baseTx *externalapi.DomainTransaction + transactionToCompareTo []*transactionToCompare +} + +type transactionInputToCompare struct { + tx *externalapi.DomainTransactionInput + expectedResult bool + expectsPanic bool +} + +type testDomainTransactionInputStruct struct { + baseTx *externalapi.DomainTransactionInput + transactionInputToCompareTo []*transactionInputToCompare +} + +type transactionOutputToCompare struct { + tx *externalapi.DomainTransactionOutput + expectedResult bool +} + +type testDomainTransactionOutputStruct struct { + baseTx *externalapi.DomainTransactionOutput + transactionOutputToCompareTo []*transactionOutputToCompare +} + +type domainOutpointToCompare struct { + 
// Table-entry types pairing a value-to-compare with its expected Equal result.
type testDomainOutpointStruct struct {
	baseDomainOutpoint        *externalapi.DomainOutpoint
	domainOutpointToCompareTo []*domainOutpointToCompare
}

type domainTransactionIDToCompare struct {
	domainTransactionID *externalapi.DomainTransactionID
	expectedResult      bool
}

type testDomainTransactionIDStruct struct {
	baseDomainTransactionID        *externalapi.DomainTransactionID
	domainTransactionIDToCompareTo []*domainTransactionIDToCompare
}

// initTestBaseTransaction returns the fully-populated DomainTransaction that
// serves as the comparison baseline for the Equal/Clone test tables below.
// Fields are set positionally, in DomainTransaction declaration order:
// Version, Inputs, Outputs, LockTime, SubnetworkID, Gas, Payload, Fee, Mass, ID.
func initTestBaseTransaction() *externalapi.DomainTransaction {

	testTx := &externalapi.DomainTransaction{
		1,
		[]*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{
			*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF},
			[]byte{1, 2, 3},
			uint64(0xFFFFFFFF),
			1,
			utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}},
		[]*externalapi.DomainTransactionOutput{{uint64(0xFFFF),
			&externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}},
			{uint64(0xFFFF),
				&externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}},
		1,
		externalapi.DomainSubnetworkID{0x01},
		1,
		[]byte{0x01},
		0,
		1,
		externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}),
	}
	return testTx
}
Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}, //Changed + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01, 0x02}, //Changed + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, 
+ []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01, 0x02}, //Changed + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: true, + }, + { + // ID changed + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + 
[]*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + }, + expectsPanic: true, + }, + { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 1000000000, //Changed + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: true, + }, { + tx: &externalapi.DomainTransaction{ + 2, //Changed + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + 
[]*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 2, //Changed + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}), + }, + expectsPanic: true, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + 
[]*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 2, //Changed + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + {externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + 
*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}, {uint64(0xFFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2, 3}, Version: 0}}}, //changed Outputs + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + nil, //changed + }, + expectedResult: true, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFF0), // Changed sequence + 1, + utxo.NewUTXOEntry(1, 
&externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 3, // Changed SigOpCount + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, + { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{{externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, 
&externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}}, + []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, + {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 2, // Changed + []byte{0x01}, + 0, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + expectedResult: false, + }, + } + return testTx +} + +func initTestDomainTransactionForClone() []*externalapi.DomainTransaction { + + tests := []*externalapi.DomainTransaction{ + { + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{ + {externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2)}, + }, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 5555555555, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, { + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: []*externalapi.DomainTransactionOutput{}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, 
+ Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{}), + }, + } + return tests +} + +func initTestDomainTransactionForEqual() []testDomainTransactionStruct { + + tests := []testDomainTransactionStruct{ + { + baseTx: initTestBaseTransaction(), + transactionToCompareTo: initTestTransactionToCompare(), + }, + { + baseTx: nil, + transactionToCompareTo: []*transactionToCompare{{ + tx: nil, + expectedResult: true}}, + }, { + baseTx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{}, + []*externalapi.DomainTransactionOutput{}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 1, + 1, + externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + }, + transactionToCompareTo: []*transactionToCompare{{ + tx: nil, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{}, + []*externalapi.DomainTransactionOutput{}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 0, + []byte{0x01}, + 1, + 1, + nil, + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{}, + []*externalapi.DomainTransactionOutput{}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 1, + 1, + nil, + }, + expectedResult: true, + }, { + tx: &externalapi.DomainTransaction{ + 1, + []*externalapi.DomainTransactionInput{}, + []*externalapi.DomainTransactionOutput{}, + 1, + externalapi.DomainSubnetworkID{0x01}, + 1, + []byte{0x01}, + 2, // Changed fee + 1, + nil, + }, + expectsPanic: true, + }}, + }, + } + return tests +} + +func initTestBaseDomainTransactionInput() *externalapi.DomainTransactionInput { + basetxInput := &externalapi.DomainTransactionInput{ + 
externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + } + return basetxInput +} + +func initTestDomainTxInputToCompare() []*transactionInputToCompare { + txInput := []*transactionInputToCompare{{ + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, + expectedResult: true, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, false, 2), // Changed + }, + expectsPanic: true, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + nil, // Changed + }, + expectedResult: true, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFF0), // Changed + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFF0), + 
5, // Changed + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3, 4}, // Changed + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01, 0x02}), 0xFFFF}, // Changed + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01, 0x02}), 0xFFFF}, // Changed + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(2 /* Changed */, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), // Changed + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionInput{ + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01, 0x02}), 0xFFFF}, // Changed + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(3 /* Changed */, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 3), // Changed + }, + expectedResult: false, + }, { + tx: nil, + expectedResult: false, + }} + return txInput + +} + +func initTestDomainTransactionInputForClone() []*externalapi.DomainTransactionInput { + txInput := []*externalapi.DomainTransactionInput{ + { + 
externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, { + + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFFF), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }, { + + externalapi.DomainOutpoint{*externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x01}), 0xFFFF}, + []byte{1, 2, 3}, + uint64(0xFFFFFFF0), + 1, + utxo.NewUTXOEntry(1, &externalapi.ScriptPublicKey{Script: []byte{0, 1, 2, 3}, Version: 0}, true, 2), + }} + return txInput +} + +func initTestBaseDomainTransactionOutput() *externalapi.DomainTransactionOutput { + basetxOutput := &externalapi.DomainTransactionOutput{ + 0xFFFFFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF}, Version: 0}, + } + return basetxOutput +} + +func initTestDomainTransactionOutputForClone() []*externalapi.DomainTransactionOutput { + txInput := []*externalapi.DomainTransactionOutput{ + { + 0xFFFFFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xF0, 0xFF}, Version: 0}, + }, { + 0xFFFFFFF1, + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF}, Version: 0}, + }} + return txInput +} + +func initTestDomainTransactionOutputForEqual() []testDomainTransactionOutputStruct { + tests := []testDomainTransactionOutputStruct{ + { + baseTx: initTestBaseDomainTransactionOutput(), + transactionOutputToCompareTo: []*transactionOutputToCompare{{ + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF}, Version: 0}}, + expectedResult: true, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFFF, + &externalapi.ScriptPublicKey{Script: 
[]byte{0xF0, 0xFF}, Version: 0}, // Changed + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFF0, // Changed + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF}, Version: 0}, + }, + expectedResult: false, + }, { + tx: nil, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFF0, // Changed + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF, 0x01}, Version: 0}}, // Changed + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFF0, // Changed + &externalapi.ScriptPublicKey{Script: []byte{}, Version: 0}, // Changed + }, + expectedResult: false, + }}, + }, + { + baseTx: nil, + transactionOutputToCompareTo: []*transactionOutputToCompare{{ + tx: nil, + expectedResult: true, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF}, Version: 0}}, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xF0, 0xFF}, Version: 0}, // Changed + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFF0, // Changed + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF}, Version: 0}, + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFF0, + &externalapi.ScriptPublicKey{Script: []byte{0xFF, 0xFF, 0x01}, Version: 0}, // Changed + }, + expectedResult: false, + }, { + tx: &externalapi.DomainTransactionOutput{ + 0xFFFFFFF0, + &externalapi.ScriptPublicKey{Script: []byte{}, Version: 0}, // Changed + }, + expectedResult: false, + }}, + }, + } + return tests +} + +func initTestDomainTransactionInputForEqual() []testDomainTransactionInputStruct { + + tests := []testDomainTransactionInputStruct{ + { + baseTx: initTestBaseDomainTransactionInput(), + transactionInputToCompareTo: initTestDomainTxInputToCompare(), + }, + } + return tests +} + +func 
TestDomainTransaction_Equal(t *testing.T) { + + txTests := initTestDomainTransactionForEqual() + for i, test := range txTests { + for j, subTest := range test.transactionToCompareTo { + func() { + defer func() { + r := recover() + panicked := r != nil + if panicked != subTest.expectsPanic { + t.Fatalf("Test #%d:%d: panicked expected to be %t but got %t: %s", i, j, subTest.expectsPanic, panicked, r) + } + }() + result1 := test.baseTx.Equal(subTest.tx) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + }() + func() { + defer func() { + r := recover() + panicked := r != nil + if panicked != subTest.expectsPanic { + t.Fatalf("Test #%d:%d: panicked expected to be %t but got %t: %s", i, j, subTest.expectsPanic, panicked, r) + } + }() + result2 := subTest.tx.Equal(test.baseTx) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + }() + } + } +} + +func TestDomainTransaction_Clone(t *testing.T) { + + txs := initTestDomainTransactionForClone() + for i, tx := range txs { + txClone := tx.Clone() + if !txClone.Equal(tx) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(tx, txClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} + +func TestDomainTransactionInput_Equal(t *testing.T) { + + txTests := initTestDomainTransactionInputForEqual() + for i, test := range txTests { + for j, subTest := range test.transactionInputToCompareTo { + func() { + defer func() { + r := recover() + panicked := r != nil + if panicked != subTest.expectsPanic { + t.Fatalf("Test #%d:%d: panicked expected to be %t but got %t: %s", i, j, subTest.expectsPanic, panicked, r) + } + }() + result1 := test.baseTx.Equal(subTest.tx) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, 
result1) + } + }() + func() { + defer func() { + r := recover() + panicked := r != nil + if panicked != subTest.expectsPanic { + t.Fatalf("Test #%d:%d: panicked expected to be %t but got %t: %s", i, j, subTest.expectsPanic, panicked, r) + } + }() + result2 := subTest.tx.Equal(test.baseTx) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + }() + } + } +} + +func TestDomainTransactionInput_Clone(t *testing.T) { + + txInputs := initTestDomainTransactionInputForClone() + for i, txInput := range txInputs { + txInputClone := txInput.Clone() + if !txInputClone.Equal(txInput) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(txInput, txInputClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} + +func TestDomainTransactionOutput_Equal(t *testing.T) { + + txTests := initTestDomainTransactionOutputForEqual() + for i, test := range txTests { + for j, subTest := range test.transactionOutputToCompareTo { + result1 := test.baseTx.Equal(subTest.tx) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.tx.Equal(test.baseTx) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestDomainTransactionOutput_Clone(t *testing.T) { + + txInputs := initTestDomainTransactionOutputForClone() + for i, txOutput := range txInputs { + txOutputClone := txOutput.Clone() + if !txOutputClone.Equal(txOutput) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(txOutput, txOutputClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} + +func initTestDomainOutpointForClone() []*externalapi.DomainOutpoint { + outpoint := 
[]*externalapi.DomainOutpoint{{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + 1}, + } + return outpoint +} + +func initTestDomainOutpointForEqual() []testDomainOutpointStruct { + + var outpoint = []*domainOutpointToCompare{{ + domainOutpoint: &externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + 1}, + expectedResult: true, + }, { + domainOutpoint: &externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + 1}, + expectedResult: false, + }, { + domainOutpoint: &externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0}), + 2}, + expectedResult: false, + }} + tests := []testDomainOutpointStruct{ + { + baseDomainOutpoint: &externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + 1}, + domainOutpointToCompareTo: outpoint, + 
}, {baseDomainOutpoint: &externalapi.DomainOutpoint{ + *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + 1}, + domainOutpointToCompareTo: []*domainOutpointToCompare{{domainOutpoint: nil, expectedResult: false}}, + }, {baseDomainOutpoint: nil, + domainOutpointToCompareTo: []*domainOutpointToCompare{{domainOutpoint: nil, expectedResult: true}}, + }, + } + return tests +} + +func TestDomainOutpoint_Equal(t *testing.T) { + + domainOutpoints := initTestDomainOutpointForEqual() + for i, test := range domainOutpoints { + for j, subTest := range test.domainOutpointToCompareTo { + result1 := test.baseDomainOutpoint.Equal(subTest.domainOutpoint) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.domainOutpoint.Equal(test.baseDomainOutpoint) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestDomainOutpoint_Clone(t *testing.T) { + + domainOutpoints := initTestDomainOutpointForClone() + for i, outpoint := range domainOutpoints { + outpointClone := outpoint.Clone() + if !outpointClone.Equal(outpoint) { + t.Fatalf("Test #%d:[Equal] clone should be equal to the original", i) + } + if !reflect.DeepEqual(outpoint, outpointClone) { + t.Fatalf("Test #%d:[DeepEqual] clone should be equal to the original", i) + } + } +} + +func initTestDomainTransactionIDForEqual() []testDomainTransactionIDStruct { + + var outpoint = []*domainTransactionIDToCompare{{ + domainTransactionID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + expectedResult: true, + }, { + domainTransactionID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + expectedResult: false, + }, { + domainTransactionID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0}), + expectedResult: false, + }} + tests := []testDomainTransactionIDStruct{ + { + baseDomainTransactionID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + domainTransactionIDToCompareTo: outpoint, + }, { + baseDomainTransactionID: nil, + domainTransactionIDToCompareTo: []*domainTransactionIDToCompare{{ + domainTransactionID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}), + expectedResult: false, + }}, + }, + } + return tests +} + +func TestDomainTransactionID_Equal(t *testing.T) { + domainDomainTransactionIDs := initTestDomainTransactionIDForEqual() + for i, test := range domainDomainTransactionIDs { + for j, subTest := range test.domainTransactionIDToCompareTo { + result1 := test.baseDomainTransactionID.Equal(subTest.domainTransactionID) + if 
result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.domainTransactionID.Equal(test.baseDomainTransactionID) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} diff --git a/domain/consensus/model/externalapi/utxodiff.go b/domain/consensus/model/externalapi/utxodiff.go new file mode 100644 index 0000000..30d7e1f --- /dev/null +++ b/domain/consensus/model/externalapi/utxodiff.go @@ -0,0 +1,32 @@ +package externalapi + +// UTXOCollection represents a collection of UTXO entries, indexed by their outpoint +type UTXOCollection interface { + Iterator() ReadOnlyUTXOSetIterator + Get(outpoint *DomainOutpoint) (UTXOEntry, bool) + Contains(outpoint *DomainOutpoint) bool + Len() int +} + +// UTXODiff represents the diff between two UTXO sets +type UTXODiff interface { + ToAdd() UTXOCollection + ToRemove() UTXOCollection + WithDiff(other UTXODiff) (UTXODiff, error) + DiffFrom(other UTXODiff) (UTXODiff, error) + Reversed() UTXODiff + CloneMutable() MutableUTXODiff +} + +// MutableUTXODiff represents a UTXO-Diff that can be mutated +type MutableUTXODiff interface { + ToImmutable() UTXODiff + + WithDiff(other UTXODiff) (UTXODiff, error) + DiffFrom(other UTXODiff) (UTXODiff, error) + ToAdd() UTXOCollection + ToRemove() UTXOCollection + + WithDiffInPlace(other UTXODiff) error + AddTransaction(transaction *DomainTransaction, blockDAAScore uint64) error +} diff --git a/domain/consensus/model/externalapi/utxoentry.go b/domain/consensus/model/externalapi/utxoentry.go new file mode 100644 index 0000000..fb628b6 --- /dev/null +++ b/domain/consensus/model/externalapi/utxoentry.go @@ -0,0 +1,20 @@ +package externalapi + +// UTXOEntry houses details about an individual transaction output in a utxo +// set such as whether or not it was contained in a coinbase tx, the daa +// score of the block that 
accepts the tx, its public key script, and how +// much it pays. +type UTXOEntry interface { + Amount() uint64 // Utxo amount in Sompis + ScriptPublicKey() *ScriptPublicKey // The public key script for the output. + BlockDAAScore() uint64 // Daa score of the block accepting the tx. + IsCoinbase() bool + Equal(other UTXOEntry) bool +} + +// OutpointAndUTXOEntryPair is an outpoint along with its +// respective UTXO entry +type OutpointAndUTXOEntryPair struct { + Outpoint *DomainOutpoint + UTXOEntry UTXOEntry +} diff --git a/domain/consensus/model/externalapi/virtual.go b/domain/consensus/model/externalapi/virtual.go new file mode 100644 index 0000000..42eb975 --- /dev/null +++ b/domain/consensus/model/externalapi/virtual.go @@ -0,0 +1,10 @@ +package externalapi + +// VirtualInfo represents information about the virtual block needed by external components +type VirtualInfo struct { + ParentHashes []*DomainHash + Bits uint32 + PastMedianTime int64 + BlueScore uint64 + DAAScore uint64 +} diff --git a/domain/consensus/model/interface_datastructures_acceptancedatastore.go b/domain/consensus/model/interface_datastructures_acceptancedatastore.go new file mode 100644 index 0000000..a78a6ed --- /dev/null +++ b/domain/consensus/model/interface_datastructures_acceptancedatastore.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// AcceptanceDataStore represents a store of AcceptanceData +type AcceptanceDataStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData) + IsStaged(stagingArea *StagingArea) bool + Get(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.AcceptanceData, error) + Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash) +} diff --git a/domain/consensus/model/interface_datastructures_blockheaderstore.go 
b/domain/consensus/model/interface_datastructures_blockheaderstore.go new file mode 100644 index 0000000..e157038 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_blockheaderstore.go @@ -0,0 +1,15 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockHeaderStore represents a store of block headers +type BlockHeaderStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, blockHeader externalapi.BlockHeader) + IsStaged(stagingArea *StagingArea) bool + BlockHeader(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) + HasBlockHeader(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) + BlockHeaders(dbContext DBReader, stagingArea *StagingArea, blockHashes []*externalapi.DomainHash) ([]externalapi.BlockHeader, error) + Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash) + Count(stagingArea *StagingArea) uint64 +} diff --git a/domain/consensus/model/interface_datastructures_blockrelationstore.go b/domain/consensus/model/interface_datastructures_blockrelationstore.go new file mode 100644 index 0000000..f4e2926 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_blockrelationstore.go @@ -0,0 +1,13 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockRelationStore represents a store of BlockRelations +type BlockRelationStore interface { + Store + StageBlockRelation(stagingArea *StagingArea, blockHash *externalapi.DomainHash, blockRelations *BlockRelations) + IsStaged(stagingArea *StagingArea) bool + BlockRelation(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*BlockRelations, error) + Has(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) + UnstageAll(stagingArea *StagingArea) +} diff --git 
a/domain/consensus/model/interface_datastructures_blocks_with_trusted_data_daa_window_store.go b/domain/consensus/model/interface_datastructures_blocks_with_trusted_data_daa_window_store.go new file mode 100644 index 0000000..fce9bbd --- /dev/null +++ b/domain/consensus/model/interface_datastructures_blocks_with_trusted_data_daa_window_store.go @@ -0,0 +1,13 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// BlocksWithTrustedDataDAAWindowStore stores the DAA window of blocks with trusted data +type BlocksWithTrustedDataDAAWindowStore interface { + Store + IsStaged(stagingArea *StagingArea) bool + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, index uint64, ghostdagData *externalapi.BlockGHOSTDAGDataHashPair) + DAAWindowBlock(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash, index uint64) (*externalapi.BlockGHOSTDAGDataHashPair, error) +} diff --git a/domain/consensus/model/interface_datastructures_blockstatusstore.go b/domain/consensus/model/interface_datastructures_blockstatusstore.go new file mode 100644 index 0000000..ff0a034 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_blockstatusstore.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockStatusStore represents a store of BlockStatuses +type BlockStatusStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, blockStatus externalapi.BlockStatus) + IsStaged(stagingArea *StagingArea) bool + Get(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.BlockStatus, error) + Exists(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) +} diff --git a/domain/consensus/model/interface_datastructures_blockstore.go b/domain/consensus/model/interface_datastructures_blockstore.go new file mode 100644 
index 0000000..3fbfde3 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_blockstore.go @@ -0,0 +1,16 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockStore represents a store of blocks +type BlockStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, block *externalapi.DomainBlock) + IsStaged(stagingArea *StagingArea) bool + Block(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainBlock, error) + HasBlock(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) + Blocks(dbContext DBReader, stagingArea *StagingArea, blockHashes []*externalapi.DomainHash) ([]*externalapi.DomainBlock, error) + Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash) + Count(stagingArea *StagingArea) uint64 + AllBlockHashesIterator(dbContext DBReader) (BlockIterator, error) +} diff --git a/domain/consensus/model/interface_datastructures_consensusstatestore.go b/domain/consensus/model/interface_datastructures_consensusstatestore.go new file mode 100644 index 0000000..095d98b --- /dev/null +++ b/domain/consensus/model/interface_datastructures_consensusstatestore.go @@ -0,0 +1,23 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// ConsensusStateStore represents a store for the current consensus state +type ConsensusStateStore interface { + Store + IsStaged(stagingArea *StagingArea) bool + + StageVirtualUTXODiff(stagingArea *StagingArea, virtualUTXODiff externalapi.UTXODiff) + UTXOByOutpoint(dbContext DBReader, stagingArea *StagingArea, outpoint *externalapi.DomainOutpoint) (externalapi.UTXOEntry, error) + HasUTXOByOutpoint(dbContext DBReader, stagingArea *StagingArea, outpoint *externalapi.DomainOutpoint) (bool, error) + VirtualUTXOSetIterator(dbContext DBReader, stagingArea *StagingArea) 
(externalapi.ReadOnlyUTXOSetIterator, error) + VirtualUTXOs(dbContext DBReader, fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) + + StageTips(stagingArea *StagingArea, tipHashes []*externalapi.DomainHash) + Tips(stagingArea *StagingArea, dbContext DBReader) ([]*externalapi.DomainHash, error) + + StartImportingPruningPointUTXOSet(dbContext DBWriter) error + HadStartedImportingPruningPointUTXOSet(dbContext DBWriter) (bool, error) + ImportPruningPointUTXOSetIntoVirtualUTXOSet(dbContext DBWriter, pruningPointUTXOSetIterator externalapi.ReadOnlyUTXOSetIterator) error + FinishImportingPruningPointUTXOSet(dbContext DBWriter) error +} diff --git a/domain/consensus/model/interface_datastructures_daablocksstore.go b/domain/consensus/model/interface_datastructures_daablocksstore.go new file mode 100644 index 0000000..fb9f5b8 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_daablocksstore.go @@ -0,0 +1,14 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// DAABlocksStore represents a store of per-block DAA scores and DAA-added blocks
+type DAABlocksStore interface { + Store + StageDAAScore(stagingArea *StagingArea, blockHash *externalapi.DomainHash, daaScore uint64) + StageBlockDAAAddedBlocks(stagingArea *StagingArea, blockHash *externalapi.DomainHash, addedBlocks []*externalapi.DomainHash) + IsStaged(stagingArea *StagingArea) bool + DAAAddedBlocks(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + DAAScore(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (uint64, error) + Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash) +} diff --git a/domain/consensus/model/interface_datastructures_finalitystore.go b/domain/consensus/model/interface_datastructures_finalitystore.go new file mode 100644 index 0000000..f7b74da --- /dev/null +++ b/domain/consensus/model/interface_datastructures_finalitystore.go @@ -0,0 +1,13 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// FinalityStore represents a store for finality data +type FinalityStore interface { + Store + IsStaged(stagingArea *StagingArea) bool + StageFinalityPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash, finalityPointHash *externalapi.DomainHash) + FinalityPoint(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_datastructures_ghostdagdatastore.go b/domain/consensus/model/interface_datastructures_ghostdagdatastore.go new file mode 100644 index 0000000..715a62d --- /dev/null +++ b/domain/consensus/model/interface_datastructures_ghostdagdatastore.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// GHOSTDAGDataStore represents a store of BlockGHOSTDAGData +type GHOSTDAGDataStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, 
blockGHOSTDAGData *externalapi.BlockGHOSTDAGData, isTrustedData bool) + IsStaged(stagingArea *StagingArea) bool + Get(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash, isTrustedData bool) (*externalapi.BlockGHOSTDAGData, error) + UnstageAll(stagingArea *StagingArea) +} diff --git a/domain/consensus/model/interface_datastructures_headersselectedchainstore.go b/domain/consensus/model/interface_datastructures_headersselectedchainstore.go new file mode 100644 index 0000000..55133ad --- /dev/null +++ b/domain/consensus/model/interface_datastructures_headersselectedchainstore.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// HeadersSelectedChainStore represents a store of the headers selected chain +type HeadersSelectedChainStore interface { + Store + Stage(dbContext DBReader, stagingArea *StagingArea, chainChanges *externalapi.SelectedChainPath) error + IsStaged(stagingArea *StagingArea) bool + GetIndexByHash(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (uint64, error) + GetHashByIndex(dbContext DBReader, stagingArea *StagingArea, index uint64) (*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_datastructures_headertipsstore.go b/domain/consensus/model/interface_datastructures_headertipsstore.go new file mode 100644 index 0000000..0ca8503 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_headertipsstore.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// HeaderSelectedTipStore represents a store of the headers selected tip +type HeaderSelectedTipStore interface { + Store + Stage(stagingArea *StagingArea, selectedTip *externalapi.DomainHash) + IsStaged(stagingArea *StagingArea) bool + HeadersSelectedTip(dbContext DBReader, stagingArea *StagingArea) (*externalapi.DomainHash, error) + Has(dbContext DBReader, 
stagingArea *StagingArea) (bool, error) +} diff --git a/domain/consensus/model/interface_datastructures_mergedepthrootstore.go b/domain/consensus/model/interface_datastructures_mergedepthrootstore.go new file mode 100644 index 0000000..a80fcbf --- /dev/null +++ b/domain/consensus/model/interface_datastructures_mergedepthrootstore.go @@ -0,0 +1,13 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MergeDepthRootStore represents a store for merge depth roots +type MergeDepthRootStore interface { + Store + IsStaged(stagingArea *StagingArea) bool + StageMergeDepthRoot(stagingArea *StagingArea, blockHash *externalapi.DomainHash, root *externalapi.DomainHash) + MergeDepthRoot(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_datastructures_multisetstore.go b/domain/consensus/model/interface_datastructures_multisetstore.go new file mode 100644 index 0000000..0dc4b6b --- /dev/null +++ b/domain/consensus/model/interface_datastructures_multisetstore.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// MultisetStore represents a store of Multisets +type MultisetStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, multiset Multiset) + IsStaged(stagingArea *StagingArea) bool + Get(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (Multiset, error) + Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash) +} diff --git a/domain/consensus/model/interface_datastructures_pruningstore.go b/domain/consensus/model/interface_datastructures_pruningstore.go new file mode 100644 index 0000000..056963c --- /dev/null +++ b/domain/consensus/model/interface_datastructures_pruningstore.go @@ -0,0 +1,34 @@ +package model + +import 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// PruningStore represents a store for the current pruning state +type PruningStore interface { + Store + StagePruningPoint(dbContext DBWriter, stagingArea *StagingArea, pruningPointBlockHash *externalapi.DomainHash) error + StagePruningPointByIndex(dbContext DBReader, stagingArea *StagingArea, + pruningPointBlockHash *externalapi.DomainHash, index uint64) error + StagePruningPointCandidate(stagingArea *StagingArea, candidate *externalapi.DomainHash) + IsStaged(stagingArea *StagingArea) bool + PruningPointCandidate(dbContext DBReader, stagingArea *StagingArea) (*externalapi.DomainHash, error) + HasPruningPointCandidate(dbContext DBReader, stagingArea *StagingArea) (bool, error) + PruningPoint(dbContext DBReader, stagingArea *StagingArea) (*externalapi.DomainHash, error) + HasPruningPoint(dbContext DBReader, stagingArea *StagingArea) (bool, error) + CurrentPruningPointIndex(dbContext DBReader, stagingArea *StagingArea) (uint64, error) + PruningPointByIndex(dbContext DBReader, stagingArea *StagingArea, index uint64) (*externalapi.DomainHash, error) + + StageStartUpdatingPruningPointUTXOSet(stagingArea *StagingArea) + HadStartedUpdatingPruningPointUTXOSet(dbContext DBWriter) (bool, error) + FinishUpdatingPruningPointUTXOSet(dbContext DBWriter) error + UpdatePruningPointUTXOSet(dbContext DBWriter, diff externalapi.UTXODiff) error + + ClearImportedPruningPointUTXOs(dbContext DBWriter) error + AppendImportedPruningPointUTXOs(dbTx DBTransaction, outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error + ImportedPruningPointUTXOIterator(dbContext DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) + ClearImportedPruningPointMultiset(dbContext DBWriter) error + ImportedPruningPointMultiset(dbContext DBReader) (Multiset, error) + UpdateImportedPruningPointMultiset(dbTx DBTransaction, multiset Multiset) error + CommitImportedPruningPointUTXOSet(dbContext DBWriter) error + 
PruningPointUTXOs(dbContext DBReader, fromOutpoint *externalapi.DomainOutpoint, limit int) ([]*externalapi.OutpointAndUTXOEntryPair, error) + PruningPointUTXOIterator(dbContext DBReader) (externalapi.ReadOnlyUTXOSetIterator, error) +} diff --git a/domain/consensus/model/interface_datastructures_reachabilitydatastore.go b/domain/consensus/model/interface_datastructures_reachabilitydatastore.go new file mode 100644 index 0000000..5c499e9 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_reachabilitydatastore.go @@ -0,0 +1,15 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// ReachabilityDataStore represents a store of ReachabilityData +type ReachabilityDataStore interface { + Store + StageReachabilityData(stagingArea *StagingArea, blockHash *externalapi.DomainHash, reachabilityData ReachabilityData) + StageReachabilityReindexRoot(stagingArea *StagingArea, reachabilityReindexRoot *externalapi.DomainHash) + IsStaged(stagingArea *StagingArea) bool + ReachabilityData(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (ReachabilityData, error) + HasReachabilityData(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) + ReachabilityReindexRoot(dbContext DBReader, stagingArea *StagingArea) (*externalapi.DomainHash, error) + Delete(dbContext DBWriter) error +} diff --git a/domain/consensus/model/interface_datastructures_utxodiffstore.go b/domain/consensus/model/interface_datastructures_utxodiffstore.go new file mode 100644 index 0000000..6f77f41 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_utxodiffstore.go @@ -0,0 +1,14 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// UTXODiffStore represents a store of UTXODiffs +type UTXODiffStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, utxoDiff externalapi.UTXODiff, 
utxoDiffChild *externalapi.DomainHash) + IsStaged(stagingArea *StagingArea) bool + UTXODiff(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.UTXODiff, error) + UTXODiffChild(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) + HasUTXODiffChild(dbContext DBReader, stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) + Delete(stagingArea *StagingArea, blockHash *externalapi.DomainHash) +} diff --git a/domain/consensus/model/interface_datastructures_windowheapstore.go b/domain/consensus/model/interface_datastructures_windowheapstore.go new file mode 100644 index 0000000..2881b41 --- /dev/null +++ b/domain/consensus/model/interface_datastructures_windowheapstore.go @@ -0,0 +1,11 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// WindowHeapSliceStore caches the slices that are needed for the heap implementation of DAGTraversalManager.BlockWindow +type WindowHeapSliceStore interface { + Store + Stage(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int, pairs []*externalapi.BlockGHOSTDAGDataHashPair) + IsStaged(stagingArea *StagingArea) bool + Get(stagingArea *StagingArea, blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, error) +} diff --git a/domain/consensus/model/interface_processes_blockbuilder.go b/domain/consensus/model/interface_processes_blockbuilder.go new file mode 100644 index 0000000..e89be5f --- /dev/null +++ b/domain/consensus/model/interface_processes_blockbuilder.go @@ -0,0 +1,9 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockBuilder is responsible for creating blocks from the current state +type BlockBuilder interface { + BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (block 
*externalapi.DomainBlock, coinbaseHasRedReward bool, err error) +} diff --git a/domain/consensus/model/interface_processes_blockparentbuilder.go b/domain/consensus/model/interface_processes_blockparentbuilder.go new file mode 100644 index 0000000..1f23668 --- /dev/null +++ b/domain/consensus/model/interface_processes_blockparentbuilder.go @@ -0,0 +1,11 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockParentBuilder exposes a method to build super-block parents for +// a given set of direct parents +type BlockParentBuilder interface { + BuildParents(stagingArea *StagingArea, + daaScore uint64, + directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) +} diff --git a/domain/consensus/model/interface_processes_blockprocessor.go b/domain/consensus/model/interface_processes_blockprocessor.go new file mode 100644 index 0000000..90f49e8 --- /dev/null +++ b/domain/consensus/model/interface_processes_blockprocessor.go @@ -0,0 +1,10 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// BlockProcessor is responsible for processing incoming blocks +type BlockProcessor interface { + ValidateAndInsertBlock(block *externalapi.DomainBlock, shouldValidateAgainstUTXO bool) (*externalapi.VirtualChangeSet, externalapi.BlockStatus, error) + ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainHash) error + ValidateAndInsertBlockWithTrustedData(block *externalapi.BlockWithTrustedData, validateUTXO bool) (*externalapi.VirtualChangeSet, externalapi.BlockStatus, error) +} diff --git a/domain/consensus/model/interface_processes_blockvalidator.go b/domain/consensus/model/interface_processes_blockvalidator.go new file mode 100644 index 0000000..92915db --- /dev/null +++ b/domain/consensus/model/interface_processes_blockvalidator.go @@ -0,0 +1,15 @@ +package model + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// BlockValidator exposes a set of validation classes, after which +// it's possible to determine whether a block is valid +type BlockValidator interface { + ValidateHeaderInIsolation(stagingArea *StagingArea, blockHash *externalapi.DomainHash) error + ValidateBodyInIsolation(stagingArea *StagingArea, blockHash *externalapi.DomainHash) error + ValidateHeaderInContext(stagingArea *StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error + ValidateBodyInContext(stagingArea *StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error + ValidatePruningPointViolationAndProofOfWorkAndDifficulty(stagingArea *StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error +} diff --git a/domain/consensus/model/interface_processes_coinbasemanager.go b/domain/consensus/model/interface_processes_coinbasemanager.go new file mode 100644 index 0000000..272fc59 --- /dev/null +++ b/domain/consensus/model/interface_processes_coinbasemanager.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// CoinbaseManager exposes methods for handling blocks' +// coinbase transactions +type CoinbaseManager interface { + ExpectedCoinbaseTransaction(stagingArea *StagingArea, blockHash *externalapi.DomainHash, + coinbaseData *externalapi.DomainCoinbaseData) (expectedTransaction *externalapi.DomainTransaction, hasRedReward bool, err error) + CalcBlockSubsidy(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (uint64, error) + ExtractCoinbaseDataBlueScoreAndSubsidy(coinbaseTx *externalapi.DomainTransaction) (blueScore uint64, coinbaseData *externalapi.DomainCoinbaseData, subsidy uint64, err error) +} diff --git a/domain/consensus/model/interface_processes_consensusstatemanager.go b/domain/consensus/model/interface_processes_consensusstatemanager.go new file mode 100644 index 
0000000..6796404 --- /dev/null +++ b/domain/consensus/model/interface_processes_consensusstatemanager.go @@ -0,0 +1,17 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// ConsensusStateManager manages the node's consensus state +type ConsensusStateManager interface { + AddBlock(stagingArea *StagingArea, blockHash *externalapi.DomainHash, updateVirtual bool) (*externalapi.SelectedChainPath, externalapi.UTXODiff, *UTXODiffReversalData, error) + PopulateTransactionWithUTXOEntries(stagingArea *StagingArea, transaction *externalapi.DomainTransaction) error + ImportPruningPointUTXOSet(stagingArea *StagingArea, newPruningPoint *externalapi.DomainHash) error + ImportPruningPoints(stagingArea *StagingArea, pruningPoints []externalapi.BlockHeader) error + RestorePastUTXOSetIterator(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.ReadOnlyUTXOSetIterator, error) + CalculatePastUTXOAndAcceptanceData(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (externalapi.UTXODiff, externalapi.AcceptanceData, Multiset, error) + GetVirtualSelectedParentChainFromBlock(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) + RecoverUTXOIfRequired() error + ReverseUTXODiffs(tipHash *externalapi.DomainHash, reversalData *UTXODiffReversalData) error + ResolveVirtual(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) +} diff --git a/domain/consensus/model/interface_processes_dagtopologymanager.go b/domain/consensus/model/interface_processes_dagtopologymanager.go new file mode 100644 index 0000000..e315eda --- /dev/null +++ b/domain/consensus/model/interface_processes_dagtopologymanager.go @@ -0,0 +1,19 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// DAGTopologyManager exposes methods for querying relationships +// between blocks in the DAG +type DAGTopologyManager interface { + 
Parents(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + Children(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + IsParentOf(stagingArea *StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) + IsChildOf(stagingArea *StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) + IsAncestorOf(stagingArea *StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) + IsAncestorOfAny(stagingArea *StagingArea, blockHash *externalapi.DomainHash, potentialDescendants []*externalapi.DomainHash) (bool, error) + IsAnyAncestorOf(stagingArea *StagingArea, potentialAncestors []*externalapi.DomainHash, blockHash *externalapi.DomainHash) (bool, error) + IsInSelectedParentChainOf(stagingArea *StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) + ChildInSelectedParentChainOf(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash) (*externalapi.DomainHash, error) + + SetParents(stagingArea *StagingArea, blockHash *externalapi.DomainHash, parentHashes []*externalapi.DomainHash) error +} diff --git a/domain/consensus/model/interface_processes_dagtraversalmanager.go b/domain/consensus/model/interface_processes_dagtraversalmanager.go new file mode 100644 index 0000000..bcb32f5 --- /dev/null +++ b/domain/consensus/model/interface_processes_dagtraversalmanager.go @@ -0,0 +1,21 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// DAGTraversalManager exposes methods for traversing blocks +// in the DAG +type DAGTraversalManager interface { + LowestChainBlockAboveOrEqualToBlueScore(stagingArea *StagingArea, highHash *externalapi.DomainHash, blueScore uint64) (*externalapi.DomainHash, error) + // SelectedChildIterator should return a BlockIterator that iterates + 
// from lowHash (exclusive) to highHash (inclusive) over highHash's selected parent chain + SelectedChildIterator(stagingArea *StagingArea, highHash, lowHash *externalapi.DomainHash, includeLowHash bool) (BlockIterator, error) + SelectedChild(stagingArea *StagingArea, highHash, lowHash *externalapi.DomainHash) (*externalapi.DomainHash, error) + AnticoneFromBlocks(stagingArea *StagingArea, tips []*externalapi.DomainHash, blockHash *externalapi.DomainHash, maxTraversalAllowed uint64) ([]*externalapi.DomainHash, error) + AnticoneFromVirtualPOV(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + BlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash, windowSize int) ([]*externalapi.DomainHash, error) + DAABlockWindow(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + NewDownHeap(stagingArea *StagingArea) BlockHeap + NewUpHeap(stagingArea *StagingArea) BlockHeap + CalculateChainPath(stagingArea *StagingArea, fromBlockHash, toBlockHash *externalapi.DomainHash) ( + *externalapi.SelectedChainPath, error) +} diff --git a/domain/consensus/model/interface_processes_difficultymanager.go b/domain/consensus/model/interface_processes_difficultymanager.go new file mode 100644 index 0000000..753bd6f --- /dev/null +++ b/domain/consensus/model/interface_processes_difficultymanager.go @@ -0,0 +1,13 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DifficultyManager provides a method to resolve the +// difficulty value of a block +type DifficultyManager interface { + StageDAADataAndReturnRequiredDifficulty(stagingArea *StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (uint32, error) + RequiredDifficulty(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (uint32, error) + EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) +} diff 
--git a/domain/consensus/model/interface_processes_finalitymanager.go b/domain/consensus/model/interface_processes_finalitymanager.go new file mode 100644 index 0000000..9987e29 --- /dev/null +++ b/domain/consensus/model/interface_processes_finalitymanager.go @@ -0,0 +1,9 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// FinalityManager provides method to validate that a block does not violate finality +type FinalityManager interface { + VirtualFinalityPoint(stagingArea *StagingArea) (*externalapi.DomainHash, error) + FinalityPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_processes_ghostdagmanager.go b/domain/consensus/model/interface_processes_ghostdagmanager.go new file mode 100644 index 0000000..2b5970c --- /dev/null +++ b/domain/consensus/model/interface_processes_ghostdagmanager.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// GHOSTDAGManager resolves and manages GHOSTDAG block data +type GHOSTDAGManager interface { + GHOSTDAG(stagingArea *StagingArea, blockHash *externalapi.DomainHash) error + ChooseSelectedParent(stagingArea *StagingArea, blockHashes ...*externalapi.DomainHash) (*externalapi.DomainHash, error) + Less(blockHashA *externalapi.DomainHash, ghostdagDataA *externalapi.BlockGHOSTDAGData, + blockHashB *externalapi.DomainHash, ghostdagDataB *externalapi.BlockGHOSTDAGData) bool + GetSortedMergeSet(stagingArea *StagingArea, current *externalapi.DomainHash) ([]*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_processes_headertipsmanager.go b/domain/consensus/model/interface_processes_headertipsmanager.go new file mode 100644 index 0000000..f35ad45 --- /dev/null +++ b/domain/consensus/model/interface_processes_headertipsmanager.go @@ -0,0 +1,8 @@ +package model + 
+import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// HeadersSelectedTipManager manages the state of the headers selected tip +type HeadersSelectedTipManager interface { + AddHeaderTip(stagingArea *StagingArea, hash *externalapi.DomainHash) error +} diff --git a/domain/consensus/model/interface_processes_mergedepthmanager.go b/domain/consensus/model/interface_processes_mergedepthmanager.go new file mode 100644 index 0000000..b714d44 --- /dev/null +++ b/domain/consensus/model/interface_processes_mergedepthmanager.go @@ -0,0 +1,10 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// MergeDepthManager is used to validate mergeDepth for blocks +type MergeDepthManager interface { + CheckBoundedMergeDepth(stagingArea *StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error + NonBoundedMergeDepthViolatingBlues(stagingArea *StagingArea, blockHash, mergeDepthRoot *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + VirtualMergeDepthRoot(stagingArea *StagingArea) (*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_processes_parentsmanager.go b/domain/consensus/model/interface_processes_parentsmanager.go new file mode 100644 index 0000000..436e8c1 --- /dev/null +++ b/domain/consensus/model/interface_processes_parentsmanager.go @@ -0,0 +1,9 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// ParentsManager is a wrapper above header parents that replaces empty parents with genesis when needed. 
+type ParentsManager interface { + ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents + Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents +} diff --git a/domain/consensus/model/interface_processes_pastmediantimemanager.go b/domain/consensus/model/interface_processes_pastmediantimemanager.go new file mode 100644 index 0000000..d09eef1 --- /dev/null +++ b/domain/consensus/model/interface_processes_pastmediantimemanager.go @@ -0,0 +1,10 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// PastMedianTimeManager provides a method to resolve the +// past median time of a block +type PastMedianTimeManager interface { + PastMedianTime(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (int64, error) + InvalidateVirtualPastMedianTimeCache() +} diff --git a/domain/consensus/model/interface_processes_pruningmanager.go b/domain/consensus/model/interface_processes_pruningmanager.go new file mode 100644 index 0000000..e9511ac --- /dev/null +++ b/domain/consensus/model/interface_processes_pruningmanager.go @@ -0,0 +1,18 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// PruningManager resolves and manages the current pruning point +type PruningManager interface { + UpdatePruningPointByVirtual(stagingArea *StagingArea) error + IsValidPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (bool, error) + ArePruningPointsViolatingFinality(stagingArea *StagingArea, pruningPoints []externalapi.BlockHeader) (bool, error) + ArePruningPointsInValidChain(stagingArea *StagingArea) (bool, error) + ClearImportedPruningPointData() error + AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error + UpdatePruningPointIfRequired() error + PruneAllBlocksBelow(stagingArea *StagingArea, pruningPointHash *externalapi.DomainHash) error + 
PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) + ExpectedHeaderPruningPoint(stagingArea *StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) + TrustedBlockAssociatedGHOSTDAGDataBlockHashes(stagingArea *StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_processes_pruningproofmanager.go b/domain/consensus/model/interface_processes_pruningproofmanager.go new file mode 100644 index 0000000..250b8c7 --- /dev/null +++ b/domain/consensus/model/interface_processes_pruningproofmanager.go @@ -0,0 +1,10 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// PruningProofManager builds, validates and applies pruning proofs. +type PruningProofManager interface { + BuildPruningPointProof(stagingArea *StagingArea) (*externalapi.PruningPointProof, error) + ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error + ApplyPruningPointProof(pruningPointProof *externalapi.PruningPointProof) error +} diff --git a/domain/consensus/model/interface_processes_reachabilitytree.go b/domain/consensus/model/interface_processes_reachabilitytree.go new file mode 100644 index 0000000..327c93e --- /dev/null +++ b/domain/consensus/model/interface_processes_reachabilitytree.go @@ -0,0 +1,14 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// ReachabilityManager maintains a structure that allows to answer +// reachability queries in sub-linear time +type ReachabilityManager interface { + Init(stagingArea *StagingArea) error + AddBlock(stagingArea *StagingArea, blockHash *externalapi.DomainHash) error + IsReachabilityTreeAncestorOf(stagingArea *StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) + IsDAGAncestorOf(stagingArea *StagingArea, blockHashA *externalapi.DomainHash, blockHashB 
*externalapi.DomainHash) (bool, error) + UpdateReindexRoot(stagingArea *StagingArea, selectedTip *externalapi.DomainHash) error + FindNextAncestor(stagingArea *StagingArea, descendant, ancestor *externalapi.DomainHash) (*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/interface_processes_syncmanager.go b/domain/consensus/model/interface_processes_syncmanager.go new file mode 100644 index 0000000..952fede --- /dev/null +++ b/domain/consensus/model/interface_processes_syncmanager.go @@ -0,0 +1,16 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// SyncManager exposes functions to support sync between spectred nodes +type SyncManager interface { + GetHashesBetween(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash, maxBlocks uint64) ( + hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) + GetAnticone(stagingArea *StagingArea, blockHash, contextHash *externalapi.DomainHash, maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) + GetMissingBlockBodyHashes(stagingArea *StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) + CreateBlockLocator(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash, limit uint32) ( + externalapi.BlockLocator, error) + CreateHeadersSelectedChainBlockLocator(stagingArea *StagingArea, lowHash, highHash *externalapi.DomainHash) ( + externalapi.BlockLocator, error) + GetSyncInfo(stagingArea *StagingArea) (*externalapi.SyncInfo, error) +} diff --git a/domain/consensus/model/interface_processes_transactionvalidator.go b/domain/consensus/model/interface_processes_transactionvalidator.go new file mode 100644 index 0000000..7b5824b --- /dev/null +++ b/domain/consensus/model/interface_processes_transactionvalidator.go @@ -0,0 +1,16 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TransactionValidator exposes a 
set of validation classes, after which +// it's possible to determine whether a transaction is valid +type TransactionValidator interface { + ValidateTransactionInIsolation(transaction *externalapi.DomainTransaction, povDAAScore uint64) error + ValidateTransactionInContextIgnoringUTXO(stagingArea *StagingArea, tx *externalapi.DomainTransaction, + povBlockHash *externalapi.DomainHash, povBlockPastMedianTime int64) error + ValidateTransactionInContextAndPopulateFee(stagingArea *StagingArea, + tx *externalapi.DomainTransaction, povBlockHash *externalapi.DomainHash) error + PopulateMass(transaction *externalapi.DomainTransaction) +} diff --git a/domain/consensus/model/multiset.go b/domain/consensus/model/multiset.go new file mode 100644 index 0000000..77edf8e --- /dev/null +++ b/domain/consensus/model/multiset.go @@ -0,0 +1,12 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// Multiset represents a secp256k1 multiset +type Multiset interface { + Add(data []byte) + Remove(data []byte) + Hash() *externalapi.DomainHash + Serialize() []byte + Clone() Multiset +} diff --git a/domain/consensus/model/reachabilitydata.go b/domain/consensus/model/reachabilitydata.go new file mode 100644 index 0000000..fa852a6 --- /dev/null +++ b/domain/consensus/model/reachabilitydata.go @@ -0,0 +1,118 @@ +package model + +import ( + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// MutableReachabilityData represents a node in the reachability tree +// of some DAG block. It mainly provides the ability to query *tree* +// reachability with O(1) query time. It does so by managing an +// index interval for each node and making sure all nodes in its +// subtree are indexed within the interval, so the query +// B ∈ subtree(A) simply becomes B.interval ⊂ A.interval. 
+// +// The main challenge of maintaining such intervals is that our tree +// is an ever-growing tree and as such pre-allocated intervals may +// not suffice as per future events. This is where the reindexing +// algorithm below comes into place. +// We use the reasonable assumption that the initial root interval +// (e.g., [0, 2^64-1]) should always suffice for any practical use- +// case, and so reindexing should always succeed unless more than +// 2^64 blocks are added to the DAG/tree. +// +// In addition, we keep a future covering set for every node. +// This set allows to query reachability over the entirety of the DAG. +// See documentation of FutureCoveringTreeNodeSet for additional details. + +// ReachabilityData is a read-only version of a block's MutableReachabilityData +// Use CloneMutable to edit the MutableReachabilityData. +type ReachabilityData interface { + Children() []*externalapi.DomainHash + Parent() *externalapi.DomainHash + Interval() *ReachabilityInterval + FutureCoveringSet() FutureCoveringTreeNodeSet + CloneMutable() MutableReachabilityData + Equal(other ReachabilityData) bool +} + +// MutableReachabilityData represents a block's MutableReachabilityData, with ability to edit it +type MutableReachabilityData interface { + ReachabilityData + + AddChild(child *externalapi.DomainHash) + SetParent(parent *externalapi.DomainHash) + SetInterval(interval *ReachabilityInterval) + SetFutureCoveringSet(futureCoveringSet FutureCoveringTreeNodeSet) +} + +// ReachabilityInterval represents an interval to be used within the +// tree reachability algorithm. See ReachabilityTreeNode for further +// details. +type ReachabilityInterval struct { + Start uint64 + End uint64 +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
+var _ = &ReachabilityInterval{0, 0} + +// Equal returns whether ri equals to other +func (ri *ReachabilityInterval) Equal(other *ReachabilityInterval) bool { + if ri == nil || other == nil { + return ri == other + } + + if ri.Start != other.Start { + return false + } + + if ri.End != other.End { + return false + } + + return true +} + +// Clone returns a clone of ReachabilityInterval +func (ri *ReachabilityInterval) Clone() *ReachabilityInterval { + return &ReachabilityInterval{ + Start: ri.Start, + End: ri.End, + } +} + +func (ri *ReachabilityInterval) String() string { + return fmt.Sprintf("[%d,%d]", ri.Start, ri.End) +} + +// FutureCoveringTreeNodeSet represents a collection of blocks in the future of +// a certain block. Once a block B is added to the DAG, every block A_i in +// B's selected parent anticone must register B in its FutureCoveringTreeNodeSet. This allows +// to relatively quickly (O(log(|FutureCoveringTreeNodeSet|))) query whether B +// is a descendant (is in the "future") of any block that previously +// registered it. +// +// Note that FutureCoveringTreeNodeSet is meant to be queried only if B is not +// a reachability tree descendant of the block in question, as reachability +// tree queries are always O(1). +// +// See insertNode, hasAncestorOf, and isInPast for further details. +type FutureCoveringTreeNodeSet []*externalapi.DomainHash + +// Clone returns a clone of FutureCoveringTreeNodeSet +func (fctns FutureCoveringTreeNodeSet) Clone() FutureCoveringTreeNodeSet { + //return fctns + return externalapi.CloneHashes(fctns) +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. 
+var _ FutureCoveringTreeNodeSet = []*externalapi.DomainHash{} + +// Equal returns whether fctns equals to other +func (fctns FutureCoveringTreeNodeSet) Equal(other FutureCoveringTreeNodeSet) bool { + return externalapi.HashesEqual(fctns, other) +} diff --git a/domain/consensus/model/staging_area.go b/domain/consensus/model/staging_area.go new file mode 100644 index 0000000..b66264b --- /dev/null +++ b/domain/consensus/model/staging_area.go @@ -0,0 +1,64 @@ +package model + +import ( + "github.com/pkg/errors" +) + +// StagingShard is an interface that enables every store to have its own Commit logic +// See StagingArea for more details +type StagingShard interface { + Commit(dbTx DBTransaction) error +} + +// StagingShardID is used to identify each of the store's staging shards +type StagingShardID uint64 + +// StagingArea is a single changeset inside the consensus database, similar to a transaction in a classic database. +// Each StagingArea consists of multiple StagingShards, one for each dataStore that has any changes within it. +// To enable maximum flexibility for all stores, each has to define its own Commit method, and pass it to the +// StagingArea through the relevant StagingShard. +// +// When the StagingArea is being Committed, it goes over all its shards, and commits those one-by-one. +// Since Commit happens in a DatabaseTransaction, a StagingArea is atomic. +type StagingArea struct { + shards map[StagingShardID]StagingShard + isCommitted bool +} + +// NewStagingArea creates a new, empty staging area. +func NewStagingArea() *StagingArea { + return &StagingArea{ + shards: make(map[StagingShardID]StagingShard), + isCommitted: false, + } +} + +// GetOrCreateShard attempts to retrieve a shard with the given name. +// If it does not exist - a new shard is created using `createFunc`. 
+func (sa *StagingArea) GetOrCreateShard(shardID StagingShardID, createFunc func() StagingShard) StagingShard { + shard, ok := sa.shards[shardID] + if !ok { + shard = createFunc() + sa.shards[shardID] = shard + } + return shard +} + +// Commit goes over all the Shards in the StagingArea and commits them, inside the provided database transaction. +// Note: the transaction itself is not committed, this is the caller's responsibility to commit it. +func (sa *StagingArea) Commit(dbTx DBTransaction) error { + if sa.isCommitted { + return errors.New("Attempt to call Commit on already committed stagingArea") + } + + for _, shard := range sa.shards { + err := shard.Commit(dbTx) + if err != nil { + return err + } + } + + sa.isCommitted = true + + return nil +} diff --git a/domain/consensus/model/store.go b/domain/consensus/model/store.go new file mode 100644 index 0000000..c3a43ce --- /dev/null +++ b/domain/consensus/model/store.go @@ -0,0 +1,5 @@ +package model + +// Store is a common interface for data stores +type Store interface { +} diff --git a/domain/consensus/model/subdag.go b/domain/consensus/model/subdag.go new file mode 100644 index 0000000..a8685dc --- /dev/null +++ b/domain/consensus/model/subdag.go @@ -0,0 +1,17 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// SubDAG represents a context-free representation of a partial DAG +type SubDAG struct { + RootHashes []*externalapi.DomainHash + TipHashes []*externalapi.DomainHash + Blocks map[externalapi.DomainHash]*SubDAGBlock +} + +// SubDAGBlock represents a block in a SubDAG +type SubDAGBlock struct { + BlockHash *externalapi.DomainHash + ParentHashes []*externalapi.DomainHash + ChildHashes []*externalapi.DomainHash +} diff --git a/domain/consensus/model/testapi/test_block_builder.go b/domain/consensus/model/testapi/test_block_builder.go new file mode 100644 index 0000000..8e1676f --- /dev/null +++ b/domain/consensus/model/testapi/test_block_builder.go @@ 
-0,0 +1,23 @@ +package testapi + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestBlockBuilder adds to the main BlockBuilder methods required by tests +type TestBlockBuilder interface { + model.BlockBuilder + + // BuildBlockWithParents builds a block with provided parents, coinbaseData and transactions, + // and returns the block together with its past UTXO-diff from the virtual. + BuildBlockWithParents(parentHashes []*externalapi.DomainHash, coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, externalapi.UTXODiff, error) + + BuildUTXOInvalidHeader(parentHashes []*externalapi.DomainHash) (externalapi.BlockHeader, error) + + BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, + error) + + SetNonceCounter(nonceCounter uint64) +} diff --git a/domain/consensus/model/testapi/test_consensus.go b/domain/consensus/model/testapi/test_consensus.go new file mode 100644 index 0000000..9492f13 --- /dev/null +++ b/domain/consensus/model/testapi/test_consensus.go @@ -0,0 +1,95 @@ +package testapi + +import ( + "io" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +// MineJSONBlockType indicates which type of blocks MineJSON mines +type MineJSONBlockType int + +const ( + // MineJSONBlockTypeUTXOValidBlock indicates for MineJSON to mine valid blocks. + MineJSONBlockTypeUTXOValidBlock MineJSONBlockType = iota + + // MineJSONBlockTypeUTXOInvalidBlock indicates for MineJSON to mine UTXO invalid blocks. + MineJSONBlockTypeUTXOInvalidBlock + + // MineJSONBlockTypeUTXOInvalidHeader indicates for MineJSON to mine UTXO invalid headers. 
+ MineJSONBlockTypeUTXOInvalidHeader +) + +// TestConsensus wraps the Consensus interface with some methods that are needed by tests only +type TestConsensus interface { + externalapi.Consensus + + DAGParams() *dagconfig.Params + DatabaseContext() model.DBManager + Database() database.Database + + BuildBlockWithParents(parentHashes []*externalapi.DomainHash, coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainBlock, externalapi.UTXODiff, error) + + BuildHeaderWithParents(parentHashes []*externalapi.DomainHash) (externalapi.BlockHeader, error) + + BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) + + // AddBlock builds a block with given information, solves it, and adds to the DAG. + // Returns the hash of the added block + AddBlock(parentHashes []*externalapi.DomainHash, coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) + + AddBlockOnTips(coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) + + AddUTXOInvalidHeader(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) + + AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash, + *externalapi.VirtualChangeSet, error) + UpdatePruningPointByVirtual() error + + ResolveVirtualWithMaxParam(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) + + MineJSON(r io.Reader, blockType MineJSONBlockType) (tips []*externalapi.DomainHash, err error) + ToJSON(w io.Writer) error + + RenderDAGToDot(filename string) error + + AcceptanceDataStore() model.AcceptanceDataStore + BlockHeaderStore() model.BlockHeaderStore + BlockRelationStore() model.BlockRelationStore + BlockStatusStore() model.BlockStatusStore + BlockStore() 
model.BlockStore + ConsensusStateStore() model.ConsensusStateStore + GHOSTDAGDataStore() model.GHOSTDAGDataStore + GHOSTDAGDataStores() []model.GHOSTDAGDataStore + HeaderTipsStore() model.HeaderSelectedTipStore + MultisetStore() model.MultisetStore + PruningStore() model.PruningStore + ReachabilityDataStore() model.ReachabilityDataStore + UTXODiffStore() model.UTXODiffStore + HeadersSelectedChainStore() model.HeadersSelectedChainStore + DAABlocksStore() model.DAABlocksStore + + BlockBuilder() TestBlockBuilder + BlockProcessor() model.BlockProcessor + BlockValidator() model.BlockValidator + CoinbaseManager() model.CoinbaseManager + ConsensusStateManager() TestConsensusStateManager + FinalityManager() model.FinalityManager + DAGTopologyManager() model.DAGTopologyManager + DAGTraversalManager() model.DAGTraversalManager + DifficultyManager() model.DifficultyManager + GHOSTDAGManager() model.GHOSTDAGManager + HeaderTipsManager() model.HeadersSelectedTipManager + MergeDepthManager() model.MergeDepthManager + PastMedianTimeManager() model.PastMedianTimeManager + PruningManager() model.PruningManager + ReachabilityManager() TestReachabilityManager + SyncManager() model.SyncManager + TransactionValidator() TestTransactionValidator +} diff --git a/domain/consensus/model/testapi/test_consensus_state_manager.go b/domain/consensus/model/testapi/test_consensus_state_manager.go new file mode 100644 index 0000000..d98b8ac --- /dev/null +++ b/domain/consensus/model/testapi/test_consensus_state_manager.go @@ -0,0 +1,15 @@ +package testapi + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestConsensusStateManager adds to the main ConsensusStateManager methods required by tests +type TestConsensusStateManager interface { + model.ConsensusStateManager + AddUTXOToMultiset(multiset model.Multiset, entry externalapi.UTXOEntry, + outpoint *externalapi.DomainOutpoint) error + 
ResolveBlockStatus(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + useSeparateStagingAreaPerBlock bool) (externalapi.BlockStatus, error) +} diff --git a/domain/consensus/model/testapi/test_reachability_manager.go b/domain/consensus/model/testapi/test_reachability_manager.go new file mode 100644 index 0000000..4a27908 --- /dev/null +++ b/domain/consensus/model/testapi/test_reachability_manager.go @@ -0,0 +1,16 @@ +package testapi + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestReachabilityManager adds to the main ReachabilityManager methods required by tests +type TestReachabilityManager interface { + model.ReachabilityManager + SetReachabilityReindexWindow(reindexWindow uint64) + SetReachabilityReindexSlack(reindexSlack uint64) + ReachabilityReindexSlack() uint64 + ValidateIntervals(root *externalapi.DomainHash) error + GetAllNodes(root *externalapi.DomainHash) ([]*externalapi.DomainHash, error) +} diff --git a/domain/consensus/model/testapi/test_transaction_validator.go b/domain/consensus/model/testapi/test_transaction_validator.go new file mode 100644 index 0000000..2857665 --- /dev/null +++ b/domain/consensus/model/testapi/test_transaction_validator.go @@ -0,0 +1,13 @@ +package testapi + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +// TestTransactionValidator adds to the main TransactionValidator methods required by tests +type TestTransactionValidator interface { + model.TransactionValidator + SigCache() *txscript.SigCache + SetSigCache(sigCache *txscript.SigCache) +} diff --git a/domain/consensus/model/utxo_diff_reversal_data.go b/domain/consensus/model/utxo_diff_reversal_data.go new file mode 100644 index 0000000..74d7856 --- /dev/null +++ b/domain/consensus/model/utxo_diff_reversal_data.go @@ -0,0 +1,9 @@ +package model 
+ +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// UTXODiffReversalData is used by ConsensusStateManager to reverse the UTXODiffs during a re-org +type UTXODiffReversalData struct { + SelectedParentHash *externalapi.DomainHash + SelectedParentUTXODiff externalapi.UTXODiff +} diff --git a/domain/consensus/model/virtual.go b/domain/consensus/model/virtual.go new file mode 100644 index 0000000..e40a0ac --- /dev/null +++ b/domain/consensus/model/virtual.go @@ -0,0 +1,19 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// VirtualBlockHash is a marker hash for the virtual block +var VirtualBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}) + +// VirtualGenesisBlockHash is a marker hash for the virtual genesis block +var VirtualGenesisBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, +}) diff --git a/domain/consensus/processes/blockbuilder/block_builder.go b/domain/consensus/processes/blockbuilder/block_builder.go new file mode 100644 index 0000000..78e8b56 --- /dev/null +++ b/domain/consensus/processes/blockbuilder/block_builder.go @@ -0,0 +1,346 @@ +package blockbuilder + +import ( + "math/big" + "sort" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + 
"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/merkle" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/mstime" +) + +type blockBuilder struct { + databaseContext model.DBManager + genesisHash *externalapi.DomainHash + + difficultyManager model.DifficultyManager + pastMedianTimeManager model.PastMedianTimeManager + coinbaseManager model.CoinbaseManager + consensusStateManager model.ConsensusStateManager + ghostdagManager model.GHOSTDAGManager + transactionValidator model.TransactionValidator + finalityManager model.FinalityManager + pruningManager model.PruningManager + blockParentBuilder model.BlockParentBuilder + + acceptanceDataStore model.AcceptanceDataStore + blockRelationStore model.BlockRelationStore + multisetStore model.MultisetStore + ghostdagDataStore model.GHOSTDAGDataStore + daaBlocksStore model.DAABlocksStore +} + +// New creates a new instance of a BlockBuilder +func New( + databaseContext model.DBManager, + genesisHash *externalapi.DomainHash, + + difficultyManager model.DifficultyManager, + pastMedianTimeManager model.PastMedianTimeManager, + coinbaseManager model.CoinbaseManager, + consensusStateManager model.ConsensusStateManager, + ghostdagManager model.GHOSTDAGManager, + transactionValidator model.TransactionValidator, + finalityManager model.FinalityManager, + blockParentBuilder model.BlockParentBuilder, + pruningManager model.PruningManager, + + acceptanceDataStore model.AcceptanceDataStore, + blockRelationStore model.BlockRelationStore, + multisetStore model.MultisetStore, + ghostdagDataStore model.GHOSTDAGDataStore, + daaBlocksStore model.DAABlocksStore, +) model.BlockBuilder { + + return &blockBuilder{ + databaseContext: databaseContext, + genesisHash: genesisHash, + + difficultyManager: difficultyManager, + 
pastMedianTimeManager: pastMedianTimeManager, + coinbaseManager: coinbaseManager, + consensusStateManager: consensusStateManager, + ghostdagManager: ghostdagManager, + transactionValidator: transactionValidator, + finalityManager: finalityManager, + blockParentBuilder: blockParentBuilder, + pruningManager: pruningManager, + + acceptanceDataStore: acceptanceDataStore, + blockRelationStore: blockRelationStore, + multisetStore: multisetStore, + ghostdagDataStore: ghostdagDataStore, + daaBlocksStore: daaBlocksStore, + } +} + +// BuildBlock builds a block over the current state, with the given +// coinbaseData and the given transactions +func (bb *blockBuilder) BuildBlock(coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (block *externalapi.DomainBlock, coinbaseHasRedReward bool, err error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "BuildBlock") + defer onEnd() + + stagingArea := model.NewStagingArea() + + return bb.buildBlock(stagingArea, coinbaseData, transactions) +} + +func (bb *blockBuilder) buildBlock(stagingArea *model.StagingArea, coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (block *externalapi.DomainBlock, coinbaseHasRedReward bool, err error) { + + err = bb.validateTransactions(stagingArea, transactions) + if err != nil { + return nil, false, err + } + + newBlockPruningPoint, err := bb.newBlockPruningPoint(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, false, err + } + coinbase, coinbaseHasRedReward, err := bb.newBlockCoinbaseTransaction(stagingArea, coinbaseData) + if err != nil { + return nil, false, err + } + transactionsWithCoinbase := append([]*externalapi.DomainTransaction{coinbase}, transactions...) 
+ + header, err := bb.buildHeader(stagingArea, transactionsWithCoinbase, newBlockPruningPoint) + if err != nil { + return nil, false, err + } + + return &externalapi.DomainBlock{ + Header: header, + Transactions: transactionsWithCoinbase, + }, coinbaseHasRedReward, nil +} + +func (bb *blockBuilder) validateTransactions(stagingArea *model.StagingArea, + transactions []*externalapi.DomainTransaction) error { + + invalidTransactions := make([]ruleerrors.InvalidTransaction, 0) + for _, transaction := range transactions { + err := bb.validateTransaction(stagingArea, transaction) + if err != nil { + ruleError := ruleerrors.RuleError{} + if !errors.As(err, &ruleError) { + return err + } + invalidTransactions = append(invalidTransactions, + ruleerrors.InvalidTransaction{Transaction: transaction, Error: &ruleError}) + } + } + + if len(invalidTransactions) > 0 { + return ruleerrors.NewErrInvalidTransactionsInNewBlock(invalidTransactions) + } + + return nil +} + +func (bb *blockBuilder) validateTransaction( + stagingArea *model.StagingArea, transaction *externalapi.DomainTransaction) error { + + originalEntries := make([]externalapi.UTXOEntry, len(transaction.Inputs)) + for i, input := range transaction.Inputs { + originalEntries[i] = input.UTXOEntry + input.UTXOEntry = nil + } + + defer func() { + for i, input := range transaction.Inputs { + input.UTXOEntry = originalEntries[i] + } + }() + + err := bb.consensusStateManager.PopulateTransactionWithUTXOEntries(stagingArea, transaction) + if err != nil { + return err + } + + virtualPastMedianTime, err := bb.pastMedianTimeManager.PastMedianTime(stagingArea, model.VirtualBlockHash) + if err != nil { + return err + } + + err = bb.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, transaction, model.VirtualBlockHash, virtualPastMedianTime) + if err != nil { + return err + } + + return bb.transactionValidator.ValidateTransactionInContextAndPopulateFee(stagingArea, transaction, model.VirtualBlockHash) +} + 
+func (bb *blockBuilder) newBlockCoinbaseTransaction(stagingArea *model.StagingArea, + coinbaseData *externalapi.DomainCoinbaseData) (expectedTransaction *externalapi.DomainTransaction, hasRedReward bool, err error) { + + return bb.coinbaseManager.ExpectedCoinbaseTransaction(stagingArea, model.VirtualBlockHash, coinbaseData) +} + +func (bb *blockBuilder) buildHeader(stagingArea *model.StagingArea, transactions []*externalapi.DomainTransaction, + newBlockPruningPoint *externalapi.DomainHash) (externalapi.BlockHeader, error) { + + daaScore, err := bb.newBlockDAAScore(stagingArea) + if err != nil { + return nil, err + } + + parents, err := bb.newBlockParents(stagingArea, daaScore) + if err != nil { + return nil, err + } + + timeInMilliseconds, err := bb.newBlockTime(stagingArea) + if err != nil { + return nil, err + } + bits, err := bb.newBlockDifficulty(stagingArea) + if err != nil { + return nil, err + } + hashMerkleRoot := bb.newBlockHashMerkleRoot(transactions) + acceptedIDMerkleRoot, err := bb.newBlockAcceptedIDMerkleRoot(stagingArea) + if err != nil { + return nil, err + } + utxoCommitment, err := bb.newBlockUTXOCommitment(stagingArea) + if err != nil { + return nil, err + } + blueWork, err := bb.newBlockBlueWork(stagingArea) + if err != nil { + return nil, err + } + blueScore, err := bb.newBlockBlueScore(stagingArea) + if err != nil { + return nil, err + } + + return blockheader.NewImmutableBlockHeader( + constants.BlockVersion, + parents, + hashMerkleRoot, + acceptedIDMerkleRoot, + utxoCommitment, + timeInMilliseconds, + bits, + 0, + daaScore, + blueScore, + blueWork, + newBlockPruningPoint, + ), nil +} + +func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea, daaScore uint64) ([]externalapi.BlockLevelParents, error) { + virtualBlockRelations, err := bb.blockRelationStore.BlockRelation(bb.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + return bb.blockParentBuilder.BuildParents(stagingArea, 
daaScore, virtualBlockRelations.Parents) +} + +func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) { + // The timestamp for the block must not be before the median timestamp + // of the last several blocks. Thus, choose the maximum between the + // current time and one second after the past median time. The current + // timestamp is truncated to a millisecond boundary before comparison since a + // block timestamp does not support a precision greater than one + // millisecond. + newTimestamp := mstime.Now().UnixMilliseconds() + minTimestamp, err := bb.minBlockTime(stagingArea, model.VirtualBlockHash) + if err != nil { + return 0, err + } + if newTimestamp < minTimestamp { + newTimestamp = minTimestamp + } + return newTimestamp, nil +} + +func (bb *blockBuilder) minBlockTime(stagingArea *model.StagingArea, hash *externalapi.DomainHash) (int64, error) { + pastMedianTime, err := bb.pastMedianTimeManager.PastMedianTime(stagingArea, hash) + if err != nil { + return 0, err + } + + return pastMedianTime + 1, nil +} + +func (bb *blockBuilder) newBlockDifficulty(stagingArea *model.StagingArea) (uint32, error) { + return bb.difficultyManager.RequiredDifficulty(stagingArea, model.VirtualBlockHash) +} + +func (bb *blockBuilder) newBlockHashMerkleRoot(transactions []*externalapi.DomainTransaction) *externalapi.DomainHash { + return merkle.CalculateHashMerkleRoot(transactions) +} + +func (bb *blockBuilder) newBlockAcceptedIDMerkleRoot(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + newBlockAcceptanceData, err := bb.acceptanceDataStore.Get(bb.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + return bb.calculateAcceptedIDMerkleRoot(newBlockAcceptanceData) +} + +func (bb *blockBuilder) calculateAcceptedIDMerkleRoot(acceptanceData externalapi.AcceptanceData) (*externalapi.DomainHash, error) { + var acceptedTransactions []*externalapi.DomainTransaction + for _, blockAcceptanceData 
:= range acceptanceData { + for _, transactionAcceptance := range blockAcceptanceData.TransactionAcceptanceData { + if !transactionAcceptance.IsAccepted { + continue + } + acceptedTransactions = append(acceptedTransactions, transactionAcceptance.Transaction) + } + } + sort.Slice(acceptedTransactions, func(i, j int) bool { + acceptedTransactionIID := consensushashing.TransactionID(acceptedTransactions[i]) + acceptedTransactionJID := consensushashing.TransactionID(acceptedTransactions[j]) + return acceptedTransactionIID.Less(acceptedTransactionJID) + }) + + return merkle.CalculateIDMerkleRoot(acceptedTransactions), nil +} + +func (bb *blockBuilder) newBlockUTXOCommitment(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + newBlockMultiset, err := bb.multisetStore.Get(bb.databaseContext, stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + newBlockUTXOCommitment := newBlockMultiset.Hash() + return newBlockUTXOCommitment, nil +} + +func (bb *blockBuilder) newBlockDAAScore(stagingArea *model.StagingArea) (uint64, error) { + return bb.daaBlocksStore.DAAScore(bb.databaseContext, stagingArea, model.VirtualBlockHash) +} + +func (bb *blockBuilder) newBlockBlueWork(stagingArea *model.StagingArea) (*big.Int, error) { + virtualGHOSTDAGData, err := bb.ghostdagDataStore.Get(bb.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + return virtualGHOSTDAGData.BlueWork(), nil +} + +func (bb *blockBuilder) newBlockBlueScore(stagingArea *model.StagingArea) (uint64, error) { + virtualGHOSTDAGData, err := bb.ghostdagDataStore.Get(bb.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return 0, err + } + return virtualGHOSTDAGData.BlueScore(), nil +} + +func (bb *blockBuilder) newBlockPruningPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + return bb.pruningManager.ExpectedHeaderPruningPoint(stagingArea, 
blockHash) +} diff --git a/domain/consensus/processes/blockbuilder/block_builder_test.go b/domain/consensus/processes/blockbuilder/block_builder_test.go new file mode 100644 index 0000000..589dc7e --- /dev/null +++ b/domain/consensus/processes/blockbuilder/block_builder_test.go @@ -0,0 +1,130 @@ +package blockbuilder_test + +import ( + "testing" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestBuildBlockErrorCases(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockBuilderErrorCases") + if err != nil { + t.Fatalf("Error initializing consensus for: %+v", err) + } + defer teardown(false) + + type testData struct { + name string + coinbaseData *externalapi.DomainCoinbaseData + transactions []*externalapi.DomainTransaction + testFunc func(test testData, err error) error + } + + tests := []testData{ + { + "scriptPublicKey too long", + &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: make([]byte, consensusConfig.CoinbasePayloadScriptPublicKeyMaxLength+1), + Version: 0, + }, + ExtraData: nil, + }, + nil, + func(_ testData, err error) error { + if !errors.Is(err, ruleerrors.ErrBadCoinbasePayloadLen) { + return errors.Errorf("Unexpected error: %+v", err) + } + return nil + }, + }, + { + "missing UTXO transactions", + &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + ExtraData: nil, + 
}, + []*externalapi.DomainTransaction{ + { + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, Index: 0}, + }, + }, + Outputs: nil, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Payload: []byte{0}, + }, + { + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, Index: 0}, + }, + }, + Outputs: nil, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Payload: []byte{1}, + }, + }, + + func(test testData, err error) error { + errInvalidTransactionsInNewBlock := ruleerrors.ErrInvalidTransactionsInNewBlock{} + if !errors.As(err, &errInvalidTransactionsInNewBlock) { + return errors.Errorf("Unexpected error: %+v", err) + } + + if len(errInvalidTransactionsInNewBlock.InvalidTransactions) != len(test.transactions) { + return errors.Errorf("Expected %d transaction but got %d", + len(test.transactions), len(errInvalidTransactionsInNewBlock.InvalidTransactions)) + } + + for i, invalidTx := range errInvalidTransactionsInNewBlock.InvalidTransactions { + if !invalidTx.Transaction.Equal(test.transactions[i]) { + return errors.Errorf("Expected transaction %d to be equal to its corresponding "+ + "test transaction", i) + } + + if !errors.As(invalidTx.Error, &ruleerrors.ErrMissingTxOut{}) { + return errors.Errorf("Unexpected error for transaction %d: %+v", i, invalidTx.Error) + } + } + return nil + }, + }, + } + + for _, test := range tests { + _, _, err = testConsensus.BlockBuilder().BuildBlock(test.coinbaseData, test.transactions) + if err == nil { + t.Errorf("%s: No error from BuildBlock", test.name) + return + } + + err := test.testFunc(test, err) + if err != nil { + t.Errorf("%s: %s", test.name, err) + return + } + } + }) +} diff --git 
a/domain/consensus/processes/blockbuilder/log.go b/domain/consensus/processes/blockbuilder/log.go new file mode 100644 index 0000000..c5cb14d --- /dev/null +++ b/domain/consensus/processes/blockbuilder/log.go @@ -0,0 +1,7 @@ +package blockbuilder + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("BDAG") diff --git a/domain/consensus/processes/blockbuilder/test_block_builder.go b/domain/consensus/processes/blockbuilder/test_block_builder.go new file mode 100644 index 0000000..8eca351 --- /dev/null +++ b/domain/consensus/processes/blockbuilder/test_block_builder.go @@ -0,0 +1,290 @@ +package blockbuilder + +import ( + "encoding/binary" + "math/big" + "sort" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +type testBlockBuilder struct { + *blockBuilder + testConsensus testapi.TestConsensus + nonceCounter uint64 +} + +var tempBlockHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) + +// NewTestBlockBuilder creates an instance of a TestBlockBuilder +func NewTestBlockBuilder(baseBlockBuilder model.BlockBuilder, testConsensus testapi.TestConsensus) testapi.TestBlockBuilder { + return &testBlockBuilder{ + blockBuilder: baseBlockBuilder.(*blockBuilder), + testConsensus: testConsensus, + } +} + +func cleanBlockPrefilledFields(block 
*externalapi.DomainBlock) { + for _, tx := range block.Transactions { + tx.Fee = 0 + tx.Mass = 0 + tx.ID = nil + + for _, input := range tx.Inputs { + input.UTXOEntry = nil + } + } +} + +// BuildBlockWithParents builds a block with provided parents, coinbaseData and transactions, +// and returns the block together with its past UTXO-diff from the virtual. +func (bb *testBlockBuilder) BuildBlockWithParents(parentHashes []*externalapi.DomainHash, + coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) ( + *externalapi.DomainBlock, externalapi.UTXODiff, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "BuildBlockWithParents") + defer onEnd() + + stagingArea := model.NewStagingArea() + + block, diff, err := bb.buildBlockWithParents(stagingArea, parentHashes, coinbaseData, transactions) + if err != nil { + return nil, nil, err + } + + // It's invalid to insert a block with prefilled fields to consensus, so we + // clean them before returning the block. 
+ cleanBlockPrefilledFields(block) + + return block, diff, nil +} + +func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingArea, + parentHashes []*externalapi.DomainHash, bits uint32, daaScore, blueScore uint64, blueWork *big.Int, + transactions []*externalapi.DomainTransaction) (externalapi.BlockHeader, error) { + + timeInMilliseconds, err := bb.minBlockTime(stagingArea, tempBlockHash) + if err != nil { + return nil, err + } + + hashMerkleRoot := bb.newBlockHashMerkleRoot(transactions) + + pruningPoint, err := bb.newBlockPruningPoint(stagingArea, tempBlockHash) + if err != nil { + return nil, err + } + + parents, err := bb.blockParentBuilder.BuildParents(stagingArea, daaScore, parentHashes) + if err != nil { + return nil, err + } + + for _, blockLevelParents := range parents { + sort.Slice(blockLevelParents, func(i, j int) bool { + return blockLevelParents[i].Less(blockLevelParents[j]) + }) + } + + bb.nonceCounter++ + return blockheader.NewImmutableBlockHeader( + constants.BlockVersion, + parents, + hashMerkleRoot, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + timeInMilliseconds, + bits, + bb.nonceCounter, + daaScore, + blueScore, + blueWork, + pruningPoint, + ), nil +} + +func (bb *testBlockBuilder) buildHeaderWithParents(stagingArea *model.StagingArea, + parentHashes []*externalapi.DomainHash, bits uint32, transactions []*externalapi.DomainTransaction, + acceptanceData externalapi.AcceptanceData, multiset model.Multiset, daaScore, blueScore uint64, blueWork *big.Int) (externalapi.BlockHeader, error) { + + header, err := bb.buildUTXOInvalidHeader(stagingArea, parentHashes, bits, daaScore, blueScore, blueWork, transactions) + if err != nil { + return nil, err + } + + hashMerkleRoot := bb.newBlockHashMerkleRoot(transactions) + acceptedIDMerkleRoot, err := bb.calculateAcceptedIDMerkleRoot(acceptanceData) + if err != nil { + return nil, err + } + utxoCommitment := multiset.Hash() + + return blockheader.NewImmutableBlockHeader( + 
header.Version(), + header.Parents(), + hashMerkleRoot, + acceptedIDMerkleRoot, + utxoCommitment, + header.TimeInMilliseconds(), + header.Bits(), + header.Nonce(), + header.DAAScore(), + header.BlueScore(), + header.BlueWork(), + header.PruningPoint(), + ), nil +} + +func (bb *testBlockBuilder) buildBlockWithParents(stagingArea *model.StagingArea, parentHashes []*externalapi.DomainHash, + coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) ( + *externalapi.DomainBlock, externalapi.UTXODiff, error) { + + if coinbaseData == nil { + scriptPublicKeyScript, err := txscript.PayToScriptHashScript([]byte{txscript.OpTrue}) + if err != nil { + panic(errors.Wrapf(err, "Couldn't parse opTrueScript. This should never happen")) + } + scriptPublicKey := &externalapi.ScriptPublicKey{Script: scriptPublicKeyScript, Version: constants.MaxScriptPublicKeyVersion} + coinbaseData = &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + ExtraData: []byte{}, + } + } + + bb.blockRelationStore.StageBlockRelation(stagingArea, tempBlockHash, &model.BlockRelations{Parents: parentHashes}) + + err := bb.ghostdagManager.GHOSTDAG(stagingArea, tempBlockHash) + if err != nil { + return nil, nil, err + } + + bits, err := bb.difficultyManager.StageDAADataAndReturnRequiredDifficulty(stagingArea, tempBlockHash, false) + if err != nil { + return nil, nil, err + } + daaScore, err := bb.daaBlocksStore.DAAScore(bb.databaseContext, stagingArea, tempBlockHash) + if err != nil { + return nil, nil, err + } + + ghostdagData, err := bb.ghostdagDataStore.Get(bb.databaseContext, stagingArea, tempBlockHash, false) + if err != nil { + return nil, nil, err + } + blueWork := ghostdagData.BlueWork() + blueScore := ghostdagData.BlueScore() + + selectedParentStatus, err := bb.testConsensus.ConsensusStateManager().ResolveBlockStatus( + stagingArea, ghostdagData.SelectedParent(), false) + if err != nil { + return nil, nil, err + } + if selectedParentStatus == 
externalapi.StatusDisqualifiedFromChain { + return nil, nil, errors.Errorf("Error building block with selectedParent %s with status DisqualifiedFromChain", + ghostdagData.SelectedParent()) + } + + pastUTXO, acceptanceData, multiset, err := + bb.consensusStateManager.CalculatePastUTXOAndAcceptanceData(stagingArea, tempBlockHash) + if err != nil { + return nil, nil, err + } + + bb.acceptanceDataStore.Stage(stagingArea, tempBlockHash, acceptanceData) + + coinbase, _, err := bb.coinbaseManager.ExpectedCoinbaseTransaction(stagingArea, tempBlockHash, coinbaseData) + if err != nil { + return nil, nil, err + } + transactionsWithCoinbase := append([]*externalapi.DomainTransaction{coinbase}, transactions...) + + err = bb.testConsensus.ReachabilityManager().AddBlock(stagingArea, tempBlockHash) + if err != nil { + return nil, nil, err + } + + header, err := bb.buildHeaderWithParents( + stagingArea, parentHashes, bits, transactionsWithCoinbase, acceptanceData, multiset, daaScore, blueScore, blueWork) + if err != nil { + return nil, nil, err + } + + return &externalapi.DomainBlock{ + Header: header, + Transactions: transactionsWithCoinbase, + }, pastUTXO, nil +} + +func (bb *testBlockBuilder) BuildUTXOInvalidHeader(parentHashes []*externalapi.DomainHash) (externalapi.BlockHeader, + error) { + + block, err := bb.BuildUTXOInvalidBlock(parentHashes) + if err != nil { + return nil, err + } + + return block.Header, nil +} + +func (bb *testBlockBuilder) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, + error) { + + stagingArea := model.NewStagingArea() + + bb.blockRelationStore.StageBlockRelation(stagingArea, tempBlockHash, &model.BlockRelations{Parents: parentHashes}) + + err := bb.ghostdagManager.GHOSTDAG(stagingArea, tempBlockHash) + if err != nil { + return nil, err + } + + bits, err := bb.difficultyManager.StageDAADataAndReturnRequiredDifficulty(stagingArea, tempBlockHash, false) + if err != nil { + return nil, err + } + daaScore, err := 
bb.daaBlocksStore.DAAScore(bb.databaseContext, stagingArea, tempBlockHash) + if err != nil { + return nil, err + } + + ghostdagData, err := bb.ghostdagDataStore.Get(bb.databaseContext, stagingArea, tempBlockHash, false) + if err != nil { + return nil, err + } + blueWork := ghostdagData.BlueWork() + blueScore := ghostdagData.BlueScore() + + // We use the genesis coinbase so that we'll have something to build merkle root and a new coinbase with + genesisTransactions := bb.testConsensus.DAGParams().GenesisBlock.Transactions + genesisCoinbase := genesisTransactions[transactionhelper.CoinbaseTransactionIndex].Clone() + binary.LittleEndian.PutUint64(genesisCoinbase.Payload[:8], ghostdagData.BlueScore()) + transactions := []*externalapi.DomainTransaction{genesisCoinbase} + + err = bb.testConsensus.ReachabilityManager().AddBlock(stagingArea, tempBlockHash) + if err != nil { + return nil, err + } + + header, err := bb.buildUTXOInvalidHeader(stagingArea, parentHashes, bits, daaScore, blueScore, blueWork, transactions) + if err != nil { + return nil, err + } + + return &externalapi.DomainBlock{ + Header: header, + Transactions: transactions, + }, nil +} + +func (bb *testBlockBuilder) SetNonceCounter(nonceCounter uint64) { + bb.nonceCounter = nonceCounter +} diff --git a/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go b/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go new file mode 100644 index 0000000..f87d13e --- /dev/null +++ b/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go @@ -0,0 +1,244 @@ +package blockparentbuilder + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" +) + +type blockParentBuilder struct { + databaseContext model.DBManager + 
blockHeaderStore model.BlockHeaderStore + dagTopologyManager model.DAGTopologyManager + parentsManager model.ParentsManager + reachabilityDataStore model.ReachabilityDataStore + pruningStore model.PruningStore + + genesisHash *externalapi.DomainHash + maxBlockLevel int +} + +// New creates a new instance of a BlockParentBuilder +func New( + databaseContext model.DBManager, + blockHeaderStore model.BlockHeaderStore, + dagTopologyManager model.DAGTopologyManager, + parentsManager model.ParentsManager, + + reachabilityDataStore model.ReachabilityDataStore, + pruningStore model.PruningStore, + + genesisHash *externalapi.DomainHash, + maxBlockLevel int, +) model.BlockParentBuilder { + return &blockParentBuilder{ + databaseContext: databaseContext, + blockHeaderStore: blockHeaderStore, + dagTopologyManager: dagTopologyManager, + parentsManager: parentsManager, + + reachabilityDataStore: reachabilityDataStore, + pruningStore: pruningStore, + genesisHash: genesisHash, + maxBlockLevel: maxBlockLevel, + } +} + +func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea, + daaScore uint64, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) { + + // Later on we'll mutate direct parent hashes, so we first clone it. + directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes)) + copy(directParentHashesCopy, directParentHashes) + + pruningPoint, err := bpb.pruningStore.PruningPoint(bpb.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + // The first candidates to be added should be from a parent in the future of the pruning + // point, so later on we'll know that every block that doesn't have reachability data + // (i.e. pruned) is necessarily in the past of the current candidates and cannot be + // considered as a valid candidate. + // This is why we sort the direct parent headers in a way that the first one will be + // in the future of the pruning point. 
+ directParentHeaders := make([]externalapi.BlockHeader, len(directParentHashesCopy)) + firstParentInFutureOfPruningPointIndex := 0 + foundFirstParentInFutureOfPruningPoint := false + for i, directParentHash := range directParentHashesCopy { + isInFutureOfPruningPoint, err := bpb.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, directParentHash) + if err != nil { + return nil, err + } + + if !isInFutureOfPruningPoint { + continue + } + + firstParentInFutureOfPruningPointIndex = i + foundFirstParentInFutureOfPruningPoint = true + break + } + + if !foundFirstParentInFutureOfPruningPoint { + return nil, errors.New("BuildParents should get at least one parent in the future of the pruning point") + } + + oldFirstDirectParent := directParentHashesCopy[0] + directParentHashesCopy[0] = directParentHashesCopy[firstParentInFutureOfPruningPointIndex] + directParentHashesCopy[firstParentInFutureOfPruningPointIndex] = oldFirstDirectParent + + for i, directParentHash := range directParentHashesCopy { + directParentHeader, err := bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, directParentHash) + if err != nil { + return nil, err + } + directParentHeaders[i] = directParentHeader + } + + type blockToReferences map[externalapi.DomainHash][]*externalapi.DomainHash + candidatesByLevelToReferenceBlocksMap := make(map[int]blockToReferences) + + // Direct parents are guaranteed to be in one other's anticones so add them all to + // all the block levels they occupy + for _, directParentHeader := range directParentHeaders { + directParentHash := consensushashing.HeaderHash(directParentHeader) + blockLevel := directParentHeader.BlockLevel(bpb.maxBlockLevel) + for i := 0; i <= blockLevel; i++ { + if _, exists := candidatesByLevelToReferenceBlocksMap[i]; !exists { + candidatesByLevelToReferenceBlocksMap[i] = make(map[externalapi.DomainHash][]*externalapi.DomainHash) + } + candidatesByLevelToReferenceBlocksMap[i][*directParentHash] = 
[]*externalapi.DomainHash{directParentHash} + } + } + + virtualGenesisChildren, err := bpb.dagTopologyManager.Children(stagingArea, model.VirtualGenesisBlockHash) + if err != nil { + return nil, err + } + + virtualGenesisChildrenHeaders := make(map[externalapi.DomainHash]externalapi.BlockHeader, len(virtualGenesisChildren)) + for _, child := range virtualGenesisChildren { + virtualGenesisChildrenHeaders[*child], err = bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, child) + if err != nil { + return nil, err + } + } + + for _, directParentHeader := range directParentHeaders { + for blockLevel, blockLevelParentsInHeader := range bpb.parentsManager.Parents(directParentHeader) { + isEmptyLevel := false + if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists { + candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash) + isEmptyLevel = true + } + + for _, parent := range blockLevelParentsInHeader { + isInFutureOfVirtualGenesisChildren := false + hasReachabilityData, err := bpb.reachabilityDataStore.HasReachabilityData(bpb.databaseContext, stagingArea, parent) + if err != nil { + return nil, err + } + if hasReachabilityData { + // If a block is in the future of one of the virtual genesis children it means we have the full DAG between the current block + // and this parent, so there's no need for any indirect reference blocks, and normal reachability queries can be used. + isInFutureOfVirtualGenesisChildren, err = bpb.dagTopologyManager.IsAnyAncestorOf(stagingArea, virtualGenesisChildren, parent) + if err != nil { + return nil, err + } + } + + // Reference blocks are the blocks that are used in reachability queries to check if + // a candidate is in the future of another candidate. In most cases this is just the + // block itself, but in the case where a block doesn't have reachability data we need + // to use some blocks in its future as reference instead. 
+ // If we make sure to add a parent in the future of the pruning point first, we can + // know that any pruned candidate that is in the past of some blocks in the pruning + // point anticone should be a parent (in the relevant level) of one of + // the virtual genesis children in the pruning point anticone. So we can check which + // virtual genesis children have this block as parent and use those blocks as + // reference blocks. + var referenceBlocks []*externalapi.DomainHash + if isInFutureOfVirtualGenesisChildren { + referenceBlocks = []*externalapi.DomainHash{parent} + } else { + for childHash, childHeader := range virtualGenesisChildrenHeaders { + childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse + if bpb.parentsManager.ParentsAtLevel(childHeader, blockLevel).Contains(parent) { + referenceBlocks = append(referenceBlocks, &childHash) + } + } + } + + if isEmptyLevel { + candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks + continue + } + + if !isInFutureOfVirtualGenesisChildren { + continue + } + + toRemove := hashset.New() + isAncestorOfAnyCandidate := false + for candidate, candidateReferences := range candidatesByLevelToReferenceBlocksMap[blockLevel] { + candidate := candidate // Assign to a new pointer to avoid `range` pointer reuse + isInFutureOfCurrentCandidate, err := bpb.dagTopologyManager.IsAnyAncestorOf(stagingArea, candidateReferences, parent) + if err != nil { + return nil, err + } + + if isInFutureOfCurrentCandidate { + toRemove.Add(&candidate) + continue + } + + if isAncestorOfAnyCandidate { + continue + } + + isAncestorOfCurrentCandidate, err := bpb.dagTopologyManager.IsAncestorOfAny(stagingArea, parent, candidateReferences) + if err != nil { + return nil, err + } + + if isAncestorOfCurrentCandidate { + isAncestorOfAnyCandidate = true + } + } + + if toRemove.Length() > 0 { + for hash := range toRemove { + delete(candidatesByLevelToReferenceBlocksMap[blockLevel], hash) + } + } + + 
// We should add the block as a candidate if it's in the future of another candidate + // or in the anticone of all candidates. + if !isAncestorOfAnyCandidate || toRemove.Length() > 0 { + candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks + } + } + } + } + + parents := make([]externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap)) + for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ { + if blockLevel > 0 { + if _, ok := candidatesByLevelToReferenceBlocksMap[blockLevel][*bpb.genesisHash]; ok && len(candidatesByLevelToReferenceBlocksMap[blockLevel]) == 1 { + break + } + } + + levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel])) + for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] { + block := block // Assign to a new pointer to avoid `range` pointer reuse + levelBlocks = append(levelBlocks, &block) + } + + parents = append(parents, levelBlocks) + } + return parents, nil +} diff --git a/domain/consensus/processes/blockprocessor/blocklogger/blocklogger.go b/domain/consensus/processes/blockprocessor/blocklogger/blocklogger.go new file mode 100644 index 0000000..5a55113 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/blocklogger/blocklogger.go @@ -0,0 +1,77 @@ +// Copyright (c) 2015-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package blocklogger + +import ( + "time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/mstime" +) + +// BlockLogger is a type tracking the amount of blocks/headers/transactions to log the time it took to receive them +type BlockLogger struct { + receivedLogBlocks int64 + receivedLogHeaders int64 + receivedLogTransactions int64 + lastBlockLogTime time.Time +} + +// NewBlockLogger creates a new instance with zeroed blocks/headers/transactions/time counters. +func NewBlockLogger() *BlockLogger { + return &BlockLogger{ + receivedLogBlocks: 0, + receivedLogHeaders: 0, + receivedLogTransactions: 0, + lastBlockLogTime: time.Now(), + } +} + +// LogBlock logs a new block blue score as an information message +// to show progress to the user. In order to prevent spam, it limits logging to +// one message every 10 seconds with duration and totals included. +func (bl *BlockLogger) LogBlock(block *externalapi.DomainBlock) { + if len(block.Transactions) == 0 { + bl.receivedLogHeaders++ + } else { + bl.receivedLogBlocks++ + } + + bl.receivedLogTransactions += int64(len(block.Transactions)) + + now := time.Now() + duration := now.Sub(bl.lastBlockLogTime) + if duration < time.Second*10 { + return + } + + // Truncate the duration to 10s of milliseconds. + truncatedDuration := duration.Round(10 * time.Millisecond) + + // Log information about new block blue score. 
+	blockStr := "blocks"
+	if bl.receivedLogBlocks == 1 {
+		blockStr = "block"
+	}
+
+	txStr := "transactions"
+	if bl.receivedLogTransactions == 1 {
+		txStr = "transaction"
+	}
+
+	// Use the header counter (not the block counter) to pick the singular
+	// form for headers; the original tested receivedLogBlocks by copy-paste.
+	headerStr := "headers"
+	if bl.receivedLogHeaders == 1 {
+		headerStr = "header"
+	}
+
+	log.Infof("Processed %d %s and %d %s in the last %s (%d %s, %s)",
+		bl.receivedLogBlocks, blockStr, bl.receivedLogHeaders, headerStr, truncatedDuration, bl.receivedLogTransactions,
+		txStr, mstime.UnixMilliseconds(block.Header.TimeInMilliseconds()))
+
+	bl.receivedLogBlocks = 0
+	bl.receivedLogHeaders = 0
+	bl.receivedLogTransactions = 0
+	bl.lastBlockLogTime = now
+}
diff --git a/domain/consensus/processes/blockprocessor/blocklogger/log.go b/domain/consensus/processes/blockprocessor/blocklogger/log.go
new file mode 100644
index 0000000..fbda46b
--- /dev/null
+++ b/domain/consensus/processes/blockprocessor/blocklogger/log.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2017 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+ +package blocklogger + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("BDAG") diff --git a/domain/consensus/processes/blockprocessor/blockprocessor.go b/domain/consensus/processes/blockprocessor/blockprocessor.go new file mode 100644 index 0000000..cf2f7ef --- /dev/null +++ b/domain/consensus/processes/blockprocessor/blockprocessor.go @@ -0,0 +1,172 @@ +package blockprocessor + +import ( + "time" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/processes/blockprocessor/blocklogger" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// blockProcessor is responsible for processing incoming blocks +// and creating blocks from the current state +type blockProcessor struct { + genesisHash *externalapi.DomainHash + targetTimePerBlock time.Duration + maxBlockLevel int + databaseContext model.DBManager + blockLogger *blocklogger.BlockLogger + + consensusStateManager model.ConsensusStateManager + pruningManager model.PruningManager + blockValidator model.BlockValidator + dagTopologyManager model.DAGTopologyManager + reachabilityManager model.ReachabilityManager + difficultyManager model.DifficultyManager + pastMedianTimeManager model.PastMedianTimeManager + coinbaseManager model.CoinbaseManager + headerTipsManager model.HeadersSelectedTipManager + syncManager model.SyncManager + finalityManager model.FinalityManager + + acceptanceDataStore model.AcceptanceDataStore + blockStore model.BlockStore + blockStatusStore model.BlockStatusStore + blockRelationStore model.BlockRelationStore + multisetStore model.MultisetStore + ghostdagDataStore model.GHOSTDAGDataStore + consensusStateStore model.ConsensusStateStore + pruningStore model.PruningStore + reachabilityDataStore model.ReachabilityDataStore + utxoDiffStore model.UTXODiffStore + blockHeaderStore 
model.BlockHeaderStore + headersSelectedTipStore model.HeaderSelectedTipStore + finalityStore model.FinalityStore + headersSelectedChainStore model.HeadersSelectedChainStore + daaBlocksStore model.DAABlocksStore + blocksWithTrustedDataDAAWindowStore model.BlocksWithTrustedDataDAAWindowStore + + stores []model.Store +} + +// New instantiates a new BlockProcessor +func New( + genesisHash *externalapi.DomainHash, + targetTimePerBlock time.Duration, + maxBlockLevel int, + databaseContext model.DBManager, + + consensusStateManager model.ConsensusStateManager, + pruningManager model.PruningManager, + blockValidator model.BlockValidator, + dagTopologyManager model.DAGTopologyManager, + reachabilityManager model.ReachabilityManager, + difficultyManager model.DifficultyManager, + pastMedianTimeManager model.PastMedianTimeManager, + coinbaseManager model.CoinbaseManager, + headerTipsManager model.HeadersSelectedTipManager, + syncManager model.SyncManager, + + acceptanceDataStore model.AcceptanceDataStore, + blockStore model.BlockStore, + blockStatusStore model.BlockStatusStore, + blockRelationStore model.BlockRelationStore, + multisetStore model.MultisetStore, + ghostdagDataStore model.GHOSTDAGDataStore, + consensusStateStore model.ConsensusStateStore, + pruningStore model.PruningStore, + reachabilityDataStore model.ReachabilityDataStore, + utxoDiffStore model.UTXODiffStore, + blockHeaderStore model.BlockHeaderStore, + headersSelectedTipStore model.HeaderSelectedTipStore, + finalityStore model.FinalityStore, + headersSelectedChainStore model.HeadersSelectedChainStore, + daaBlocksStore model.DAABlocksStore, + blocksWithTrustedDataDAAWindowStore model.BlocksWithTrustedDataDAAWindowStore, +) model.BlockProcessor { + + return &blockProcessor{ + genesisHash: genesisHash, + targetTimePerBlock: targetTimePerBlock, + maxBlockLevel: maxBlockLevel, + databaseContext: databaseContext, + blockLogger: blocklogger.NewBlockLogger(), + pruningManager: pruningManager, + blockValidator: 
blockValidator, + dagTopologyManager: dagTopologyManager, + reachabilityManager: reachabilityManager, + difficultyManager: difficultyManager, + pastMedianTimeManager: pastMedianTimeManager, + coinbaseManager: coinbaseManager, + headerTipsManager: headerTipsManager, + syncManager: syncManager, + + consensusStateManager: consensusStateManager, + acceptanceDataStore: acceptanceDataStore, + blockStore: blockStore, + blockStatusStore: blockStatusStore, + blockRelationStore: blockRelationStore, + multisetStore: multisetStore, + ghostdagDataStore: ghostdagDataStore, + consensusStateStore: consensusStateStore, + pruningStore: pruningStore, + reachabilityDataStore: reachabilityDataStore, + utxoDiffStore: utxoDiffStore, + blockHeaderStore: blockHeaderStore, + headersSelectedTipStore: headersSelectedTipStore, + finalityStore: finalityStore, + headersSelectedChainStore: headersSelectedChainStore, + daaBlocksStore: daaBlocksStore, + blocksWithTrustedDataDAAWindowStore: blocksWithTrustedDataDAAWindowStore, + + stores: []model.Store{ + consensusStateStore, + acceptanceDataStore, + blockStore, + blockStatusStore, + blockRelationStore, + multisetStore, + ghostdagDataStore, + consensusStateStore, + pruningStore, + reachabilityDataStore, + utxoDiffStore, + blockHeaderStore, + headersSelectedTipStore, + finalityStore, + headersSelectedChainStore, + daaBlocksStore, + blocksWithTrustedDataDAAWindowStore, + }, + } +} + +// ValidateAndInsertBlock validates the given block and, if valid, applies it +// to the current state +func (bp *blockProcessor) ValidateAndInsertBlock(block *externalapi.DomainBlock, + shouldValidateAgainstUTXO bool) (*externalapi.VirtualChangeSet, externalapi.BlockStatus, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateAndInsertBlock") + defer onEnd() + + stagingArea := model.NewStagingArea() + return bp.validateAndInsertBlock(stagingArea, block, false, shouldValidateAgainstUTXO, false) +} + +func (bp *blockProcessor) 
ValidateAndInsertImportedPruningPoint(newPruningPoint *externalapi.DomainHash) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateAndInsertImportedPruningPoint") + defer onEnd() + + stagingArea := model.NewStagingArea() + return bp.validateAndInsertImportedPruningPoint(stagingArea, newPruningPoint) +} + +func (bp *blockProcessor) ValidateAndInsertBlockWithTrustedData(block *externalapi.BlockWithTrustedData, + shouldValidateAgainstUTXO bool) (*externalapi.VirtualChangeSet, externalapi.BlockStatus, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateAndInsertBlockWithTrustedData") + defer onEnd() + + stagingArea := model.NewStagingArea() + + return bp.validateAndInsertBlockWithTrustedData(stagingArea, block, shouldValidateAgainstUTXO) +} diff --git a/domain/consensus/processes/blockprocessor/log.go b/domain/consensus/processes/blockprocessor/log.go new file mode 100644 index 0000000..413d2c6 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/log.go @@ -0,0 +1,7 @@ +package blockprocessor + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("BDAG") diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_block.go b/domain/consensus/processes/blockprocessor/validate_and_insert_block.go new file mode 100644 index 0000000..2b31781 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/validate_and_insert_block.go @@ -0,0 +1,377 @@ +package blockprocessor + +import ( + // we need to embed the utxoset of mainnet genesis here + _ "embed" + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/multiset" + 
"github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/difficulty" + "github.com/spectre-project/spectred/util/staging" +) + +func (bp *blockProcessor) setBlockStatusAfterBlockValidation( + stagingArea *model.StagingArea, block *externalapi.DomainBlock, isPruningPoint bool) (externalapi.BlockStatus, error) { + + blockHash := consensushashing.BlockHash(block) + + exists, err := bp.blockStatusStore.Exists(bp.databaseContext, stagingArea, blockHash) + if err != nil { + return externalapi.StatusInvalid, err + } + if exists { + status, err := bp.blockStatusStore.Get(bp.databaseContext, stagingArea, blockHash) + if err != nil { + return externalapi.StatusInvalid, err + } + + if status == externalapi.StatusUTXOValid { + // A block cannot have status StatusUTXOValid just after finishing bp.validateBlock, because + // if it's the case it should have been rejected as duplicate block. + // The only exception is the pruning point because its status is manually set before inserting + // the block. 
+			if !isPruningPoint {
+				return externalapi.StatusInvalid, errors.Errorf("block %s that is not the pruning point is not expected to be valid "+
+					"before adding to the consensus state manager", blockHash)
+			}
+			log.Debugf("Block %s is the pruning point and has status %s, so leaving its status untouched",
+				blockHash, status)
+			return status, nil
+		}
+	}
+
+	isHeaderOnlyBlock := isHeaderOnlyBlock(block)
+	if isHeaderOnlyBlock {
+		log.Debugf("Block %s is a header-only block so setting its status as %s",
+			blockHash, externalapi.StatusHeaderOnly)
+		bp.blockStatusStore.Stage(stagingArea, blockHash, externalapi.StatusHeaderOnly)
+		return externalapi.StatusHeaderOnly, nil
+	}
+
+	log.Debugf("Block %s has body so setting its status as %s",
+		blockHash, externalapi.StatusUTXOPendingVerification)
+	bp.blockStatusStore.Stage(stagingArea, blockHash, externalapi.StatusUTXOPendingVerification)
+	return externalapi.StatusUTXOPendingVerification, nil
+}
+
+func (bp *blockProcessor) updateVirtualAcceptanceDataAfterImportingPruningPoint(stagingArea *model.StagingArea) error {
+	_, virtualAcceptanceData, virtualMultiset, err :=
+		bp.consensusStateManager.CalculatePastUTXOAndAcceptanceData(stagingArea, model.VirtualBlockHash)
+	if err != nil {
+		return err
+	}
+
+	log.Debugf("Staging virtual acceptance data after importing the pruning point")
+	bp.acceptanceDataStore.Stage(stagingArea, model.VirtualBlockHash, virtualAcceptanceData)
+
+	log.Debugf("Staging virtual multiset after importing the pruning point")
+	bp.multisetStore.Stage(stagingArea, model.VirtualBlockHash, virtualMultiset)
+	return nil
+}
+
+func (bp *blockProcessor) validateAndInsertBlock(stagingArea *model.StagingArea, block *externalapi.DomainBlock,
+	isPruningPoint bool, shouldValidateAgainstUTXO bool, isBlockWithTrustedData bool) (*externalapi.VirtualChangeSet, externalapi.BlockStatus, error) {
+
+	blockHash := consensushashing.HeaderHash(block.Header)
+	err := bp.validateBlock(stagingArea, block,
isBlockWithTrustedData) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + status, err := bp.setBlockStatusAfterBlockValidation(stagingArea, block, isPruningPoint) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + var oldHeadersSelectedTip *externalapi.DomainHash + hasHeaderSelectedTip, err := bp.headersSelectedTipStore.Has(bp.databaseContext, stagingArea) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + if hasHeaderSelectedTip { + var err error + oldHeadersSelectedTip, err = bp.headersSelectedTipStore.HeadersSelectedTip(bp.databaseContext, stagingArea) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + } + + shouldAddHeaderSelectedTip := false + if !hasHeaderSelectedTip { + shouldAddHeaderSelectedTip = true + } else { + pruningPoint, err := bp.pruningStore.PruningPoint(bp.databaseContext, stagingArea) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + isInSelectedChainOfPruningPoint, err := bp.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, pruningPoint, blockHash) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + // Don't set blocks in the anticone of the pruning point as header selected tip. + shouldAddHeaderSelectedTip = isInSelectedChainOfPruningPoint + } + + if shouldAddHeaderSelectedTip { + // Don't set blocks in the anticone of the pruning point as header selected tip. 
+ err = bp.headerTipsManager.AddHeaderTip(stagingArea, blockHash) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + } + + bp.loadUTXODataForGenesis(stagingArea, block) + var selectedParentChainChanges *externalapi.SelectedChainPath + var virtualUTXODiff externalapi.UTXODiff + var reversalData *model.UTXODiffReversalData + isHeaderOnlyBlock := isHeaderOnlyBlock(block) + if !isHeaderOnlyBlock { + // Attempt to add the block to the virtual + selectedParentChainChanges, virtualUTXODiff, reversalData, err = bp.consensusStateManager.AddBlock(stagingArea, blockHash, shouldValidateAgainstUTXO) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + } + + if hasHeaderSelectedTip { + err := bp.updateReachabilityReindexRoot(stagingArea, oldHeadersSelectedTip) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + } + + if !isHeaderOnlyBlock && shouldValidateAgainstUTXO { + // Trigger pruning, which will check if the pruning point changed and delete the data if it did. 
+ err = bp.pruningManager.UpdatePruningPointByVirtual(stagingArea) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + } + + err = staging.CommitAllChanges(bp.databaseContext, stagingArea) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + if reversalData != nil { + err = bp.consensusStateManager.ReverseUTXODiffs(blockHash, reversalData) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + } + + err = bp.pruningManager.UpdatePruningPointIfRequired() + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + log.Debug(logger.NewLogClosure(func() string { + hashrate := difficulty.GetHashrateString(difficulty.CompactToBig(block.Header.Bits()), bp.targetTimePerBlock) + return fmt.Sprintf("Block %s validated and inserted, network hashrate: %s", blockHash, hashrate) + })) + + var logClosureErr error + log.Debug(logger.NewLogClosure(func() string { + virtualGhostDAGData, err := bp.ghostdagDataStore.Get(bp.databaseContext, stagingArea, model.VirtualBlockHash, false) + if database.IsNotFoundError(err) { + return fmt.Sprintf("Cannot log data for non-existent virtual") + } + + if err != nil { + logClosureErr = err + return fmt.Sprintf("Failed to get virtual GHOSTDAG data: %s", err) + } + headerCount := bp.blockHeaderStore.Count(stagingArea) + blockCount := bp.blockStore.Count(stagingArea) + return fmt.Sprintf("New virtual's blue score: %d. Block count: %d. 
Header count: %d", + virtualGhostDAGData.BlueScore(), blockCount, headerCount) + })) + if logClosureErr != nil { + return nil, externalapi.StatusInvalid, logClosureErr + } + + virtualParents, err := bp.dagTopologyManager.Parents(stagingArea, model.VirtualBlockHash) + if database.IsNotFoundError(err) { + virtualParents = nil + } else if err != nil { + return nil, externalapi.StatusInvalid, err + } + + bp.pastMedianTimeManager.InvalidateVirtualPastMedianTimeCache() + + bp.blockLogger.LogBlock(block) + + return &externalapi.VirtualChangeSet{ + VirtualSelectedParentChainChanges: selectedParentChainChanges, + VirtualUTXODiff: virtualUTXODiff, + VirtualParents: virtualParents, + }, status, nil +} + +func (bp *blockProcessor) loadUTXODataForGenesis(stagingArea *model.StagingArea, block *externalapi.DomainBlock) { + isGenesis := len(block.Header.DirectParents()) == 0 + if !isGenesis { + return + } + blockHash := consensushashing.BlockHash(block) + // Note: The applied UTXO set and multiset do not satisfy the UTXO commitment + // of Mainnet's genesis. This is why any block that will be built on top of genesis + // will have a wrong UTXO commitment as well, and will not be able to get to a consensus + // with the rest of the network. + // This is why getting direct blocks on top of genesis is forbidden, and the only way to + // get a newer state for a node with genesis only is by requesting a proof for a recent + // pruning point. 
+ // The actual UTXO set that fits Mainnet's genesis' UTXO commitment was removed from the codebase in order + // to make reduce the consensus initialization time and the compiled binary size, but can be still + // found here for anyone to verify: https://github.com/spectre-project/spectred/blob/dbf18d8052f000ba0079be9e79b2d6f5a98b74ca/domain/consensus/processes/blockprocessor/resources/utxos.gz + bp.consensusStateStore.StageVirtualUTXODiff(stagingArea, utxo.NewUTXODiff()) + bp.utxoDiffStore.Stage(stagingArea, blockHash, utxo.NewUTXODiff(), nil) + bp.multisetStore.Stage(stagingArea, blockHash, multiset.New()) +} + +func isHeaderOnlyBlock(block *externalapi.DomainBlock) bool { + return len(block.Transactions) == 0 +} + +func (bp *blockProcessor) updateReachabilityReindexRoot(stagingArea *model.StagingArea, + oldHeadersSelectedTip *externalapi.DomainHash) error { + + headersSelectedTip, err := bp.headersSelectedTipStore.HeadersSelectedTip(bp.databaseContext, stagingArea) + if err != nil { + return err + } + + if headersSelectedTip.Equal(oldHeadersSelectedTip) { + return nil + } + + return bp.reachabilityManager.UpdateReindexRoot(stagingArea, headersSelectedTip) +} + +func (bp *blockProcessor) checkBlockStatus(stagingArea *model.StagingArea, block *externalapi.DomainBlock) error { + hash := consensushashing.BlockHash(block) + isHeaderOnlyBlock := isHeaderOnlyBlock(block) + exists, err := bp.blockStatusStore.Exists(bp.databaseContext, stagingArea, hash) + if err != nil { + return err + } + if !exists { + return nil + } + + status, err := bp.blockStatusStore.Get(bp.databaseContext, stagingArea, hash) + if err != nil { + return err + } + + if status == externalapi.StatusInvalid { + return errors.Wrapf(ruleerrors.ErrKnownInvalid, "block %s is a known invalid block", hash) + } + + if !isHeaderOnlyBlock { + hasBlock, err := bp.blockStore.HasBlock(bp.databaseContext, stagingArea, hash) + if err != nil { + return err + } + if hasBlock { + return 
errors.Wrapf(ruleerrors.ErrDuplicateBlock, "block %s already exists", hash) + } + } else { + hasHeader, err := bp.blockHeaderStore.HasBlockHeader(bp.databaseContext, stagingArea, hash) + if err != nil { + return err + } + if hasHeader { + return errors.Wrapf(ruleerrors.ErrDuplicateBlock, "block %s header already exists", hash) + } + } + + return nil +} + +func (bp *blockProcessor) validatePreProofOfWork(stagingArea *model.StagingArea, block *externalapi.DomainBlock) error { + blockHash := consensushashing.BlockHash(block) + + hasValidatedHeader, err := bp.hasValidatedHeader(stagingArea, blockHash) + if err != nil { + return err + } + + if hasValidatedHeader { + log.Debugf("Block %s header was already validated, so skip the rest of validatePreProofOfWork", blockHash) + return nil + } + + err = bp.blockValidator.ValidateHeaderInIsolation(stagingArea, blockHash) + if err != nil { + return err + } + return nil +} + +func (bp *blockProcessor) validatePostProofOfWork(stagingArea *model.StagingArea, block *externalapi.DomainBlock, isBlockWithTrustedData bool) error { + blockHash := consensushashing.BlockHash(block) + + isHeaderOnlyBlock := isHeaderOnlyBlock(block) + if !isHeaderOnlyBlock { + bp.blockStore.Stage(stagingArea, blockHash, block) + err := bp.blockValidator.ValidateBodyInIsolation(stagingArea, blockHash) + if err != nil { + return err + } + } + + hasValidatedHeader, err := bp.hasValidatedHeader(stagingArea, blockHash) + if err != nil { + return err + } + + if !hasValidatedHeader { + err = bp.blockValidator.ValidateHeaderInContext(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + } + + if !isHeaderOnlyBlock { + err = bp.blockValidator.ValidateBodyInContext(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + } else { + log.Debugf("Skipping ValidateBodyInContext for block %s because it's header only", blockHash) + } + + return nil +} + +// hasValidatedHeader returns whether the block header was 
validated. It returns +// true in any case the block header was validated, whether it was validated as a +// header-only block or as a block with body. +func (bp *blockProcessor) hasValidatedHeader(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + exists, err := bp.blockStatusStore.Exists(bp.databaseContext, stagingArea, blockHash) + if err != nil { + return false, err + } + + if !exists { + return false, nil + } + + status, err := bp.blockStatusStore.Get(bp.databaseContext, stagingArea, blockHash) + if err != nil { + return false, err + } + + return status != externalapi.StatusInvalid, nil +} diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_block_test.go b/domain/consensus/processes/blockprocessor/validate_and_insert_block_test.go new file mode 100644 index 0000000..bde8e54 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/validate_and_insert_block_test.go @@ -0,0 +1,218 @@ +package blockprocessor_test + +import ( + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/merkle" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestBlockStatus(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockStatus") + if err != nil { + t.Fatalf("Error setting up consensus: 
%+v", err) + } + defer teardown(false) + + checkStatus := func(hash *externalapi.DomainHash, expectedStatus externalapi.BlockStatus) { + blockStatus, err := tc.BlockStatusStore().Get(tc.DatabaseContext(), model.NewStagingArea(), hash) + if err != nil { + t.Fatalf("BlockStatusStore().Get: %+v", err) + } + + if blockStatus != expectedStatus { + t.Fatalf("Expected to have status %s but got %s", expectedStatus, blockStatus) + } + } + + tipHash := consensusConfig.GenesisHash + for i := 0; i < 2; i++ { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + checkStatus(tipHash, externalapi.StatusUTXOValid) + } + + headerHash, _, err := tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{tipHash}) + if err != nil { + t.Fatalf("AddUTXOInvalidHeader: %+v", err) + } + + checkStatus(headerHash, externalapi.StatusHeaderOnly) + + nonChainBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + checkStatus(nonChainBlockHash, externalapi.StatusUTXOPendingVerification) + + disqualifiedBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + disqualifiedBlock.Header = blockheader.NewImmutableBlockHeader( + disqualifiedBlock.Header.Version(), + disqualifiedBlock.Header.Parents(), + disqualifiedBlock.Header.HashMerkleRoot(), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{}), // This should disqualify the block + disqualifiedBlock.Header.UTXOCommitment(), + disqualifiedBlock.Header.TimeInMilliseconds(), + disqualifiedBlock.Header.Bits(), + disqualifiedBlock.Header.Nonce(), + disqualifiedBlock.Header.DAAScore(), + disqualifiedBlock.Header.BlueScore(), + disqualifiedBlock.Header.BlueWork(), + disqualifiedBlock.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(disqualifiedBlock, true) 
+ if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + checkStatus(consensushashing.BlockHash(disqualifiedBlock), externalapi.StatusDisqualifiedFromChain) + + invalidBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + invalidBlock.Transactions[0].Version = constants.MaxTransactionVersion + 1 // This should invalidate the block + invalidBlock.Header = blockheader.NewImmutableBlockHeader( + disqualifiedBlock.Header.Version(), + disqualifiedBlock.Header.Parents(), + merkle.CalculateHashMerkleRoot(invalidBlock.Transactions), + disqualifiedBlock.Header.AcceptedIDMerkleRoot(), + disqualifiedBlock.Header.UTXOCommitment(), + disqualifiedBlock.Header.TimeInMilliseconds(), + disqualifiedBlock.Header.Bits(), + disqualifiedBlock.Header.Nonce(), + disqualifiedBlock.Header.DAAScore(), + disqualifiedBlock.Header.BlueScore(), + disqualifiedBlock.Header.BlueWork(), + disqualifiedBlock.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(invalidBlock, true) + if err == nil { + t.Fatalf("block is expected to be invalid") + } + if !errors.As(err, &ruleerrors.RuleError{}) { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + checkStatus(consensushashing.BlockHash(invalidBlock), externalapi.StatusInvalid) + }) +} + +// TestValidateAndInsertErrors tests the error cases on "validateBlock" function. +func TestValidateAndInsertErrors(t *testing.T) { + // All the tests below tests the error cases in "validateAndInsertBlock" function. + // Each test is covering the error cases in a sub-function in "validateAndInsertBlock" function. + // Currently, implemented only for some of the errors. 
+ + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockStatus") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + tipHash, emptyCoinbase, tx1 := initData(consensusConfig) + + // Tests all the error case on the function: "checkBlockStatus"(sub-function in function validateBlock) + blockWithStatusInvalid, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, + &emptyCoinbase, []*externalapi.DomainTransaction{tx1, tx1}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + err = tc.ValidateAndInsertBlock(blockWithStatusInvalid, true) + if err == nil { + t.Fatalf("Test ValidateAndInsertBlock: Expected an error, because the block is invalid.") + } + err = tc.ValidateAndInsertBlock(blockWithStatusInvalid, true) + if err == nil || !errors.Is(err, ruleerrors.ErrKnownInvalid) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrKnownInvalid, err) + } + if !strings.Contains(err.Error(), "is a known invalid block") { + t.Fatalf("Test ValidateAndInsertBlock: Expected an diff error, got: %+v.", err) + } + + block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + // resend the same block. 
+ err = tc.ValidateAndInsertBlock(block, true) + if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateBlock) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrDuplicateBlock, err) + } + if !strings.Contains(err.Error(), " already exists") { + t.Fatalf("Test ValidateAndInsertBlock: Expected an diff error, got: %+v.", err) + } + + onlyHeader, err := tc.BuildBlock(&emptyCoinbase, []*externalapi.DomainTransaction{}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + onlyHeader.Transactions = []*externalapi.DomainTransaction{} + err = tc.ValidateAndInsertBlock(onlyHeader, true) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + // resend the same header. + err = tc.ValidateAndInsertBlock(onlyHeader, true) + if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateBlock) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrDuplicateBlock, err) + } + if !strings.Contains(err.Error(), "header already exists") { + t.Fatalf("Test ValidateAndInsertBlock: Expected an diff error, got: %+v.", err) + } + + }) +} + +func initData(consensusConfig *consensus.Config) (*externalapi.DomainHash, externalapi.DomainCoinbaseData, *externalapi.DomainTransaction) { + return consensusConfig.GenesisHash, + externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + }, + + &externalapi.DomainTransaction{ + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + LockTime: 1, + SubnetworkID: externalapi.DomainSubnetworkID{0x01}, + Gas: 1, + Payload: []byte{0x01}, + Fee: 0, + Mass: 1, + ID: externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}), + } +} diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_block_with_trusted_data.go b/domain/consensus/processes/blockprocessor/validate_and_insert_block_with_trusted_data.go new file mode 100644 index 0000000..4a7ba21 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/validate_and_insert_block_with_trusted_data.go @@ -0,0 +1,98 @@ +package blockprocessor + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func (bp *blockProcessor) validateAndInsertBlockWithTrustedData(stagingArea *model.StagingArea, + block *externalapi.BlockWithTrustedData, validateUTXO bool) (*externalapi.VirtualChangeSet, externalapi.BlockStatus, error) { + + blockHash := consensushashing.BlockHash(block.Block) + for i, daaBlock := range block.DAAWindow { + hash := consensushashing.HeaderHash(daaBlock.Header) + bp.blocksWithTrustedDataDAAWindowStore.Stage(stagingArea, blockHash, uint64(i), &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: hash, + GHOSTDAGData: daaBlock.GHOSTDAGData, + }) + bp.blockHeaderStore.Stage(stagingArea, hash, daaBlock.Header) + } + + blockReplacedGHOSTDAGData, err := bp.ghostdagDataWithoutPrunedBlocks(stagingArea, block.GHOSTDAGData[0].GHOSTDAGData) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + bp.ghostdagDataStore.Stage(stagingArea, blockHash, blockReplacedGHOSTDAGData, false) + + for _, pair := range block.GHOSTDAGData { + bp.ghostdagDataStore.Stage(stagingArea, pair.Hash, pair.GHOSTDAGData, true) + } + + bp.daaBlocksStore.StageDAAScore(stagingArea, blockHash, block.Block.Header.DAAScore()) + return 
bp.validateAndInsertBlock(stagingArea, block.Block, false, validateUTXO, true) +} + +func (bp *blockProcessor) ghostdagDataWithoutPrunedBlocks(stagingArea *model.StagingArea, + data *externalapi.BlockGHOSTDAGData) (*externalapi.BlockGHOSTDAGData, error) { + mergeSetBlues := make([]*externalapi.DomainHash, 0, len(data.MergeSetBlues())) + for _, blockHash := range data.MergeSetBlues() { + isPruned, err := bp.isPruned(stagingArea, blockHash) + if err != nil { + return nil, err + } + if isPruned { + if data.SelectedParent().Equal(blockHash) { + mergeSetBlues = append(mergeSetBlues, model.VirtualGenesisBlockHash) + } + continue + } + + mergeSetBlues = append(mergeSetBlues, blockHash) + } + + mergeSetReds := make([]*externalapi.DomainHash, 0, len(data.MergeSetReds())) + for _, blockHash := range data.MergeSetReds() { + isPruned, err := bp.isPruned(stagingArea, blockHash) + if err != nil { + return nil, err + } + if isPruned { + continue + } + + mergeSetReds = append(mergeSetReds, blockHash) + } + + selectedParent := data.SelectedParent() + isPruned, err := bp.isPruned(stagingArea, data.SelectedParent()) + if err != nil { + return nil, err + } + + if isPruned { + selectedParent = model.VirtualGenesisBlockHash + } + + return externalapi.NewBlockGHOSTDAGData( + data.BlueScore(), + data.BlueWork(), + selectedParent, + mergeSetBlues, + mergeSetReds, + data.BluesAnticoneSizes(), + ), nil +} + +func (bp *blockProcessor) isPruned(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + _, err := bp.ghostdagDataStore.Get(bp.databaseContext, stagingArea, blockHash, false) + if database.IsNotFoundError(err) { + return true, nil + } + if err != nil { + return false, err + } + + return false, nil +} diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point.go b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point.go new file mode 100644 index 0000000..a660135 --- /dev/null +++ 
b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point.go @@ -0,0 +1,47 @@ +package blockprocessor + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" +) + +func (bp *blockProcessor) validateAndInsertImportedPruningPoint( + stagingArea *model.StagingArea, newPruningPointHash *externalapi.DomainHash) error { + + log.Info("Checking that the given pruning point is the expected pruning point") + + isValidPruningPoint, err := bp.pruningManager.IsValidPruningPoint(stagingArea, newPruningPointHash) + if err != nil { + return err + } + + if !isValidPruningPoint { + return errors.Wrapf(ruleerrors.ErrUnexpectedPruningPoint, "%s is not a valid pruning point", + newPruningPointHash) + } + + arePruningPointsInValidChain, err := bp.pruningManager.ArePruningPointsInValidChain(stagingArea) + if err != nil { + return err + } + + if !arePruningPointsInValidChain { + return errors.Wrapf(ruleerrors.ErrInvalidPruningPointsChain, "pruning points do not compose a valid "+ + "chain to genesis") + } + + log.Infof("Updating consensus state manager according to the new pruning point %s", newPruningPointHash) + err = bp.consensusStateManager.ImportPruningPointUTXOSet(stagingArea, newPruningPointHash) + if err != nil { + return err + } + + err = bp.updateVirtualAcceptanceDataAfterImportingPruningPoint(stagingArea) + if err != nil { + return err + } + + return nil +} diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go new file mode 100644 index 0000000..3a84263 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go @@ -0,0 +1,733 @@ +package blockprocessor_test + +import 
( + "math" + "testing" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/dagconfig" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +func addBlock(tc testapi.TestConsensus, parentHashes []*externalapi.DomainHash, t *testing.T) *externalapi.DomainHash { + block, _, err := tc.BuildBlockWithParents(parentHashes, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + + blockHash := consensushashing.BlockHash(block) + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + return blockHash +} + +func TestValidateAndInsertImportedPruningPoint(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + // This is done to reduce the pruning depth to 6 blocks + finalityDepth := 5 + consensusConfig.FinalityDuration = time.Duration(finalityDepth) * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + consensusConfig.PruningProofM = 1 + + syncConsensuses := func(tcSyncerRef, tcSynceeRef *testapi.TestConsensus, updatePruningPointJustAfterImportingPruningPoint bool) { + tcSyncer, tcSyncee := *tcSyncerRef, *tcSynceeRef + pruningPointProof, err := tcSyncer.BuildPruningPointProof() + if err != nil { + 
t.Fatalf("BuildPruningPointProof: %+v", err) + } + + err = tcSyncee.ValidatePruningPointProof(pruningPointProof) + if err != nil { + t.Fatalf("ValidatePruningPointProof: %+v", err) + } + + stagingConfig := *consensusConfig + stagingConfig.SkipAddingGenesis = true + synceeStaging, _, err := factory.NewTestConsensus(&stagingConfig, "TestValidateAndInsertPruningPointSyncerStaging") + if err != nil { + t.Fatalf("Error setting up synceeStaging: %+v", err) + } + + err = synceeStaging.ApplyPruningPointProof(pruningPointProof) + if err != nil { + t.Fatalf("ApplyPruningPointProof: %+v", err) + } + + pruningPointHeaders, err := tcSyncer.PruningPointHeaders() + if err != nil { + t.Fatalf("PruningPointHeaders: %+v", err) + } + + arePruningPointsViolatingFinality, err := tcSyncee.ArePruningPointsViolatingFinality(pruningPointHeaders) + if err != nil { + t.Fatalf("ArePruningPointsViolatingFinality: %+v", err) + } + + if arePruningPointsViolatingFinality { + t.Fatalf("unexpected finality violation") + } + + err = synceeStaging.ImportPruningPoints(pruningPointHeaders) + if err != nil { + t.Fatalf("PruningPointHeaders: %+v", err) + } + + pruningPointAndItsAnticone, err := tcSyncer.PruningPointAndItsAnticone() + if err != nil { + t.Fatalf("PruningPointAndItsAnticone: %+v", err) + } + + for _, blockHash := range pruningPointAndItsAnticone { + block, _, err := tcSyncer.GetBlock(blockHash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + blockDAAWindowHashes, err := tcSyncer.BlockDAAWindowHashes(blockHash) + if err != nil { + t.Fatalf("BlockDAAWindowHashes: %+v", err) + } + + ghostdagDataBlockHashes, err := tcSyncer.TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash) + if err != nil { + t.Fatalf("TrustedBlockAssociatedGHOSTDAGDataBlockHashes: %+v", err) + } + + blockWithTrustedData := &externalapi.BlockWithTrustedData{ + Block: block, + DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(blockDAAWindowHashes)), + GHOSTDAGData: 
make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(ghostdagDataBlockHashes)), + } + + for i, daaBlockHash := range blockDAAWindowHashes { + trustedDataDataDAAHeader, err := tcSyncer.TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i)) + if err != nil { + t.Fatalf("TrustedDataDataDAAHeader: %+v", err) + } + blockWithTrustedData.DAAWindow = append(blockWithTrustedData.DAAWindow, trustedDataDataDAAHeader) + } + + for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes { + data, err := tcSyncer.TrustedGHOSTDAGData(ghostdagDataBlockHash) + if err != nil { + t.Fatalf("TrustedGHOSTDAGData: %+v", err) + } + blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: ghostdagDataBlockHash, + GHOSTDAGData: data, + }) + } + + err = synceeStaging.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false) + if err != nil { + t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err) + } + } + + syncerVirtualSelectedParent, err := tcSyncer.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("GetVirtualSelectedParent: %+v", err) + } + + pruningPoint, err := tcSyncer.PruningPoint() + if err != nil { + t.Fatalf("PruningPoint: %+v", err) + } + + missingHeaderHashes, _, err := tcSyncer.GetHashesBetween(pruningPoint, syncerVirtualSelectedParent, math.MaxUint64) + if err != nil { + t.Fatalf("GetHashesBetween: %+v", err) + } + + for i, blocksHash := range missingHeaderHashes { + blockInfo, err := synceeStaging.GetBlockInfo(blocksHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if blockInfo.Exists { + continue + } + + header, err := tcSyncer.GetBlockHeader(blocksHash) + if err != nil { + t.Fatalf("GetBlockHeader: %+v", err) + } + + err = synceeStaging.ValidateAndInsertBlock(&externalapi.DomainBlock{Header: header}, false) + if err != nil { + t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err) + } + } + + var fromOutpoint *externalapi.DomainOutpoint + var pruningPointUTXOs 
[]*externalapi.OutpointAndUTXOEntryPair + const step = 100_000 + for { + outpointAndUTXOEntryPairs, err := tcSyncer.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step) + if err != nil { + t.Fatalf("GetPruningPointUTXOs: %+v", err) + } + fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint + pruningPointUTXOs = append(pruningPointUTXOs, outpointAndUTXOEntryPairs...) + if len(outpointAndUTXOEntryPairs) < step { + break + } + } + + err = synceeStaging.AppendImportedPruningPointUTXOs(pruningPointUTXOs) + if err != nil { + t.Fatalf("AppendImportedPruningPointUTXOs: %+v", err) + } + + virtualSelectedParent, err := tcSyncer.GetVirtualSelectedParent() + if err != nil { + t.Fatalf("GetVirtualSelectedParent: %+v", err) + } + + // Check that ValidateAndInsertImportedPruningPoint fails for invalid pruning point + err = synceeStaging.ValidateAndInsertImportedPruningPoint(virtualSelectedParent) + if !errors.Is(err, ruleerrors.ErrUnexpectedPruningPoint) { + t.Fatalf("Unexpected error: %+v", err) + } + + err = synceeStaging.ClearImportedPruningPointData() + if err != nil { + t.Fatalf("ClearImportedPruningPointData: %+v", err) + } + err = synceeStaging.AppendImportedPruningPointUTXOs(makeFakeUTXOs()) + if err != nil { + t.Fatalf("AppendImportedPruningPointUTXOs: %+v", err) + } + + // Check that ValidateAndInsertImportedPruningPoint fails if the UTXO commitment doesn't fit the provided UTXO set. + err = synceeStaging.ValidateAndInsertImportedPruningPoint(pruningPoint) + if !errors.Is(err, ruleerrors.ErrBadPruningPointUTXOSet) { + t.Fatalf("Unexpected error: %+v", err) + } + + err = synceeStaging.ClearImportedPruningPointData() + if err != nil { + t.Fatalf("ClearImportedPruningPointData: %+v", err) + } + err = synceeStaging.AppendImportedPruningPointUTXOs(pruningPointUTXOs) + if err != nil { + t.Fatalf("AppendImportedPruningPointUTXOs: %+v", err) + } + + // Check that ValidateAndInsertImportedPruningPoint works given the right arguments. 
+ err = synceeStaging.ValidateAndInsertImportedPruningPoint(pruningPoint) + if err != nil { + t.Fatalf("ValidateAndInsertImportedPruningPoint: %+v", err) + } + + if updatePruningPointJustAfterImportingPruningPoint { + err = synceeStaging.UpdatePruningPointByVirtual() + if err != nil { + t.Fatal(err) + } + } + + emptyCoinbase := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + + // Check that we can build a block just after importing the pruning point. + _, err = synceeStaging.BuildBlock(emptyCoinbase, nil) + if err != nil { + t.Fatalf("BuildBlock: %+v", err) + } + + // Sync block bodies + headersSelectedTip, err := synceeStaging.GetHeadersSelectedTip() + if err != nil { + t.Fatalf("GetHeadersSelectedTip: %+v", err) + } + + missingBlockHashes, err := synceeStaging.GetMissingBlockBodyHashes(headersSelectedTip) + if err != nil { + t.Fatalf("GetMissingBlockBodyHashes: %+v", err) + } + + for _, blocksHash := range missingBlockHashes { + block, _, err := tcSyncer.GetBlock(blocksHash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + err = synceeStaging.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + } + + synceeTips, err := synceeStaging.Tips() + if err != nil { + t.Fatalf("Tips: %+v", err) + } + + syncerTips, err := tcSyncer.Tips() + if err != nil { + t.Fatalf("Tips: %+v", err) + } + + if !externalapi.HashesEqual(synceeTips, syncerTips) { + t.Fatalf("Syncee's tips are %s while syncer's are %s", synceeTips, syncerTips) + } + + tipHash := addBlock(tcSyncer, syncerTips, t) + tip, _, err := tcSyncer.GetBlock(tipHash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + err = synceeStaging.ValidateAndInsertBlock(tip, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + blockInfo, err := synceeStaging.GetBlockInfo(tipHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if 
blockInfo.BlockStatus != externalapi.StatusUTXOValid { + t.Fatalf("Tip didn't pass UTXO verification") + } + + synceePruningPoint, err := synceeStaging.PruningPoint() + if err != nil { + t.Fatalf("PruningPoint: %+v", err) + } + + if !synceePruningPoint.Equal(pruningPoint) { + t.Fatalf("The syncee pruning point has not changed as expected") + } + + *tcSynceeRef = synceeStaging + } + + tcSyncer, teardownSyncer, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncer") + if err != nil { + t.Fatalf("Error setting up tcSyncer: %+v", err) + } + defer teardownSyncer(false) + + tcSyncee1, teardownSyncee1, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncee1") + if err != nil { + t.Fatalf("Error setting up tcSyncee1: %+v", err) + } + defer teardownSyncee1(false) + + const numSharedBlocks = 2 + tipHash := consensusConfig.GenesisHash + for i := 0; i < numSharedBlocks; i++ { + tipHash = addBlock(tcSyncer, []*externalapi.DomainHash{tipHash}, t) + block, _, err := tcSyncer.GetBlock(tipHash) + if err != nil { + t.Fatalf("GetBlock: %+v", err) + } + + err = tcSyncee1.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + } + + // Add two side blocks to syncee + tipHashSyncee := tipHash + for i := 0; i < 2; i++ { + tipHashSyncee = addBlock(tcSyncee1, []*externalapi.DomainHash{tipHashSyncee}, t) + } + + for i := 0; i < finalityDepth-numSharedBlocks-2; i++ { + tipHash = addBlock(tcSyncer, []*externalapi.DomainHash{tipHash}, t) + } + + // Add block in the anticone of the pruning point to test such situation + pruningPointAnticoneBlock := addBlock(tcSyncer, []*externalapi.DomainHash{tipHash}, t) + tipHash = addBlock(tcSyncer, []*externalapi.DomainHash{tipHash}, t) + nextPruningPoint := addBlock(tcSyncer, []*externalapi.DomainHash{tipHash}, t) + + tipHash = addBlock(tcSyncer, []*externalapi.DomainHash{pruningPointAnticoneBlock, nextPruningPoint}, t) + + // Add 
blocks until the pruning point changes + for { + tipHash = addBlock(tcSyncer, []*externalapi.DomainHash{tipHash}, t) + + pruningPoint, err := tcSyncer.PruningPoint() + if err != nil { + t.Fatalf("PruningPoint: %+v", err) + } + + if !pruningPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + + pruningPoint, err := tcSyncer.PruningPoint() + if err != nil { + t.Fatalf("PruningPoint: %+v", err) + } + + if !pruningPoint.Equal(nextPruningPoint) { + t.Fatalf("Unexpected pruning point %s", pruningPoint) + } + + tcSyncee1Ref := &tcSyncee1 + syncConsensuses(&tcSyncer, tcSyncee1Ref, false) + + // Test a situation where a consensus with pruned headers syncs another fresh consensus. + tcSyncee2, teardownSyncee2, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncee2") + if err != nil { + t.Fatalf("Error setting up tcSyncee2: %+v", err) + } + defer teardownSyncee2(false) + + syncConsensuses(tcSyncee1Ref, &tcSyncee2, false) + + // Check the regular sync but try to update the pruning point after the pruning point was imported. It tests a situation where the node + // was restarted before the virtual was resolved and then it calls UpdatePruningPointByVirtual on init. 
+ tcSyncee3, teardownSyncee3, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncee3") + if err != nil { + t.Fatalf("Error setting up tcSyncee3: %+v", err) + } + defer teardownSyncee3(false) + + syncConsensuses(&tcSyncer, &tcSyncee3, true) + }) +} + +func makeFakeUTXOs() []*externalapi.OutpointAndUTXOEntryPair { + return []*externalapi.OutpointAndUTXOEntryPair{ + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: 0, + }, + UTXOEntry: utxo.NewUTXOEntry( + 0, + &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + false, + 0, + ), + }, + { + Outpoint: &externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: 1, + }, + UTXOEntry: utxo.NewUTXOEntry( + 2, + &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + true, + 3, + ), + }, + } +} + +func TestGetPruningPointUTXOs(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + // This is done to reduce the pruning depth to 8 blocks + finalityDepth := 4 + consensusConfig.FinalityDuration = time.Duration(finalityDepth) * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + + consensusConfig.BlockCoinbaseMaturity = 0 + + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestGetPruningPointUTXOs") + if err != nil { + t.Fatalf("Error setting up testConsensus: %+v", err) + } + defer teardown(false) + + // Create a block that accepts the genesis coinbase so that we won't have script problems down the line + emptyCoinbase := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + blockAboveGenesis, err := testConsensus.BuildBlock(emptyCoinbase, nil) + if err != nil { + t.Fatalf("Error building block above genesis: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(blockAboveGenesis, true) + if err 
!= nil { + t.Fatalf("Error validating and inserting block above genesis: %+v", err) + } + + // Create a block whose coinbase we could spend + scriptPublicKey, redeemScript := testutils.OpTrueScript() + coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey} + blockWithSpendableCoinbase, err := testConsensus.BuildBlock(coinbaseData, nil) + if err != nil { + t.Fatalf("Error building block with spendable coinbase: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(blockWithSpendableCoinbase, true) + if err != nil { + t.Fatalf("Error validating and inserting block with spendable coinbase: %+v", err) + } + + // Create a transaction that adds a lot of UTXOs to the UTXO set + transactionToSpend := blockWithSpendableCoinbase.Transactions[0] + signatureScript, err := txscript.PayToScriptHashSignatureScript(redeemScript, nil) + if err != nil { + t.Fatalf("Error creating signature script: %+v", err) + } + input := &externalapi.DomainTransactionInput{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(transactionToSpend), + Index: 0, + }, + SignatureScript: signatureScript, + Sequence: constants.MaxTxInSequenceNum, + } + + outputs := make([]*externalapi.DomainTransactionOutput, 900) + for i := 0; i < len(outputs); i++ { + outputs[i] = &externalapi.DomainTransactionOutput{ + ScriptPublicKey: scriptPublicKey, + Value: 10000, + } + } + spendingTransaction := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: outputs, + Payload: []byte{}, + } + + // Create a block with that includes the above transaction + includingBlock, err := testConsensus.BuildBlock(emptyCoinbase, []*externalapi.DomainTransaction{spendingTransaction}) + if err != nil { + t.Fatalf("Error building including block: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(includingBlock, true) + if err != nil { + t.Fatalf("Error validating 
and inserting including block: %+v", err) + } + + // Add enough blocks to move the pruning point + for { + block, err := testConsensus.BuildBlock(emptyCoinbase, nil) + if err != nil { + t.Fatalf("Error building block: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("Error validating and inserting block: %+v", err) + } + + pruningPoint, err := testConsensus.PruningPoint() + if err != nil { + t.Fatalf("Error getting the pruning point: %+v", err) + } + if !pruningPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + pruningPoint, err := testConsensus.PruningPoint() + if err != nil { + t.Fatalf("Error getting the pruning point: %+v", err) + } + + pruningRelations, err := testConsensus.BlockRelationStore().BlockRelation( + testConsensus.DatabaseContext(), model.NewStagingArea(), pruningPoint) + if err != nil { + t.Fatalf("BlockRelation(): %+v", err) + } + + if len(pruningRelations.Parents) != 1 || !pruningRelations.Parents[0].Equal(consensushashing.BlockHash(includingBlock)) { + t.Fatalf("includingBlock should be pruning point's only parent") + } + + // Get pruning point UTXOs in a loop + var allOutpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair + const step = 100_000 + var fromOutpoint *externalapi.DomainOutpoint + for { + outpointAndUTXOEntryPairs, err := testConsensus.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step) + if err != nil { + t.Fatalf("Error getting pruning point UTXOs: %+v", err) + } + allOutpointAndUTXOEntryPairs = append(allOutpointAndUTXOEntryPairs, outpointAndUTXOEntryPairs...)
+ fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint + + if len(outpointAndUTXOEntryPairs) < step { + break + } + } + + expected := len(outputs) + 1 + // Make sure the length of the UTXOs is exactly spendingTransaction.Outputs + 1 coinbase + // output (includingBlock's coinbase) + if len(allOutpointAndUTXOEntryPairs) != expected { + t.Fatalf("Returned an unexpected amount of UTXOs. "+ + "Want: %d, got: %d", expected, len(allOutpointAndUTXOEntryPairs)) + } + + // Make sure all spendingTransaction.Outputs are in the returned UTXOs + spendingTransactionID := consensushashing.TransactionID(spendingTransaction) + for i := range outputs { + found := false + for _, outpointAndUTXOEntryPair := range allOutpointAndUTXOEntryPairs { + outpoint := outpointAndUTXOEntryPair.Outpoint + if outpoint.TransactionID == *spendingTransactionID && outpoint.Index == uint32(i) { + found = true + break + } + } + if !found { + t.Fatalf("Outpoint %s:%d not found amongst the returned UTXOs", spendingTransactionID, i) + } + } + }) +} + +func BenchmarkGetPruningPointUTXOs(b *testing.B) { + consensusConfig := consensus.Config{Params: dagconfig.DevnetParams} + + // This is done to reduce the pruning depth to 200 blocks + finalityDepth := 100 + consensusConfig.FinalityDuration = time.Duration(finalityDepth) * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + + consensusConfig.SkipProofOfWork = true + consensusConfig.BlockCoinbaseMaturity = 0 + + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(&consensusConfig, "TestGetPruningPointUTXOs") + if err != nil { + b.Fatalf("Error setting up testConsensus: %+v", err) + } + defer teardown(false) + + // Create a block whose coinbase we could spend + scriptPublicKey, redeemScript := testutils.OpTrueScript() + coinbaseData := &externalapi.DomainCoinbaseData{ScriptPublicKey: scriptPublicKey} + blockWithSpendableCoinbase, err := testConsensus.BuildBlock(coinbaseData, nil) + 
if err != nil { + b.Fatalf("Error building block with spendable coinbase: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(blockWithSpendableCoinbase, true) + if err != nil { + b.Fatalf("Error validating and inserting block with spendable coinbase: %+v", err) + } + + addBlockWithLotsOfOutputs := func(b *testing.B, transactionToSpend *externalapi.DomainTransaction) *externalapi.DomainBlock { + // Create a transaction that adds a lot of UTXOs to the UTXO set + signatureScript, err := txscript.PayToScriptHashSignatureScript(redeemScript, nil) + if err != nil { + b.Fatalf("Error creating signature script: %+v", err) + } + input := &externalapi.DomainTransactionInput{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(transactionToSpend), + Index: 0, + }, + SignatureScript: signatureScript, + Sequence: constants.MaxTxInSequenceNum, + } + outputs := make([]*externalapi.DomainTransactionOutput, 900) + for i := 0; i < len(outputs); i++ { + outputs[i] = &externalapi.DomainTransactionOutput{ + ScriptPublicKey: scriptPublicKey, + Value: 10000, + } + } + transaction := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: outputs, + Payload: []byte{}, + } + + // Create a block that includes the above transaction + block, err := testConsensus.BuildBlock(coinbaseData, []*externalapi.DomainTransaction{transaction}) + if err != nil { + b.Fatalf("Error building block: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(block, true) + if err != nil { + b.Fatalf("Error validating and inserting block: %+v", err) + } + + return block + } + + // Add finalityDepth blocks, each containing lots of outputs + tip := blockWithSpendableCoinbase + for i := 0; i < finalityDepth; i++ { + tip = addBlockWithLotsOfOutputs(b, tip.Transactions[0]) + } + + // Add enough blocks to move the pruning point + for { + block, err := 
testConsensus.BuildBlock(coinbaseData, nil) + if err != nil { + b.Fatalf("Error building block: %+v", err) + } + err = testConsensus.ValidateAndInsertBlock(block, true) + if err != nil { + b.Fatalf("Error validating and inserting block: %+v", err) + } + + pruningPoint, err := testConsensus.PruningPoint() + if err != nil { + b.Fatalf("Error getting the pruning point: %+v", err) + } + if !pruningPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + pruningPoint, err := testConsensus.PruningPoint() + if err != nil { + b.Fatalf("Error getting the pruning point: %+v", err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Get pruning point UTXOs in a loop + step := 100 + var fromOutpoint *externalapi.DomainOutpoint + for { + outpointAndUTXOEntryPairs, err := testConsensus.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step) + if err != nil { + b.Fatalf("Error getting pruning point UTXOs: %+v", err) + } + fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint + + if len(outpointAndUTXOEntryPairs) < step { + break + } + } + } +} diff --git a/domain/consensus/processes/blockprocessor/validate_block.go b/domain/consensus/processes/blockprocessor/validate_block.go new file mode 100644 index 0000000..9d10965 --- /dev/null +++ b/domain/consensus/processes/blockprocessor/validate_block.go @@ -0,0 +1,85 @@ +package blockprocessor + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/util/staging" +) + +func (bp *blockProcessor) validateBlock(stagingArea *model.StagingArea, block *externalapi.DomainBlock, isBlockWithTrustedData bool) error { + blockHash := consensushashing.HeaderHash(block.Header) + log.Debugf("Validating block %s", 
blockHash) + + // Since genesis has a lot of special cases validation rules, we make sure it's not added unintentionally + // on uninitialized node. + if blockHash.Equal(bp.genesisHash) && bp.blockStore.Count(stagingArea) != 0 { + return errors.Wrapf(ruleerrors.ErrGenesisOnInitializedConsensus, "Cannot add genesis to an initialized consensus") + } + + err := bp.checkBlockStatus(stagingArea, block) + if err != nil { + return err + } + + hasValidatedHeader, err := bp.hasValidatedHeader(stagingArea, blockHash) + if err != nil { + return err + } + + if !hasValidatedHeader { + log.Debugf("Staging block %s header", blockHash) + bp.blockHeaderStore.Stage(stagingArea, blockHash, block.Header) + } else { + log.Debugf("Block %s header is already known, so no need to stage it", blockHash) + } + + // If any validation until (included) proof-of-work fails, simply + // return an error without writing anything in the database. + // This is to prevent spamming attacks. + err = bp.validatePreProofOfWork(stagingArea, block) + if err != nil { + return err + } + + if !hasValidatedHeader { + err = bp.blockValidator.ValidatePruningPointViolationAndProofOfWorkAndDifficulty(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + } + + // If in-context validations fail, discard all changes and store the + // block with StatusInvalid. + err = bp.validatePostProofOfWork(stagingArea, block, isBlockWithTrustedData) + if err != nil { + if errors.As(err, &ruleerrors.RuleError{}) { + // We mark invalid blocks with status externalapi.StatusInvalid except in the + // case of the following errors: + // ErrMissingParents - If we got ErrMissingParents the block shouldn't be + // considered as invalid because it could be added later on when its + // parents are present. + // ErrBadMerkleRoot - if we get ErrBadMerkleRoot we shouldn't mark the + // block as invalid because later on we can get the block with + // transactions that fits the merkle root. 
+ // ErrPrunedBlock - ErrPrunedBlock is an error that rejects a block body and + // not the block as a whole, so we shouldn't mark it as invalid. + if !errors.As(err, &ruleerrors.ErrMissingParents{}) && + !errors.Is(err, ruleerrors.ErrBadMerkleRoot) && + !errors.Is(err, ruleerrors.ErrPrunedBlock) { + // Use a new stagingArea so we save only the block status + stagingArea := model.NewStagingArea() + hash := consensushashing.BlockHash(block) + bp.blockStatusStore.Stage(stagingArea, hash, externalapi.StatusInvalid) + commitErr := staging.CommitAllChanges(bp.databaseContext, stagingArea) + if commitErr != nil { + return commitErr + } + } + } + return err + } + return nil +} diff --git a/domain/consensus/processes/blockvalidator/block_body_in_context.go b/domain/consensus/processes/blockvalidator/block_body_in_context.go new file mode 100644 index 0000000..9593b32 --- /dev/null +++ b/domain/consensus/processes/blockvalidator/block_body_in_context.go @@ -0,0 +1,201 @@ +package blockvalidator + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/virtual" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// ValidateBodyInContext validates block bodies in the context of the current +// consensus state +func (v *blockValidator) ValidateBodyInContext(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateBodyInContext") + defer onEnd() + + if !isBlockWithTrustedData { + err := v.checkBlockIsNotPruned(stagingArea, blockHash) + if err != nil { + return err + } + } + + err := v.checkBlockTransactions(stagingArea, blockHash) + if 
err != nil {
+		return err
+	}
+
+	if !isBlockWithTrustedData {
+		err := v.checkParentBlockBodiesExist(stagingArea, blockHash)
+		if err != nil {
+			return err
+		}
+
+		err = v.checkCoinbaseSubsidy(stagingArea, blockHash)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// checkBlockIsNotPruned Checks we don't add block bodies to pruned blocks
+// (a block that already has a validated header and lies in the past of a
+// current tip must have been pruned, so its body is rejected with
+// ErrPrunedBlock).
+func (v *blockValidator) checkBlockIsNotPruned(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
+	hasValidatedHeader, err := v.hasValidatedHeader(stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+
+	// If we don't add block body to a header only block it can't be in the past
+	// of the tips, because it'll be a new tip.
+	if !hasValidatedHeader {
+		return nil
+	}
+
+	tips, err := v.consensusStateStore.Tips(stagingArea, v.databaseContext)
+	if err != nil {
+		return err
+	}
+
+	isAncestorOfSomeTips, err := v.dagTopologyManagers[0].IsAncestorOfAny(stagingArea, blockHash, tips)
+	if err != nil {
+		return err
+	}
+
+	// A header only block in the past of one of the tips has to be pruned
+	if isAncestorOfSomeTips {
+		return errors.Wrapf(ruleerrors.ErrPrunedBlock, "cannot add block body to a pruned block %s", blockHash)
+	}
+
+	return nil
+}
+
+// checkParentBlockBodiesExist verifies that every parent of the block has a
+// known body. Parents in the past of the pruning point are exempt (their
+// bodies will never be used). All missing parents are collected and reported
+// together via a single ErrMissingParents.
+func (v *blockValidator) checkParentBlockBodiesExist(
+	stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
+
+	missingParentHashes := []*externalapi.DomainHash{}
+	parents, err := v.dagTopologyManagers[0].Parents(stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+
+	if virtual.ContainsOnlyVirtualGenesis(parents) {
+		return nil
+	}
+
+	for _, parent := range parents {
+		hasBlock, err := v.blockStore.HasBlock(v.databaseContext, stagingArea, parent)
+		if err != nil {
+			return err
+		}
+
+		if !hasBlock {
+			pruningPoint, err := v.pruningStore.PruningPoint(v.databaseContext, stagingArea)
+			if err != nil {
+				return err
+			}
+
+			isInPastOfPruningPoint, err := v.dagTopologyManagers[0].IsAncestorOf(stagingArea, parent, pruningPoint)
+			if err != nil {
+				return err
+			}
+
+			// If a block parent is in the past of the pruning point
+			// it means its body will never be used, so it's ok if
+			// it's missing.
+			// This will usually happen during IBD when getting the blocks
+			// in the pruning point anticone.
+			if isInPastOfPruningPoint {
+				log.Debugf("Block %s parent %s is missing a body, but is in the past of the pruning point",
+					blockHash, parent)
+				continue
+			}
+
+			log.Debugf("Block %s parent %s is missing a body", blockHash, parent)
+
+			missingParentHashes = append(missingParentHashes, parent)
+		}
+	}
+
+	if len(missingParentHashes) > 0 {
+		return ruleerrors.NewErrMissingParents(missingParentHashes)
+	}
+
+	return nil
+}
+
+// checkBlockTransactions validates every transaction of the block in the
+// context of this block (lock-time/finality against the block's past median
+// time), while ignoring UTXO state.
+func (v *blockValidator) checkBlockTransactions(
+	stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
+
+	block, err := v.blockStore.Block(v.databaseContext, stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+
+	// Ensure all transactions in the block are finalized.
+	pastMedianTime, err := v.pastMedianTimeManager.PastMedianTime(stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+	for _, tx := range block.Transactions {
+		if err = v.transactionValidator.ValidateTransactionInContextIgnoringUTXO(stagingArea, tx, blockHash, pastMedianTime); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// checkCoinbaseSubsidy compares the subsidy declared in the block's coinbase
+// against the expected subsidy, but skips the check when any parent is not in
+// the future of the pruning point (see the pruning-proof note below).
+func (v *blockValidator) checkCoinbaseSubsidy(
+	stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
+
+	pruningPoint, err := v.pruningStore.PruningPoint(v.databaseContext, stagingArea)
+	if err != nil {
+		return err
+	}
+
+	parents, err := v.dagTopologyManagers[0].Parents(stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+
+	for _, parent := range parents {
+		isInFutureOfPruningPoint, err := v.dagTopologyManagers[0].IsAncestorOf(stagingArea, pruningPoint, parent)
+		if err != nil {
+			return err
+		}
+
+		// The pruning proof ( https://github.com/spectre-project/docs/blob/main/Reference/prunality/Prunality.pdf ) concludes
+		// that it's impossible for a block to be merged if it was created in the anticone of the pruning point that was
+		// present at the time of the block creation. So if such situation happens we can be sure that it happens during
+		// IBD and that this block has at least pruningDepth-finalityInterval confirmations.
+ if !isInFutureOfPruningPoint { + return nil + } + } + + block, err := v.blockStore.Block(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + expectedSubsidy, err := v.coinbaseManager.CalcBlockSubsidy(stagingArea, blockHash) + if err != nil { + return err + } + + _, _, subsidy, err := v.coinbaseManager.ExtractCoinbaseDataBlueScoreAndSubsidy(block.Transactions[transactionhelper.CoinbaseTransactionIndex]) + if err != nil { + return err + } + + if expectedSubsidy != subsidy { + return errors.Wrapf(ruleerrors.ErrWrongCoinbaseSubsidy, "the subsidy specified on the coinbase of %s is "+ + "wrong: expected %d but got %d", blockHash, expectedSubsidy, subsidy) + } + + return nil +} diff --git a/domain/consensus/processes/blockvalidator/block_body_in_context_test.go b/domain/consensus/processes/blockvalidator/block_body_in_context_test.go new file mode 100644 index 0000000..90fa04a --- /dev/null +++ b/domain/consensus/processes/blockvalidator/block_body_in_context_test.go @@ -0,0 +1,240 @@ +package blockvalidator_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestCheckBlockIsNotPruned(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + // This is done to reduce the pruning depth to 6 blocks + consensusConfig.FinalityDuration = 2 * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + + // When pruning, blocks in the DAA window of the pruning point and its + // anticone are kept for 
the sake of IBD. Setting this value to zero + // forces all DAA windows to be empty, and as such, no blocks are kept + // below the pruning point + consensusConfig.DifficultyAdjustmentWindowSize = 0 + + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockIsNotPruned") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Add blocks until the pruning point changes + tipHash := consensusConfig.GenesisHash + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + beforePruningBlock, _, err := tc.GetBlock(tipHash) + if err != nil { + t.Fatalf("beforePruningBlock: %+v", err) + } + + for { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + pruningPoint, err := tc.PruningPoint() + if err != nil { + t.Fatalf("PruningPoint: %+v", err) + } + + if !pruningPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + + err = tc.ValidateAndInsertBlock(beforePruningBlock, true) + if !errors.Is(err, ruleerrors.ErrPrunedBlock) { + t.Fatalf("Unexpected error: %+v", err) + } + + beforePruningBlockBlockStatus, err := tc.BlockStatusStore().Get(tc.DatabaseContext(), model.NewStagingArea(), + consensushashing.BlockHash(beforePruningBlock)) + if err != nil { + t.Fatalf("BlockStatusStore().Get: %+v", err) + } + + // Check that the block still has header only status although it got rejected. 
+ if beforePruningBlockBlockStatus != externalapi.StatusHeaderOnly { + t.Fatalf("Unexpected status %s", beforePruningBlockBlockStatus) + } + }) +} + +func TestCheckParentBlockBodiesExist(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + // This is done to reduce the pruning depth to 6 blocks + consensusConfig.FinalityDuration = 2 * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentBlockBodiesExist") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + headerHash, _, err := tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{consensusConfig.GenesisHash}) + if err != nil { + t.Fatalf("AddUTXOInvalidHeader: %+v", err) + } + + _, _, err = tc.AddUTXOInvalidBlock([]*externalapi.DomainHash{headerHash}) + errMissingParents := &ruleerrors.ErrMissingParents{} + if !errors.As(err, errMissingParents) { + t.Fatalf("Unexpected error: %+v", err) + } + + if !externalapi.HashesEqual(errMissingParents.MissingParentHashes, []*externalapi.DomainHash{headerHash}) { + t.Fatalf("unexpected missing parents %s", errMissingParents.MissingParentHashes) + } + + // Add blocks until the pruning point changes + tipHash := consensusConfig.GenesisHash + anticonePruningBlock, err := tc.BuildUTXOInvalidBlock([]*externalapi.DomainHash{tipHash}) + if err != nil { + t.Fatalf("BuildUTXOInvalidBlock: %+v", err) + } + + // Add only the header of anticonePruningBlock + err = tc.ValidateAndInsertBlock(&externalapi.DomainBlock{ + Header: anticonePruningBlock.Header, + Transactions: nil, + }, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + for { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddUTXOInvalidHeader: %+v", err) + } + + pruningPoint, err := tc.PruningPoint() + if err != nil { + 
t.Fatalf("PruningPoint: %+v", err) + } + + if !pruningPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + + // Add anticonePruningBlock's body and check that it's valid to point to + // a header only block in the past of the pruning point. + err = tc.ValidateAndInsertBlock(anticonePruningBlock, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + }) +} + +func TestIsFinalizedTransaction(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestIsFinalizedTransaction") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Build a small DAG + outerParents := []*externalapi.DomainHash{consensusConfig.GenesisHash} + for i := 0; i < 5; i++ { + var innerParents []*externalapi.DomainHash + for i := 0; i < 4; i++ { + blockHash, _, err := tc.AddBlock(outerParents, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + innerParents = append(innerParents, blockHash) + } + outerParents = []*externalapi.DomainHash{} + for i := 0; i < 3; i++ { + blockHash, _, err := tc.AddBlock(innerParents, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + outerParents = append(outerParents, blockHash) + } + } + + block, err := tc.BuildBlock( + &externalapi.DomainCoinbaseData{&externalapi.ScriptPublicKey{}, nil}, nil) + if err != nil { + t.Fatalf("Error getting block: %+v", err) + } + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("Error Inserting block: %+v", err) + } + blockHash := consensushashing.BlockHash(block) + blockDAAScore, err := tc.DAABlocksStore().DAAScore(tc.DatabaseContext(), stagingArea, blockHash) + if err != nil { + t.Fatalf("Error getting block DAA score : %+v", err) + } + blockParents := 
block.Header.DirectParents() + parentToSpend, _, err := tc.GetBlock(blockParents[0]) + if err != nil { + t.Fatalf("Error getting block1: %+v", err) + } + + checkForLockTimeAndSequence := func(lockTime, sequence uint64, shouldPass bool) { + tx, err := testutils.CreateTransaction(parentToSpend.Transactions[0], 1) + if err != nil { + t.Fatalf("Error creating tx: %+v", err) + } + + tx.LockTime = lockTime + tx.Inputs[0].Sequence = sequence + + _, _, err = tc.AddBlock(blockParents, nil, []*externalapi.DomainTransaction{tx}) + if (shouldPass && err != nil) || (!shouldPass && !errors.Is(err, ruleerrors.ErrUnfinalizedTx)) { + t.Fatalf("shouldPass: %t Unexpected error: %+v", shouldPass, err) + } + } + + // Check that the same DAAScore or higher fails, but lower passes. + checkForLockTimeAndSequence(blockDAAScore+1, 0, false) + checkForLockTimeAndSequence(blockDAAScore, 0, false) + checkForLockTimeAndSequence(blockDAAScore-1, 0, true) + + pastMedianTime, err := tc.PastMedianTimeManager().PastMedianTime(stagingArea, consensushashing.BlockHash(block)) + if err != nil { + t.Fatalf("PastMedianTime: %+v", err) + } + // Check that the same pastMedianTime or higher fails, but lower passes. + checkForLockTimeAndSequence(uint64(pastMedianTime)+1, 0, false) + checkForLockTimeAndSequence(uint64(pastMedianTime), 0, false) + checkForLockTimeAndSequence(uint64(pastMedianTime)-1, 0, true) + + // We check that if the transaction is marked as finalized it'll pass for any lock time. 
+		checkForLockTimeAndSequence(uint64(pastMedianTime), constants.MaxTxInSequenceNum, true)
+		checkForLockTimeAndSequence(2, constants.MaxTxInSequenceNum, true)
+	})
+}
diff --git a/domain/consensus/processes/blockvalidator/block_body_in_isolation.go b/domain/consensus/processes/blockvalidator/block_body_in_isolation.go
new file mode 100644
index 0000000..b4a1f0c
--- /dev/null
+++ b/domain/consensus/processes/blockvalidator/block_body_in_isolation.go
@@ -0,0 +1,248 @@
+package blockvalidator
+
+import (
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/ruleerrors"
+	"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing"
+	"github.com/spectre-project/spectred/domain/consensus/utils/merkle"
+	"github.com/spectre-project/spectred/domain/consensus/utils/subnetworks"
+	"github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper"
+	"github.com/spectre-project/spectred/infrastructure/logger"
+)
+
+// ValidateBodyInIsolation validates block bodies in isolation from the current
+// consensus state
+func (v *blockValidator) ValidateBodyInIsolation(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
+	// Fixed: the measurement label previously said "ValidateBodyInContext",
+	// a copy-paste from block_body_in_context.go that mislabeled this
+	// function's timing logs.
+	onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateBodyInIsolation")
+	defer onEnd()
+
+	block, err := v.blockStore.Block(v.databaseContext, stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkNoPrefilledInputs(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockHashMerkleRoot(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockContainsAtLeastOneTransaction(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkFirstBlockTransactionIsCoinbase(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockContainsOnlyOneCoinbase(block)
+	if err != nil {
+		return err
+	}
+
+	err =
v.checkCoinbaseBlueScore(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockTransactionOrder(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkTransactionsInIsolation(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockMass(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockDuplicateTransactions(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockDoubleSpends(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.checkBlockHasNoChainedTransactions(block)
+	if err != nil {
+		return err
+	}
+
+	err = v.validateGasLimit(block)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkCoinbaseBlueScore verifies that the blue score encoded in the coinbase
+// transaction's payload matches the blue score declared in the block header.
+func (v *blockValidator) checkCoinbaseBlueScore(block *externalapi.DomainBlock) error {
+	coinbaseBlueScore, _, _, err := v.coinbaseManager.ExtractCoinbaseDataBlueScoreAndSubsidy(block.Transactions[transactionhelper.CoinbaseTransactionIndex])
+	if err != nil {
+		return err
+	}
+	if coinbaseBlueScore != block.Header.BlueScore() {
+		return errors.Wrapf(ruleerrors.ErrUnexpectedCoinbaseBlueScore, "block blue score of %d is not the expected "+
+			"value of %d", coinbaseBlueScore, block.Header.BlueScore())
+	}
+	return nil
+}
+
+// checkBlockContainsAtLeastOneTransaction rejects blocks with an empty
+// transaction list (every valid body carries at least its coinbase).
+func (v *blockValidator) checkBlockContainsAtLeastOneTransaction(block *externalapi.DomainBlock) error {
+	if len(block.Transactions) == 0 {
+		return errors.Wrapf(ruleerrors.ErrNoTransactions, "block does not contain "+
+			"any transactions")
+	}
+	return nil
+}
+
+// checkFirstBlockTransactionIsCoinbase verifies that the transaction at
+// CoinbaseTransactionIndex is a coinbase.
+func (v *blockValidator) checkFirstBlockTransactionIsCoinbase(block *externalapi.DomainBlock) error {
+	if !transactionhelper.IsCoinBase(block.Transactions[transactionhelper.CoinbaseTransactionIndex]) {
+		return errors.Wrapf(ruleerrors.ErrFirstTxNotCoinbase, "first transaction in "+
+			"block is not a coinbase")
+	}
+	return nil
+}
+
+// checkBlockContainsOnlyOneCoinbase rejects blocks that carry a second
+// coinbase transaction anywhere after the mandatory first one.
+func (v *blockValidator) checkBlockContainsOnlyOneCoinbase(block *externalapi.DomainBlock) error {
+	for i, tx := range block.Transactions[transactionhelper.CoinbaseTransactionIndex+1:] {
+		if transactionhelper.IsCoinBase(tx) {
+			return errors.Wrapf(ruleerrors.ErrMultipleCoinbases, "block contains second coinbase at "+
+				"index %d", i+transactionhelper.CoinbaseTransactionIndex+1)
+		}
+	}
+	return nil
+}
+
+// checkBlockTransactionOrder verifies that non-coinbase transactions appear
+// sorted by subnetwork ID.
+func (v *blockValidator) checkBlockTransactionOrder(block *externalapi.DomainBlock) error {
+	for i, tx := range block.Transactions[transactionhelper.CoinbaseTransactionIndex+1:] {
+		// i == 0 would compare against the coinbase at the start of the
+		// slice window, which is exempt from the ordering rule.
+		if i != 0 && subnetworks.Less(tx.SubnetworkID, block.Transactions[i].SubnetworkID) {
+			return errors.Wrapf(ruleerrors.ErrTransactionsNotSorted, "transactions must be sorted by subnetwork")
+		}
+	}
+	return nil
+}
+
+// checkTransactionsInIsolation runs per-transaction isolation checks (no
+// consensus-state access) against the block header's DAA score.
+func (v *blockValidator) checkTransactionsInIsolation(block *externalapi.DomainBlock) error {
+	for _, tx := range block.Transactions {
+		err := v.transactionValidator.ValidateTransactionInIsolation(tx, block.Header.DAAScore())
+		if err != nil {
+			return errors.Wrapf(err, "transaction %s failed isolation "+
+				"check", consensushashing.TransactionID(tx))
+		}
+	}
+
+	return nil
+}
+
+// checkBlockHashMerkleRoot recomputes the merkle root over the block's
+// transactions and compares it to the root committed in the header.
+func (v *blockValidator) checkBlockHashMerkleRoot(block *externalapi.DomainBlock) error {
+	calculatedHashMerkleRoot := merkle.CalculateHashMerkleRoot(block.Transactions)
+	if !block.Header.HashMerkleRoot().Equal(calculatedHashMerkleRoot) {
+		return errors.Wrapf(ruleerrors.ErrBadMerkleRoot, "block hash merkle root is invalid - block "+
+			"header indicates %s, but calculated value is %s",
+			block.Header.HashMerkleRoot(), calculatedHashMerkleRoot)
+	}
+	return nil
+}
+
+// checkBlockDuplicateTransactions rejects blocks that contain the same
+// transaction ID more than once.
+func (v *blockValidator) checkBlockDuplicateTransactions(block *externalapi.DomainBlock) error {
+	existingTxIDs := make(map[externalapi.DomainTransactionID]struct{})
+	for _, tx := range block.Transactions {
+		id := consensushashing.TransactionID(tx)
+		if _, exists := existingTxIDs[*id]; exists {
+			return errors.Wrapf(ruleerrors.ErrDuplicateTx, "block contains duplicate "+
+				"transaction %s", id)
+		}
+		existingTxIDs[*id] = struct{}{}
+	}
+	return nil
+}
+
+// checkBlockDoubleSpends rejects blocks in which two transactions spend the
+// same outpoint.
+func (v *blockValidator) checkBlockDoubleSpends(block *externalapi.DomainBlock) error {
+	usedOutpoints := make(map[externalapi.DomainOutpoint]*externalapi.DomainTransactionID)
+	for _, tx := range block.Transactions {
+		for _, input := range tx.Inputs {
+			txID := consensushashing.TransactionID(tx)
+			if spendingTxID, exists := usedOutpoints[input.PreviousOutpoint]; exists {
+				return errors.Wrapf(ruleerrors.ErrDoubleSpendInSameBlock, "transaction %s spends "+
+					"outpoint %s that was already spent by "+
+					"transaction %s in this block", txID,
+					input.PreviousOutpoint, spendingTxID)
+			}
+			usedOutpoints[input.PreviousOutpoint] = txID
+		}
+	}
+	return nil
+}
+
+// checkBlockHasNoChainedTransactions rejects blocks in which a transaction
+// spends an output of another transaction from the same block.
+func (v *blockValidator) checkBlockHasNoChainedTransactions(block *externalapi.DomainBlock) error {
+
+	transactions := block.Transactions
+	transactionsSet := make(map[externalapi.DomainTransactionID]struct{}, len(transactions))
+	for _, transaction := range transactions {
+		txID := consensushashing.TransactionID(transaction)
+		transactionsSet[*txID] = struct{}{}
+	}
+
+	for _, transaction := range transactions {
+		for i, transactionInput := range transaction.Inputs {
+			if _, ok := transactionsSet[transactionInput.PreviousOutpoint.TransactionID]; ok {
+				txID := consensushashing.TransactionID(transaction)
+				return errors.Wrapf(ruleerrors.ErrChainedTransactions, "block contains chained "+
+					"transactions: Input %d of transaction %s spend "+
+					"an output of transaction %s", i, txID, transactionInput.PreviousOutpoint.TransactionID)
+			}
+		}
+	}
+
+	return nil
+}
+
+// validateGasLimit is a placeholder: gas-limit validation is not yet
+// implemented and the check currently always passes.
+func (v *blockValidator) validateGasLimit(block *externalapi.DomainBlock) error {
+	// TODO: implement this
+	return nil
+}
+
+// checkBlockMass accumulates the mass of all transactions and rejects the
+// block when the total exceeds maxBlockMass.
+func (v *blockValidator) checkBlockMass(block *externalapi.DomainBlock) error {
+	mass := uint64(0)
+	for _, transaction := range block.Transactions {
+		v.transactionValidator.PopulateMass(transaction)
+
+		massBefore := mass
+		mass += transaction.Mass
+		// mass < massBefore also catches uint64 overflow of the running sum.
+		if mass > v.maxBlockMass || mass < massBefore {
+			return errors.Wrapf(ruleerrors.ErrBlockMassTooHigh, "block exceeded the mass limit of %d",
+				v.maxBlockMass)
+		}
+	}
+ return nil +} + +func (v *blockValidator) checkNoPrefilledInputs(block *externalapi.DomainBlock) error { + for _, tx := range block.Transactions { + for i, input := range tx.Inputs { + if input.UTXOEntry != nil { + return errors.Errorf("input %d in transaction %s has a prefilled UTXO entry", + i, consensushashing.TransactionID(tx)) + } + } + } + + return nil +} diff --git a/domain/consensus/processes/blockvalidator/block_body_in_isolation_test.go b/domain/consensus/processes/blockvalidator/block_body_in_isolation_test.go new file mode 100644 index 0000000..4642518 --- /dev/null +++ b/domain/consensus/processes/blockvalidator/block_body_in_isolation_test.go @@ -0,0 +1,1307 @@ +package blockvalidator_test + +import ( + "bytes" + "math" + "math/big" + "reflect" + "runtime" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/merkle" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +func TestBlockValidator_ValidateBodyInIsolation(t *testing.T) { + tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){ + CheckBlockSanity, + CheckBlockHashMerkleRoot, + BlockMass, + CheckBlockDuplicateTransactions, + CheckBlockContainsOnlyOneCoinbase, + CheckBlockDoubleSpends, + 
CheckFirstBlockTransactionIsCoinbase, + } + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestChainedTransactions") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + for _, test := range tests { + testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name() + t.Run(testName, func(t *testing.T) { + test(t, tc, consensusConfig) + }) + } + }) +} + +func TestChainedTransactions(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestChainedTransactions") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + fundingBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block1, _, err := tc.GetBlock(block1Hash) + if err != nil { + t.Fatalf("Error getting block1: %+v", err) + } + + tx1, err := testutils.CreateTransaction(block1.Transactions[0], 1) + if err != nil { + t.Fatalf("Error creating tx1: %+v", err) + } + + chainedTx, err := testutils.CreateTransaction(tx1, 1) + if err != nil { + t.Fatalf("Error creating chainedTx: %+v", err) + } + + // Check that a block is invalid if it contains chained transactions + _, _, err = tc.AddBlock([]*externalapi.DomainHash{block1Hash}, nil, + []*externalapi.DomainTransaction{tx1, chainedTx}) + if !errors.Is(err, ruleerrors.ErrChainedTransactions) { + t.Fatalf("unexpected error %+v", err) + } + + block2Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, 
nil, nil) + if err != nil { + t.Fatalf("unexpected error %+v", err) + } + + block2, _, err := tc.GetBlock(block2Hash) + if err != nil { + t.Fatalf("Error getting block2: %+v", err) + } + + tx2, err := testutils.CreateTransaction(block2.Transactions[0], 1) + if err != nil { + t.Fatalf("Error creating tx2: %+v", err) + } + + // Check that a block is valid if it contains two non chained transactions + _, _, err = tc.AddBlock([]*externalapi.DomainHash{block2Hash}, nil, + []*externalapi.DomainTransaction{tx1, tx2}) + if err != nil { + t.Fatalf("unexpected error %+v", err) + } + }) +} + +// CheckBlockSanity tests the CheckBlockSanity function to ensure it works +// as expected. +func CheckBlockSanity(t *testing.T, tc testapi.TestConsensus, _ *consensus.Config) { + blockHash := consensushashing.BlockHash(&exampleValidBlock) + if len(exampleValidBlock.Transactions) < 3 { + t.Fatalf("Too few transactions in block, expect at least 3, got %v", len(exampleValidBlock.Transactions)) + } + + stagingArea := model.NewStagingArea() + + tc.BlockStore().Stage(stagingArea, blockHash, &exampleValidBlock) + + err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if err != nil { + t.Fatalf("Failed validating block in isolation: %v", err) + } + + // Test with block with wrong transactions sorting order + blockHash = consensushashing.BlockHash(&blockWithWrongTxOrder) + tc.BlockStore().Stage(stagingArea, blockHash, &blockWithWrongTxOrder) + err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if !errors.Is(err, ruleerrors.ErrTransactionsNotSorted) { + t.Errorf("CheckBlockSanity: Expected ErrTransactionsNotSorted error, instead got %v", err) + } + + // Test a block with invalid parents order + // We no longer require blocks to have ordered parents + blockHash = consensushashing.BlockHash(&unOrderedParentsBlock) + tc.BlockStore().Stage(stagingArea, blockHash, &unOrderedParentsBlock) + err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, 
blockHash) + if err != nil { + t.Errorf("CheckBlockSanity: Expected block to be be body in isolation valid, got error instead: %v", err) + } +} + +var unOrderedParentsBlock = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0x00000000, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, + 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, + 0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52, + 0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00, + }), + }}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x7e, 0xe2, 0x10, 0x4e, 0x21, 0x2f, 0x2a, 0xb1, + 0x7d, 0x22, 0xf5, 0xe8, 0xa0, 0x98, 0xef, 0x53, + 0x83, 0xae, 0x59, 0x1f, 0x83, 0xf3, 0x78, 0x5d, + 0x30, 0xae, 0x3e, 0xb3, 0x06, 0x08, 0x6f, 0x79, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x80, 0xf7, 0x00, 0xe3, 0x16, 0x3d, 0x04, 0x95, + 0x5b, 0x7e, 0xaf, 0x84, 0x7e, 0x1b, 0x6b, 0x06, + 0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25, + 0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x80, 0xf7, 0x00, 0xe3, 0x16, 0x3d, 0x04, 0x95, + 0x5b, 0x7e, 0xaf, 0x84, 0x7e, 0x1b, 0x6b, 0x06, + 0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25, + 0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2, + }), + 0x5cd18053000, + 0x207fffff, + 0x1, + 0, + 9, + big.NewInt(0), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }), + ), + Transactions: []*externalapi.DomainTransaction{ + { + Version: 0, + Inputs: nil, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0x12a05f200, // 5000000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x51, + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDCoinbase, + Payload: []byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + 
}, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0x2123e300, // 556000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + { + Value: 0x108e20f00, // 4444000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, + 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, + 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, + 0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf, + }), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3 + Index: 1, + }, + SignatureScript: []byte{ + 0x47, // OP_DATA_71 + 0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf, + 0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5, + 0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34, + 0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31, + 0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee, + 0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f, + 0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c, + 0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e, + 0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01, + 0x41, // OP_DATA_65 + 0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78, + 0xd9, 0x9b, 0x83, 0x77, 0xa3, 
0x5b, 0xbc, 0xe5, + 0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39, + 0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21, + 0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee, + 0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3, + 0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95, + 0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85, + 0x0f, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0xf4240, // 1000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04, + 0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d, + 0xad, 0xbe, 0x7e, 0x10, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + { + Value: 0x11d260c0, // 299000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1, + 0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab, + 0xb3, 0x40, 0x9c, 0xd9, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, + 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, + 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, + 0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4, + }), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2, + 0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c, + 0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd, + 0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f, + 0xd1, 
0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00, + 0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14, + 0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb, + 0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c, + 0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97, + 0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18, + 0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17, + 0x54, 0x07, 0xc0, 0x94, 0x84, 0xce, 0x96, 0x94, + 0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65, + 0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f, + 0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce, + 0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f, + 0xbb, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0xf4240, // 1000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7, + 0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b, + 0xf2, 0xeb, 0x9e, 0xe0, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + }, +} + +// exampleValidBlock defines a sample valid block +var exampleValidBlock = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0x00000000, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, + 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, + 0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52, + 0x6a, 0xce, 
0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00, + }), + }}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, + 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, + 0x0a, 0xa6, 0xd5, 0x38, 0x97, 0x4d, 0xa5, 0x0f, + 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x8a, 0xb7, 0xd6, 0x73, 0x1b, 0xe6, 0xc5, 0xd3, + 0x5d, 0x4e, 0x2c, 0xc9, 0x57, 0x88, 0x30, 0x65, + 0x81, 0xb8, 0xa0, 0x68, 0x77, 0xc4, 0x02, 0x1e, + 0x3c, 0xb1, 0x16, 0x8f, 0x5f, 0x6b, 0x45, 0x87, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{}), + 0x17305aa654a, + 0x207fffff, + 1, + 0, + 9, + big.NewInt(0), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }), + ), + Transactions: []*externalapi.DomainTransaction{ + { + Version: 0, + Inputs: nil, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0x12a05f200, // 5000000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, + 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, + 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDCoinbase, + Payload: []byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 
0x00, 0x00, 0x00, + }), + Index: 0xffffffff, + }, + Sequence: math.MaxUint64, + }, + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, + 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, + 0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52, + 0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00, + }), + Index: 0xffffffff, + }, + Sequence: math.MaxUint64, + }, + }, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 
0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0x2123e300, // 556000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + { + Value: 0x108e20f00, // 4444000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, + 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, + 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, + 0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf, + }), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3 + Index: 1, + }, + SignatureScript: []byte{ + 0x47, // OP_DATA_71 + 0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf, + 0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5, + 0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34, + 0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31, + 0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee, + 0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f, + 0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c, + 0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e, + 0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01, + 0x41, // OP_DATA_65 + 0x04, 0xae, 0x31, 0xc3, 
0x1b, 0xf9, 0x12, 0x78, + 0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5, + 0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39, + 0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21, + 0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee, + 0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3, + 0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95, + 0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85, + 0x0f, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0xf4240, // 1000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04, + 0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d, + 0xad, 0xbe, 0x7e, 0x10, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + { + Value: 0x11d260c0, // 299000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1, + 0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab, + 0xb3, 0x40, 0x9c, 0xd9, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, + 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, + 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, + 0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4, + }), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2, + 0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c, + 0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd, + 
0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f, + 0xd1, 0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00, + 0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14, + 0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb, + 0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c, + 0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97, + 0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18, + 0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17, + 0x54, 0x07, 0xc0, 0x94, 0x84, 0xce, 0x96, 0x94, + 0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65, + 0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f, + 0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce, + 0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f, + 0xbb, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0xf4240, // 1000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7, + 0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b, + 0xf2, 0xeb, 0x9e, 0xe0, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + }, +} + +// blockWithWrongTxOrder defines invalid block 100,000 of the block DAG. 
+var blockWithWrongTxOrder = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, + 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, + 0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52, + 0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00, + }), + }}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xd5, 0xd2, 0x32, 0xe4, 0xbe, 0x9c, 0x33, 0xbd, + 0xf1, 0x0a, 0xd2, 0x9d, 0x0c, 0xbd, 0xe5, 0xae, + 0xcb, 0x1a, 0xf9, 0x5a, 0x3e, 0xfb, 0xf3, 0xc7, + 0x2b, 0x4d, 0x10, 0xa6, 0xbd, 0x5f, 0x07, 0xe7, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xa0, 0x69, 0x2d, 0x16, 0xb5, 0xd7, 0xe4, 0xf3, + 0xcd, 0xc7, 0xc9, 0xaf, 0xfb, 0xd2, 0x1b, 0x85, + 0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90, + 0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e, + }), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x69, 0x2d, 0x16, 0xb5, 0xd7, 0xe4, 0xf3, + 0xcd, 0xc7, 0xc9, 0xaf, 0xfb, 0xd2, 0x1b, 0x85, + 0x0b, 0x79, 0xf5, 0x29, 0x6d, 0x1c, 0xaa, 0x90, + 0x2f, 0x01, 0xd4, 0x83, 0x9b, 0x2a, 0x04, 0x5e, + }), + 0x5cd16eaa000, + 0x207fffff, + 1, + 0, + 9, + big.NewInt(0), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }), + ), + Transactions: []*externalapi.DomainTransaction{ + { + Version: 
0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x9b, 0x22, 0x59, 0x44, 0x66, 0xf0, 0xbe, 0x50, + 0x7c, 0x1c, 0x8a, 0xf6, 0x06, 0x27, 0xe6, 0x33, + 0x38, 0x7e, 0xd1, 0xd5, 0x8c, 0x42, 0x59, 0x1a, + 0x31, 0xac, 0x9a, 0xa6, 0x2e, 0xd5, 0x2b, 0x0f, + }), + Index: 0xffffffff, + }, + SignatureScript: nil, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0x12a05f200, // 5000000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0xa9, 0x14, 0xda, 0x17, 0x45, 0xe9, 0xb5, 0x49, + 0xbd, 0x0b, 0xfa, 0x1a, 0x56, 0x99, 0x71, 0xc7, + 0x7e, 0xba, 0x30, 0xcd, 0x5a, 0x4b, 0x87, + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDCoinbase, + Payload: []byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00, + }), + Index: 0xffffffff, + }, + Sequence: math.MaxUint64, + }, + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x4b, 0xb0, 0x75, 0x35, 0xdf, 0xd5, 0x8e, 0x0b, + 0x3c, 0xd6, 0x4f, 0xd7, 0x15, 0x52, 0x80, 0x87, + 0x2a, 0x04, 0x71, 0xbc, 0xf8, 0x30, 0x95, 0x52, + 0x6a, 0xce, 0x0e, 0x38, 0xc6, 0x00, 0x00, 0x00, + }), + Index: 0xffffffff, + }, + Sequence: math.MaxUint64, + }, + }, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: 
[]*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x03, 0x2e, 0x38, 0xe9, 0xc0, 0xa8, 0x4c, 0x60, + 0x46, 0xd6, 0x87, 0xd1, 0x05, 0x56, 0xdc, 0xac, + 0xc4, 0x1d, 0x27, 0x5e, 0xc5, 0x5f, 0xc0, 0x07, + 0x79, 0xac, 0x88, 0xfd, 0xf3, 0x57, 0xa1, 0x87, + }), // 87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03 + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xc3, 0x52, 0xd3, + 0xdd, 0x99, 0x3a, 0x98, 0x1b, 0xeb, 0xa4, 0xa6, + 0x3a, 0xd1, 0x5c, 0x20, 0x92, 0x75, 0xca, 0x94, + 0x70, 0xab, 0xfc, 0xd5, 0x7d, 0xa9, 0x3b, 0x58, + 0xe4, 0xeb, 0x5d, 0xce, 0x82, 0x02, 0x21, 0x00, + 0x84, 0x07, 0x92, 0xbc, 0x1f, 0x45, 0x60, 0x62, + 0x81, 0x9f, 0x15, 0xd3, 0x3e, 0xe7, 0x05, 0x5c, + 0xf7, 0xb5, 0xee, 0x1a, 0xf1, 0xeb, 0xcc, 0x60, + 0x28, 0xd9, 0xcd, 0xb1, 0xc3, 0xaf, 0x77, 0x48, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xf4, 0x6d, 0xb5, 0xe9, 0xd6, 0x1a, 0x9d, + 0xc2, 0x7b, 0x8d, 0x64, 0xad, 0x23, 0xe7, 0x38, + 0x3a, 0x4e, 0x6c, 0xa1, 0x64, 0x59, 0x3c, 0x25, + 0x27, 0xc0, 0x38, 0xc0, 0x85, 0x7e, 0xb6, 0x7e, + 0xe8, 0xe8, 0x25, 0xdc, 0xa6, 0x50, 0x46, 0xb8, + 0x2c, 0x93, 0x31, 0x58, 0x6c, 0x82, 0xe0, 0xfd, + 0x1f, 0x63, 0x3f, 0x25, 0xf8, 0x7c, 0x16, 0x1b, + 0xc6, 0xf8, 0xa6, 0x30, 0x12, 0x1d, 0xf2, 0xb3, + 0xd3, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0x2123e300, // 556000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xc3, 0x98, 0xef, 0xa9, 0xc3, 0x92, 0xba, 0x60, + 0x13, 0xc5, 0xe0, 0x4e, 0xe7, 0x29, 0x75, 0x5e, + 0xf7, 0xf5, 0x8b, 0x32, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + { + Value: 0x108e20f00, // 4444000000 + ScriptPublicKey: 
&externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x94, 0x8c, 0x76, 0x5a, 0x69, 0x14, 0xd4, 0x3f, + 0x2a, 0x7a, 0xc1, 0x77, 0xda, 0x2c, 0x2f, 0x6b, + 0x52, 0xde, 0x3d, 0x7c, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: externalapi.DomainSubnetworkID{11}, + Payload: []byte{}, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc3, 0x3e, 0xbf, 0xf2, 0xa7, 0x09, 0xf1, 0x3d, + 0x9f, 0x9a, 0x75, 0x69, 0xab, 0x16, 0xa3, 0x27, + 0x86, 0xaf, 0x7d, 0x7e, 0x2d, 0xe0, 0x92, 0x65, + 0xe4, 0x1c, 0x61, 0xd0, 0x78, 0x29, 0x4e, 0xcf, + }), // cf4e2978d0611ce46592e02d7e7daf8627a316ab69759a9f3df109a7f2bf3ec3 + Index: 1, + }, + SignatureScript: []byte{ + 0x47, // OP_DATA_71 + 0x30, 0x44, 0x02, 0x20, 0x03, 0x2d, 0x30, 0xdf, + 0x5e, 0xe6, 0xf5, 0x7f, 0xa4, 0x6c, 0xdd, 0xb5, + 0xeb, 0x8d, 0x0d, 0x9f, 0xe8, 0xde, 0x6b, 0x34, + 0x2d, 0x27, 0x94, 0x2a, 0xe9, 0x0a, 0x32, 0x31, + 0xe0, 0xba, 0x33, 0x3e, 0x02, 0x20, 0x3d, 0xee, + 0xe8, 0x06, 0x0f, 0xdc, 0x70, 0x23, 0x0a, 0x7f, + 0x5b, 0x4a, 0xd7, 0xd7, 0xbc, 0x3e, 0x62, 0x8c, + 0xbe, 0x21, 0x9a, 0x88, 0x6b, 0x84, 0x26, 0x9e, + 0xae, 0xb8, 0x1e, 0x26, 0xb4, 0xfe, 0x01, + 0x41, // OP_DATA_65 + 0x04, 0xae, 0x31, 0xc3, 0x1b, 0xf9, 0x12, 0x78, + 0xd9, 0x9b, 0x83, 0x77, 0xa3, 0x5b, 0xbc, 0xe5, + 0xb2, 0x7d, 0x9f, 0xff, 0x15, 0x45, 0x68, 0x39, + 0xe9, 0x19, 0x45, 0x3f, 0xc7, 0xb3, 0xf7, 0x21, + 0xf0, 0xba, 0x40, 0x3f, 0xf9, 0x6c, 0x9d, 0xee, + 0xb6, 0x80, 0xe5, 0xfd, 0x34, 0x1c, 0x0f, 0xc3, + 0xa7, 0xb9, 0x0d, 0xa4, 0x63, 0x1e, 0xe3, 0x95, + 0x60, 0x63, 0x9d, 0xb4, 0x62, 0xe9, 0xcb, 0x85, + 0x0f, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0xf4240, // 1000000 + 
ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0xb0, 0xdc, 0xbf, 0x97, 0xea, 0xbf, 0x44, 0x04, + 0xe3, 0x1d, 0x95, 0x24, 0x77, 0xce, 0x82, 0x2d, + 0xad, 0xbe, 0x7e, 0x10, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + { + Value: 0x11d260c0, // 299000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x6b, 0x12, 0x81, 0xee, 0xc2, 0x5a, 0xb4, 0xe1, + 0xe0, 0x79, 0x3f, 0xf4, 0xe0, 0x8a, 0xb1, 0xab, + 0xb3, 0x40, 0x9c, 0xd9, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + { + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x0b, 0x60, 0x72, 0xb3, 0x86, 0xd4, 0xa7, 0x73, + 0x23, 0x52, 0x37, 0xf6, 0x4c, 0x11, 0x26, 0xac, + 0x3b, 0x24, 0x0c, 0x84, 0xb9, 0x17, 0xa3, 0x90, + 0x9b, 0xa1, 0xc4, 0x3d, 0xed, 0x5f, 0x51, 0xf4, + }), // f4515fed3dc4a19b90a317b9840c243bac26114cf637522373a7d486b372600b + Index: 0, + }, + SignatureScript: []byte{ + 0x49, // OP_DATA_73 + 0x30, 0x46, 0x02, 0x21, 0x00, 0xbb, 0x1a, 0xd2, + 0x6d, 0xf9, 0x30, 0xa5, 0x1c, 0xce, 0x11, 0x0c, + 0xf4, 0x4f, 0x7a, 0x48, 0xc3, 0xc5, 0x61, 0xfd, + 0x97, 0x75, 0x00, 0xb1, 0xae, 0x5d, 0x6b, 0x6f, + 0xd1, 0x3d, 0x0b, 0x3f, 0x4a, 0x02, 0x21, 0x00, + 0xc5, 0xb4, 0x29, 0x51, 0xac, 0xed, 0xff, 0x14, + 0xab, 0xba, 0x27, 0x36, 0xfd, 0x57, 0x4b, 0xdb, + 0x46, 0x5f, 0x3e, 0x6f, 0x8d, 0xa1, 0x2e, 0x2c, + 0x53, 0x03, 0x95, 0x4a, 0xca, 0x7f, 0x78, 0xf3, + 0x01, // 73-byte signature + 0x41, // OP_DATA_65 + 0x04, 0xa7, 0x13, 0x5b, 0xfe, 0x82, 0x4c, 0x97, + 0xec, 0xc0, 0x1e, 0xc7, 0xd7, 0xe3, 0x36, 0x18, + 0x5c, 0x81, 0xe2, 0xaa, 0x2c, 0x41, 0xab, 0x17, + 0x54, 0x07, 0xc0, 0x94, 0x84, 
0xce, 0x96, 0x94, + 0xb4, 0x49, 0x53, 0xfc, 0xb7, 0x51, 0x20, 0x65, + 0x64, 0xa9, 0xc2, 0x4d, 0xd0, 0x94, 0xd4, 0x2f, + 0xdb, 0xfd, 0xd5, 0xaa, 0xd3, 0xe0, 0x63, 0xce, + 0x6a, 0xf4, 0xcf, 0xaa, 0xea, 0x4e, 0xa1, 0x4f, + 0xbb, // 65-byte pubkey + }, + Sequence: math.MaxUint64, + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{ + { + Value: 0xf4240, // 1000000 + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{ + 0x76, // OP_DUP + 0xa9, // OP_HASH160 + 0x14, // OP_DATA_20 + 0x39, 0xaa, 0x3d, 0x56, 0x9e, 0x06, 0xa1, 0xd7, + 0x92, 0x6d, 0xc4, 0xbe, 0x11, 0x93, 0xc9, 0x9b, + 0xf2, 0xeb, 0x9e, 0xe0, + 0x88, // OP_EQUALVERIFY + 0xac, // OP_CHECKSIG + }, Version: 0}, + }, + }, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + }, + }, +} + +func CheckBlockHashMerkleRoot(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + blockWithInvalidMerkleRoot := block.Clone() + blockWithInvalidMerkleRoot.Transactions[0].Version += 1 + + err = tc.ValidateAndInsertBlock(blockWithInvalidMerkleRoot, true) + if !errors.Is(err, ruleerrors.ErrBadMerkleRoot) { + t.Fatalf("Unexpected error: %+v", err) + } + + // Check that a block with invalid merkle root is not marked as invalid + // and can be re-added with the right transactions. 
+ err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } +} + +func BlockMass(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + block, _, err := initBlockWithInvalidBlockMass(consensusConfig, tc) + if err != nil { + t.Fatalf("Error BuildBlockWithParents : %+v", err) + } + blockHash := consensushashing.BlockHash(block) + stagingArea := model.NewStagingArea() + tc.BlockStore().Stage(stagingArea, blockHash, block) + + err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if err == nil || !errors.Is(err, ruleerrors.ErrBlockMassTooHigh) { + t.Fatalf("ValidateBodyInIsolationTest: TestBlockMass:"+ + " Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrBlockMassTooHigh, err) + } +} + +func initBlockWithInvalidBlockMass(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) { + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + prevOutTxID := &externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + bigSignatureScript := bytes.Repeat([]byte("01"), 500000) + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: bigSignatureScript, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 10, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, + &externalapi.ScriptPublicKey{}, + true, + uint64(5)), + } + tx := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + Payload: []byte{}, + } + + return 
tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx}) +} + +func CheckBlockDuplicateTransactions(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + block, _, err := initBlockWithDuplicateTransaction(consensusConfig, tc) + if err != nil { + t.Fatalf("Error BuildBlockWithParents : %+v", err) + } + blockHash := consensushashing.BlockHash(block) + stagingArea := model.NewStagingArea() + tc.BlockStore().Stage(stagingArea, blockHash, block) + + err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if err == nil || !errors.Is(err, ruleerrors.ErrDuplicateTx) { + t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDuplicateTransactions:"+ + " Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDuplicateTx, err) + } +} + +func initBlockWithDuplicateTransaction(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) { + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + prevOutTxID := &externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: bytes.Repeat([]byte("01"), 10), + Sequence: constants.MaxTxInSequenceNum, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, + &externalapi.ScriptPublicKey{}, + true, + uint64(5)), + } + tx := &externalapi.DomainTransaction{ + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + SubnetworkID: subnetworks.SubnetworkIDNative, + } + + return 
tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx, tx}) +} + +func CheckBlockContainsOnlyOneCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + block, _, err := initBlockWithMoreThanOneCoinbase(consensusConfig, tc) + if err != nil { + t.Fatalf("Error BuildBlockWithParents : %+v", err) + } + blockHash := consensushashing.BlockHash(block) + stagingArea := model.NewStagingArea() + tc.BlockStore().Stage(stagingArea, blockHash, block) + + err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if err == nil || !errors.Is(err, ruleerrors.ErrMultipleCoinbases) { + t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockContainsOnlyOneCoinbase:"+ + " Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrMultipleCoinbases, err) + } +} + +func initBlockWithMoreThanOneCoinbase(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) { + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + prevOutTxID := &externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: bytes.Repeat([]byte("01"), 10), + Sequence: constants.MaxTxInSequenceNum, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, + &externalapi.ScriptPublicKey{}, + true, + uint64(5)), + } + tx := &externalapi.DomainTransaction{ + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + SubnetworkID: subnetworks.SubnetworkIDCoinbase, + } + + return 
tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, []*externalapi.DomainTransaction{tx}) +} + +func CheckBlockDoubleSpends(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + block, _, err := initBlockWithDoubleSpends(consensusConfig, tc) + if err != nil { + t.Fatalf("Error BuildBlockWithParents : %+v", err) + } + blockHash := consensushashing.BlockHash(block) + stagingArea := model.NewStagingArea() + tc.BlockStore().Stage(stagingArea, blockHash, block) + + err = tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if err == nil || !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) { + t.Fatalf("ValidateBodyInIsolationTest: TestCheckBlockDoubleSpends:"+ + " Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrDoubleSpendInSameBlock, err) + } +} + +func initBlockWithDoubleSpends(consensusConfig *consensus.Config, tc testapi.TestConsensus) (*externalapi.DomainBlock, externalapi.UTXODiff, error) { + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + prevOutTxID := &externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: bytes.Repeat([]byte("01"), 10), + Sequence: constants.MaxTxInSequenceNum, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, + &externalapi.ScriptPublicKey{}, + true, + uint64(5)), + } + tx := &externalapi.DomainTransaction{ + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + SubnetworkID: subnetworks.SubnetworkIDNative, + } + txInputSameOutpoint := 
externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: bytes.Repeat([]byte("02"), 10), + Sequence: constants.MaxTxInSequenceNum, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, + &externalapi.ScriptPublicKey{}, + true, + uint64(4)), + } + txSameOutpoint := &externalapi.DomainTransaction{ + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{&txInputSameOutpoint}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + SubnetworkID: subnetworks.SubnetworkIDNative, + } + + return tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, + &emptyCoinbase, []*externalapi.DomainTransaction{tx, txSameOutpoint}) +} + +func CheckFirstBlockTransactionIsCoinbase(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + + block := initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig) + blockHash := consensushashing.BlockHash(block) + stagingArea := model.NewStagingArea() + tc.BlockStore().Stage(stagingArea, blockHash, block) + + err := tc.BlockValidator().ValidateBodyInIsolation(stagingArea, blockHash) + if err == nil || !errors.Is(err, ruleerrors.ErrFirstTxNotCoinbase) { + t.Fatalf("ValidateBodyInIsolationTest: TestCheckFirstBlockTransactionIsCoinbase:"+ + " Unexpected error: Expected to: %v, but got : %v", ruleerrors.ErrFirstTxNotCoinbase, err) + } +} + +func initBlockWithFirstTransactionDifferentThanCoinbase(consensusConfig *consensus.Config) *externalapi.DomainBlock { + prevOutTxID := &externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: bytes.Repeat([]byte("01"), 10), + Sequence: constants.MaxTxInSequenceNum, + } + tx := 
&externalapi.DomainTransaction{ + Version: 0, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{{uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 2}, Version: 0}}, {uint64(0xFFFF), + &externalapi.ScriptPublicKey{Script: []byte{1, 3}, Version: 0}}}, + SubnetworkID: subnetworks.SubnetworkIDNative, + } + + return &externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + constants.BlockVersion, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{consensusConfig.GenesisHash}}, + merkle.CalculateHashMerkleRoot([]*externalapi.DomainTransaction{tx}), + &externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x80, 0xf7, 0x00, 0xe3, 0x16, 0x3d, 0x04, 0x95, + 0x5b, 0x7e, 0xaf, 0x84, 0x7e, 0x1b, 0x6b, 0x06, + 0x4e, 0x06, 0xba, 0x64, 0xd7, 0x61, 0xda, 0x25, + 0x1a, 0x0e, 0x21, 0xd4, 0x64, 0x49, 0x02, 0xa2, + }), + 0x5cd18053000, + 0x207fffff, + 0x1, + 0, + 0, + big.NewInt(0), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + })), + Transactions: []*externalapi.DomainTransaction{tx}, + } +} diff --git a/domain/consensus/processes/blockvalidator/block_header_in_context.go b/domain/consensus/processes/blockvalidator/block_header_in_context.go new file mode 100644 index 0000000..d86228d --- /dev/null +++ b/domain/consensus/processes/blockvalidator/block_header_in_context.go @@ -0,0 +1,246 @@ +package blockvalidator + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + 
"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// ValidateHeaderInContext validates block headers in the context of the current +// consensus state +func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateHeaderInContext") + defer onEnd() + + header, err := v.blockHeaderStore.BlockHeader(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + hasValidatedHeader, err := v.hasValidatedHeader(stagingArea, blockHash) + if err != nil { + return err + } + + if !hasValidatedHeader { + var logErr error + log.Debug(logger.NewLogClosure(func() string { + var ghostdagData *externalapi.BlockGHOSTDAGData + ghostdagData, logErr = v.ghostdagDataStores[0].Get(v.databaseContext, stagingArea, blockHash, false) + if err != nil { + return "" + } + + return fmt.Sprintf("block %s blue score is %d", blockHash, ghostdagData.BlueScore()) + })) + + if logErr != nil { + return logErr + } + } + + err = v.validateMedianTime(stagingArea, header) + if err != nil { + return err + } + + err = v.checkMergeSizeLimit(stagingArea, blockHash) + if err != nil { + return err + } + + // If needed - calculate reachability data right before calling CheckBoundedMergeDepth, + // since it's used to find a block's finality point. + // This might not be required if this block's header has previously been received during + // headers-first synchronization. 
+ hasReachabilityData, err := v.reachabilityStore.HasReachabilityData(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + if !hasReachabilityData { + err = v.reachabilityManager.AddBlock(stagingArea, blockHash) + if err != nil { + return err + } + } + + if !isBlockWithTrustedData { + err = v.checkIndirectParents(stagingArea, header) + if err != nil { + return err + } + } + + err = v.mergeDepthManager.CheckBoundedMergeDepth(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + + err = v.checkDAAScore(stagingArea, blockHash, header) + if err != nil { + return err + } + + err = v.checkBlueWork(stagingArea, blockHash, header) + if err != nil { + return err + } + + err = v.checkHeaderBlueScore(stagingArea, blockHash, header) + if err != nil { + return err + } + + if !isBlockWithTrustedData { + err = v.validateHeaderPruningPoint(stagingArea, blockHash) + if err != nil { + return err + } + } + + return nil +} + +func (v *blockValidator) hasValidatedHeader(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + exists, err := v.blockStatusStore.Exists(v.databaseContext, stagingArea, blockHash) + if err != nil { + return false, err + } + + if !exists { + return false, nil + } + + status, err := v.blockStatusStore.Get(v.databaseContext, stagingArea, blockHash) + if err != nil { + return false, err + } + + return status == externalapi.StatusHeaderOnly, nil +} + +// checkParentsIncest validates that no parent is an ancestor of another parent +func (v *blockValidator) checkParentsIncest(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error { + parents, err := v.dagTopologyManagers[0].Parents(stagingArea, blockHash) + if err != nil { + return err + } + + for _, parentA := range parents { + for _, parentB := range parents { + if parentA.Equal(parentB) { + continue + } + + isAAncestorOfB, err := v.dagTopologyManagers[0].IsAncestorOf(stagingArea, parentA, parentB) + if err != 
nil { + return err + } + + if isAAncestorOfB { + return errors.Wrapf(ruleerrors.ErrInvalidParentsRelation, "parent %s is an "+ + "ancestor of another parent %s", + parentA, + parentB, + ) + } + } + } + return nil +} + +func (v *blockValidator) validateMedianTime(stagingArea *model.StagingArea, header externalapi.BlockHeader) error { + if len(header.DirectParents()) == 0 { + return nil + } + + // Ensure the timestamp for the block header is not before the + // median time of the last several blocks (medianTimeBlocks). + hash := consensushashing.HeaderHash(header) + pastMedianTime, err := v.pastMedianTimeManager.PastMedianTime(stagingArea, hash) + if err != nil { + return err + } + + if header.TimeInMilliseconds() <= pastMedianTime { + return errors.Wrapf(ruleerrors.ErrTimeTooOld, "block timestamp of %d is not after expected %d", + header.TimeInMilliseconds(), pastMedianTime) + } + + return nil +} + +func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, hash *externalapi.DomainHash) error { + ghostdagData, err := v.ghostdagDataStores[0].Get(v.databaseContext, stagingArea, hash, false) + if err != nil { + return err + } + + mergeSetSize := len(ghostdagData.MergeSetBlues()) + len(ghostdagData.MergeSetReds()) + + if uint64(mergeSetSize) > v.mergeSetSizeLimit { + return errors.Wrapf(ruleerrors.ErrViolatingMergeLimit, + "The block merges %d blocks > %d merge set size limit", mergeSetSize, v.mergeSetSizeLimit) + } + + return nil +} + +func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error { + expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DAAScore(), header.DirectParents()) + if err != nil { + return err + } + + areParentsEqual := externalapi.ParentsEqual(header.Parents(), expectedParents) + if !areParentsEqual { + return errors.Wrapf(ruleerrors.ErrUnexpectedParents, "unexpected indirect block parents") + } + return nil +} + +func (v *blockValidator) 
checkDAAScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
+	header externalapi.BlockHeader) error {
+
+	expectedDAAScore, err := v.daaBlocksStore.DAAScore(v.databaseContext, stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+	if header.DAAScore() != expectedDAAScore {
+		return errors.Wrapf(ruleerrors.ErrUnexpectedDAAScore, "block DAA score of %d is not the expected value of %d", header.DAAScore(), expectedDAAScore)
+	}
+	return nil
+}
+
+func (v *blockValidator) checkBlueWork(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
+	header externalapi.BlockHeader) error {
+
+	ghostdagData, err := v.ghostdagDataStores[0].Get(v.databaseContext, stagingArea, blockHash, false)
+	if err != nil {
+		return err
+	}
+	expectedBlueWork := ghostdagData.BlueWork()
+	if header.BlueWork().Cmp(expectedBlueWork) != 0 {
+		return errors.Wrapf(ruleerrors.ErrUnexpectedBlueWork, "block blue work of %d is not the expected value of %d", header.BlueWork(), expectedBlueWork)
+	}
+	return nil
+}
+
+func (v *blockValidator) checkHeaderBlueScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash,
+	header externalapi.BlockHeader) error {
+
+	ghostdagData, err := v.ghostdagDataStores[0].Get(v.databaseContext, stagingArea, blockHash, false)
+	if err != nil {
+		return err
+	}
+	if header.BlueScore() != ghostdagData.BlueScore() { // NOTE(review): reuses ErrUnexpectedBlueWork — no blue-score-specific rule error visible; confirm
+		return errors.Wrapf(ruleerrors.ErrUnexpectedBlueWork, "block blue score of %d is not the expected "+
+			"value of %d", header.BlueScore(), ghostdagData.BlueScore())
+	}
+	return nil
+}
diff --git a/domain/consensus/processes/blockvalidator/block_header_in_context_test.go b/domain/consensus/processes/blockvalidator/block_header_in_context_test.go
new file mode 100644
index 0000000..a8f5064
--- /dev/null
+++ b/domain/consensus/processes/blockvalidator/block_header_in_context_test.go
@@ -0,0 +1,254 @@
+package blockvalidator_test
+
+import (
+	"errors"
+	"math/big"
+	"testing"
+
+	
"github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestValidateMedianTime(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestValidateMedianTime") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + addBlock := func(blockTime int64, parents []*externalapi.DomainHash, expectedErr error) (*externalapi.DomainBlock, *externalapi.DomainHash) { + block, _, err := tc.BuildBlockWithParents(parents, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + + newHeader := block.Header.ToMutable() + newHeader.SetTimeInMilliseconds(blockTime) + block.Header = newHeader.ToImmutable() + err = tc.ValidateAndInsertBlock(block, true) + if !errors.Is(err, expectedErr) { + t.Fatalf("expected error %s but got %+v", expectedErr, err) + } + + return block, consensushashing.BlockHash(block) + } + + pastMedianTime := func(parents ...*externalapi.DomainHash) int64 { + stagingArea := model.NewStagingArea() + var tempHash externalapi.DomainHash + tc.BlockRelationStore().StageBlockRelation(stagingArea, &tempHash, &model.BlockRelations{ + Parents: parents, + Children: nil, + }) + + err = tc.GHOSTDAGManager().GHOSTDAG(stagingArea, &tempHash) + if err != nil { + t.Fatalf("GHOSTDAG: %+v", err) + } + + pastMedianTime, err := 
tc.PastMedianTimeManager().PastMedianTime(stagingArea, &tempHash) + if err != nil { + t.Fatalf("PastMedianTime: %+v", err) + } + + return pastMedianTime + } + + tip := consensusConfig.GenesisBlock + tipHash := consensusConfig.GenesisHash + + blockTime := tip.Header.TimeInMilliseconds() + + for i := 0; i < 10; i++ { + blockTime += 100 + _, tipHash = addBlock(blockTime, []*externalapi.DomainHash{tipHash}, nil) + } + + // Checks that a block is invalid if it has timestamp equals to past median time + addBlock(pastMedianTime(tipHash), []*externalapi.DomainHash{tipHash}, ruleerrors.ErrTimeTooOld) + + // Checks that a block is valid if its timestamp is after past median time + addBlock(pastMedianTime(tipHash)+1, []*externalapi.DomainHash{tipHash}, nil) + + // Checks that a block is invalid if its timestamp is before past median time + addBlock(pastMedianTime(tipHash)-1, []*externalapi.DomainHash{tipHash}, ruleerrors.ErrTimeTooOld) + }) +} + +func TestCheckParentsIncest(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentsIncest") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + a, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + b, _, err := tc.AddBlock([]*externalapi.DomainHash{a}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + c, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + version := constants.BlockVersion + directParentsRelationBlock := &externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + version, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{a, b}}, + &externalapi.DomainHash{}, + 
&externalapi.DomainHash{}, + &externalapi.DomainHash{}, + 0, + 0, + 0, + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ), + Transactions: nil, + } + + err = tc.ValidateAndInsertBlock(directParentsRelationBlock, true) + if !errors.Is(err, ruleerrors.ErrInvalidParentsRelation) { + t.Fatalf("unexpected error %+v", err) + } + + indirectParentsRelationBlock := &externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + version, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{consensusConfig.GenesisHash, b}}, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + 0, + 0, + 0, + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ), + Transactions: nil, + } + + err = tc.ValidateAndInsertBlock(indirectParentsRelationBlock, true) + if !errors.Is(err, ruleerrors.ErrInvalidParentsRelation) { + t.Fatalf("unexpected error %+v", err) + } + + // Try to add block with unrelated parents + _, _, err = tc.AddBlock([]*externalapi.DomainHash{b, c}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %s", err) + } + }) +} + +func TestCheckMergeSizeLimit(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.MergeSetSizeLimit = 5 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckMergeSizeLimit") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + chain1TipHash := consensusConfig.GenesisHash + // We add a chain larger by one than chain2 below, to make this one the selected chain + for i := uint64(0); i < consensusConfig.MergeSetSizeLimit+1; i++ { + chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + chain2TipHash := consensusConfig.GenesisHash + // We add a merge set of size exactly MergeSetSizeLimit (to violate the limit), + // since selected 
parent is also counted + for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ { + chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + _, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash, chain2TipHash}, nil, nil) + if !errors.Is(err, ruleerrors.ErrViolatingMergeLimit) { + t.Fatalf("unexpected error: %+v", err) + } + }) +} + +func TestVirtualSelectionViolatingMergeSizeLimit(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.MergeSetSizeLimit = 2 * uint64(consensusConfig.K) + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestVirtualSelectionViolatingMergeSizeLimit") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + chain1TipHash := consensusConfig.GenesisHash + // We add a chain larger than chain2 below, to make this one the selected chain + for i := uint64(0); i < consensusConfig.MergeSetSizeLimit; i++ { + chain1TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1TipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + chain2TipHash := consensusConfig.GenesisHash + // We add a merge set of size exactly MergeSetSizeLimit-1 (to still not violate the limit) + for i := uint64(0); i < consensusConfig.MergeSetSizeLimit-1; i++ { + chain2TipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2TipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + // We now add a single block over genesis which is expected to exceed the limit + _, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + stagingArea := model.NewStagingArea() + virtualSelectedParent, err := tc.GetVirtualSelectedParent() + if err != nil { + 
t.Fatalf("GetVirtualSelectedParent: %+v", err) + } + selectedParentAnticone, err := tc.DAGTraversalManager().AnticoneFromVirtualPOV(stagingArea, virtualSelectedParent) + if err != nil { + t.Fatalf("AnticoneFromVirtualPOV: %+v", err) + } + + // Test if Virtual's mergeset is too large + // Note: the selected parent itself is also counted in the mergeset limit + if len(selectedParentAnticone)+1 > (int)(consensusConfig.MergeSetSizeLimit) { + t.Fatalf("Virtual's mergset size (%d) exeeds merge set limit (%d)", + len(selectedParentAnticone)+1, consensusConfig.MergeSetSizeLimit) + } + }) +} diff --git a/domain/consensus/processes/blockvalidator/block_header_in_isolation.go b/domain/consensus/processes/blockvalidator/block_header_in_isolation.go new file mode 100644 index 0000000..0ad31ee --- /dev/null +++ b/domain/consensus/processes/blockvalidator/block_header_in_isolation.go @@ -0,0 +1,78 @@ +package blockvalidator + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/mstime" +) + +// ValidateHeaderInIsolation validates block headers in isolation from the current +// consensus state +func (v *blockValidator) ValidateHeaderInIsolation(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidateHeaderInIsolation") + defer onEnd() + + header, err := v.blockHeaderStore.BlockHeader(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + //todo : drop this + //log.Info("blockHash %s - genesisHash %s", blockHash, v.genesisHash) + + if 
!blockHash.Equal(v.genesisHash) { + err = v.checkBlockVersion(header) + if err != nil { + return err + } + } + + err = v.checkBlockTimestampInIsolation(header) + if err != nil { + return err + } + + err = v.checkParentsLimit(header) + if err != nil { + return err + } + + return nil +} + +func (v *blockValidator) checkParentsLimit(header externalapi.BlockHeader) error { + hash := consensushashing.HeaderHash(header) + if len(header.DirectParents()) == 0 && !hash.Equal(v.genesisHash) { + return errors.Wrapf(ruleerrors.ErrNoParents, "block has no parents") + } + + if uint64(len(header.DirectParents())) > uint64(v.maxBlockParents) { + return errors.Wrapf(ruleerrors.ErrTooManyParents, "block header has %d parents, but the maximum allowed amount "+ + "is %d", len(header.DirectParents()), v.maxBlockParents) + } + return nil +} + +func (v *blockValidator) checkBlockVersion(header externalapi.BlockHeader) error { + if header.Version() != constants.BlockVersion { + return errors.Wrapf( + ruleerrors.ErrWrongBlockVersion, "The block version should be %d", constants.BlockVersion) + } + return nil +} + +func (v *blockValidator) checkBlockTimestampInIsolation(header externalapi.BlockHeader) error { + blockTimestamp := header.TimeInMilliseconds() + now := mstime.Now().UnixMilliseconds() + maxCurrentTime := now + int64(v.timestampDeviationTolerance)*v.targetTimePerBlock.Milliseconds() + if blockTimestamp > maxCurrentTime { + return errors.Wrapf( + ruleerrors.ErrTimeTooMuchInTheFuture, "The block timestamp is in the future.") + } + return nil +} diff --git a/domain/consensus/processes/blockvalidator/block_header_in_isolation_test.go b/domain/consensus/processes/blockvalidator/block_header_in_isolation_test.go new file mode 100644 index 0000000..274de49 --- /dev/null +++ b/domain/consensus/processes/blockvalidator/block_header_in_isolation_test.go @@ -0,0 +1,127 @@ +package blockvalidator_test + +import ( + "reflect" + "runtime" + "testing" + + 
"github.com/spectre-project/spectred/domain/consensus/model/testapi" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/util/mstime" +) + +func TestBlockValidator_ValidateHeaderInIsolation(t *testing.T) { + tests := []func(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config){ + CheckParentsLimit, + CheckBlockVersion, + CheckBlockTimestampInIsolation, + } + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestBlockValidator_ValidateHeaderInIsolation") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + for _, test := range tests { + testName := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name() + t.Run(testName, func(t *testing.T) { + test(t, tc, consensusConfig) + }) + } + }) +} + +func CheckParentsLimit(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + for i := externalapi.KType(0); i < consensusConfig.MaxBlockParents+1; i++ { + _, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + tips, err := tc.Tips() + if err != nil { + t.Fatalf("Tips: %+v", err) + } + + _, _, err = tc.AddBlock(tips, nil, nil) + if !errors.Is(err, ruleerrors.ErrTooManyParents) { + t.Fatalf("Unexpected error: %+v", err) + } +} + +func CheckBlockVersion(t *testing.T, tc testapi.TestConsensus, consensusConfig *consensus.Config) { + block, _, err := 
tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + + expectedVersion := constants.BlockVersion + block.Header = blockheader.NewImmutableBlockHeader( + expectedVersion+1, + block.Header.Parents(), + block.Header.HashMerkleRoot(), + block.Header.AcceptedIDMerkleRoot(), + block.Header.UTXOCommitment(), + block.Header.TimeInMilliseconds(), + block.Header.Bits(), + block.Header.Nonce(), + block.Header.DAAScore(), + block.Header.BlueScore(), + block.Header.BlueWork(), + block.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(block, true) + if !errors.Is(err, ruleerrors.ErrWrongBlockVersion) { + t.Fatalf("Unexpected error: %+v", err) + } +} + +func CheckBlockTimestampInIsolation(t *testing.T, tc testapi.TestConsensus, cfg *consensus.Config) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckBlockTimestampInIsolation") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + + // Give 10 seconds slack to take care of the test duration + timestamp := mstime.Now().UnixMilliseconds() + + int64(consensusConfig.TimestampDeviationTolerance)*consensusConfig.TargetTimePerBlock.Milliseconds() + 10_000 + + block.Header = blockheader.NewImmutableBlockHeader( + block.Header.Version(), + block.Header.Parents(), + block.Header.HashMerkleRoot(), + block.Header.AcceptedIDMerkleRoot(), + block.Header.UTXOCommitment(), + timestamp, + block.Header.Bits(), + block.Header.Nonce(), + block.Header.DAAScore(), + block.Header.BlueScore(), + block.Header.BlueWork(), + block.Header.PruningPoint(), + ) + + err = 
tc.ValidateAndInsertBlock(block, true) + if !errors.Is(err, ruleerrors.ErrTimeTooMuchInTheFuture) { + t.Fatalf("Unexpected error: %+v", err) + } + }) +} diff --git a/domain/consensus/processes/blockvalidator/blockvalidator.go b/domain/consensus/processes/blockvalidator/blockvalidator.go new file mode 100644 index 0000000..575be3a --- /dev/null +++ b/domain/consensus/processes/blockvalidator/blockvalidator.go @@ -0,0 +1,135 @@ +package blockvalidator + +import ( + "math/big" + "time" + + "github.com/spectre-project/spectred/util/txmass" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/difficulty" +) + +// blockValidator exposes a set of validation classes, after which +// it's possible to determine whether either a block is valid +type blockValidator struct { + powMax *big.Int + skipPoW bool + genesisHash *externalapi.DomainHash + enableNonNativeSubnetworks bool + powMaxBits uint32 + maxBlockMass uint64 + mergeSetSizeLimit uint64 + maxBlockParents externalapi.KType + timestampDeviationTolerance int + targetTimePerBlock time.Duration + maxBlockLevel int + + databaseContext model.DBReader + difficultyManager model.DifficultyManager + pastMedianTimeManager model.PastMedianTimeManager + transactionValidator model.TransactionValidator + ghostdagManagers []model.GHOSTDAGManager + dagTopologyManagers []model.DAGTopologyManager + dagTraversalManager model.DAGTraversalManager + coinbaseManager model.CoinbaseManager + mergeDepthManager model.MergeDepthManager + pruningStore model.PruningStore + reachabilityManager model.ReachabilityManager + finalityManager model.FinalityManager + blockParentBuilder model.BlockParentBuilder + pruningManager model.PruningManager + parentsManager model.ParentsManager + + blockStore model.BlockStore + ghostdagDataStores []model.GHOSTDAGDataStore + blockHeaderStore model.BlockHeaderStore + blockStatusStore 
model.BlockStatusStore + reachabilityStore model.ReachabilityDataStore + consensusStateStore model.ConsensusStateStore + daaBlocksStore model.DAABlocksStore + + txMassCalculator *txmass.Calculator +} + +// New instantiates a new BlockValidator +func New(powMax *big.Int, + skipPoW bool, + genesisHash *externalapi.DomainHash, + enableNonNativeSubnetworks bool, + maxBlockMass uint64, + mergeSetSizeLimit uint64, + maxBlockParents externalapi.KType, + timestampDeviationTolerance int, + targetTimePerBlock time.Duration, + maxBlockLevel int, + + databaseContext model.DBReader, + + difficultyManager model.DifficultyManager, + pastMedianTimeManager model.PastMedianTimeManager, + transactionValidator model.TransactionValidator, + ghostdagManagers []model.GHOSTDAGManager, + dagTopologyManagers []model.DAGTopologyManager, + dagTraversalManager model.DAGTraversalManager, + coinbaseManager model.CoinbaseManager, + mergeDepthManager model.MergeDepthManager, + reachabilityManager model.ReachabilityManager, + finalityManager model.FinalityManager, + blockParentBuilder model.BlockParentBuilder, + pruningManager model.PruningManager, + parentsManager model.ParentsManager, + + pruningStore model.PruningStore, + blockStore model.BlockStore, + ghostdagDataStores []model.GHOSTDAGDataStore, + blockHeaderStore model.BlockHeaderStore, + blockStatusStore model.BlockStatusStore, + reachabilityStore model.ReachabilityDataStore, + consensusStateStore model.ConsensusStateStore, + daaBlocksStore model.DAABlocksStore, + + txMassCalculator *txmass.Calculator, +) model.BlockValidator { + + return &blockValidator{ + powMax: powMax, + skipPoW: skipPoW, + genesisHash: genesisHash, + enableNonNativeSubnetworks: enableNonNativeSubnetworks, + powMaxBits: difficulty.BigToCompact(powMax), + maxBlockMass: maxBlockMass, + mergeSetSizeLimit: mergeSetSizeLimit, + maxBlockParents: maxBlockParents, + maxBlockLevel: maxBlockLevel, + + timestampDeviationTolerance: timestampDeviationTolerance, + targetTimePerBlock: 
targetTimePerBlock, + databaseContext: databaseContext, + difficultyManager: difficultyManager, + pastMedianTimeManager: pastMedianTimeManager, + transactionValidator: transactionValidator, + ghostdagManagers: ghostdagManagers, + dagTopologyManagers: dagTopologyManagers, + dagTraversalManager: dagTraversalManager, + coinbaseManager: coinbaseManager, + mergeDepthManager: mergeDepthManager, + reachabilityManager: reachabilityManager, + finalityManager: finalityManager, + blockParentBuilder: blockParentBuilder, + pruningManager: pruningManager, + parentsManager: parentsManager, + + pruningStore: pruningStore, + blockStore: blockStore, + ghostdagDataStores: ghostdagDataStores, + blockHeaderStore: blockHeaderStore, + blockStatusStore: blockStatusStore, + reachabilityStore: reachabilityStore, + consensusStateStore: consensusStateStore, + daaBlocksStore: daaBlocksStore, + + txMassCalculator: txMassCalculator, + } +} diff --git a/domain/consensus/processes/blockvalidator/header_pruning_point.go b/domain/consensus/processes/blockvalidator/header_pruning_point.go new file mode 100644 index 0000000..a66405e --- /dev/null +++ b/domain/consensus/processes/blockvalidator/header_pruning_point.go @@ -0,0 +1,30 @@ +package blockvalidator + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" +) + +func (v *blockValidator) validateHeaderPruningPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error { + if blockHash.Equal(v.genesisHash) { + return nil + } + + header, err := v.blockHeaderStore.BlockHeader(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + expectedPruningPoint, err := v.pruningManager.ExpectedHeaderPruningPoint(stagingArea, blockHash) + if err != nil { + return err + } + + if !header.PruningPoint().Equal(expectedPruningPoint) { + 
return errors.Wrapf(ruleerrors.ErrUnexpectedPruningPoint, "block pruning point of %s is not the expected hash of %s", header.PruningPoint(), expectedPruningPoint) + } + + return nil +} diff --git a/domain/consensus/processes/blockvalidator/log.go b/domain/consensus/processes/blockvalidator/log.go new file mode 100644 index 0000000..a4ff1af --- /dev/null +++ b/domain/consensus/processes/blockvalidator/log.go @@ -0,0 +1,7 @@ +package blockvalidator + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("BLVA") diff --git a/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go new file mode 100644 index 0000000..0ee12be --- /dev/null +++ b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go @@ -0,0 +1,258 @@ +package blockvalidator + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" + "github.com/spectre-project/spectred/domain/consensus/utils/virtual" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func (v *blockValidator) ValidatePruningPointViolationAndProofOfWorkAndDifficulty(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error { + + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointViolationAndProofOfWorkAndDifficulty") + defer onEnd() + + header, err := v.blockHeaderStore.BlockHeader(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + err = v.checkParentNotVirtualGenesis(header) + if err != nil { 
+ return err + } + + err = v.checkParentHeadersExist(stagingArea, header, isBlockWithTrustedData) + if err != nil { + return err + } + + err = v.setParents(stagingArea, blockHash, header, isBlockWithTrustedData) + if err != nil { + return err + } + + err = v.checkParentsIncest(stagingArea, blockHash) + if err != nil { + return err + } + + if !isBlockWithTrustedData { + err = v.checkPruningPointViolation(stagingArea, blockHash) + if err != nil { + return err + } + } + + if !blockHash.Equal(v.genesisHash) { + err = v.checkProofOfWork(header) + if err != nil { + return err + } + } + + err = v.validateDifficulty(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + + return nil +} + +func (v *blockValidator) setParents(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, + header externalapi.BlockHeader, + isBlockWithTrustedData bool) error { + + for level := 0; level <= header.BlockLevel(v.maxBlockLevel); level++ { + var parents []*externalapi.DomainHash + for _, parent := range v.parentsManager.ParentsAtLevel(header, level) { + _, err := v.ghostdagDataStores[level].Get(v.databaseContext, stagingArea, parent, false) + isNotFoundError := database.IsNotFoundError(err) + if !isNotFoundError && err != nil { + return err + } + + if isNotFoundError { + if level == 0 && !isBlockWithTrustedData { + return errors.Errorf("direct parent %s is missing: only block with prefilled information can have some missing parents", parent) + } + continue + } + + parents = append(parents, parent) + } + + if len(parents) == 0 { + parents = append(parents, model.VirtualGenesisBlockHash) + } + + err := v.dagTopologyManagers[level].SetParents(stagingArea, blockHash, parents) + if err != nil { + return err + } + } + + return nil +} + +func (v *blockValidator) validateDifficulty(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, + isBlockWithTrustedData bool) error { + + if !isBlockWithTrustedData { + // We need to calculate GHOSTDAG 
for the block in order to check its difficulty and blue work + err := v.ghostdagManagers[0].GHOSTDAG(stagingArea, blockHash) + if err != nil { + return err + } + } + + header, err := v.blockHeaderStore.BlockHeader(v.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + blockLevel := header.BlockLevel(v.maxBlockLevel) + for i := 1; i <= blockLevel; i++ { + err = v.ghostdagManagers[i].GHOSTDAG(stagingArea, blockHash) + if err != nil { + return err + } + } + + // Ensure the difficulty specified in the block header matches + // the calculated difficulty based on the previous block and + // difficulty retarget rules. + expectedBits, err := v.difficultyManager.StageDAADataAndReturnRequiredDifficulty(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + + if header.Bits() != expectedBits { + return errors.Wrapf(ruleerrors.ErrUnexpectedDifficulty, "block difficulty of %d is not the expected value of %d", header.Bits(), expectedBits) + } + + return nil +} + +// checkProofOfWork ensures the block header bits which indicate the target +// difficulty is in min/max range and that the block hash is less than the +// target difficulty as claimed. +// +// The flags modify the behavior of this function as follows: +// - BFNoPoWCheck: The check to ensure the block hash is less than the target +// difficulty is not performed. +func (v *blockValidator) checkProofOfWork(header externalapi.BlockHeader) error { + // The target difficulty must be larger than zero. + state := pow.NewState(header.ToMutable()) + target := &state.Target + if target.Sign() <= 0 { + return errors.Wrapf(ruleerrors.ErrNegativeTarget, "block target difficulty of %064x is too low", + target) + } + + // The target difficulty must be less than the maximum allowed. 
+	if target.Cmp(v.powMax) > 0 {
+		return errors.Wrapf(ruleerrors.ErrTargetTooHigh, "block target difficulty of %064x is "+
+			"higher than max of %064x", target, v.powMax)
+	}
+
+	// The block pow must be valid unless the flag to avoid proof of work checks is set.
+	if !v.skipPoW {
+		valid := state.CheckProofOfWork()
+		if !valid {
+			return errors.Wrap(ruleerrors.ErrInvalidPoW, "block has invalid proof of work")
+		}
+	}
+	return nil
+}
+
+func (v *blockValidator) checkParentNotVirtualGenesis(header externalapi.BlockHeader) error {
+	for _, parent := range header.DirectParents() {
+		if parent.Equal(model.VirtualGenesisBlockHash) {
+			return errors.Wrapf(ruleerrors.ErrVirtualGenesisParent, "block header cannot have the virtual genesis as parent")
+		}
+	}
+
+	return nil
+}
+
+func (v *blockValidator) checkParentHeadersExist(stagingArea *model.StagingArea,
+	header externalapi.BlockHeader,
+	isBlockWithTrustedData bool) error {
+
+	if isBlockWithTrustedData {
+		return nil
+	}
+
+	missingParentHashes := []*externalapi.DomainHash{}
+	for _, parent := range header.DirectParents() {
+		parentHeaderExists, err := v.blockHeaderStore.HasBlockHeader(v.databaseContext, stagingArea, parent)
+		if err != nil {
+			return err
+		}
+		if !parentHeaderExists {
+			parentStatus, err := v.blockStatusStore.Get(v.databaseContext, stagingArea, parent)
+			if err != nil {
+				if !database.IsNotFoundError(err) {
+					return err
+				}
+			} else if parentStatus == externalapi.StatusInvalid {
+				return errors.Wrapf(ruleerrors.ErrInvalidAncestorBlock, "parent %s is invalid", parent)
+			}
+
+			missingParentHashes = append(missingParentHashes, parent)
+			continue
+		}
+	}
+
+	if len(missingParentHashes) > 0 {
+		return ruleerrors.NewErrMissingParents(missingParentHashes)
+	}
+
+	return nil
+}
+func (v *blockValidator) checkPruningPointViolation(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error {
+	// Check whether the pruning point is in the past of at least one of the header's parents.
+
+	hasPruningPoint, err := v.pruningStore.HasPruningPoint(v.databaseContext, stagingArea)
+	if err != nil {
+		return err
+	}
+
+	// If there is no pruning point yet, this is the genesis, so no violation can exist.
+	if !hasPruningPoint {
+		return nil
+	}
+
+	pruningPoint, err := v.pruningStore.PruningPoint(v.databaseContext, stagingArea)
+	if err != nil {
+		return err
+	}
+
+	parents, err := v.dagTopologyManagers[0].Parents(stagingArea, blockHash)
+	if err != nil {
+		return err
+	}
+
+	if virtual.ContainsOnlyVirtualGenesis(parents) {
+		return nil
+	}
+
+	isAncestorOfAny, err := v.dagTopologyManagers[0].IsAncestorOfAny(stagingArea, pruningPoint, parents)
+	if err != nil {
+		return err
+	}
+
+	if !isAncestorOfAny {
+		return errors.Wrapf(ruleerrors.ErrPruningPointViolation,
+			"expected pruning point %s to be in block %s past.", pruningPoint, blockHash)
+	}
+	return nil
+}
diff --git a/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty_test.go b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty_test.go
new file mode 100644
index 0000000..892f8fa
--- /dev/null
+++ b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty_test.go
@@ -0,0 +1,367 @@
+package blockvalidator_test
+
+import (
+	"math"
+	"math/big"
+	"math/rand"
+	"testing"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus"
+	"github.com/spectre-project/spectred/domain/consensus/model"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/ruleerrors"
+	"github.com/spectre-project/spectred/domain/consensus/utils/blockheader"
+	"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing"
+	"github.com/spectre-project/spectred/domain/consensus/utils/constants"
+	"github.com/spectre-project/spectred/domain/consensus/utils/merkle"
+	
"github.com/spectre-project/spectred/domain/consensus/utils/mining" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/util/difficulty" +) + +// TestPOW tests the validation of the block's POW. +func TestPOW(t *testing.T) { + // We set the flag "skip pow" to be false (second argument in the function) for not skipping the check of POW and validate its correctness. + testutils.ForAllNets(t, false, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPOW") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Builds and checks block with invalid POW. + invalidBlockWrongPOW, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + invalidBlockWrongPOW = solveBlockWithWrongPOW(invalidBlockWrongPOW) + err = tc.ValidateAndInsertBlock(invalidBlockWrongPOW, true) + if !errors.Is(err, ruleerrors.ErrInvalidPoW) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrInvalidPoW, err) + } + + abovePowMaxBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + + abovePowMaxTarget := big.NewInt(0).Add(big.NewInt(1), consensusConfig.PowMax) + abovePowMaxBlock.Header = blockheader.NewImmutableBlockHeader( + abovePowMaxBlock.Header.Version(), + abovePowMaxBlock.Header.Parents(), + abovePowMaxBlock.Header.HashMerkleRoot(), + abovePowMaxBlock.Header.AcceptedIDMerkleRoot(), + abovePowMaxBlock.Header.UTXOCommitment(), + abovePowMaxBlock.Header.TimeInMilliseconds(), + difficulty.BigToCompact(abovePowMaxTarget), + abovePowMaxBlock.Header.Nonce(), + abovePowMaxBlock.Header.DAAScore(), + 
abovePowMaxBlock.Header.BlueScore(), + abovePowMaxBlock.Header.BlueWork(), + abovePowMaxBlock.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(abovePowMaxBlock, true) + if !errors.Is(err, ruleerrors.ErrTargetTooHigh) { + t.Fatalf("Unexpected error: %+v", err) + } + + negativeTargetBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + + negativeTargetBlock.Header = blockheader.NewImmutableBlockHeader( + negativeTargetBlock.Header.Version(), + negativeTargetBlock.Header.Parents(), + negativeTargetBlock.Header.HashMerkleRoot(), + negativeTargetBlock.Header.AcceptedIDMerkleRoot(), + negativeTargetBlock.Header.UTXOCommitment(), + negativeTargetBlock.Header.TimeInMilliseconds(), + 0x00800000, + negativeTargetBlock.Header.Nonce(), + negativeTargetBlock.Header.DAAScore(), + negativeTargetBlock.Header.BlueScore(), + negativeTargetBlock.Header.BlueWork(), + negativeTargetBlock.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(negativeTargetBlock, true) + if !errors.Is(err, ruleerrors.ErrNegativeTarget) { + t.Fatalf("Unexpected error: %+v", err) + } + + // test on a valid block. + validBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + random := rand.New(rand.NewSource(0)) + // Difficulty is too high on mainnet to actually mine. + if consensusConfig.Name != "spectre-mainnet" { + mining.SolveBlock(validBlock, random) + err = tc.ValidateAndInsertBlock(validBlock, true) + if err != nil { + t.Fatal(err) + } + } + }) +} + +// solveBlockWithWrongPOW increments the given block's nonce until it gets wrong POW (for test!). 
+func solveBlockWithWrongPOW(block *externalapi.DomainBlock) *externalapi.DomainBlock { + header := block.Header.ToMutable() + state := pow.NewState(header) + for i := uint64(0); i < math.MaxUint64; i++ { + state.Nonce = i + if !state.CheckProofOfWork() { + header.SetNonce(state.Nonce) + block.Header = header.ToImmutable() + return block + } + } + + panic("Failed to solve block! cannot find a invalid POW for the test") +} + +func TestCheckParentHeadersExist(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckParentHeadersExist") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + orphanBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + + parentHash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{}) // Non existing parent hash + orphanBlock.Header = blockheader.NewImmutableBlockHeader( + orphanBlock.Header.Version(), + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{ + parentHash, + }}, + orphanBlock.Header.HashMerkleRoot(), + orphanBlock.Header.AcceptedIDMerkleRoot(), + orphanBlock.Header.UTXOCommitment(), + orphanBlock.Header.TimeInMilliseconds(), + orphanBlock.Header.Bits(), + orphanBlock.Header.Nonce(), + orphanBlock.Header.DAAScore(), + orphanBlock.Header.BlueScore(), + orphanBlock.Header.BlueWork(), + orphanBlock.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(orphanBlock, true) + errMissingParents := &ruleerrors.ErrMissingParents{} + if !errors.As(err, errMissingParents) { + t.Fatalf("Unexpected error: %+v", err) + } + + if !externalapi.HashesEqual(errMissingParents.MissingParentHashes, []*externalapi.DomainHash{parentHash}) { + t.Fatalf("unexpected missing parents %s", 
errMissingParents.MissingParentHashes) + } + + invalidBlock, _, err := tc.BuildBlockWithParents( + []*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + + invalidBlock.Transactions[0].Version = constants.MaxTransactionVersion + 1 // This should invalidate the block + invalidBlock.Header = blockheader.NewImmutableBlockHeader( + invalidBlock.Header.Version(), + invalidBlock.Header.Parents(), + merkle.CalculateHashMerkleRoot(invalidBlock.Transactions), + orphanBlock.Header.AcceptedIDMerkleRoot(), + orphanBlock.Header.UTXOCommitment(), + orphanBlock.Header.TimeInMilliseconds(), + orphanBlock.Header.Bits(), + orphanBlock.Header.Nonce(), + orphanBlock.Header.DAAScore(), + orphanBlock.Header.BlueScore(), + orphanBlock.Header.BlueWork(), + orphanBlock.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(invalidBlock, true) + if !errors.Is(err, ruleerrors.ErrTransactionVersionIsUnknown) { + t.Fatalf("Unexpected error: %+v", err) + } + + invalidBlockHash := consensushashing.BlockHash(invalidBlock) + + invalidBlockChild, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + + invalidBlockChild.Header = blockheader.NewImmutableBlockHeader( + invalidBlockChild.Header.Version(), + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{invalidBlockHash}}, + invalidBlockChild.Header.HashMerkleRoot(), + invalidBlockChild.Header.AcceptedIDMerkleRoot(), + invalidBlockChild.Header.UTXOCommitment(), + invalidBlockChild.Header.TimeInMilliseconds(), + invalidBlockChild.Header.Bits(), + invalidBlockChild.Header.Nonce(), + invalidBlockChild.Header.DAAScore(), + invalidBlockChild.Header.BlueScore(), + invalidBlockChild.Header.BlueWork(), + invalidBlockChild.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(invalidBlockChild, true) + if !errors.Is(err, ruleerrors.ErrInvalidAncestorBlock) { + t.Fatalf("Unexpected error: %+v", err) + 
} + }) +} + +func TestCheckPruningPointViolation(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + // This is done to reduce the pruning depth to 6 blocks + consensusConfig.FinalityDuration = 2 * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckPruningPointViolation") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Add blocks until the pruning point changes + tipHash := consensusConfig.GenesisHash + for { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + pruningPoint, err := tc.PruningPoint() + if err != nil { + t.Fatalf("PruningPoint: %+v", err) + } + + if !pruningPoint.Equal(consensusConfig.GenesisHash) { + break + } + } + + tips, err := tc.Tips() + if err != nil { + t.Fatalf("Tips: %+v", err) + } + + blockWithPruningViolation, _, err := tc.BuildBlockWithParents(tips, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + + blockWithPruningViolation.Header = blockheader.NewImmutableBlockHeader( + blockWithPruningViolation.Header.Version(), + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{consensusConfig.GenesisHash}}, + blockWithPruningViolation.Header.HashMerkleRoot(), + blockWithPruningViolation.Header.AcceptedIDMerkleRoot(), + blockWithPruningViolation.Header.UTXOCommitment(), + blockWithPruningViolation.Header.TimeInMilliseconds(), + blockWithPruningViolation.Header.Bits(), + blockWithPruningViolation.Header.Nonce(), + blockWithPruningViolation.Header.DAAScore(), + blockWithPruningViolation.Header.BlueScore(), + blockWithPruningViolation.Header.BlueWork(), + blockWithPruningViolation.Header.PruningPoint(), + ) + + err = tc.ValidateAndInsertBlock(blockWithPruningViolation, true) + if !errors.Is(err, 
ruleerrors.ErrPruningPointViolation) { + t.Fatalf("Unexpected error: %+v", err) + } + }) +} + +// TestValidateDifficulty verifies that in case of a block with an unexpected difficulty, +// an appropriate error message (ErrUnexpectedDifficulty) will be returned on the +// function ValidatePruningPointViolationAndProofOfWorkAndDifficulty. The required difficulty is +// "calculated" by the mocDifficultyManager, where mocDifficultyManager is special implementation +// of the type DifficultyManager for this test (defined below). +func TestValidateDifficulty(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + mocDifficulty := &mocDifficultyManager{genesisDaaScore: consensusConfig.GenesisBlock.Header.DAAScore()} + factory.SetTestDifficultyManager(func(_ model.DBReader, _ model.GHOSTDAGManager, _ model.GHOSTDAGDataStore, + _ model.BlockHeaderStore, daaBlocksStore model.DAABlocksStore, _ model.DAGTopologyManager, + _ model.DAGTraversalManager, _ *big.Int, _ int, _ bool, _ time.Duration, + _ *externalapi.DomainHash, _ uint32) model.DifficultyManager { + + mocDifficulty.daaBlocksStore = daaBlocksStore + return mocDifficulty + }) + genesisDifficulty := consensusConfig.GenesisBlock.Header.Bits() + mocDifficulty.testDifficulty = genesisDifficulty + mocDifficulty.testGenesisBits = genesisDifficulty + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestValidateDifficulty") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{consensusConfig.GenesisHash}, &emptyCoinbase, nil) + if err != nil { + t.Fatalf("TestValidateDifficulty: Failed build block with parents: %v.", err) + } + blockHash := consensushashing.BlockHash(block) + 
stagingArea := model.NewStagingArea() + tc.BlockStore().Stage(stagingArea, blockHash, block) + tc.BlockHeaderStore().Stage(stagingArea, blockHash, block.Header) + wrongTestDifficulty := mocDifficulty.testDifficulty + uint32(5) + mocDifficulty.testDifficulty = wrongTestDifficulty + + err = tc.BlockValidator().ValidatePruningPointViolationAndProofOfWorkAndDifficulty(stagingArea, blockHash, false) + if err == nil || !errors.Is(err, ruleerrors.ErrUnexpectedDifficulty) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrUnexpectedDifficulty, err) + } + }) +} + +type mocDifficultyManager struct { + testDifficulty uint32 + testGenesisBits uint32 + daaBlocksStore model.DAABlocksStore + genesisDaaScore uint64 +} + +// RequiredDifficulty returns the difficulty required for the test +func (dm *mocDifficultyManager) RequiredDifficulty(*model.StagingArea, *externalapi.DomainHash) (uint32, error) { + return dm.testDifficulty, nil +} + +// StageDAADataAndReturnRequiredDifficulty returns the difficulty required for the test +func (dm *mocDifficultyManager) StageDAADataAndReturnRequiredDifficulty(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (uint32, error) { + // Populate daaBlocksStore with fake values + dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, dm.genesisDaaScore) + dm.daaBlocksStore.StageBlockDAAAddedBlocks(stagingArea, blockHash, nil) + + return dm.testDifficulty, nil +} + +func (dm *mocDifficultyManager) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) { + return 0, nil +} diff --git a/domain/consensus/processes/coinbasemanager/coinbasemanager.go b/domain/consensus/processes/coinbasemanager/coinbasemanager.go new file mode 100644 index 0000000..a08241f --- /dev/null +++ b/domain/consensus/processes/coinbasemanager/coinbasemanager.go @@ -0,0 +1,327 @@ +package coinbasemanager + +import ( + "math" + + "github.com/pkg/errors" + 
"github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +type coinbaseManager struct { + subsidyGenesisReward uint64 + preDeflationaryPhaseBaseSubsidy uint64 + coinbasePayloadScriptPublicKeyMaxLength uint8 + genesisHash *externalapi.DomainHash + deflationaryPhaseDaaScore uint64 + deflationaryPhaseBaseSubsidy uint64 + + databaseContext model.DBReader + dagTraversalManager model.DAGTraversalManager + ghostdagDataStore model.GHOSTDAGDataStore + acceptanceDataStore model.AcceptanceDataStore + daaBlocksStore model.DAABlocksStore + blockStore model.BlockStore + pruningStore model.PruningStore + blockHeaderStore model.BlockHeaderStore +} + +func (c *coinbaseManager) ExpectedCoinbaseTransaction(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + coinbaseData *externalapi.DomainCoinbaseData) (expectedTransaction *externalapi.DomainTransaction, hasRedReward bool, err error) { + + ghostdagData, err := c.ghostdagDataStore.Get(c.databaseContext, stagingArea, blockHash, true) + if !database.IsNotFoundError(err) && err != nil { + return nil, false, err + } + + // If there's ghostdag data with trusted data we prefer it because we need the original merge set non-pruned merge set. 
+ if database.IsNotFoundError(err) { + ghostdagData, err = c.ghostdagDataStore.Get(c.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, false, err + } + } + + acceptanceData, err := c.acceptanceDataStore.Get(c.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, false, err + } + + daaAddedBlocksSet, err := c.daaAddedBlocksSet(stagingArea, blockHash) + if err != nil { + return nil, false, err + } + + txOuts := make([]*externalapi.DomainTransactionOutput, 0, len(ghostdagData.MergeSetBlues())) + acceptanceDataMap := acceptanceDataFromArrayToMap(acceptanceData) + for _, blue := range ghostdagData.MergeSetBlues() { + txOut, hasReward, err := c.coinbaseOutputForBlueBlock(stagingArea, blue, acceptanceDataMap[*blue], daaAddedBlocksSet) + if err != nil { + return nil, false, err + } + + if hasReward { + txOuts = append(txOuts, txOut) + } + } + + txOut, hasRedReward, err := c.coinbaseOutputForRewardFromRedBlocks( + stagingArea, ghostdagData, acceptanceData, daaAddedBlocksSet, coinbaseData) + if err != nil { + return nil, false, err + } + + if hasRedReward { + txOuts = append(txOuts, txOut) + } + + subsidy, err := c.CalcBlockSubsidy(stagingArea, blockHash) + if err != nil { + return nil, false, err + } + + payload, err := c.serializeCoinbasePayload(ghostdagData.BlueScore(), coinbaseData, subsidy) + if err != nil { + return nil, false, err + } + + return &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{}, + Outputs: txOuts, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDCoinbase, + Gas: 0, + Payload: payload, + }, hasRedReward, nil +} + +func (c *coinbaseManager) daaAddedBlocksSet(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ( + hashset.HashSet, error) { + + daaAddedBlocks, err := c.daaBlocksStore.DAAAddedBlocks(c.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + + return 
hashset.NewFromSlice(daaAddedBlocks...), nil +} + +// coinbaseOutputForBlueBlock calculates the output that should go into the coinbase transaction of blueBlock +// If blueBlock gets no fee - returns nil for txOut +func (c *coinbaseManager) coinbaseOutputForBlueBlock(stagingArea *model.StagingArea, + blueBlock *externalapi.DomainHash, blockAcceptanceData *externalapi.BlockAcceptanceData, + mergingBlockDAAAddedBlocksSet hashset.HashSet) (*externalapi.DomainTransactionOutput, bool, error) { + + blockReward, err := c.calcMergedBlockReward(stagingArea, blueBlock, blockAcceptanceData, mergingBlockDAAAddedBlocksSet) + if err != nil { + return nil, false, err + } + + if blockReward == 0 { + return nil, false, nil + } + + // the ScriptPublicKey for the coinbase is parsed from the coinbase payload + _, coinbaseData, _, err := c.ExtractCoinbaseDataBlueScoreAndSubsidy(blockAcceptanceData.TransactionAcceptanceData[0].Transaction) + if err != nil { + return nil, false, err + } + + txOut := &externalapi.DomainTransactionOutput{ + Value: blockReward, + ScriptPublicKey: coinbaseData.ScriptPublicKey, + } + + return txOut, true, nil +} + +func (c *coinbaseManager) coinbaseOutputForRewardFromRedBlocks(stagingArea *model.StagingArea, + ghostdagData *externalapi.BlockGHOSTDAGData, acceptanceData externalapi.AcceptanceData, daaAddedBlocksSet hashset.HashSet, + coinbaseData *externalapi.DomainCoinbaseData) (*externalapi.DomainTransactionOutput, bool, error) { + + acceptanceDataMap := acceptanceDataFromArrayToMap(acceptanceData) + totalReward := uint64(0) + for _, red := range ghostdagData.MergeSetReds() { + reward, err := c.calcMergedBlockReward(stagingArea, red, acceptanceDataMap[*red], daaAddedBlocksSet) + if err != nil { + return nil, false, err + } + + totalReward += reward + } + + if totalReward == 0 { + return nil, false, nil + } + + return &externalapi.DomainTransactionOutput{ + Value: totalReward, + ScriptPublicKey: coinbaseData.ScriptPublicKey, + }, true, nil +} + +func 
acceptanceDataFromArrayToMap(acceptanceData externalapi.AcceptanceData) map[externalapi.DomainHash]*externalapi.BlockAcceptanceData { + acceptanceDataMap := make(map[externalapi.DomainHash]*externalapi.BlockAcceptanceData, len(acceptanceData)) + for _, blockAcceptanceData := range acceptanceData { + acceptanceDataMap[*blockAcceptanceData.BlockHash] = blockAcceptanceData + } + return acceptanceDataMap +} + +// CalcBlockSubsidy returns the subsidy amount a block at the provided blue score +// should have. This is mainly used for determining how much the coinbase for +// newly generated blocks awards as well as validating the coinbase for blocks +// has the expected value. +func (c *coinbaseManager) CalcBlockSubsidy(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (uint64, error) { + if blockHash.Equal(c.genesisHash) { + return c.subsidyGenesisReward, nil + } + blockDaaScore, err := c.daaBlocksStore.DAAScore(c.databaseContext, stagingArea, blockHash) + if err != nil { + return 0, err + } + if blockDaaScore < c.deflationaryPhaseDaaScore { + return c.preDeflationaryPhaseBaseSubsidy, nil + } + + blockSubsidy := c.calcDeflationaryPeriodBlockSubsidy(blockDaaScore) + return blockSubsidy, nil +} + +func (c *coinbaseManager) calcDeflationaryPeriodBlockSubsidy(blockDaaScore uint64) uint64 { + // We define a year as 365.25 days and a month as 365.25 / 12 = 30.4375 + // secondsPerMonth = 30.4375 * 24 * 60 * 60 + const secondsPerMonth = 2629800 + // Note that this calculation implicitly assumes that block per second = 1 (by assuming daa score diff is in second units). + monthsSinceDeflationaryPhaseStarted := (blockDaaScore - c.deflationaryPhaseDaaScore) / secondsPerMonth + // Return the pre-calculated value from subsidy-per-month table + return c.getDeflationaryPeriodBlockSubsidyFromTable(monthsSinceDeflationaryPhaseStarted) +} + +/* +This table was pre-calculated by calling `calcDeflationaryPeriodBlockSubsidyFloatCalc` for all months until reaching 0 subsidy. 
+To regenerate this table, run `TestBuildSubsidyTable` in coinbasemanager_test.go (note the `deflationaryPhaseBaseSubsidy` therein) +*/ +var subsidyByDeflationaryMonthTable = []uint64{ + 1200000000, 1175000000, 1150000000, 1125000000, 1100000000, 1075000000, 1050000000, 1025000000, 1000000000, 975000000, 950000000, 925000000, 900000000, 875000000, 850000000, 825000000, 800000000, 775000000, 750000000, 725000000, 700000000, 675000000, 650000000, 625000000, 600000000, + 587500000, 575000000, 562500000, 550000000, 537500000, 525000000, 512500000, 500000000, 487500000, 475000000, 462500000, 450000000, 437500000, 425000000, 412500000, 400000000, 387500000, 375000000, 362500000, 350000000, 337500000, 325000000, 312500000, 300000000, 293750000, + 287500000, 281250000, 275000000, 268750000, 262500000, 256250000, 250000000, 243750000, 237500000, 231250000, 225000000, 218750000, 212500000, 206250000, 200000000, 193750000, 187500000, 181250000, 175000000, 168750000, 162500000, 156250000, 150000000, 146875000, 143750000, + 140625000, 137500000, 134375000, 131250000, 128125000, 125000000, 121875000, 118750000, 115625000, 112500000, 109375000, 106250000, 103125000, 100000000, 96875000, 93750000, 90625000, 87500000, 84375000, 81250000, 78125000, 75000000, 73437500, 71875000, 70312500, + 68750000, 67187500, 65625000, 64062500, 62500000, 60937500, 59375000, 57812500, 56250000, 54687500, 53125000, 51562500, 50000000, 48437500, 46875000, 45312500, 43750000, 42187500, 40625000, 39062500, 37500000, 36718750, 35937500, 35156250, 34375000, + 33593750, 32812500, 32031250, 31250000, 30468750, 29687500, 28906250, 28125000, 27343750, 26562500, 25781250, 25000000, 24218750, 23437500, 22656250, 21875000, 21093750, 20312500, 19531250, 18750000, 18359375, 17968750, 17578125, 17187500, 16796875, + 16406250, 16015625, 15625000, 15234375, 14843750, 14453125, 14062500, 13671875, 13281250, 12890625, 12500000, 12109375, 11718750, 11328125, 10937500, 10546875, 10156250, 9765625, 9375000, 9179687, 
8984375, 8789062, 8593750, 8398437, 8203125, + 8007812, 7812500, 7617187, 7421875, 7226562, 7031250, 6835937, 6640625, 6445312, 6250000, 6054687, 5859375, 5664062, 5468750, 5273437, 5078125, 4882812, 4687500, 4589843, 4492187, 4394531, 4296875, 4199218, 4101562, 4003906, + 3906250, 3808593, 3710937, 3613281, 3515625, 3417968, 3320312, 3222656, 3125000, 3027343, 2929687, 2832031, 2734375, 2636718, 2539062, 2441406, 2343750, 2294921, 2246093, 2197265, 2148437, 2099609, 2050781, 2001953, 1953125, + 1904296, 1855468, 1806640, 1757812, 1708984, 1660156, 1611328, 1562500, 1513671, 1464843, 1416015, 1367187, 1318359, 1269531, 1220703, 1171875, 1147460, 1123046, 1098632, 1074218, 1049804, 1025390, 1000976, 976562, 952148, + 927734, 903320, 878906, 854492, 830078, 805664, 781250, 756835, 732421, 708007, 683593, 659179, 634765, 610351, 585937, 573730, 561523, 549316, 537109, 524902, 512695, 500488, 488281, 476074, 463867, + 451660, 439453, 427246, 415039, 402832, 390625, 378417, 366210, 354003, 341796, 329589, 317382, 305175, 292968, 286865, 280761, 274658, 268554, 262451, 256347, 250244, 244140, 238037, 231933, 225830, + 219726, 213623, 207519, 201416, 195312, 189208, 183105, 177001, 170898, 164794, 158691, 152587, 146484, 143432, 140380, 137329, 134277, 131225, 128173, 125122, 122070, 119018, 115966, 112915, 109863, + 106811, 103759, 100708, 97656, 94604, 91552, 88500, 85449, 82397, 79345, 76293, 73242, 71716, 70190, 68664, 67138, 65612, 64086, 62561, 61035, 59509, 57983, 56457, 54931, 53405, + 51879, 50354, 48828, 47302, 45776, 44250, 42724, 41198, 39672, 38146, 36621, 35858, 35095, 34332, 33569, 32806, 32043, 31280, 30517, 29754, 28991, 28228, 27465, 26702, 25939, + 25177, 24414, 23651, 22888, 22125, 21362, 20599, 19836, 19073, 18310, 17929, 17547, 17166, 16784, 16403, 16021, 15640, 15258, 14877, 14495, 14114, 13732, 13351, 12969, 12588, + 12207, 11825, 11444, 11062, 10681, 10299, 9918, 9536, 9155, 8964, 8773, 8583, 8392, 8201, 8010, 7820, 7629, 7438, 7247, 7057, 6866, 
6675, 6484, 6294, 6103, + 5912, 5722, 5531, 5340, 5149, 4959, 4768, 4577, 4482, 4386, 4291, 4196, 4100, 4005, 3910, 3814, 3719, 3623, 3528, 3433, 3337, 3242, 3147, 3051, 2956, + 2861, 2765, 2670, 2574, 2479, 2384, 2288, 2241, 2193, 2145, 2098, 2050, 2002, 1955, 1907, 1859, 1811, 1764, 1716, 1668, 1621, 1573, 1525, 1478, 1430, + 1382, 1335, 1287, 1239, 1192, 1144, 1120, 1096, 1072, 1049, 1025, 1001, 977, 953, 929, 905, 882, 858, 834, 810, 786, 762, 739, 715, 691, + 667, 643, 619, 596, 572, 560, 548, 536, 524, 512, 500, 488, 476, 464, 452, 441, 429, 417, 405, 393, 381, 369, 357, 345, 333, + 321, 309, 298, 286, 280, 274, 268, 262, 256, 250, 244, 238, 232, 226, 220, 214, 208, 202, 196, 190, 184, 178, 172, 166, 160, + 154, 149, 143, 140, 137, 134, 131, 128, 125, 122, 119, 116, 113, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, 77, + 74, 71, 70, 68, 67, 65, 64, 62, 61, 59, 58, 56, 55, 53, 52, 50, 49, 47, 46, 44, 43, 41, 40, 38, 37, + 35, 35, 34, 33, 32, 32, 31, 30, 29, 29, 28, 27, 26, 26, 25, 24, 23, 23, 22, 21, 20, 20, 19, 18, 17, + 17, 17, 16, 16, 16, 15, 15, 14, 14, 14, 13, 13, 13, 12, 12, 11, 11, 11, 10, 10, 10, 9, 9, 8, 8, + 8, 8, 8, 8, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, + 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, +} + +func (c *coinbaseManager) getDeflationaryPeriodBlockSubsidyFromTable(month uint64) uint64 { + if month >= uint64(len(subsidyByDeflationaryMonthTable)) { + month = uint64(len(subsidyByDeflationaryMonthTable) - 1) + } + return subsidyByDeflationaryMonthTable[month] +} + +func (c *coinbaseManager) calcDeflationaryPeriodBlockSubsidyFloatCalc(month uint64) uint64 { + baseSubsidy := c.deflationaryPhaseBaseSubsidy + baseSubsidyCurrentPeriod := float64(baseSubsidy) / math.Pow(2, math.Trunc(float64(month)/24)) + subsidy := baseSubsidyCurrentPeriod - baseSubsidyCurrentPeriod/2/24*float64(month%24) + return 
uint64(subsidy) +} + +func (c *coinbaseManager) calcMergedBlockReward(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + blockAcceptanceData *externalapi.BlockAcceptanceData, mergingBlockDAAAddedBlocksSet hashset.HashSet) (uint64, error) { + + if !blockHash.Equal(blockAcceptanceData.BlockHash) { + return 0, errors.Errorf("blockAcceptanceData.BlockHash is expected to be %s but got %s", + blockHash, blockAcceptanceData.BlockHash) + } + + if !mergingBlockDAAAddedBlocksSet.Contains(blockHash) { + return 0, nil + } + + totalFees := uint64(0) + for _, txAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData { + if txAcceptanceData.IsAccepted { + totalFees += txAcceptanceData.Fee + } + } + + block, err := c.blockStore.Block(c.databaseContext, stagingArea, blockHash) + if err != nil { + return 0, err + } + + _, _, subsidy, err := c.ExtractCoinbaseDataBlueScoreAndSubsidy(block.Transactions[transactionhelper.CoinbaseTransactionIndex]) + if err != nil { + return 0, err + } + + return subsidy + totalFees, nil +} + +// New instantiates a new CoinbaseManager +func New( + databaseContext model.DBReader, + + subsidyGenesisReward uint64, + preDeflationaryPhaseBaseSubsidy uint64, + coinbasePayloadScriptPublicKeyMaxLength uint8, + genesisHash *externalapi.DomainHash, + deflationaryPhaseDaaScore uint64, + deflationaryPhaseBaseSubsidy uint64, + + dagTraversalManager model.DAGTraversalManager, + ghostdagDataStore model.GHOSTDAGDataStore, + acceptanceDataStore model.AcceptanceDataStore, + daaBlocksStore model.DAABlocksStore, + blockStore model.BlockStore, + pruningStore model.PruningStore, + blockHeaderStore model.BlockHeaderStore) model.CoinbaseManager { + + return &coinbaseManager{ + databaseContext: databaseContext, + + subsidyGenesisReward: subsidyGenesisReward, + preDeflationaryPhaseBaseSubsidy: preDeflationaryPhaseBaseSubsidy, + coinbasePayloadScriptPublicKeyMaxLength: coinbasePayloadScriptPublicKeyMaxLength, + genesisHash: genesisHash, + 
deflationaryPhaseDaaScore: deflationaryPhaseDaaScore, + deflationaryPhaseBaseSubsidy: deflationaryPhaseBaseSubsidy, + + dagTraversalManager: dagTraversalManager, + ghostdagDataStore: ghostdagDataStore, + acceptanceDataStore: acceptanceDataStore, + daaBlocksStore: daaBlocksStore, + blockStore: blockStore, + pruningStore: pruningStore, + blockHeaderStore: blockHeaderStore, + } +} diff --git a/domain/consensus/processes/coinbasemanager/coinbasemanager_external_test.go b/domain/consensus/processes/coinbasemanager/coinbasemanager_external_test.go new file mode 100644 index 0000000..fd14e19 --- /dev/null +++ b/domain/consensus/processes/coinbasemanager/coinbasemanager_external_test.go @@ -0,0 +1,58 @@ +package coinbasemanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestExtractCoinbaseDataBlueScoreAndSubsidy(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestBlockStatus") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + tests := []struct { + name string + scriptPublicKeyVersion uint16 + }{ + { + name: "below 255", + scriptPublicKeyVersion: 100, + }, + { + name: "above 255", + scriptPublicKeyVersion: 300, + }, + } + + for _, test := range tests { + coinbaseTx, _, err := tc.CoinbaseManager().ExpectedCoinbaseTransaction(model.NewStagingArea(), model.VirtualBlockHash, &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: test.scriptPublicKeyVersion, + }, + ExtraData: nil, + }) + if err != nil { + t.Fatal(err) + } + + _, cbData, _, err := 
tc.CoinbaseManager().ExtractCoinbaseDataBlueScoreAndSubsidy(coinbaseTx) + if err != nil { + t.Fatal(err) + } + + if cbData.ScriptPublicKey.Version != test.scriptPublicKeyVersion { + t.Fatalf("test %s post HF expected %d but got %d", test.name, test.scriptPublicKeyVersion, cbData.ScriptPublicKey.Version) + } + } + + }) +} diff --git a/domain/consensus/processes/coinbasemanager/coinbasemanager_test.go b/domain/consensus/processes/coinbasemanager/coinbasemanager_test.go new file mode 100644 index 0000000..36f89f9 --- /dev/null +++ b/domain/consensus/processes/coinbasemanager/coinbasemanager_test.go @@ -0,0 +1,133 @@ +package coinbasemanager + +import ( + "math" + "strconv" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +func TestCalcDeflationaryPeriodBlockSubsidy(t *testing.T) { + const secondsPerMonth = 2629800 + const secondsPerHalving = secondsPerMonth * 24 + const deflationaryPhaseDaaScore = secondsPerMonth * 6 + const deflationaryPhaseBaseSubsidy = 12 * constants.SompiPerSpectre + coinbaseManagerInterface := New( + nil, + 0, + 0, + 0, + &externalapi.DomainHash{}, + deflationaryPhaseDaaScore, + deflationaryPhaseBaseSubsidy, + nil, + nil, + nil, + nil, + nil, + nil, + nil) + coinbaseManagerInstance := coinbaseManagerInterface.(*coinbaseManager) + + tests := []struct { + name string + blockDaaScore uint64 + expectedBlockSubsidy uint64 + }{ + { + name: "start of deflationary phase", + blockDaaScore: deflationaryPhaseDaaScore, + expectedBlockSubsidy: deflationaryPhaseBaseSubsidy, + }, + { + name: "after 2 years", + blockDaaScore: deflationaryPhaseDaaScore + secondsPerHalving, + expectedBlockSubsidy: uint64(math.Trunc(deflationaryPhaseBaseSubsidy / 2)), + }, + { + name: "after 4 years", + blockDaaScore: deflationaryPhaseDaaScore + secondsPerHalving*2, + expectedBlockSubsidy: 
uint64(math.Trunc(deflationaryPhaseBaseSubsidy / 4)), + }, + { + name: "after 8 years", + blockDaaScore: deflationaryPhaseDaaScore + secondsPerHalving*4, + expectedBlockSubsidy: uint64(math.Trunc(deflationaryPhaseBaseSubsidy / 16)), + }, + { + name: "after 16 years", + blockDaaScore: deflationaryPhaseDaaScore + secondsPerHalving*8, + expectedBlockSubsidy: uint64(math.Trunc(deflationaryPhaseBaseSubsidy / 256)), + }, + { + name: "after 32 years", + blockDaaScore: deflationaryPhaseDaaScore + secondsPerHalving*16, + expectedBlockSubsidy: uint64(math.Trunc(deflationaryPhaseBaseSubsidy / 65536)), + }, + { + name: "just before subsidy depleted", + blockDaaScore: deflationaryPhaseDaaScore + (secondsPerHalving / 24 * 725), + expectedBlockSubsidy: 1, + }, + { + name: "after subsidy depleted", + blockDaaScore: deflationaryPhaseDaaScore + (secondsPerHalving / 24 * 726), + expectedBlockSubsidy: 0, + }, + } + + for _, test := range tests { + blockSubsidy := coinbaseManagerInstance.calcDeflationaryPeriodBlockSubsidy(test.blockDaaScore) + if blockSubsidy != test.expectedBlockSubsidy { + t.Errorf("TestCalcDeflationaryPeriodBlockSubsidy: test '%s' failed. Want: %d, got: %d", + test.name, test.expectedBlockSubsidy, blockSubsidy) + } + } +} + +func TestBuildSubsidyTable(t *testing.T) { + deflationaryPhaseBaseSubsidy := dagconfig.MainnetParams.DeflationaryPhaseBaseSubsidy + if deflationaryPhaseBaseSubsidy != 12*constants.SompiPerSpectre { + t.Errorf("TestBuildSubsidyTable: table generation function was not updated to reflect "+ + "the new base subsidy %d. 
Please fix the constant above and replace subsidyByDeflationaryMonthTable "+ + "in coinbasemanager.go with the printed table", deflationaryPhaseBaseSubsidy) + } + coinbaseManagerInterface := New( + nil, + 0, + 0, + 0, + &externalapi.DomainHash{}, + 0, + deflationaryPhaseBaseSubsidy, + nil, + nil, + nil, + nil, + nil, + nil, + nil) + coinbaseManagerInstance := coinbaseManagerInterface.(*coinbaseManager) + + var subsidyTable []uint64 + for M := uint64(0); ; M++ { + subsidy := coinbaseManagerInstance.calcDeflationaryPeriodBlockSubsidyFloatCalc(M) + subsidyTable = append(subsidyTable, subsidy) + if subsidy == 0 { + break + } + } + + tableStr := "\n{\t" + for i := 0; i < len(subsidyTable); i++ { + tableStr += strconv.FormatUint(subsidyTable[i], 10) + ", " + if (i+1)%25 == 0 { + tableStr += "\n\t" + } + } + tableStr += "\n}" + t.Logf(tableStr) +} diff --git a/domain/consensus/processes/coinbasemanager/payload.go b/domain/consensus/processes/coinbasemanager/payload.go new file mode 100644 index 0000000..f75f9e9 --- /dev/null +++ b/domain/consensus/processes/coinbasemanager/payload.go @@ -0,0 +1,95 @@ +package coinbasemanager + +import ( + "encoding/binary" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" +) + +const uint64Len = 8 +const uint16Len = 2 +const lengthOfSubsidy = uint64Len +const lengthOfScriptPubKeyLength = 1 +const lengthOfVersionScriptPubKey = uint16Len + +// serializeCoinbasePayload builds the coinbase payload based on the provided scriptPubKey and extra data. 
+func (c *coinbaseManager) serializeCoinbasePayload(blueScore uint64, + coinbaseData *externalapi.DomainCoinbaseData, subsidy uint64) ([]byte, error) { + + scriptLengthOfScriptPubKey := len(coinbaseData.ScriptPublicKey.Script) + if scriptLengthOfScriptPubKey > int(c.coinbasePayloadScriptPublicKeyMaxLength) { + return nil, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, "coinbase's payload script public key is "+ + "longer than the max allowed length of %d", c.coinbasePayloadScriptPublicKeyMaxLength) + } + + payload := make([]byte, uint64Len+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength+scriptLengthOfScriptPubKey+len(coinbaseData.ExtraData)+lengthOfSubsidy) + binary.LittleEndian.PutUint64(payload[:uint64Len], blueScore) + binary.LittleEndian.PutUint64(payload[uint64Len:], subsidy) + + binary.LittleEndian.PutUint16(payload[uint64Len+lengthOfSubsidy:], coinbaseData.ScriptPublicKey.Version) + payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey] = uint8(len(coinbaseData.ScriptPublicKey.Script)) + copy(payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength:], coinbaseData.ScriptPublicKey.Script) + copy(payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength+scriptLengthOfScriptPubKey:], coinbaseData.ExtraData) + + return payload, nil +} + +// ModifyCoinbasePayload modifies the coinbase payload based on the provided scriptPubKey and extra data. 
+func ModifyCoinbasePayload(payload []byte, coinbaseData *externalapi.DomainCoinbaseData, coinbasePayloadScriptPublicKeyMaxLength uint8) ([]byte, error) { + + scriptLengthOfScriptPubKey := len(coinbaseData.ScriptPublicKey.Script) + if scriptLengthOfScriptPubKey > int(coinbasePayloadScriptPublicKeyMaxLength) { + return nil, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, "coinbase's payload script public key is "+ + "longer than the max allowed length of %d", coinbasePayloadScriptPublicKeyMaxLength) + } + + newPayloadLen := uint64Len + lengthOfVersionScriptPubKey + lengthOfScriptPubKeyLength + scriptLengthOfScriptPubKey + len(coinbaseData.ExtraData) + lengthOfSubsidy + if len(payload) != newPayloadLen { + newPayload := make([]byte, newPayloadLen) + copy(newPayload, payload[:uint64Len+lengthOfSubsidy]) + payload = newPayload + } + + binary.LittleEndian.PutUint16(payload[uint64Len+lengthOfSubsidy:uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey], coinbaseData.ScriptPublicKey.Version) + payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey] = uint8(len(coinbaseData.ScriptPublicKey.Script)) + copy(payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength:], coinbaseData.ScriptPublicKey.Script) + copy(payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength+scriptLengthOfScriptPubKey:], coinbaseData.ExtraData) + + return payload, nil +} + +// ExtractCoinbaseDataBlueScoreAndSubsidy deserializes the coinbase payload to its component (scriptPubKey, extra data, and subsidy). 
+func (c *coinbaseManager) ExtractCoinbaseDataBlueScoreAndSubsidy(coinbaseTx *externalapi.DomainTransaction) ( + blueScore uint64, coinbaseData *externalapi.DomainCoinbaseData, subsidy uint64, err error) { + + minLength := uint64Len + lengthOfSubsidy + lengthOfVersionScriptPubKey + lengthOfScriptPubKeyLength + if len(coinbaseTx.Payload) < minLength { + return 0, nil, 0, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, + "coinbase payload is less than the minimum length of %d", minLength) + } + + blueScore = binary.LittleEndian.Uint64(coinbaseTx.Payload[:uint64Len]) + subsidy = binary.LittleEndian.Uint64(coinbaseTx.Payload[uint64Len:]) + + scriptPubKeyVersion := binary.LittleEndian.Uint16(coinbaseTx.Payload[uint64Len+lengthOfSubsidy : uint64Len+lengthOfSubsidy+uint16Len]) + + scriptPubKeyScriptLength := coinbaseTx.Payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey] + + if scriptPubKeyScriptLength > c.coinbasePayloadScriptPublicKeyMaxLength { + return 0, nil, 0, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, "coinbase's payload script public key is "+ + "longer than the max allowed length of %d", c.coinbasePayloadScriptPublicKeyMaxLength) + } + + if len(coinbaseTx.Payload) < minLength+int(scriptPubKeyScriptLength) { + return 0, nil, 0, errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, + "coinbase payload doesn't have enough bytes to contain a script public key of %d bytes", scriptPubKeyScriptLength) + } + scriptPubKeyScript := coinbaseTx.Payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength : uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength+scriptPubKeyScriptLength] + + return blueScore, &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: scriptPubKeyScript, Version: scriptPubKeyVersion}, + ExtraData: coinbaseTx.Payload[uint64Len+lengthOfSubsidy+lengthOfVersionScriptPubKey+lengthOfScriptPubKeyLength+scriptPubKeyScriptLength:], + }, subsidy, nil +} 
diff --git a/domain/consensus/processes/consensusstatemanager/add_block_to_virtual.go b/domain/consensus/processes/consensusstatemanager/add_block_to_virtual.go new file mode 100644 index 0000000..046bc9d --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/add_block_to_virtual.go @@ -0,0 +1,166 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// AddBlock submits the given block to be added to the +// current virtual. This process may result in a new virtual block +// getting created +func (csm *consensusStateManager) AddBlock(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, updateVirtual bool) ( + *externalapi.SelectedChainPath, externalapi.UTXODiff, *model.UTXODiffReversalData, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "csm.AddBlock") + defer onEnd() + + var reversalData *model.UTXODiffReversalData + if updateVirtual { + log.Debugf("Resolving whether the block %s is the next virtual selected parent", blockHash) + isCandidateToBeNextVirtualSelectedParent, err := csm.isCandidateToBeNextVirtualSelectedParent(stagingArea, blockHash) + if err != nil { + return nil, nil, nil, err + } + + if isCandidateToBeNextVirtualSelectedParent { + // It's important to check for finality violation before resolving the block status, because the status of + // blocks with a selected chain that doesn't contain the pruning point cannot be resolved because they will + // eventually try to fetch UTXO diffs from the past of the pruning point. + log.Debugf("Block %s is candidate to be the next virtual selected parent. 
Resolving whether it violates "+ + "finality", blockHash) + isViolatingFinality, shouldNotify, err := csm.isViolatingFinality(stagingArea, blockHash) + if err != nil { + return nil, nil, nil, err + } + + if shouldNotify { + //TODO: Send finality conflict notification + log.Warnf("Finality Violation Detected! Block %s violates finality!", blockHash) + } + + if !isViolatingFinality { + log.Debugf("Block %s doesn't violate finality. Resolving its block status", blockHash) + var blockStatus externalapi.BlockStatus + blockStatus, reversalData, err = csm.resolveBlockStatus(stagingArea, blockHash, true) + if err != nil { + return nil, nil, nil, err + } + + log.Debugf("Block %s resolved to status `%s`", blockHash, blockStatus) + } + } else { + log.Debugf("Block %s is not the next virtual selected parent, "+ + "therefore its status remains `%s`", blockHash, externalapi.StatusUTXOPendingVerification) + } + } + + log.Debugf("Adding block %s to the DAG tips", blockHash) + newTips, err := csm.addTip(stagingArea, blockHash) + if err != nil { + return nil, nil, nil, err + } + log.Debugf("After adding %s, the amount of new tips are %d", blockHash, len(newTips)) + + if !updateVirtual { + return &externalapi.SelectedChainPath{}, utxo.NewUTXODiff(), nil, nil + } + + log.Debugf("Updating the virtual with the new tips") + selectedParentChainChanges, virtualUTXODiff, err := csm.updateVirtual(stagingArea, blockHash, newTips) + if err != nil { + return nil, nil, nil, err + } + + return selectedParentChainChanges, virtualUTXODiff, reversalData, nil +} + +func (csm *consensusStateManager) isCandidateToBeNextVirtualSelectedParent( + stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + + log.Tracef("isCandidateToBeNextVirtualSelectedParent start for block %s", blockHash) + defer log.Tracef("isCandidateToBeNextVirtualSelectedParent end for block %s", blockHash) + + if blockHash.Equal(csm.genesisHash) { + log.Debugf("Block %s is the genesis block, therefore it is 
"+ + "the selected parent by definition", blockHash) + return true, nil + } + + virtualGhostdagData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return false, err + } + + log.Debugf("Selecting the next selected parent between "+ + "the block %s the current selected parent %s", blockHash, virtualGhostdagData.SelectedParent()) + nextVirtualSelectedParent, err := csm.ghostdagManager.ChooseSelectedParent( + stagingArea, virtualGhostdagData.SelectedParent(), blockHash) + if err != nil { + return false, err + } + log.Debugf("The next selected parent is: %s", nextVirtualSelectedParent) + + return blockHash.Equal(nextVirtualSelectedParent), nil +} + +func (csm *consensusStateManager) addTip(stagingArea *model.StagingArea, newTipHash *externalapi.DomainHash) (newTips []*externalapi.DomainHash, err error) { + log.Tracef("addTip start for new tip %s", newTipHash) + defer log.Tracef("addTip end for new tip %s", newTipHash) + + log.Debugf("Calculating the new tips for new tip %s", newTipHash) + newTips, err = csm.calculateNewTips(stagingArea, newTipHash) + if err != nil { + return nil, err + } + + csm.consensusStateStore.StageTips(stagingArea, newTips) + log.Debugf("Staged the new tips, len: %d", len(newTips)) + + return newTips, nil +} + +func (csm *consensusStateManager) calculateNewTips( + stagingArea *model.StagingArea, newTipHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + log.Tracef("calculateNewTips start for new tip %s", newTipHash) + defer log.Tracef("calculateNewTips end for new tip %s", newTipHash) + + if newTipHash.Equal(csm.genesisHash) { + log.Debugf("The new tip is the genesis block, therefore it is the only tip by definition") + return []*externalapi.DomainHash{newTipHash}, nil + } + + currentTips, err := csm.consensusStateStore.Tips(stagingArea, csm.databaseContext) + if err != nil { + return nil, err + } + log.Debugf("The number of tips is: %d", len(currentTips)) + 
log.Tracef("The current tips are: %s", currentTips) + + newTipParents, err := csm.dagTopologyManager.Parents(stagingArea, newTipHash) + if err != nil { + return nil, err + } + log.Debugf("The parents of the new tip are: %s", newTipParents) + + newTips := []*externalapi.DomainHash{newTipHash} + + for _, currentTip := range currentTips { + isCurrentTipInNewTipParents := false + for _, newTipParent := range newTipParents { + if currentTip.Equal(newTipParent) { + isCurrentTipInNewTipParents = true + break + } + } + if !isCurrentTipInNewTipParents { + newTips = append(newTips, currentTip) + } + } + log.Debugf("The new number of tips is: %d", len(newTips)) + log.Tracef("The new tips are: %s", newTips) + + return newTips, nil +} diff --git a/domain/consensus/processes/consensusstatemanager/add_block_to_virtual_test.go b/domain/consensus/processes/consensusstatemanager/add_block_to_virtual_test.go new file mode 100644 index 0000000..b308f83 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/add_block_to_virtual_test.go @@ -0,0 +1,83 @@ +package consensusstatemanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestVirtualDiff(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestVirtualDiff") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + defer teardown(false) + + // Add block A over the genesis + blockAHash, virtualChangeSet, err := 
tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block A: %+v", err) + } + + virtualUTXODiff := virtualChangeSet.VirtualUTXODiff + if virtualUTXODiff.ToRemove().Len() != 0 { + t.Fatalf("Unexpected length %d for virtualUTXODiff.ToRemove()", virtualUTXODiff.ToRemove().Len()) + } + + // Because the genesis is not in block A's DAA window, block A's coinbase doesn't pay to it, so it has no outputs. + if virtualUTXODiff.ToAdd().Len() != 0 { + t.Fatalf("Unexpected length %d for virtualUTXODiff.ToAdd()", virtualUTXODiff.ToAdd().Len()) + } + + blockBHash, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{blockAHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block A: %+v", err) + } + + blockB, err := tc.BlockStore().Block(tc.DatabaseContext(), model.NewStagingArea(), blockBHash) + if err != nil { + t.Fatalf("Block: %+v", err) + } + + virtualUTXODiff = virtualChangeSet.VirtualUTXODiff + if virtualUTXODiff.ToRemove().Len() != 0 { + t.Fatalf("Unexpected length %d for virtualUTXODiff.ToRemove()", virtualUTXODiff.ToRemove().Len()) + } + + if virtualUTXODiff.ToAdd().Len() != 1 { + t.Fatalf("Unexpected length %d for virtualUTXODiff.ToAdd()", virtualUTXODiff.ToAdd().Len()) + } + + iterator := virtualUTXODiff.ToAdd().Iterator() + iterator.First() + + outpoint, entry, err := iterator.Get() + if err != nil { + t.Fatalf("TestVirtualDiff: %+v", err) + } + + if !outpoint.Equal(&externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(blockB.Transactions[0]), + Index: 0, + }) { + t.Fatalf("Unexpected outpoint %s", outpoint) + } + + if !entry.Equal(utxo.NewUTXOEntry( + blockB.Transactions[0].Outputs[0].Value, + blockB.Transactions[0].Outputs[0].ScriptPublicKey, + true, + consensusConfig.GenesisBlock.Header.DAAScore()+2, //Expected virtual DAA score + )) { + t.Fatalf("Unexpected entry %s", entry) + } + }) +} diff --git 
a/domain/consensus/processes/consensusstatemanager/calculate_past_utxo.go b/domain/consensus/processes/consensusstatemanager/calculate_past_utxo.go new file mode 100644 index 0000000..d6ef517 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/calculate_past_utxo.go @@ -0,0 +1,301 @@ +package consensusstatemanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/infrastructure/logger" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +func (csm *consensusStateManager) CalculatePastUTXOAndAcceptanceData(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash) (externalapi.UTXODiff, externalapi.AcceptanceData, model.Multiset, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "CalculatePastUTXOAndAcceptanceData") + defer onEnd() + + log.Debugf("CalculatePastUTXOAndAcceptanceData start for block %s", blockHash) + + if blockHash.Equal(csm.genesisHash) { + log.Debugf("Block %s is the genesis. 
By definition, "+ + "it has a predefined UTXO diff, empty acceptance data, and a predefined multiset", blockHash) + multiset, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, nil, nil, err + } + utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, nil, nil, err + } + return utxoDiff, externalapi.AcceptanceData{}, multiset, nil + } + + blockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, nil, nil, err + } + + log.Debugf("Restoring the past UTXO of block %s with selectedParent %s", + blockHash, blockGHOSTDAGData.SelectedParent()) + selectedParentPastUTXO, err := csm.restorePastUTXO(stagingArea, blockGHOSTDAGData.SelectedParent()) + if err != nil { + return nil, nil, nil, err + } + + log.Debugf("Restored the past UTXO of block %s with selectedParent %s. "+ + "Diff toAdd length: %d, toRemove length: %d", blockHash, blockGHOSTDAGData.SelectedParent(), + selectedParentPastUTXO.ToAdd().Len(), selectedParentPastUTXO.ToRemove().Len()) + + return csm.calculatePastUTXOAndAcceptanceDataWithSelectedParentUTXO(stagingArea, blockHash, selectedParentPastUTXO) +} + +func (csm *consensusStateManager) calculatePastUTXOAndAcceptanceDataWithSelectedParentUTXO(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, selectedParentPastUTXO externalapi.UTXODiff) ( + externalapi.UTXODiff, externalapi.AcceptanceData, model.Multiset, error) { + + blockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, nil, nil, err + } + + daaScore, err := csm.daaBlocksStore.DAAScore(csm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, nil, nil, err + } + + log.Debugf("Applying blue blocks to the selected parent past UTXO of block %s", blockHash) + acceptanceData, utxoDiff, err := 
csm.applyMergeSetBlocks(stagingArea, blockHash, selectedParentPastUTXO, daaScore) + if err != nil { + return nil, nil, nil, err + } + + log.Debugf("Calculating the multiset of %s", blockHash) + multiset, err := csm.calculateMultiset(stagingArea, blockHash, acceptanceData, blockGHOSTDAGData, daaScore) + if err != nil { + return nil, nil, nil, err + } + log.Debugf("The multiset of block %s resolved to: %s", blockHash, multiset.Hash()) + + return utxoDiff.ToImmutable(), acceptanceData, multiset, nil +} + +func (csm *consensusStateManager) restorePastUTXO( + stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.UTXODiff, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "restorePastUTXO") + defer onEnd() + + log.Debugf("restorePastUTXO start for block %s", blockHash) + + var err error + + log.Debugf("Collecting UTXO diffs for block %s", blockHash) + var utxoDiffs []externalapi.UTXODiff + nextBlockHash := blockHash + for { + log.Debugf("Collecting UTXO diff for block %s", nextBlockHash) + utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, nextBlockHash) + if err != nil { + return nil, err + } + utxoDiffs = append(utxoDiffs, utxoDiff) + log.Debugf("Collected UTXO diff for block %s: toAdd: %d, toRemove: %d", + nextBlockHash, utxoDiff.ToAdd().Len(), utxoDiff.ToRemove().Len()) + + exists, err := csm.utxoDiffStore.HasUTXODiffChild(csm.databaseContext, stagingArea, nextBlockHash) + if err != nil { + return nil, err + } + if !exists { + log.Debugf("Block %s does not have a UTXO diff child, "+ + "meaning we reached the virtual", nextBlockHash) + break + } + + nextBlockHash, err = csm.utxoDiffStore.UTXODiffChild(csm.databaseContext, stagingArea, nextBlockHash) + if err != nil { + return nil, err + } + if nextBlockHash == nil { + log.Debugf("Block %s does not have a UTXO diff child, "+ + "meaning we reached the virtual", nextBlockHash) + break + } + } + + // apply the diffs in reverse order + log.Debugf("Applying the 
collected UTXO diffs for block %s in reverse order", blockHash) + accumulatedDiff := utxo.NewMutableUTXODiff() + for i := len(utxoDiffs) - 1; i >= 0; i-- { + err = accumulatedDiff.WithDiffInPlace(utxoDiffs[i]) + if err != nil { + return nil, err + } + } + log.Tracef("The accumulated diff for block %s is: %s", blockHash, accumulatedDiff) + + return accumulatedDiff.ToImmutable(), nil +} + +func (csm *consensusStateManager) applyMergeSetBlocks(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + selectedParentPastUTXODiff externalapi.UTXODiff, daaScore uint64) ( + externalapi.AcceptanceData, externalapi.MutableUTXODiff, error) { + + log.Tracef("applyMergeSetBlocks start for block %s", blockHash) + defer log.Tracef("applyMergeSetBlocks end for block %s", blockHash) + + mergeSetHashes, err := csm.ghostdagManager.GetSortedMergeSet(stagingArea, blockHash) + if err != nil { + return nil, nil, err + } + log.Debugf("Merge set for block %s is %v", blockHash, mergeSetHashes) + mergeSetBlocks, err := csm.blockStore.Blocks(csm.databaseContext, stagingArea, mergeSetHashes) + if err != nil { + return nil, nil, err + } + + selectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(stagingArea, blockHash) + if err != nil { + return nil, nil, err + } + log.Tracef("The past median time for block %s is: %d", blockHash, selectedParentMedianTime) + + multiblockAcceptanceData := make(externalapi.AcceptanceData, len(mergeSetBlocks)) + accumulatedUTXODiff := selectedParentPastUTXODiff.CloneMutable() + accumulatedMass := uint64(0) + + for i, mergeSetBlock := range mergeSetBlocks { + mergeSetBlockHash := consensushashing.BlockHash(mergeSetBlock) + blockAcceptanceData := &externalapi.BlockAcceptanceData{ + BlockHash: mergeSetBlockHash, + TransactionAcceptanceData: make([]*externalapi.TransactionAcceptanceData, len(mergeSetBlock.Transactions)), + } + isSelectedParent := i == 0 + log.Tracef("Is merge set block %s the selected parent: %t", mergeSetBlockHash, 
isSelectedParent) + + for j, transaction := range mergeSetBlock.Transactions { + var isAccepted bool + + transactionID := consensushashing.TransactionID(transaction) + log.Tracef("Attempting to accept transaction %s in block %s", + transactionID, mergeSetBlockHash) + + isAccepted, accumulatedMass, err = csm.maybeAcceptTransaction(stagingArea, transaction, blockHash, + isSelectedParent, accumulatedUTXODiff, accumulatedMass, selectedParentMedianTime, daaScore) + if err != nil { + return nil, nil, err + } + log.Tracef("Transaction %s in block %s isAccepted: %t, fee: %d", + transactionID, mergeSetBlockHash, isAccepted, transaction.Fee) + + var transactionInputUTXOEntries []externalapi.UTXOEntry + if isAccepted { + transactionInputUTXOEntries = make([]externalapi.UTXOEntry, len(transaction.Inputs)) + for k, input := range transaction.Inputs { + transactionInputUTXOEntries[k] = input.UTXOEntry + } + } + + blockAcceptanceData.TransactionAcceptanceData[j] = &externalapi.TransactionAcceptanceData{ + Transaction: transaction, + Fee: transaction.Fee, + IsAccepted: isAccepted, + TransactionInputUTXOEntries: transactionInputUTXOEntries, + } + } + multiblockAcceptanceData[i] = blockAcceptanceData + } + + return multiblockAcceptanceData, accumulatedUTXODiff, nil +} + +func (csm *consensusStateManager) maybeAcceptTransaction(stagingArea *model.StagingArea, + transaction *externalapi.DomainTransaction, blockHash *externalapi.DomainHash, isSelectedParent bool, + accumulatedUTXODiff externalapi.MutableUTXODiff, accumulatedMassBefore uint64, selectedParentPastMedianTime int64, + blockDAAScore uint64) (isAccepted bool, accumulatedMassAfter uint64, err error) { + + transactionID := consensushashing.TransactionID(transaction) + log.Tracef("maybeAcceptTransaction start for transaction %s in block %s", transactionID, blockHash) + defer log.Tracef("maybeAcceptTransaction end for transaction %s in block %s", transactionID, blockHash) + + log.Tracef("Populating transaction %s with UTXO 
entries", transactionID) + err = csm.populateTransactionWithUTXOEntriesFromVirtualOrDiff(stagingArea, transaction, accumulatedUTXODiff.ToImmutable()) + if err != nil { + if !errors.As(err, &(ruleerrors.RuleError{})) { + return false, 0, err + } + + return false, accumulatedMassBefore, nil + } + + // Coinbase transaction outputs are added to the UTXO-set only if they are in the selected parent chain. + if transactionhelper.IsCoinBase(transaction) { + if !isSelectedParent { + log.Tracef("Transaction %s is the coinbase of block %s "+ + "but said block is not in the selected parent chain. "+ + "As such, it is not accepted", transactionID, blockHash) + return false, accumulatedMassBefore, nil + } + log.Tracef("Transaction %s is the coinbase of block %s", transactionID, blockHash) + } else { + log.Tracef("Validating transaction %s in block %s", transactionID, blockHash) + err = csm.transactionValidator.ValidateTransactionInContextAndPopulateFee( + stagingArea, transaction, blockHash) + if err != nil { + if !errors.As(err, &(ruleerrors.RuleError{})) { + return false, 0, err + } + + log.Tracef("Validation failed for transaction %s "+ + "in block %s: %s", transactionID, blockHash, err) + return false, accumulatedMassBefore, nil + } + log.Tracef("Validation passed for transaction %s in block %s", transactionID, blockHash) + } + + log.Tracef("Adding transaction %s in block %s to the accumulated diff", transactionID, blockHash) + err = accumulatedUTXODiff.AddTransaction(transaction, blockDAAScore) + if err != nil { + return false, 0, err + } + + return true, accumulatedMassAfter, nil +} + +// RestorePastUTXOSetIterator restores the given block's UTXOSet iterator, and returns it as a externalapi.ReadOnlyUTXOSetIterator +func (csm *consensusStateManager) RestorePastUTXOSetIterator(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ( + externalapi.ReadOnlyUTXOSetIterator, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "RestorePastUTXOSetIterator") + 
defer onEnd() + + blockStatus, _, err := csm.resolveBlockStatus(stagingArea, blockHash, true) + if err != nil { + return nil, err + } + if blockStatus != externalapi.StatusUTXOValid { + return nil, errors.Errorf( + "block %s, has status '%s', and therefore can't restore it's UTXO set. Only blocks with status '%s' can be restored.", + blockHash, blockStatus, externalapi.StatusUTXOValid) + } + + log.Tracef("RestorePastUTXOSetIterator start for block %s", blockHash) + defer log.Tracef("RestorePastUTXOSetIterator end for block %s", blockHash) + + log.Debugf("Calculating UTXO diff for block %s", blockHash) + blockDiff, err := csm.restorePastUTXO(stagingArea, blockHash) + if err != nil { + return nil, err + } + + virtualUTXOSetIterator, err := csm.consensusStateStore.VirtualUTXOSetIterator(csm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + return utxo.IteratorWithDiff(virtualUTXOSetIterator, blockDiff) +} diff --git a/domain/consensus/processes/consensusstatemanager/calculate_past_utxo_test.go b/domain/consensus/processes/consensusstatemanager/calculate_past_utxo_test.go new file mode 100644 index 0000000..97fb275 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/calculate_past_utxo_test.go @@ -0,0 +1,162 @@ +package consensusstatemanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/multiset" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +func TestUTXOCommitment(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 
+ factory := consensus.NewFactory() + + consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestUTXOCommitment") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Build the following DAG: + // G <- A <- B <- C <- E + // <- D <- + // Where block D has a non-coinbase transaction + genesisHash := consensusConfig.GenesisHash + + // Block A: + blockAHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{genesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating block A: %+v", err) + } + checkBlockUTXOCommitment(t, consensus, blockAHash, "A") + // Block B: + blockBHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{blockAHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating block B: %+v", err) + } + blockB, _, err := consensus.GetBlock(blockBHash) + if err != nil { + t.Fatalf("Error getting block B: %+v", err) + } + checkBlockUTXOCommitment(t, consensus, blockBHash, "B") + // Block C: + blockCHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{blockBHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating block C: %+v", err) + } + checkBlockUTXOCommitment(t, consensus, blockCHash, "C") + // Block D: + blockDTransaction, err := testutils.CreateTransaction( + blockB.Transactions[transactionhelper.CoinbaseTransactionIndex], 1) + if err != nil { + t.Fatalf("Error creating transaction: %+v", err) + } + blockDHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{blockBHash}, nil, + []*externalapi.DomainTransaction{blockDTransaction}) + if err != nil { + t.Fatalf("Error creating block D: %+v", err) + } + checkBlockUTXOCommitment(t, consensus, blockDHash, "D") + // Block E: + blockEHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{blockCHash, blockDHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating block E: %+v", err) + } + checkBlockUTXOCommitment(t, consensus, blockEHash, "E") + }) +} + +func checkBlockUTXOCommitment(t *testing.T, 
consensus testapi.TestConsensus, blockHash *externalapi.DomainHash, blockName string) { + block, _, err := consensus.GetBlock(blockHash) + if err != nil { + t.Fatalf("Error getting block %s: %+v", blockName, err) + } + + // Get the past UTXO set of block + csm := consensus.ConsensusStateManager() + utxoSetIterator, err := csm.RestorePastUTXOSetIterator(model.NewStagingArea(), blockHash) + if err != nil { + t.Fatalf("Error restoring past UTXO of block %s: %+v", blockName, err) + } + defer utxoSetIterator.Close() + + // Build a Multiset + ms := multiset.New() + for ok := utxoSetIterator.First(); ok; ok = utxoSetIterator.Next() { + outpoint, entry, err := utxoSetIterator.Get() + if err != nil { + t.Fatalf("Error getting from UTXOSet iterator: %+v", err) + } + err = consensus.ConsensusStateManager().AddUTXOToMultiset(ms, entry, outpoint) + if err != nil { + t.Fatalf("Error adding utxo to multiset: %+v", err) + } + } + + // Turn the multiset into a UTXO commitment + utxoCommitment := ms.Hash() + + // Make sure that the two commitments are equal + if !utxoCommitment.Equal(block.Header.UTXOCommitment()) { + t.Fatalf("TestUTXOCommitment: calculated UTXO commitment for block %s and "+ + "actual UTXO commitment don't match. 
Want: %s, got: %s", blockName, + utxoCommitment, block.Header.UTXOCommitment()) + } +} + +func TestPastUTXOMultiset(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + + factory := consensus.NewFactory() + + consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestUTXOCommitment") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Build a short chain + currentHash := consensusConfig.GenesisHash + for i := 0; i < 3; i++ { + currentHash, _, err = consensus.AddBlock([]*externalapi.DomainHash{currentHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating block A: %+v", err) + } + } + + // Save the current tip's hash to be used later + testedBlockHash := currentHash + + // Take testedBlock's multiset and hash + firstMultiset, err := consensus.MultisetStore().Get(consensus.DatabaseContext(), stagingArea, testedBlockHash) + if err != nil { + return + } + firstMultisetHash := firstMultiset.Hash() + + // Add another block on top of testedBlock + _, _, err = consensus.AddBlock([]*externalapi.DomainHash{testedBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating block A: %+v", err) + } + + // Take testedBlock's multiset and hash again + secondMultiset, err := consensus.MultisetStore().Get(consensus.DatabaseContext(), stagingArea, testedBlockHash) + if err != nil { + return + } + secondMultisetHash := secondMultiset.Hash() + + // Make sure the multiset hasn't changed + if !firstMultisetHash.Equal(secondMultisetHash) { + t.Fatalf("TestPastUTXOMultiSet: selectedParentMultiset appears to have changed!") + } + }) +} diff --git a/domain/consensus/processes/consensusstatemanager/check_finality_violation.go b/domain/consensus/processes/consensusstatemanager/check_finality_violation.go new file mode 100644 index 0000000..97a3e1d --- /dev/null +++ 
b/domain/consensus/processes/consensusstatemanager/check_finality_violation.go @@ -0,0 +1,70 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (csm *consensusStateManager) isViolatingFinality(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, +) (isViolatingFinality bool, shouldSendNotification bool, err error) { + + log.Tracef("isViolatingFinality start for block %s", blockHash) + defer log.Tracef("isViolatingFinality end for block %s", blockHash) + + if blockHash.Equal(csm.genesisHash) { + log.Debugf("Block %s is the genesis block, "+ + "and does not violate finality by definition", blockHash) + return false, false, nil + } + + var finalityPoint *externalapi.DomainHash + virtualFinalityPoint, err := csm.finalityManager.VirtualFinalityPoint(stagingArea) + if err != nil { + return false, false, err + } + log.Debugf("The virtual finality point is: %s", virtualFinalityPoint) + + // There can be a situation where the virtual points close to the pruning point (or even in the past + // of the pruning point before calling validateAndInsertBlock for the pruning point block) and the + // finality point from the virtual point-of-view is in the past of the pruning point. + // In such situation we override the finality point to be the pruning point to avoid situations where + // the virtual selected parent chain don't include the pruning point. 
+ pruningPoint, err := csm.pruningStore.PruningPoint(csm.databaseContext, stagingArea) + if err != nil { + return false, false, err + } + log.Debugf("The pruning point is: %s", pruningPoint) + + isFinalityPointInPastOfPruningPoint, err := csm.dagTopologyManager.IsAncestorOf(stagingArea, virtualFinalityPoint, pruningPoint) + if err != nil { + return false, false, err + } + + if !isFinalityPointInPastOfPruningPoint { + finalityPoint = virtualFinalityPoint + } else { + log.Debugf("The virtual finality point is %s in the past of the pruning point, so finality is validated "+ + "using the pruning point", virtualFinalityPoint) + finalityPoint = pruningPoint + } + + isInSelectedParentChainOfFinalityPoint, err := + csm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, finalityPoint, blockHash) + if err != nil { + return false, false, err + } + + if !isInSelectedParentChainOfFinalityPoint { + if !isFinalityPointInPastOfPruningPoint { + return true, true, nil + } + // On IBD it's pretty normal to get blocks in the anticone of the pruning + // point, so we don't notify on cases when the pruning point is in the future + // of the finality point. 
+ log.Debugf("Block %s violates finality, but spectred is currently doing IBD, so this is normal", blockHash) + return true, false, nil + } + log.Debugf("Block %s does not violate finality", blockHash) + + return false, false, nil +} diff --git a/domain/consensus/processes/consensusstatemanager/consensus_state_manager.go b/domain/consensus/processes/consensusstatemanager/consensus_state_manager.go new file mode 100644 index 0000000..966c095 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/consensus_state_manager.go @@ -0,0 +1,118 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// consensusStateManager manages the node's consensus state +type consensusStateManager struct { + maxBlockParents externalapi.KType + mergeSetSizeLimit uint64 + genesisHash *externalapi.DomainHash + databaseContext model.DBManager + + ghostdagManager model.GHOSTDAGManager + dagTopologyManager model.DAGTopologyManager + dagTraversalManager model.DAGTraversalManager + pastMedianTimeManager model.PastMedianTimeManager + transactionValidator model.TransactionValidator + coinbaseManager model.CoinbaseManager + mergeDepthManager model.MergeDepthManager + finalityManager model.FinalityManager + difficultyManager model.DifficultyManager + + headersSelectedTipStore model.HeaderSelectedTipStore + blockStatusStore model.BlockStatusStore + ghostdagDataStore model.GHOSTDAGDataStore + consensusStateStore model.ConsensusStateStore + multisetStore model.MultisetStore + blockStore model.BlockStore + utxoDiffStore model.UTXODiffStore + blockRelationStore model.BlockRelationStore + acceptanceDataStore model.AcceptanceDataStore + blockHeaderStore model.BlockHeaderStore + pruningStore model.PruningStore + daaBlocksStore model.DAABlocksStore + + stores []model.Store +} + +// New instantiates a new ConsensusStateManager +func New( + databaseContext 
model.DBManager, + maxBlockParents externalapi.KType, + mergeSetSizeLimit uint64, + genesisHash *externalapi.DomainHash, + + ghostdagManager model.GHOSTDAGManager, + dagTopologyManager model.DAGTopologyManager, + dagTraversalManager model.DAGTraversalManager, + pastMedianTimeManager model.PastMedianTimeManager, + transactionValidator model.TransactionValidator, + coinbaseManager model.CoinbaseManager, + mergeDepthManager model.MergeDepthManager, + finalityManager model.FinalityManager, + difficultyManager model.DifficultyManager, + + blockStatusStore model.BlockStatusStore, + ghostdagDataStore model.GHOSTDAGDataStore, + consensusStateStore model.ConsensusStateStore, + multisetStore model.MultisetStore, + blockStore model.BlockStore, + utxoDiffStore model.UTXODiffStore, + blockRelationStore model.BlockRelationStore, + acceptanceDataStore model.AcceptanceDataStore, + blockHeaderStore model.BlockHeaderStore, + headersSelectedTipStore model.HeaderSelectedTipStore, + pruningStore model.PruningStore, + daaBlocksStore model.DAABlocksStore) (model.ConsensusStateManager, error) { + + csm := &consensusStateManager{ + maxBlockParents: maxBlockParents, + mergeSetSizeLimit: mergeSetSizeLimit, + genesisHash: genesisHash, + + databaseContext: databaseContext, + + ghostdagManager: ghostdagManager, + dagTopologyManager: dagTopologyManager, + dagTraversalManager: dagTraversalManager, + pastMedianTimeManager: pastMedianTimeManager, + transactionValidator: transactionValidator, + coinbaseManager: coinbaseManager, + mergeDepthManager: mergeDepthManager, + finalityManager: finalityManager, + difficultyManager: difficultyManager, + + multisetStore: multisetStore, + blockStore: blockStore, + blockStatusStore: blockStatusStore, + ghostdagDataStore: ghostdagDataStore, + consensusStateStore: consensusStateStore, + utxoDiffStore: utxoDiffStore, + blockRelationStore: blockRelationStore, + acceptanceDataStore: acceptanceDataStore, + blockHeaderStore: blockHeaderStore, + headersSelectedTipStore: 
headersSelectedTipStore, + pruningStore: pruningStore, + daaBlocksStore: daaBlocksStore, + + stores: []model.Store{ + consensusStateStore, + acceptanceDataStore, + blockStore, + blockStatusStore, + blockRelationStore, + multisetStore, + ghostdagDataStore, + consensusStateStore, + utxoDiffStore, + blockHeaderStore, + headersSelectedTipStore, + pruningStore, + }, + } + + return csm, nil +} diff --git a/domain/consensus/processes/consensusstatemanager/find_selected_parent_chain_changes.go b/domain/consensus/processes/consensusstatemanager/find_selected_parent_chain_changes.go new file mode 100644 index 0000000..8a16709 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/find_selected_parent_chain_changes.go @@ -0,0 +1,21 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (csm *consensusStateManager) GetVirtualSelectedParentChainFromBlock(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) { + + // Calculate chain changes between the given blockHash and the + // virtual's selected parent. Note that we explicitly don't + // do the calculation against the virtual itself so that we + // won't later need to remove it from the result. 
+ virtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + virtualSelectedParent := virtualGHOSTDAGData.SelectedParent() + + return csm.dagTraversalManager.CalculateChainPath(stagingArea, blockHash, virtualSelectedParent) +} diff --git a/domain/consensus/processes/consensusstatemanager/find_selected_parent_chain_changes_test.go b/domain/consensus/processes/consensusstatemanager/find_selected_parent_chain_changes_test.go new file mode 100644 index 0000000..9241a3e --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/find_selected_parent_chain_changes_test.go @@ -0,0 +1,109 @@ +package consensusstatemanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestCalculateChainPath(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCalculateChainPath") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Add block A over the genesis + blockAHash, blockAVirtualChangeSet, err := consensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block A: %+v", err) + } + blockASelectedParentChainChanges := blockAVirtualChangeSet.VirtualSelectedParentChainChanges + + // Make sure that the removed slice is empty + if len(blockASelectedParentChainChanges.Removed) > 0 { + t.Fatalf("The `removed` slice is not empty after inserting block A") + } + + // Make sure that the added slice contains only blockAHash + if 
len(blockASelectedParentChainChanges.Added) != 1 { + t.Fatalf("The `added` slice contains an unexpected amount of items after inserting block A. "+ + "Want: %d, got: %d", 1, len(blockASelectedParentChainChanges.Added)) + } + if !blockASelectedParentChainChanges.Added[0].Equal(blockAHash) { + t.Fatalf("The `added` slice contains an unexpected hash. Want: %s, got: %s", + blockAHash, blockASelectedParentChainChanges.Added[0]) + } + + // Add block B over the genesis + blockBHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block B: %+v", err) + } + + // Figure out which among blocks A and B is NOT the virtual selected parent + virtualGHOSTDAGData, err := consensus.GHOSTDAGDataStore().Get(consensus.DatabaseContext(), model.NewStagingArea(), model.VirtualBlockHash, false) + if err != nil { + t.Fatalf("Error getting virtual GHOSTDAG data: %+v", err) + } + virtualSelectedParent := virtualGHOSTDAGData.SelectedParent() + notVirtualSelectedParent := blockAHash + if virtualSelectedParent.Equal(blockAHash) { + notVirtualSelectedParent = blockBHash + } + + // Add block C over the block that isn't the current virtual's selected parent + // We expect this to cause a reorg + blockCHash, blockCVirtualChangeSet, err := consensus.AddBlock([]*externalapi.DomainHash{notVirtualSelectedParent}, nil, nil) + if err != nil { + t.Fatalf("Error adding block C: %+v", err) + } + blockCSelectedParentChainChanges := blockCVirtualChangeSet.VirtualSelectedParentChainChanges + + // Make sure that the removed slice contains only the block that was previously + // the selected parent + if len(blockCSelectedParentChainChanges.Removed) != 1 { + t.Fatalf("The `removed` slice contains an unexpected amount of items after inserting block C. 
"+ + "Want: %d, got: %d", 1, len(blockCSelectedParentChainChanges.Removed)) + } + if !blockCSelectedParentChainChanges.Removed[0].Equal(virtualSelectedParent) { + t.Fatalf("The `removed` slice contains an unexpected hash. "+ + "Want: %s, got: %s", virtualSelectedParent, blockCSelectedParentChainChanges.Removed[0]) + } + + // Make sure that the added slice contains the block that was NOT previously + // the selected parent and blockCHash, in that order + if len(blockCSelectedParentChainChanges.Added) != 2 { + t.Fatalf("The `added` slice contains an unexpected amount of items after inserting block C. "+ + "Want: %d, got: %d", 2, len(blockCSelectedParentChainChanges.Added)) + } + if !blockCSelectedParentChainChanges.Added[0].Equal(notVirtualSelectedParent) { + t.Fatalf("The `added` slice contains an unexpected hash as the first item. "+ + "Want: %s, got: %s", notVirtualSelectedParent, blockCSelectedParentChainChanges.Added[0]) + } + if !blockCSelectedParentChainChanges.Added[1].Equal(blockCHash) { + t.Fatalf("The `added` slice contains an unexpected hash as the second item. 
"+ + "Want: %s, got: %s", blockCHash, blockCSelectedParentChainChanges.Added[1]) + } + + // Add block D over the genesis + _, blockDVirtualChangeSet, err := consensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block D: %+v", err) + } + blockDSelectedParentChainChanges := blockDVirtualChangeSet.VirtualSelectedParentChainChanges + + // Make sure that both the added and the removed slices are empty + if len(blockDSelectedParentChainChanges.Added) > 0 { + t.Fatalf("The `added` slice is not empty after inserting block D") + } + if len(blockDSelectedParentChainChanges.Removed) > 0 { + t.Fatalf("The `removed` slice is not empty after inserting block D") + } + }) +} diff --git a/domain/consensus/processes/consensusstatemanager/import_pruning_utxo_set.go b/domain/consensus/processes/consensusstatemanager/import_pruning_utxo_set.go new file mode 100644 index 0000000..d2327ef --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/import_pruning_utxo_set.go @@ -0,0 +1,233 @@ +package consensusstatemanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/staging" +) + +func (csm *consensusStateManager) ImportPruningPointUTXOSet(stagingArea *model.StagingArea, newPruningPoint *externalapi.DomainHash) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ImportPruningPointUTXOSet") + defer onEnd() + + err := csm.importPruningPointUTXOSet(stagingArea, newPruningPoint) 
+ if err != nil { + return err + } + + err = csm.applyImportedPruningPointUTXOSet(stagingArea, newPruningPoint) + if err != nil { + return err + } + + return nil +} + +func (csm *consensusStateManager) importPruningPointUTXOSet(stagingArea *model.StagingArea, newPruningPoint *externalapi.DomainHash) error { + log.Tracef("importPruningPointUTXOSet start") + defer log.Tracef("importPruningPointUTXOSet end") + + // TODO: We should validate the imported pruning point doesn't violate finality as part of the headers proof. + + importedPruningPointMultiset, err := csm.pruningStore.ImportedPruningPointMultiset(csm.databaseContext) + if err != nil { + return err + } + + newPruningPointHeader, err := csm.blockHeaderStore.BlockHeader(csm.databaseContext, stagingArea, newPruningPoint) + if err != nil { + return err + } + log.Debugf("The UTXO commitment of the pruning point: %s", + newPruningPointHeader.UTXOCommitment()) + + if !newPruningPointHeader.UTXOCommitment().Equal(importedPruningPointMultiset.Hash()) { + return errors.Wrapf(ruleerrors.ErrBadPruningPointUTXOSet, "the expected multiset hash of the pruning "+ + "point UTXO set is %s but got %s", newPruningPointHeader.UTXOCommitment(), *importedPruningPointMultiset.Hash()) + } + log.Debugf("The new pruning point UTXO commitment validation passed") + + log.Debugf("Setting the pruning point as the only virtual parent") + err = csm.dagTopologyManager.SetParents(stagingArea, model.VirtualBlockHash, []*externalapi.DomainHash{newPruningPoint}) + if err != nil { + return err + } + + log.Debugf("Calculating GHOSTDAG for the new virtual") + err = csm.ghostdagManager.GHOSTDAG(stagingArea, model.VirtualBlockHash) + if err != nil { + return err + } + + log.Debugf("Updating the new pruning point to be the new virtual diff parent with an empty diff") + csm.stageDiff(stagingArea, newPruningPoint, utxo.NewUTXODiff(), nil) + + log.Debugf("Populating the pruning point with UTXO entries") + importedPruningPointUTXOIterator, err := 
csm.pruningStore.ImportedPruningPointUTXOIterator(csm.databaseContext) + if err != nil { + return err + } + defer importedPruningPointUTXOIterator.Close() + + newPruningPointBlock, err := csm.blockStore.Block(csm.databaseContext, stagingArea, newPruningPoint) + if err != nil { + return err + } + + err = csm.populateTransactionWithUTXOEntriesFromUTXOSet(newPruningPointBlock, importedPruningPointUTXOIterator) + if err != nil { + return err + } + + // Before we manually mark the new pruning point as valid, we validate that all of its transactions are valid + // against the provided UTXO set. + log.Debugf("Validating that the pruning point is UTXO valid") + newPruningPointSelectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(stagingArea, newPruningPoint) + if err != nil { + return err + } + log.Tracef("The past median time of pruning block %s is %d", + newPruningPoint, newPruningPointSelectedParentMedianTime) + + for i, transaction := range newPruningPointBlock.Transactions { + transactionID := consensushashing.TransactionID(transaction) + log.Tracef("Validating transaction %s in pruning block %s against "+ + "the pruning point's past UTXO", transactionID, newPruningPoint) + if i == transactionhelper.CoinbaseTransactionIndex { + log.Tracef("Skipping transaction %s because it is the coinbase", transactionID) + continue + } + log.Tracef("Validating transaction %s and populating it with mass and fee", transactionID) + err = csm.transactionValidator.ValidateTransactionInContextAndPopulateFee( + stagingArea, transaction, newPruningPoint) + if err != nil { + return err + } + log.Tracef("Validation against the pruning point's past UTXO "+ + "passed for transaction %s", transactionID) + } + + log.Debugf("Staging the new pruning point as %s", externalapi.StatusUTXOValid) + csm.blockStatusStore.Stage(stagingArea, newPruningPoint, externalapi.StatusUTXOValid) + + log.Debugf("Staging the new pruning point multiset") + csm.multisetStore.Stage(stagingArea, 
newPruningPoint, importedPruningPointMultiset) + + _, err = csm.difficultyManager.StageDAADataAndReturnRequiredDifficulty(stagingArea, model.VirtualBlockHash, false) + if err != nil { + return err + } + + return nil +} + +func (csm *consensusStateManager) ImportPruningPoints(stagingArea *model.StagingArea, pruningPoints []externalapi.BlockHeader) error { + for i, header := range pruningPoints { + blockHash := consensushashing.HeaderHash(header) + err := csm.pruningStore.StagePruningPointByIndex(csm.databaseContext, stagingArea, blockHash, uint64(i)) + if err != nil { + return err + } + + csm.blockHeaderStore.Stage(stagingArea, blockHash, header) + } + + lastPruningPointHeader := pruningPoints[len(pruningPoints)-1] + csm.pruningStore.StagePruningPointCandidate(stagingArea, consensushashing.HeaderHash(lastPruningPointHeader)) + + return nil +} + +func (csm *consensusStateManager) applyImportedPruningPointUTXOSet(stagingArea *model.StagingArea, newPruningPoint *externalapi.DomainHash) error { + dbTx, err := csm.databaseContext.Begin() + if err != nil { + return err + } + + err = stagingArea.Commit(dbTx) + if err != nil { + return err + } + + log.Debugf("Starting to import virtual UTXO set and pruning point utxo set") + err = csm.consensusStateStore.StartImportingPruningPointUTXOSet(dbTx) + if err != nil { + return err + } + + log.Debugf("Committing all staged data for imported pruning point") + err = dbTx.Commit() + if err != nil { + return err + } + + return csm.importVirtualUTXOSetAndPruningPointUTXOSet(newPruningPoint) +} + +func (csm *consensusStateManager) importVirtualUTXOSetAndPruningPointUTXOSet(pruningPoint *externalapi.DomainHash) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "importVirtualUTXOSetAndPruningPointUTXOSet") + defer onEnd() + + log.Debugf("Getting an iterator into the imported pruning point utxo set") + pruningPointUTXOSetIterator, err := csm.pruningStore.ImportedPruningPointUTXOIterator(csm.databaseContext) + if err != nil { + 
return err + } + defer pruningPointUTXOSetIterator.Close() + + log.Debugf("Importing the virtual UTXO set") + err = csm.consensusStateStore.ImportPruningPointUTXOSetIntoVirtualUTXOSet(csm.databaseContext, pruningPointUTXOSetIterator) + if err != nil { + return err + } + + log.Debugf("Importing the new pruning point UTXO set") + err = csm.pruningStore.CommitImportedPruningPointUTXOSet(csm.databaseContext) + if err != nil { + return err + } + + // Run update virtual to create acceptance data and any other missing data. + updateVirtualStagingArea := model.NewStagingArea() + _, _, err = csm.updateVirtual(updateVirtualStagingArea, pruningPoint, []*externalapi.DomainHash{pruningPoint}) + if err != nil { + return err + } + + err = staging.CommitAllChanges(csm.databaseContext, updateVirtualStagingArea) + if err != nil { + return err + } + + log.Debugf("Finishing to import virtual UTXO set and pruning point UTXO set") + return csm.consensusStateStore.FinishImportingPruningPointUTXOSet(csm.databaseContext) +} + +func (csm *consensusStateManager) RecoverUTXOIfRequired() error { + hadStartedImportingPruningPointUTXOSet, err := csm.consensusStateStore.HadStartedImportingPruningPointUTXOSet(csm.databaseContext) + if err != nil { + return err + } + if !hadStartedImportingPruningPointUTXOSet { + return nil + } + + log.Warnf("Unimported pruning point UTXO set detected. 
Attempting to recover...") + pruningPoint, err := csm.pruningStore.PruningPoint(csm.databaseContext, model.NewStagingArea()) + if err != nil { + return err + } + + err = csm.importVirtualUTXOSetAndPruningPointUTXOSet(pruningPoint) + if err != nil { + return err + } + log.Warnf("Unimported UTXO set successfully recovered") + return nil +} diff --git a/domain/consensus/processes/consensusstatemanager/log.go b/domain/consensus/processes/consensusstatemanager/log.go new file mode 100644 index 0000000..6c87474 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/log.go @@ -0,0 +1,7 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("BDAG") diff --git a/domain/consensus/processes/consensusstatemanager/multisets.go b/domain/consensus/processes/consensusstatemanager/multisets.go new file mode 100644 index 0000000..b363819 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/multisets.go @@ -0,0 +1,109 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" +) + +func (csm *consensusStateManager) calculateMultiset(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, + acceptanceData externalapi.AcceptanceData, + blockGHOSTDAGData *externalapi.BlockGHOSTDAGData, + daaScore uint64) (model.Multiset, error) { + + log.Tracef("calculateMultiset start for block with selected parent %s", blockGHOSTDAGData.SelectedParent()) + defer log.Tracef("calculateMultiset end for block with selected parent %s", blockGHOSTDAGData.SelectedParent()) + + if blockHash.Equal(csm.genesisHash) { + log.Debugf("Selected parent is nil, which could only happen for the genesis. 
" + + "The genesis has a predefined multiset") + return csm.multisetStore.Get(csm.databaseContext, stagingArea, blockHash) + } + + ms, err := csm.multisetStore.Get(csm.databaseContext, stagingArea, blockGHOSTDAGData.SelectedParent()) + if err != nil { + return nil, err + } + log.Debugf("The multiset for the selected parent %s is: %s", blockGHOSTDAGData.SelectedParent(), ms.Hash()) + + for _, blockAcceptanceData := range acceptanceData { + for i, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData { + transaction := transactionAcceptanceData.Transaction + transactionID := consensushashing.TransactionID(transaction) + if !transactionAcceptanceData.IsAccepted { + log.Tracef("Skipping transaction %s because it was not accepted", transactionID) + continue + } + + isCoinbase := i == 0 + log.Tracef("Is transaction %s a coinbase transaction: %t", transactionID, isCoinbase) + + err := addTransactionToMultiset(ms, transaction, daaScore, isCoinbase) + if err != nil { + return nil, err + } + log.Tracef("Added transaction %s to the multiset", transactionID) + } + } + + return ms, nil +} + +func addTransactionToMultiset(multiset model.Multiset, transaction *externalapi.DomainTransaction, + blockDAAScore uint64, isCoinbase bool) error { + + transactionID := consensushashing.TransactionID(transaction) + log.Tracef("addTransactionToMultiset start for transaction %s", transactionID) + defer log.Tracef("addTransactionToMultiset end for transaction %s", transactionID) + + for _, input := range transaction.Inputs { + log.Tracef("Removing input %s at index %d from the multiset", + input.PreviousOutpoint.TransactionID, input.PreviousOutpoint.Index) + err := removeUTXOFromMultiset(multiset, input.UTXOEntry, &input.PreviousOutpoint) + if err != nil { + return err + } + } + + for i, output := range transaction.Outputs { + outpoint := &externalapi.DomainOutpoint{ + TransactionID: *transactionID, + Index: uint32(i), + } + utxoEntry := 
utxo.NewUTXOEntry(output.Value, output.ScriptPublicKey, isCoinbase, blockDAAScore) + + log.Tracef("Adding input %s at index %d from the multiset", transactionID, i) + err := addUTXOToMultiset(multiset, utxoEntry, outpoint) + if err != nil { + return err + } + } + + return nil +} + +func addUTXOToMultiset(multiset model.Multiset, entry externalapi.UTXOEntry, + outpoint *externalapi.DomainOutpoint) error { + + serializedUTXO, err := utxo.SerializeUTXO(entry, outpoint) + if err != nil { + return err + } + multiset.Add(serializedUTXO) + + return nil +} + +func removeUTXOFromMultiset(multiset model.Multiset, entry externalapi.UTXOEntry, + outpoint *externalapi.DomainOutpoint) error { + + serializedUTXO, err := utxo.SerializeUTXO(entry, outpoint) + if err != nil { + return err + } + multiset.Remove(serializedUTXO) + + return nil +} diff --git a/domain/consensus/processes/consensusstatemanager/pick_virtual_parents.go b/domain/consensus/processes/consensusstatemanager/pick_virtual_parents.go new file mode 100644 index 0000000..bc06cdf --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/pick_virtual_parents.go @@ -0,0 +1,355 @@ +package consensusstatemanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/math" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" +) + +func (csm *consensusStateManager) pickVirtualParents(stagingArea *model.StagingArea, tips []*externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "pickVirtualParents") + defer onEnd() + + log.Debugf("pickVirtualParents start for tips len: %d", len(tips)) + + log.Debugf("Pushing all tips into a DownHeap") + candidatesHeap := csm.dagTraversalManager.NewDownHeap(stagingArea) + for _, tip 
:= range tips { + err := candidatesHeap.Push(tip) + if err != nil { + return nil, err + } + } + + // If the first candidate has been disqualified from the chain or violates finality - + // it cannot be virtual's parent, since it will make it virtual's selectedParent - disqualifying virtual itself. + // Therefore, in such a case we remove it from the list of virtual parent candidates, and replace with + // its parents that have no disqualified children + virtualSelectedParent, err := csm.selectVirtualSelectedParent(stagingArea, candidatesHeap) + if err != nil { + return nil, err + } + log.Debugf("The selected parent of the virtual is: %s", virtualSelectedParent) + + // Limit to maxBlockParents*3 candidates, that way we don't go over thousands of tips when the network isn't healthy. + // There's no specific reason for a factor of 3, and its not a consensus rule, just an estimation saying we probably + // don't want to consider and calculate 3 times the amount of candidates for the set of parents. + maxCandidates := int(csm.maxBlockParents) * 3 + candidateAllocationSize := math.MinInt(maxCandidates, candidatesHeap.Len()) + candidates := make([]*externalapi.DomainHash, 0, candidateAllocationSize) + for len(candidates) < maxCandidates && candidatesHeap.Len() > 0 { + candidates = append(candidates, candidatesHeap.Pop()) + } + + // prioritize half the blocks with highest blueWork and half with lowest, so the network will merge splits faster. + if len(candidates) >= int(csm.maxBlockParents) { + // We already have the selectedParent, so we're left with csm.maxBlockParents-1. 
+ maxParents := csm.maxBlockParents - 1 + end := len(candidates) - 1 + for i := (maxParents) / 2; i < maxParents; i++ { + candidates[i], candidates[end] = candidates[end], candidates[i] + end-- + } + } + + selectedVirtualParents := []*externalapi.DomainHash{virtualSelectedParent} + mergeSetSize := uint64(1) // starts counting from 1 because selectedParent is already in the mergeSet + + // First condition implies that no point in searching since limit was already reached + for mergeSetSize < csm.mergeSetSizeLimit && len(candidates) > 0 && uint64(len(selectedVirtualParents)) < uint64(csm.maxBlockParents) { + candidate := candidates[0] + candidates = candidates[1:] + + log.Debugf("Attempting to add %s to the virtual parents", candidate) + log.Debugf("The current merge set size is %d", mergeSetSize) + + canBeParent, newCandidate, mergeSetIncrease, err := csm.mergeSetIncrease( + stagingArea, candidate, selectedVirtualParents, mergeSetSize) + if err != nil { + return nil, err + } + if canBeParent { + mergeSetSize += mergeSetIncrease + selectedVirtualParents = append(selectedVirtualParents, candidate) + log.Tracef("Added block %s to the virtual parents set", candidate) + continue + } + // If we already have a candidate in the past of newCandidate then skip. 
+ isInFutureOfCandidates, err := csm.dagTopologyManager.IsAnyAncestorOf(stagingArea, candidates, newCandidate) + if err != nil { + return nil, err + } + if isInFutureOfCandidates { + continue + } + // Remove all candidates in the future of newCandidate + candidates, err = csm.removeHashesInFutureOf(stagingArea, candidates, newCandidate) + if err != nil { + return nil, err + } + candidates = append(candidates, newCandidate) + log.Debugf("Block %s increases merge set too much, instead adding its ancestor %s", candidate, newCandidate) + } + + boundedMergeBreakingParents, err := csm.boundedMergeBreakingParents(stagingArea, selectedVirtualParents) + if err != nil { + return nil, err + } + log.Tracef("The following parents are omitted for "+ + "breaking the bounded merge set: %s", boundedMergeBreakingParents) + + // Remove all boundedMergeBreakingParents from selectedVirtualParents + for _, breakingParent := range boundedMergeBreakingParents { + for i, parent := range selectedVirtualParents { + if parent.Equal(breakingParent) { + selectedVirtualParents[i] = selectedVirtualParents[len(selectedVirtualParents)-1] + selectedVirtualParents = selectedVirtualParents[:len(selectedVirtualParents)-1] + break + } + } + } + log.Debugf("The virtual parents resolved to be: %s", selectedVirtualParents) + return selectedVirtualParents, nil +} + +func (csm *consensusStateManager) removeHashesInFutureOf(stagingArea *model.StagingArea, hashes []*externalapi.DomainHash, + ancestor *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + // Source: https://github.com/golang/go/wiki/SliceTricks#filter-in-place + i := 0 + for _, hash := range hashes { + isInFutureOfAncestor, err := csm.dagTopologyManager.IsAncestorOf(stagingArea, ancestor, hash) + if err != nil { + return nil, err + } + if !isInFutureOfAncestor { + hashes[i] = hash + i++ + } + } + return hashes[:i], nil +} + +func (csm *consensusStateManager) selectVirtualSelectedParent(stagingArea *model.StagingArea, + candidatesHeap 
model.BlockHeap) (*externalapi.DomainHash, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "selectVirtualSelectedParent") + defer onEnd() + + disqualifiedCandidates := hashset.New() + + for { + if candidatesHeap.Len() == 0 { + return nil, errors.New("virtual has no valid parent candidates") + } + selectedParentCandidate := candidatesHeap.Pop() + + log.Debugf("Checking block %s for selected parent eligibility", selectedParentCandidate) + selectedParentCandidateStatus, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, selectedParentCandidate) + if err != nil { + return nil, err + } + if selectedParentCandidateStatus == externalapi.StatusUTXOValid { + log.Debugf("Block %s is valid. Returning it as the selected parent", selectedParentCandidate) + return selectedParentCandidate, nil + } + + log.Debugf("Block %s is not valid. Adding it to the disqualified set", selectedParentCandidate) + disqualifiedCandidates.Add(selectedParentCandidate) + + candidateParents, err := csm.dagTopologyManager.Parents(stagingArea, selectedParentCandidate) + if err != nil { + return nil, err + } + log.Debugf("The parents of block %s are: %s", selectedParentCandidate, candidateParents) + for _, parent := range candidateParents { + allParentChildren, err := csm.dagTopologyManager.Children(stagingArea, parent) + if err != nil { + return nil, err + } + log.Debugf("The children of block %s are: %s", parent, allParentChildren) + + // remove virtual and any headers-only blocks from parentChildren if such are there + nonHeadersOnlyParentChildren := make([]*externalapi.DomainHash, 0, len(allParentChildren)) + for _, parentChild := range allParentChildren { + if parentChild.Equal(model.VirtualBlockHash) { + continue + } + + parentChildStatus, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, parentChild) + if err != nil { + return nil, err + } + if parentChildStatus == externalapi.StatusHeaderOnly { + continue + } + nonHeadersOnlyParentChildren = 
append(nonHeadersOnlyParentChildren, parentChild) + } + log.Debugf("The non-virtual, non-headers-only children of block %s are: %s", parent, nonHeadersOnlyParentChildren) + + if disqualifiedCandidates.ContainsAllInSlice(nonHeadersOnlyParentChildren) { + log.Debugf("The disqualified set contains all the "+ + "children of %s. Adding it to the candidate heap", nonHeadersOnlyParentChildren) + err := candidatesHeap.Push(parent) + if err != nil { + return nil, err + } + } + } + } +} + +// mergeSetIncrease returns different things depending on the result: +// If the candidate can be a virtual parent then canBeParent=true and mergeSetIncrease=The increase in merge set size +// If the candidate can't be a virtual parent, then canBeParent=false and newCandidate is a new proposed candidate in the past of candidate. +func (csm *consensusStateManager) mergeSetIncrease(stagingArea *model.StagingArea, candidate *externalapi.DomainHash, + selectedVirtualParents []*externalapi.DomainHash, mergeSetSize uint64) ( + canBeParent bool, newCandidate *externalapi.DomainHash, mergeSetIncrease uint64, err error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "mergeSetIncrease") + defer onEnd() + + visited := hashset.New() + // Start with the candidate's parents in the queue as we already know the candidate isn't an ancestor of the selectedVirtualParents. 
+ parents, err := csm.dagTopologyManager.Parents(stagingArea, candidate) + if err != nil { + return false, nil, 0, err + } + for _, parent := range parents { + visited.Add(parent) + } + queue := parents + mergeSetIncrease = uint64(1) // starts with 1 for the candidate itself + + var current *externalapi.DomainHash + for len(queue) > 0 { + current, queue = queue[0], queue[1:] + log.Tracef("Attempting to increment the merge set size increase for block %s", current) + + isInPastOfSelectedVirtualParents, err := csm.dagTopologyManager.IsAncestorOfAny(stagingArea, current, selectedVirtualParents) + if err != nil { + return false, nil, 0, err + } + if isInPastOfSelectedVirtualParents { + log.Tracef("Skipping block %s because it's in the past of one (or more) of the selected virtual parents", current) + continue + } + + log.Tracef("Incrementing the merge set size increase") + mergeSetIncrease++ + + if (mergeSetSize + mergeSetIncrease) > csm.mergeSetSizeLimit { + log.Debugf("The merge set would increase by more than the limit with block %s", candidate) + return false, current, mergeSetIncrease, nil + } + + parents, err := csm.dagTopologyManager.Parents(stagingArea, current) + if err != nil { + return false, nil, 0, err + } + for _, parent := range parents { + if !visited.Contains(parent) { + visited.Add(parent) + queue = append(queue, parent) + } + } + } + log.Debugf("The resolved merge set size increase is: %d", mergeSetIncrease) + + return true, nil, mergeSetIncrease, nil +} + +func (csm *consensusStateManager) boundedMergeBreakingParents(stagingArea *model.StagingArea, + parents []*externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "boundedMergeBreakingParents") + defer onEnd() + + log.Tracef("boundedMergeBreakingParents start for parents: %s", parents) + + log.Debug("Temporarily setting virtual to all parents, so that we can run ghostdag on it") + err := csm.dagTopologyManager.SetParents(stagingArea, 
model.VirtualBlockHash, parents) + if err != nil { + return nil, err + } + + err = csm.ghostdagManager.GHOSTDAG(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + virtualMergeDepthRoot, err := csm.mergeDepthManager.VirtualMergeDepthRoot(stagingArea) + if err != nil { + return nil, err + } + log.Debugf("The merge depth root of virtual is: %s", virtualMergeDepthRoot) + + potentiallyKosherizingBlocks, err := + csm.mergeDepthManager.NonBoundedMergeDepthViolatingBlues(stagingArea, model.VirtualBlockHash, virtualMergeDepthRoot) + if err != nil { + return nil, err + } + log.Debugf("The potentially kosherizing blocks are: %s", potentiallyKosherizingBlocks) + + var badReds []*externalapi.DomainHash + + virtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + for _, redBlock := range virtualGHOSTDAGData.MergeSetReds() { + log.Debugf("Check whether red block %s is kosherized", redBlock) + isMergeDepthRootInPast, err := csm.dagTopologyManager.IsAncestorOf(stagingArea, virtualMergeDepthRoot, redBlock) + if err != nil { + return nil, err + } + if isMergeDepthRootInPast { + log.Debugf("Skipping red block %s because it has the virtual's"+ + " merge depth root in its past", redBlock) + continue + } + + isKosherized := false + for _, potentiallyKosherizingBlock := range potentiallyKosherizingBlocks { + isKosherized, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, redBlock, potentiallyKosherizingBlock) + if err != nil { + return nil, err + } + log.Debugf("Red block %s is an ancestor of potentially kosherizing "+ + "block %s, therefore the red block is kosher", redBlock, potentiallyKosherizingBlock) + if isKosherized { + break + } + } + if !isKosherized { + log.Debugf("Red block %s is not kosher. 
Adding it to the bad reds set", redBlock) + badReds = append(badReds, redBlock) + } + } + + var boundedMergeBreakingParents []*externalapi.DomainHash + for _, parent := range parents { + log.Debugf("Checking whether parent %s breaks the bounded merge set", parent) + isBadRedInPast := false + for _, badRedBlock := range badReds { + isBadRedInPast, err = csm.dagTopologyManager.IsAncestorOf(stagingArea, badRedBlock, parent) + if err != nil { + return nil, err + } + if isBadRedInPast { + log.Debugf("Parent %s is a descendant of bad red %s", parent, badRedBlock) + break + } + } + if isBadRedInPast { + log.Debugf("Adding parent %s to the bounded merge breaking parents set", parent) + boundedMergeBreakingParents = append(boundedMergeBreakingParents, parent) + } + } + + return boundedMergeBreakingParents, nil +} diff --git a/domain/consensus/processes/consensusstatemanager/populate_tx_with_utxo_entries.go b/domain/consensus/processes/consensusstatemanager/populate_tx_with_utxo_entries.go new file mode 100644 index 0000000..94aff8e --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/populate_tx_with_utxo_entries.go @@ -0,0 +1,126 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// PopulateTransactionWithUTXOEntries populates the transaction UTXO entries with data from the virtual's UTXO set. 
+func (csm *consensusStateManager) PopulateTransactionWithUTXOEntries( + stagingArea *model.StagingArea, transaction *externalapi.DomainTransaction) error { + return csm.populateTransactionWithUTXOEntriesFromVirtualOrDiff(stagingArea, transaction, nil) +} + +// populateTransactionWithUTXOEntriesFromVirtualOrDiff populates the transaction UTXO entries with data +// from the virtual's UTXO set combined with the provided utxoDiff. +// If utxoDiff == nil UTXO entries are taken from the virtual's UTXO set only +func (csm *consensusStateManager) populateTransactionWithUTXOEntriesFromVirtualOrDiff(stagingArea *model.StagingArea, + transaction *externalapi.DomainTransaction, utxoDiff externalapi.UTXODiff) error { + + transactionID := consensushashing.TransactionID(transaction) + log.Tracef("populateTransactionWithUTXOEntriesFromVirtualOrDiff start for transaction %s", transactionID) + defer log.Tracef("populateTransactionWithUTXOEntriesFromVirtualOrDiff end for transaction %s", transactionID) + + var missingOutpoints []*externalapi.DomainOutpoint + for _, transactionInput := range transaction.Inputs { + // skip all inputs that have a pre-filled utxo entry + if transactionInput.UTXOEntry != nil { + log.Tracef("Skipping outpoint %s:%d because it is already populated", + transactionInput.PreviousOutpoint.TransactionID, transactionInput.PreviousOutpoint.Index) + continue + } + + // check if utxoDiff says anything about the input's outpoint + if utxoDiff != nil { + if utxoEntry, ok := utxoDiff.ToAdd().Get(&transactionInput.PreviousOutpoint); ok { + log.Tracef("Populating outpoint %s:%d from the given utxoDiff", + transactionInput.PreviousOutpoint.TransactionID, transactionInput.PreviousOutpoint.Index) + transactionInput.UTXOEntry = utxoEntry + continue + } + + if utxoDiff.ToRemove().Contains(&transactionInput.PreviousOutpoint) { + log.Tracef("Outpoint %s:%d is missing in the given utxoDiff", + transactionInput.PreviousOutpoint.TransactionID, 
transactionInput.PreviousOutpoint.Index) + missingOutpoints = append(missingOutpoints, &transactionInput.PreviousOutpoint) + continue + } + } + + // Check for the input's outpoint in virtual's UTXO set. + hasUTXOEntry, err := csm.consensusStateStore.HasUTXOByOutpoint( + csm.databaseContext, stagingArea, &transactionInput.PreviousOutpoint) + if err != nil { + return err + } + if !hasUTXOEntry { + log.Tracef("Outpoint %s:%d is missing in the database", + transactionInput.PreviousOutpoint.TransactionID, transactionInput.PreviousOutpoint.Index) + missingOutpoints = append(missingOutpoints, &transactionInput.PreviousOutpoint) + continue + } + + log.Tracef("Populating outpoint %s:%d from the database", + transactionInput.PreviousOutpoint.TransactionID, transactionInput.PreviousOutpoint.Index) + utxoEntry, err := csm.consensusStateStore.UTXOByOutpoint( + csm.databaseContext, stagingArea, &transactionInput.PreviousOutpoint) + if err != nil { + return err + } + transactionInput.UTXOEntry = utxoEntry + } + + if len(missingOutpoints) > 0 { + return ruleerrors.NewErrMissingTxOut(missingOutpoints) + } + + return nil +} + +func (csm *consensusStateManager) populateTransactionWithUTXOEntriesFromUTXOSet( + pruningPoint *externalapi.DomainBlock, iterator externalapi.ReadOnlyUTXOSetIterator) error { + + // Collect the required outpoints from the block + outpointsForPopulation := make(map[externalapi.DomainOutpoint]interface{}) + for _, transaction := range pruningPoint.Transactions { + for _, input := range transaction.Inputs { + outpointsForPopulation[input.PreviousOutpoint] = struct{}{} + } + } + + // Collect the UTXO entries from the iterator + outpointsToUTXOEntries := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, len(outpointsForPopulation)) + for ok := iterator.First(); ok; ok = iterator.Next() { + outpoint, utxoEntry, err := iterator.Get() + if err != nil { + return err + } + outpointValue := *outpoint + if _, ok := outpointsForPopulation[outpointValue]; ok { + 
outpointsToUTXOEntries[outpointValue] = utxoEntry + } + if len(outpointsForPopulation) == len(outpointsToUTXOEntries) { + break + } + } + + // Populate the block with the collected UTXO entries + var missingOutpoints []*externalapi.DomainOutpoint + for _, transaction := range pruningPoint.Transactions { + for _, input := range transaction.Inputs { + utxoEntry, ok := outpointsToUTXOEntries[input.PreviousOutpoint] + if !ok { + missingOutpoints = append(missingOutpoints, &input.PreviousOutpoint) + continue + } + input.UTXOEntry = utxoEntry + } + } + + if len(missingOutpoints) > 0 { + return ruleerrors.NewErrMissingTxOut(missingOutpoints) + } + return nil +} diff --git a/domain/consensus/processes/consensusstatemanager/resolve.go b/domain/consensus/processes/consensusstatemanager/resolve.go new file mode 100644 index 0000000..e828b4d --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/resolve.go @@ -0,0 +1,225 @@ +package consensusstatemanager + +import ( + "sort" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/staging" +) + +// tipsInDecreasingGHOSTDAGParentSelectionOrder returns the current DAG tips in decreasing parent selection order. +// This means that the first tip in the resulting list would be the GHOSTDAG selected parent, and if removed from the list, +// the second tip would be the selected parent, and so on. 
+func (csm *consensusStateManager) tipsInDecreasingGHOSTDAGParentSelectionOrder(stagingArea *model.StagingArea) ([]*externalapi.DomainHash, error) { + tips, err := csm.consensusStateStore.Tips(stagingArea, csm.databaseContext) + if err != nil { + return nil, err + } + + var sortErr error + sort.Slice(tips, func(i, j int) bool { + selectedParent, err := csm.ghostdagManager.ChooseSelectedParent(stagingArea, tips[i], tips[j]) + if err != nil { + sortErr = err + return false + } + + return selectedParent.Equal(tips[i]) + }) + if sortErr != nil { + return nil, sortErr + } + return tips, nil +} + +func (csm *consensusStateManager) findNextPendingTip(stagingArea *model.StagingArea) (*externalapi.DomainHash, externalapi.BlockStatus, error) { + orderedTips, err := csm.tipsInDecreasingGHOSTDAGParentSelectionOrder(stagingArea) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + for _, tip := range orderedTips { + log.Debugf("Resolving tip %s", tip) + isViolatingFinality, shouldNotify, err := csm.isViolatingFinality(stagingArea, tip) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + + if isViolatingFinality { + if shouldNotify { + //TODO: Send finality conflict notification + log.Warnf("Skipping %s tip resolution because it violates finality", tip) + } + continue + } + + status, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, tip) + if err != nil { + return nil, externalapi.StatusInvalid, err + } + if status == externalapi.StatusUTXOValid || status == externalapi.StatusUTXOPendingVerification { + return tip, status, nil + } + } + + return nil, externalapi.StatusInvalid, nil +} + +// getGHOSTDAGLowerTips returns the set of tips which are lower in GHOSTDAG parent selection order than `pendingTip`. 
i.e., +// they can be added to virtual parents but `pendingTip` will remain the virtual selected parent +func (csm *consensusStateManager) getGHOSTDAGLowerTips(stagingArea *model.StagingArea, pendingTip *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + tips, err := csm.consensusStateStore.Tips(stagingArea, csm.databaseContext) + if err != nil { + return nil, err + } + + lowerTips := []*externalapi.DomainHash{pendingTip} + for _, tip := range tips { + if tip.Equal(pendingTip) { + continue + } + selectedParent, err := csm.ghostdagManager.ChooseSelectedParent(stagingArea, tip, pendingTip) + if err != nil { + return nil, err + } + if selectedParent.Equal(pendingTip) { + lowerTips = append(lowerTips, tip) + } + } + return lowerTips, nil +} + +func (csm *consensusStateManager) ResolveVirtual(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "csm.ResolveVirtual") + defer onEnd() + + // We use a read-only staging area for some read-only actions, to avoid + // confusion with the resolve/updateVirtual staging areas below + readStagingArea := model.NewStagingArea() + + pendingTip, pendingTipStatus, err := csm.findNextPendingTip(readStagingArea) + if err != nil { + return nil, false, err + } + + if pendingTip == nil { + log.Warnf("None of the DAG tips are valid") + return nil, true, nil + } + + previousVirtualSelectedParent, err := csm.virtualSelectedParent(readStagingArea) + if err != nil { + return nil, false, err + } + + if pendingTipStatus == externalapi.StatusUTXOValid && previousVirtualSelectedParent.Equal(pendingTip) { + return nil, true, nil + } + + // Resolve a chunk from the pending chain + resolveStagingArea := model.NewStagingArea() + unverifiedBlocks, err := csm.getUnverifiedChainBlocks(resolveStagingArea, pendingTip) + if err != nil { + return nil, false, err + } + + // Initially set the resolve processing point to the pending tip + processingPoint := pendingTip + + // Too 
many blocks to verify, so we only process a chunk and return + if maxBlocksToResolve != 0 && uint64(len(unverifiedBlocks)) > maxBlocksToResolve { + processingPointIndex := uint64(len(unverifiedBlocks)) - maxBlocksToResolve + processingPoint = unverifiedBlocks[processingPointIndex] + isNewVirtualSelectedParent, err := csm.isNewSelectedTip(readStagingArea, processingPoint, previousVirtualSelectedParent) + if err != nil { + return nil, false, err + } + + // We must find a processing point which wins previous virtual selected parent + // even if we process more than `maxBlocksToResolve` for that. + // Otherwise, internal UTXO diff logic gets all messed up + for !isNewVirtualSelectedParent { + if processingPointIndex == 0 { + return nil, false, errors.Errorf( + "Expecting the pending tip %s to overcome the previous selected parent %s", pendingTip, previousVirtualSelectedParent) + } + processingPointIndex-- + processingPoint = unverifiedBlocks[processingPointIndex] + isNewVirtualSelectedParent, err = csm.isNewSelectedTip(readStagingArea, processingPoint, previousVirtualSelectedParent) + if err != nil { + return nil, false, err + } + } + log.Debugf("Has more than %d blocks to resolve. 
Setting the resolve processing point to %s", maxBlocksToResolve, processingPoint) + } + + processingPointStatus, reversalData, err := csm.resolveBlockStatus( + resolveStagingArea, processingPoint, true) + if err != nil { + return nil, false, err + } + + if processingPointStatus == externalapi.StatusUTXOValid { + err = staging.CommitAllChanges(csm.databaseContext, resolveStagingArea) + if err != nil { + return nil, false, err + } + + if reversalData != nil { + err = csm.ReverseUTXODiffs(processingPoint, reversalData) + if err != nil { + return nil, false, err + } + } + } + + isActualTip := processingPoint.Equal(pendingTip) + isCompletelyResolved := isActualTip && processingPointStatus == externalapi.StatusUTXOValid + + updateVirtualStagingArea := model.NewStagingArea() + + virtualParents := []*externalapi.DomainHash{processingPoint} + // If `isCompletelyResolved`, set virtual correctly with all tips which have less blue work than pending + if isCompletelyResolved { + lowerTips, err := csm.getGHOSTDAGLowerTips(readStagingArea, pendingTip) + if err != nil { + return nil, false, err + } + log.Debugf("Picking virtual parents from relevant tips len: %d", len(lowerTips)) + + virtualParents, err = csm.pickVirtualParents(readStagingArea, lowerTips) + if err != nil { + return nil, false, err + } + log.Debugf("Picked virtual parents: %s", virtualParents) + } + virtualUTXODiff, err := csm.updateVirtualWithParents(updateVirtualStagingArea, virtualParents) + if err != nil { + return nil, false, err + } + + err = staging.CommitAllChanges(csm.databaseContext, updateVirtualStagingArea) + if err != nil { + return nil, false, err + } + + selectedParentChainChanges, err := csm.dagTraversalManager. 
+ CalculateChainPath(updateVirtualStagingArea, previousVirtualSelectedParent, processingPoint) + if err != nil { + return nil, false, err + } + + virtualParentsOutcome, err := csm.dagTopologyManager.Parents(updateVirtualStagingArea, model.VirtualBlockHash) + if err != nil { + return nil, false, err + } + + return &externalapi.VirtualChangeSet{ + VirtualSelectedParentChainChanges: selectedParentChainChanges, + VirtualUTXODiff: virtualUTXODiff, + VirtualParents: virtualParentsOutcome, + }, isCompletelyResolved, nil +} diff --git a/domain/consensus/processes/consensusstatemanager/resolve_block_status.go b/domain/consensus/processes/consensusstatemanager/resolve_block_status.go new file mode 100644 index 0000000..fc11626 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/resolve_block_status.go @@ -0,0 +1,308 @@ +package consensusstatemanager + +import ( + "fmt" + + "github.com/spectre-project/spectred/util/staging" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func (csm *consensusStateManager) resolveBlockStatus(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + useSeparateStagingAreaPerBlock bool) (externalapi.BlockStatus, *model.UTXODiffReversalData, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, fmt.Sprintf("resolveBlockStatus for %s", blockHash)) + defer onEnd() + + log.Debugf("Getting a list of all blocks in the selected "+ + "parent chain of %s that have no yet resolved their status", blockHash) + unverifiedBlocks, err := csm.getUnverifiedChainBlocks(stagingArea, blockHash) + if err != nil { + return 0, nil, err + } + log.Debugf("Got %d unverified blocks in the selected parent "+ + "chain of %s: %s", len(unverifiedBlocks), blockHash, unverifiedBlocks) + + // If 
there's no unverified blocks in the given block's chain - this means the given block already has a + // UTXO-verified status, and therefore it should be retrieved from the store and returned + if len(unverifiedBlocks) == 0 { + log.Debugf("There are not unverified blocks in %s's selected parent chain. "+ + "This means that the block already has a UTXO-verified status.", blockHash) + status, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, blockHash) + if err != nil { + return 0, nil, err + } + log.Debugf("Block %s's status resolved to: %s", blockHash, status) + return status, nil, nil + } + + log.Debugf("Finding the status of the selected parent of %s", blockHash) + selectedParentHash, selectedParentStatus, selectedParentUTXOSet, err := csm.selectedParentInfo(stagingArea, unverifiedBlocks) + if err != nil { + return 0, nil, err + } + log.Debugf("The status of the selected parent of %s is: %s", blockHash, selectedParentStatus) + + log.Debugf("Resolving the unverified blocks' status in reverse order (past to present)") + var blockStatus externalapi.BlockStatus + + previousBlockHash := selectedParentHash + previousBlockUTXOSet := selectedParentUTXOSet + var oneBeforeLastResolvedBlockUTXOSet externalapi.UTXODiff + var oneBeforeLastResolvedBlockHash *externalapi.DomainHash + + for i := len(unverifiedBlocks) - 1; i >= 0; i-- { + unverifiedBlockHash := unverifiedBlocks[i] + + stagingAreaForCurrentBlock := stagingArea + isResolveTip := i == 0 + useSeparateStagingArea := useSeparateStagingAreaPerBlock && !isResolveTip + if useSeparateStagingArea { + stagingAreaForCurrentBlock = model.NewStagingArea() + } + + if selectedParentStatus == externalapi.StatusDisqualifiedFromChain { + blockStatus = externalapi.StatusDisqualifiedFromChain + } else { + oneBeforeLastResolvedBlockUTXOSet = previousBlockUTXOSet + oneBeforeLastResolvedBlockHash = previousBlockHash + + blockStatus, previousBlockUTXOSet, err = csm.resolveSingleBlockStatus( + stagingAreaForCurrentBlock, 
unverifiedBlockHash, previousBlockHash, previousBlockUTXOSet, isResolveTip) + if err != nil { + return 0, nil, err + } + } + + csm.blockStatusStore.Stage(stagingAreaForCurrentBlock, unverifiedBlockHash, blockStatus) + selectedParentStatus = blockStatus + log.Debugf("Block %s status resolved to `%s`, finished %d/%d of unverified blocks", + unverifiedBlockHash, blockStatus, len(unverifiedBlocks)-i, len(unverifiedBlocks)) + + if useSeparateStagingArea { + err := staging.CommitAllChanges(csm.databaseContext, stagingAreaForCurrentBlock) + if err != nil { + return 0, nil, err + } + } + previousBlockHash = unverifiedBlockHash + } + + var reversalData *model.UTXODiffReversalData + if blockStatus == externalapi.StatusUTXOValid && len(unverifiedBlocks) > 1 { + log.Debugf("Preparing data for reversing the UTXODiff") + // During resolveSingleBlockStatus, all unverifiedBlocks (excluding the tip) were assigned their selectedParent + // as their UTXODiffChild. + // Now that the whole chain has been resolved - we can reverse the UTXODiffs, to create shorter UTXODiffChild paths. 
+ // However, we can't do this right now, because the tip of the chain is not yet committed, so we prepare the + // needed data (tip's selectedParent and selectedParent's UTXODiff) + selectedParentUTXODiff, err := previousBlockUTXOSet.DiffFrom(oneBeforeLastResolvedBlockUTXOSet) + if err != nil { + return 0, nil, err + } + + reversalData = &model.UTXODiffReversalData{ + SelectedParentHash: oneBeforeLastResolvedBlockHash, + SelectedParentUTXODiff: selectedParentUTXODiff, + } + } + + return blockStatus, reversalData, nil +} + +// selectedParentInfo returns the hash and status of the selectedParent of the last block in the unverifiedBlocks +// chain, in addition, if the status is UTXOValid, it return it's pastUTXOSet +func (csm *consensusStateManager) selectedParentInfo( + stagingArea *model.StagingArea, unverifiedBlocks []*externalapi.DomainHash) ( + *externalapi.DomainHash, externalapi.BlockStatus, externalapi.UTXODiff, error) { + + log.Tracef("findSelectedParentStatus start") + defer log.Tracef("findSelectedParentStatus end") + + lastUnverifiedBlock := unverifiedBlocks[len(unverifiedBlocks)-1] + if lastUnverifiedBlock.Equal(csm.genesisHash) { + log.Debugf("the most recent unverified block is the genesis block, "+ + "which by definition has status: %s", externalapi.StatusUTXOValid) + utxoDiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, lastUnverifiedBlock) + if err != nil { + return nil, 0, nil, err + } + return lastUnverifiedBlock, externalapi.StatusUTXOValid, utxoDiff, nil + } + lastUnverifiedBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, lastUnverifiedBlock, false) + if err != nil { + return nil, 0, nil, err + } + selectedParent := lastUnverifiedBlockGHOSTDAGData.SelectedParent() + selectedParentStatus, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, selectedParent) + if err != nil { + return nil, 0, nil, err + } + if selectedParentStatus != externalapi.StatusUTXOValid { + return 
selectedParent, selectedParentStatus, nil, nil + } + + selectedParentUTXOSet, err := csm.restorePastUTXO(stagingArea, selectedParent) + if err != nil { + return nil, 0, nil, err + } + return selectedParent, selectedParentStatus, selectedParentUTXOSet, nil +} + +func (csm *consensusStateManager) getUnverifiedChainBlocks(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + log.Tracef("getUnverifiedChainBlocks start for block %s", blockHash) + defer log.Tracef("getUnverifiedChainBlocks end for block %s", blockHash) + + var unverifiedBlocks []*externalapi.DomainHash + currentHash := blockHash + for { + log.Tracef("Getting status for block %s", currentHash) + currentBlockStatus, err := csm.blockStatusStore.Get(csm.databaseContext, stagingArea, currentHash) + if err != nil { + return nil, err + } + if currentBlockStatus != externalapi.StatusUTXOPendingVerification { + log.Tracef("Block %s has status %s. Returning all the "+ + "unverified blocks prior to it: %s", currentHash, currentBlockStatus, unverifiedBlocks) + return unverifiedBlocks, nil + } + + log.Tracef("Block %s is unverified. Adding it to the unverified block collection", currentHash) + unverifiedBlocks = append(unverifiedBlocks, currentHash) + + currentBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, currentHash, false) + if err != nil { + return nil, err + } + + if currentBlockGHOSTDAGData.SelectedParent() == nil { + log.Tracef("Genesis block reached. 
Returning all the "+ + "unverified blocks prior to it: %s", unverifiedBlocks) + return unverifiedBlocks, nil + } + + currentHash = currentBlockGHOSTDAGData.SelectedParent() + } +} + +func (csm *consensusStateManager) resolveSingleBlockStatus(stagingArea *model.StagingArea, + blockHash, selectedParentHash *externalapi.DomainHash, selectedParentPastUTXOSet externalapi.UTXODiff, isResolveTip bool) ( + externalapi.BlockStatus, externalapi.UTXODiff, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, fmt.Sprintf("resolveSingleBlockStatus for %s", blockHash)) + defer onEnd() + + log.Tracef("Calculating pastUTXO and acceptance data and multiset for block %s", blockHash) + pastUTXOSet, acceptanceData, multiset, err := csm.calculatePastUTXOAndAcceptanceDataWithSelectedParentUTXO( + stagingArea, blockHash, selectedParentPastUTXOSet) + if err != nil { + return 0, nil, err + } + + log.Tracef("Staging the calculated acceptance data of block %s", blockHash) + csm.acceptanceDataStore.Stage(stagingArea, blockHash, acceptanceData) + + block, err := csm.blockStore.Block(csm.databaseContext, stagingArea, blockHash) + if err != nil { + return 0, nil, err + } + + log.Tracef("verifying the UTXO of block %s", blockHash) + err = csm.verifyUTXO(stagingArea, block, blockHash, pastUTXOSet, acceptanceData, multiset) + if err != nil { + if errors.As(err, &ruleerrors.RuleError{}) { + log.Debugf("UTXO verification for block %s failed: %s", blockHash, err) + return externalapi.StatusDisqualifiedFromChain, nil, nil + } + return 0, nil, err + } + log.Debugf("UTXO verification for block %s passed", blockHash) + + log.Tracef("Staging the multiset of block %s", blockHash) + csm.multisetStore.Stage(stagingArea, blockHash, multiset) + + if csm.genesisHash.Equal(blockHash) { + log.Tracef("Staging the utxoDiff of genesis") + csm.stageDiff(stagingArea, blockHash, pastUTXOSet, nil) + return externalapi.StatusUTXOValid, nil, nil + } + + oldSelectedTip, err := csm.virtualSelectedParent(stagingArea) + 
if err != nil { + return 0, nil, err + } + + if isResolveTip { + oldSelectedTipUTXOSet, err := csm.restorePastUTXO(stagingArea, oldSelectedTip) + if err != nil { + return 0, nil, err + } + isNewSelectedTip, err := csm.isNewSelectedTip(stagingArea, blockHash, oldSelectedTip) + if err != nil { + return 0, nil, err + } + + if isNewSelectedTip { + log.Debugf("Block %s is the new selected tip, therefore setting it as old selected tip's diffChild", blockHash) + + updatedOldSelectedTipUTXOSet, err := pastUTXOSet.DiffFrom(oldSelectedTipUTXOSet) + if err != nil { + return 0, nil, err + } + log.Debugf("Setting the old selected tip's (%s) diffChild to be the new selected tip (%s)", + oldSelectedTip, blockHash) + csm.stageDiff(stagingArea, oldSelectedTip, updatedOldSelectedTipUTXOSet, blockHash) + + log.Tracef("Staging the utxoDiff of block %s, with virtual as diffChild", blockHash) + csm.stageDiff(stagingArea, blockHash, pastUTXOSet, nil) + } else { + log.Debugf("Block %s is the tip of currently resolved chain, but not the new selected tip,"+ + "therefore setting it's utxoDiffChild to be the current selectedTip %s", blockHash, oldSelectedTip) + utxoDiff, err := oldSelectedTipUTXOSet.DiffFrom(pastUTXOSet) + if err != nil { + return 0, nil, err + } + csm.stageDiff(stagingArea, blockHash, utxoDiff, oldSelectedTip) + } + } else { + // If the block is not the tip of the currently resolved chain, we set it's diffChild to be the selectedParent, + // this is a temporary measure to ensure there's a restore path to all blocks at all times. + // Later down the process, the diff will be reversed in reverseUTXODiffs. 
+ log.Debugf("Block %s is not the new selected tip, and is not the tip of the currently verified chain, "+ + "therefore temporarily setting selectedParent as it's diffChild", blockHash) + utxoDiff, err := selectedParentPastUTXOSet.DiffFrom(pastUTXOSet) + if err != nil { + return 0, nil, err + } + + csm.stageDiff(stagingArea, blockHash, utxoDiff, selectedParentHash) + } + + return externalapi.StatusUTXOValid, pastUTXOSet, nil +} + +func (csm *consensusStateManager) isNewSelectedTip(stagingArea *model.StagingArea, + blockHash, oldSelectedTip *externalapi.DomainHash) (bool, error) { + + newSelectedTip, err := csm.ghostdagManager.ChooseSelectedParent(stagingArea, blockHash, oldSelectedTip) + if err != nil { + return false, err + } + + return blockHash.Equal(newSelectedTip), nil +} + +func (csm *consensusStateManager) virtualSelectedParent(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + virtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + + return virtualGHOSTDAGData.SelectedParent(), nil +} diff --git a/domain/consensus/processes/consensusstatemanager/resolve_block_status_test.go b/domain/consensus/processes/consensusstatemanager/resolve_block_status_test.go new file mode 100644 index 0000000..d6ca44c --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/resolve_block_status_test.go @@ -0,0 +1,443 @@ +package consensusstatemanager_test + +import ( + "errors" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + 
"github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +func TestDoubleSpends(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + + consensusConfig.BlockCoinbaseMaturity = 0 + + factory := consensus.NewFactory() + + consensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestUTXOCommitment") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Mine chain of two blocks to fund our double spend + firstBlockHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating firstBlock: %+v", err) + } + fundingBlockHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{firstBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating fundingBlock: %+v", err) + } + fundingBlock, _, err := consensus.GetBlock(fundingBlockHash) + if err != nil { + t.Fatalf("Error getting fundingBlock: %+v", err) + } + + // Get funding transaction + fundingTransaction := fundingBlock.Transactions[transactionhelper.CoinbaseTransactionIndex] + + // Create two transactions that spends the same output, but with different IDs + spendingTransaction1, err := testutils.CreateTransaction(fundingTransaction, 1) + if err != nil { + t.Fatalf("Error creating spendingTransaction1: %+v", err) + } + spendingTransaction2, err := testutils.CreateTransaction(fundingTransaction, 1) + if err != nil { + t.Fatalf("Error creating spendingTransaction2: %+v", err) + } + spendingTransaction2.Outputs[0].Value-- // tweak the value to create a different ID + spendingTransaction1ID := consensushashing.TransactionID(spendingTransaction1) + spendingTransaction2ID := consensushashing.TransactionID(spendingTransaction2) + if spendingTransaction1ID.Equal(spendingTransaction2ID) { 
+ t.Fatalf("spendingTransaction1 and spendingTransaction2 ids are equal") + } + + // Mine a block with spendingTransaction1 and make sure that it's valid + goodBlock1Hash, _, err := consensus.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, + []*externalapi.DomainTransaction{spendingTransaction1}) + if err != nil { + t.Fatalf("Error adding goodBlock1: %+v", err) + } + goodBlock1Status, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, goodBlock1Hash) + if err != nil { + t.Fatalf("Error getting status of goodBlock1: %+v", err) + } + if goodBlock1Status != externalapi.StatusUTXOValid { + t.Fatalf("GoodBlock1 status expected to be '%s', but is '%s'", externalapi.StatusUTXOValid, goodBlock1Status) + } + + // To check that a block containing the same transaction already in it's past is disqualified: + // Add a block on top of goodBlock, containing spendingTransaction1, and make sure it's disqualified + doubleSpendingBlock1Hash, _, err := consensus.AddBlock([]*externalapi.DomainHash{goodBlock1Hash}, nil, + []*externalapi.DomainTransaction{spendingTransaction1}) + if err != nil { + t.Fatalf("Error adding doubleSpendingBlock1: %+v", err) + } + doubleSpendingBlock1Status, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, doubleSpendingBlock1Hash) + if err != nil { + t.Fatalf("Error getting status of goodBlock: %+v", err) + } + if doubleSpendingBlock1Status != externalapi.StatusDisqualifiedFromChain { + t.Fatalf("doubleSpendingBlock1 status expected to be '%s', but is '%s'", + externalapi.StatusDisqualifiedFromChain, doubleSpendingBlock1Status) + } + + // To check that a block containing a transaction that double-spends a transaction that + // is in it's past is disqualified: + // Add a block on top of goodBlock, containing spendingTransaction2, and make sure it's disqualified + doubleSpendingBlock2Hash, _, err := consensus.AddBlock([]*externalapi.DomainHash{goodBlock1Hash}, nil, + 
[]*externalapi.DomainTransaction{spendingTransaction2}) + if err != nil { + t.Fatalf("Error adding doubleSpendingBlock2: %+v", err) + } + doubleSpendingBlock2Status, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, doubleSpendingBlock2Hash) + if err != nil { + t.Fatalf("Error getting status of goodBlock: %+v", err) + } + if doubleSpendingBlock2Status != externalapi.StatusDisqualifiedFromChain { + t.Fatalf("doubleSpendingBlock2 status expected to be '%s', but is '%s'", + externalapi.StatusDisqualifiedFromChain, doubleSpendingBlock2Status) + } + + // To make sure that a block double-spending itself is rejected: + // Add a block on top of goodBlock, containing both spendingTransaction1 and spendingTransaction2, and make + // sure AddBlock returns a RuleError + _, _, err = consensus.AddBlock([]*externalapi.DomainHash{goodBlock1Hash}, nil, + []*externalapi.DomainTransaction{spendingTransaction1, spendingTransaction2}) + if err == nil { + t.Fatalf("No error when adding a self-double-spending block") + } + if !errors.Is(err, ruleerrors.ErrDoubleSpendInSameBlock) { + t.Fatalf("Adding self-double-spending block should have "+ + "returned ruleerrors.ErrDoubleSpendInSameBlock, but instead got: %+v", err) + } + + // To make sure that a block containing the same transaction twice is rejected: + // Add a block on top of goodBlock, containing spendingTransaction1 twice, and make + // sure AddBlock returns a RuleError + _, _, err = consensus.AddBlock([]*externalapi.DomainHash{goodBlock1Hash}, nil, + []*externalapi.DomainTransaction{spendingTransaction1, spendingTransaction1}) + if err == nil { + t.Fatalf("No error when adding a block containing the same transactin twice") + } + if !errors.Is(err, ruleerrors.ErrDuplicateTx) { + t.Fatalf("Adding block that contains the same transaction twice should have "+ + "returned ruleerrors.ErrDuplicateTx, but instead got: %+v", err) + } + + // Check that a block will not get disqualified if it has a transaction 
that double spends + // a transaction from its anticone. + goodBlock2Hash, _, err := consensus.AddBlock([]*externalapi.DomainHash{fundingBlockHash}, nil, + []*externalapi.DomainTransaction{spendingTransaction2}) + if err != nil { + t.Fatalf("Error adding goodBlock: %+v", err) + } + //use ResolveBlockStatus, since goodBlock2 might not be the selectedTip + goodBlock2Status, err := consensus.ConsensusStateManager().ResolveBlockStatus( + stagingArea, goodBlock2Hash, true) + if err != nil { + t.Fatalf("Error getting status of goodBlock: %+v", err) + } + if goodBlock2Status != externalapi.StatusUTXOValid { + t.Fatalf("GoodBlock2 status expected to be '%s', but is '%s'", externalapi.StatusUTXOValid, goodBlock2Status) + } + }) +} + +// TestTransactionAcceptance checks that block transactions are accepted correctly when the merge set is sorted topologically. +// DAG diagram: +// genesis <- blockA <- blockB <- blockC <- ..(chain of k-blocks).. lastBlockInChain <- blockD <- blockE <- blockF <- blockG +// +// ^ ^ | +// | redBlock <------------------------ blueChildOfRedBlock <------------------------------- +func TestTransactionAcceptance(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestTransactionAcceptance") + if err != nil { + t.Fatalf("Error setting up testConsensus: %+v", err) + } + defer teardown(false) + + blockHashA, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockA: %+v", err) + } + blockHashB, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashA}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockB: %+v", err) + } + blockHashC, _, err := 
testConsensus.AddBlock([]*externalapi.DomainHash{blockHashB}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockC: %+v", err) + } + // Add a chain of K blocks above blockC so we'll + // be able to mine a red block on top of it. + chainTipHash := blockHashC + for i := externalapi.KType(0); i < consensusConfig.K; i++ { + var err error + chainTipHash, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{chainTipHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating a block: %+v", err) + } + } + lastBlockInChain := chainTipHash + blockC, _, err := testConsensus.GetBlock(blockHashC) + if err != nil { + t.Fatalf("Error getting blockC: %+v", err) + } + fees := uint64(1) + transactionFromBlockC := blockC.Transactions[transactionhelper.CoinbaseTransactionIndex] + // transactionFromRedBlock is spending TransactionFromBlockC. + transactionFromRedBlock, err := testutils.CreateTransaction(transactionFromBlockC, fees) + if err != nil { + t.Fatalf("Error creating a transactionFromRedBlock: %+v", err) + } + transactionFromRedBlockInput0UTXOEntry, err := testConsensus.ConsensusStateStore(). + UTXOByOutpoint(testConsensus.DatabaseContext(), stagingArea, &transactionFromRedBlock.Inputs[0].PreviousOutpoint) + if err != nil { + t.Fatalf("Error getting UTXOEntry for transactionFromRedBlockInput: %s", err) + } + redHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashC}, nil, + []*externalapi.DomainTransaction{transactionFromRedBlock}) + if err != nil { + t.Fatalf("Error creating redBlock: %+v", err) + } + + transactionFromBlueChildOfRedBlock, err := testutils.CreateTransaction(transactionFromRedBlock, fees) + if err != nil { + t.Fatalf("Error creating transactionFromBlueChildOfRedBlock: %+v", err) + } + transactionFromBlueChildOfRedBlockInput0UTXOEntry, err := testConsensus.ConsensusStateStore(). 
+ UTXOByOutpoint(testConsensus.DatabaseContext(), stagingArea, &transactionFromBlueChildOfRedBlock.Inputs[0].PreviousOutpoint) + if err != nil { + t.Fatalf("Error getting UTXOEntry for transactionFromBlueChildOfRedBlockInput: %s", err) + } + blueChildOfRedBlockScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{3}, Version: 0} + // The blueChildOfRedBlock contains a transaction that spent an output from the red block. + hashBlueChildOfRedBlock, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{lastBlockInChain, redHash}, + &externalapi.DomainCoinbaseData{ + ScriptPublicKey: blueChildOfRedBlockScriptPublicKey, + ExtraData: nil, + }, []*externalapi.DomainTransaction{transactionFromBlueChildOfRedBlock}) + if err != nil { + t.Fatalf("Error creating blueChildOfRedBlock: %+v", err) + } + + // K blocks minded between blockC and blockD. + blockHashD, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{lastBlockInChain}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockD : %+v", err) + } + blockHashE, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashD}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockD : %+v", err) + } + blockEScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{4}, Version: 0} + blockHashF, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashE}, + &externalapi.DomainCoinbaseData{ + ScriptPublicKey: blockEScriptPublicKey, + ExtraData: nil, + }, nil) + if err != nil { + t.Fatalf("Error creating blockE: %+v", err) + } + blockFScriptPublicKey := &externalapi.ScriptPublicKey{Script: []byte{5}, Version: 0} + blockHashG, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockHashF, hashBlueChildOfRedBlock}, + &externalapi.DomainCoinbaseData{ + ScriptPublicKey: blockFScriptPublicKey, + ExtraData: nil, + }, nil) + if err != nil { + t.Fatalf("Error creating blockF: %+v", err) + } + + acceptanceData, err := 
testConsensus.AcceptanceDataStore().Get(testConsensus.DatabaseContext(), stagingArea, blockHashG) + if err != nil { + t.Fatalf("Error getting acceptance data: %+v", err) + } + blueChildOfRedBlock, _, err := testConsensus.GetBlock(hashBlueChildOfRedBlock) + if err != nil { + t.Fatalf("Error getting blueChildOfRedBlock: %+v", err) + } + blockE, _, err := testConsensus.GetBlock(blockHashF) + if err != nil { + t.Fatalf("Error getting blockE: %+v", err) + } + redBlock, _, err := testConsensus.GetBlock(redHash) + if err != nil { + t.Fatalf("Error getting redBlock: %+v", err) + } + _, found, err := testConsensus.GetBlock(blockHashG) + if err != nil { + t.Fatalf("Error getting blockG: %+v", err) + } + + if !found { + t.Fatalf("block G is missing") + } + + updatedDAAScoreVirtualBlock := consensusConfig.GenesisBlock.Header.DAAScore() + 26 + //We expect the second transaction in the "blue block" (blueChildOfRedBlock) to be accepted because the merge set is ordered topologically + //and the red block is ordered topologically before the "blue block" so the input is known in the UTXOSet. + expectedAcceptanceData := externalapi.AcceptanceData{ + { + BlockHash: blockHashF, + TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{ + { + Transaction: blockE.Transactions[0], + Fee: 0, + IsAccepted: true, + TransactionInputUTXOEntries: []externalapi.UTXOEntry{}, + }, + }, + }, + { + BlockHash: redHash, + TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{ + { //Coinbase transaction outputs are added to the UTXO-set only if they are in the selected parent chain, + // and this block isn't. 
+ Transaction: redBlock.Transactions[0], + Fee: 0, + IsAccepted: false, + TransactionInputUTXOEntries: []externalapi.UTXOEntry{}, + }, + { + Transaction: redBlock.Transactions[1], + Fee: fees, + IsAccepted: true, + TransactionInputUTXOEntries: []externalapi.UTXOEntry{transactionFromRedBlockInput0UTXOEntry}, + }, + }, + }, + { + BlockHash: hashBlueChildOfRedBlock, + TransactionAcceptanceData: []*externalapi.TransactionAcceptanceData{ + { //Coinbase transaction outputs are added to the UTXO-set only if they are in the selected parent chain, + // and this block isn't. + Transaction: blueChildOfRedBlock.Transactions[0], + Fee: 0, + IsAccepted: false, + TransactionInputUTXOEntries: []externalapi.UTXOEntry{}, + }, + { // The DAAScore was calculated by the virtual block pov. The DAAScore has changed since more blocks were added to the DAG. + // So we will change the DAAScore in the UTXOEntryInput to the updated virtual DAAScore. + Transaction: blueChildOfRedBlock.Transactions[1], + Fee: fees, + IsAccepted: true, + TransactionInputUTXOEntries: []externalapi.UTXOEntry{ + utxo.NewUTXOEntry(transactionFromBlueChildOfRedBlockInput0UTXOEntry.Amount(), + transactionFromBlueChildOfRedBlockInput0UTXOEntry.ScriptPublicKey(), + transactionFromBlueChildOfRedBlockInput0UTXOEntry.IsCoinbase(), uint64(updatedDAAScoreVirtualBlock))}, + }, + }, + }, + } + if !acceptanceData.Equal(expectedAcceptanceData) { + t.Fatalf("The acceptance data is not the expected acceptance data") + } + }) +} + +func TestResolveBlockStatusSanity(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + + consensus, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestResolveBlockStatusSanity") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + genesisHash := consensusConfig.GenesisHash + allHashes := []*externalapi.DomainHash{genesisHash} + + // Make 
sure that the status of genesisHash is valid + genesisStatus, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, genesisHash) + if err != nil { + t.Fatalf("error getting genesis status: %s", err) + } + if genesisStatus != externalapi.StatusUTXOValid { + t.Fatalf("genesis is unexpectedly non-valid. Its status is: %s", genesisStatus) + } + + chainLength := int(consensusConfig.K) + 1 + + // Add a chain of blocks over the genesis and make sure all their + // statuses are valid + currentHash := genesisHash + for i := 0; i < chainLength; i++ { + addedBlockHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{currentHash}, nil, nil) + if err != nil { + t.Fatalf("error adding block %d: %s", i, err) + } + blockStatus, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, addedBlockHash) + if err != nil { + t.Fatalf("error getting block %d (%s) status: %s", i, addedBlockHash, err) + } + if blockStatus != externalapi.StatusUTXOValid { + t.Fatalf("block %d (%s) is unexpectedly non-valid. Its status is: %s", i, addedBlockHash, blockStatus) + } + currentHash = addedBlockHash + allHashes = append(allHashes, addedBlockHash) + } + + // Add another chain of blocks over the genesis that's shorter than + // the original chain by 1. Here we expect all the statuses to be + // StatusUTXOPendingVerification + currentHash = genesisHash + for i := 0; i < chainLength-1; i++ { + addedBlockHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{currentHash}, nil, nil) + if err != nil { + t.Fatalf("error adding block %d: %s", i, err) + } + blockStatus, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, addedBlockHash) + if err != nil { + t.Fatalf("error getting block %d (%s) status: %s", i, addedBlockHash, err) + } + if blockStatus != externalapi.StatusUTXOPendingVerification { + t.Fatalf("block %d (%s) has unexpected status. 
"+ + "Want: %s, got: %s", i, addedBlockHash, externalapi.StatusUTXOPendingVerification, blockStatus) + } + currentHash = addedBlockHash + allHashes = append(allHashes, addedBlockHash) + } + + // Add another two blocks to the second chain. This should trigger + // resolving the entire chain + for i := 0; i < 2; i++ { + addedBlockHash, _, err := consensus.AddBlock([]*externalapi.DomainHash{currentHash}, nil, nil) + if err != nil { + t.Fatalf("error adding block %d: %s", i, err) + } + currentHash = addedBlockHash + allHashes = append(allHashes, addedBlockHash) + } + + // Make sure that all the blocks in the DAG now have StatusUTXOValid + for _, hash := range allHashes { + blockStatus, err := consensus.BlockStatusStore().Get(consensus.DatabaseContext(), stagingArea, hash) + if err != nil { + t.Fatalf("error getting block %s status: %s", hash, err) + } + if blockStatus != externalapi.StatusUTXOValid { + t.Fatalf("block %s is unexpectedly non-valid. Its status is: %s", hash, blockStatus) + } + } + }) +} diff --git a/domain/consensus/processes/consensusstatemanager/resolve_virtual_test.go b/domain/consensus/processes/consensusstatemanager/resolve_virtual_test.go new file mode 100644 index 0000000..fceb7db --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/resolve_virtual_test.go @@ -0,0 +1,445 @@ +package consensusstatemanager_test + +import ( + "fmt" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestAddBlockBetweenResolveVirtualCalls(t *testing.T) { + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := 
consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddBlockBetweenResolveVirtualCalls") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash} + + // Create a chain of blocks + const initialChainLength = 10 + previousBlockHash := consensusConfig.GenesisHash + for i := 0; i < initialChainLength; i++ { + previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + hashes = append(hashes, previousBlockHash) + if err != nil { + t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err) + } + } + + // Mine a chain with more blocks, to re-organize the DAG + const reorgChainLength = initialChainLength + 1 + previousBlockHash = consensusConfig.GenesisHash + for i := 0; i < reorgChainLength; i++ { + previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + previousBlockHash = consensushashing.BlockHash(previousBlock) + hashes = append(hashes, previousBlockHash) + + // Do not UTXO validate in order to resolve virtual later + err = tc.ValidateAndInsertBlock(previousBlock, false) + if err != nil { + t.Fatalf("Error mining block no. 
%d in re-org chain: %+v", i, err) + } + } + + // Resolve one step + _, _, err = tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + emptyCoinbase := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + + // Get template based on current resolve state + blockTemplate, err := tc.BuildBlockTemplate(emptyCoinbase, nil) + if err != nil { + t.Fatalf("Error building block template during virtual resolution of reorg: %+v", err) + } + + // Resolve one more step + _, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + // Add the mined block (now virtual was modified) + err = tc.ValidateAndInsertBlock(blockTemplate.Block, true) + if err != nil { + t.Fatalf("Error mining block during virtual resolution of reorg: %+v", err) + } + hashes = append(hashes, consensushashing.BlockHash(blockTemplate.Block)) + + // Complete resolving virtual + for !isCompletelyResolved { + _, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + } + + verifyUtxoDiffPaths(t, tc, hashes) + }) +} + +func TestAddGenesisChildAfterOneResolveVirtualCall(t *testing.T) { + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddGenesisChildAfterOneResolveVirtualCall") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash} + + // Create a chain of blocks + const initialChainLength = 6 + previousBlockHash := consensusConfig.GenesisHash + for i := 0; i < initialChainLength; i++ { + previousBlockHash, _, err = 
tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + hashes = append(hashes, previousBlockHash) + if err != nil { + t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err) + } + } + + // Mine a chain with more blocks, to re-organize the DAG + const reorgChainLength = initialChainLength + 1 + previousBlockHash = consensusConfig.GenesisHash + for i := 0; i < reorgChainLength; i++ { + previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + previousBlockHash = consensushashing.BlockHash(previousBlock) + hashes = append(hashes, previousBlockHash) + + // Do not UTXO validate in order to resolve virtual later + err = tc.ValidateAndInsertBlock(previousBlock, false) + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + } + + // Resolve one step + _, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + _, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block during virtual resolution of reorg: %+v", err) + } + + // Complete resolving virtual + for !isCompletelyResolved { + _, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + } + + verifyUtxoDiffPaths(t, tc, hashes) + }) +} + +func TestAddGenesisChildAfterTwoResolveVirtualCalls(t *testing.T) { + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddGenesisChildAfterTwoResolveVirtualCalls") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + hashes := 
[]*externalapi.DomainHash{consensusConfig.GenesisHash} + + // Create a chain of blocks + const initialChainLength = 6 + previousBlockHash := consensusConfig.GenesisHash + for i := 0; i < initialChainLength; i++ { + previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + hashes = append(hashes, previousBlockHash) + if err != nil { + t.Fatalf("Error mining block no. %d in initial chain: %+v", i, err) + } + } + + // Mine a chain with more blocks, to re-organize the DAG + const reorgChainLength = initialChainLength + 1 + previousBlockHash = consensusConfig.GenesisHash + for i := 0; i < reorgChainLength; i++ { + previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + previousBlockHash = consensushashing.BlockHash(previousBlock) + hashes = append(hashes, previousBlockHash) + + // Do not UTXO validate in order to resolve virtual later + err = tc.ValidateAndInsertBlock(previousBlock, false) + if err != nil { + t.Fatalf("Error mining block no. 
%d in re-org chain: %+v", i, err) + } + } + + // Resolve one step + _, _, err = tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + // Resolve one more step + _, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + _, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error adding block during virtual resolution of reorg: %+v", err) + } + + // Complete resolving virtual + for !isCompletelyResolved { + _, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(2) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + } + + verifyUtxoDiffPaths(t, tc, hashes) + }) +} + +func TestResolveVirtualBackAndForthReorgs(t *testing.T) { + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestAddGenesisChildAfterTwoResolveVirtualCalls") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + hashes := []*externalapi.DomainHash{consensusConfig.GenesisHash} + blocks := make(map[externalapi.DomainHash]string) + blocks[*consensusConfig.GenesisHash] = "g" + blocks[*model.VirtualBlockHash] = "v" + printfDebug("%s\n\n", consensusConfig.GenesisHash) + + // Create a chain of blocks + const initialChainLength = 6 + previousBlockHash := consensusConfig.GenesisHash + for i := 0; i < initialChainLength; i++ { + previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + blocks[*previousBlockHash] = fmt.Sprintf("A_%d", i) + hashes = append(hashes, previousBlockHash) + printfDebug("A_%d: %s\n", i, previousBlockHash) + + if err != nil { + t.Fatalf("Error mining block no. 
%d in initial chain: %+v", i, err) + } + } + + printfDebug("\n") + verifyUtxoDiffPaths(t, tc, hashes) + + firstChainTip := previousBlockHash + + // Mine a chain with more blocks, to re-organize the DAG + const reorgChainLength = 12 // initialChainLength + 1 + previousBlockHash = consensusConfig.GenesisHash + for i := 0; i < reorgChainLength; i++ { + previousBlock, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + previousBlockHash = consensushashing.BlockHash(previousBlock) + blocks[*previousBlockHash] = fmt.Sprintf("B_%d", i) + hashes = append(hashes, previousBlockHash) + printfDebug("B_%d: %s\n", i, previousBlockHash) + + // Do not UTXO validate in order to resolve virtual later + err = tc.ValidateAndInsertBlock(previousBlock, false) + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + } + + printfDebug("\n") + + printUtxoDiffChildren(t, tc, hashes, blocks) + verifyUtxoDiffPaths(t, tc, hashes) + + previousVirtualSelectedParent, err := tc.GetVirtualSelectedParent() + if err != nil { + t.Fatal(err) + } + + // Resolve one step + virtualChangeSet, _, err := tc.ResolveVirtualWithMaxParam(3) + if err != nil { + printUtxoDiffChildren(t, tc, hashes, blocks) + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + newVirtualSelectedParent, err := tc.GetVirtualSelectedParent() + if err != nil { + t.Fatal(err) + } + + // Make sure the reported change-set is compatible with actual changes. + // Checking this for one call should suffice to avoid possible bugs. + reportedPreviousVirtualSelectedParent := virtualChangeSet.VirtualSelectedParentChainChanges.Removed[0] + reportedNewVirtualSelectedParent := virtualChangeSet.VirtualSelectedParentChainChanges. 
+ Added[len(virtualChangeSet.VirtualSelectedParentChainChanges.Added)-1] + + if !previousVirtualSelectedParent.Equal(reportedPreviousVirtualSelectedParent) { + t.Fatalf("The reported changeset is incompatible with actual changes") + } + if !newVirtualSelectedParent.Equal(reportedNewVirtualSelectedParent) { + t.Fatalf("The reported changeset is incompatible with actual changes") + } + + // Resolve one more step + _, isCompletelyResolved, err := tc.ResolveVirtualWithMaxParam(3) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + + // Complete resolving virtual + for !isCompletelyResolved { + _, isCompletelyResolved, err = tc.ResolveVirtualWithMaxParam(3) + if err != nil { + t.Fatalf("Error resolving virtual in re-org chain: %+v", err) + } + } + + printUtxoDiffChildren(t, tc, hashes, blocks) + verifyUtxoDiffPaths(t, tc, hashes) + + // Now get the first chain back to the winning position + previousBlockHash = firstChainTip + for i := 0; i < reorgChainLength; i++ { + previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + blocks[*previousBlockHash] = fmt.Sprintf("A_%d", initialChainLength+i) + hashes = append(hashes, previousBlockHash) + printfDebug("A_%d: %s\n", initialChainLength+i, previousBlockHash) + + if err != nil { + t.Fatalf("Error mining block no.
%d in initial chain: %+v", initialChainLength+i, err) + } + } + + printfDebug("\n") + + printUtxoDiffChildren(t, tc, hashes, blocks) + verifyUtxoDiffPaths(t, tc, hashes) + }) +} + +func verifyUtxoDiffPathToRoot(t *testing.T, tc testapi.TestConsensus, stagingArea *model.StagingArea, block, utxoDiffRoot *externalapi.DomainHash) { + current := block + for !current.Equal(utxoDiffRoot) { + hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, current) + if err != nil { + t.Fatalf("Error while reading utxo diff store: %+v", err) + } + if !hasUTXODiffChild { + t.Fatalf("%s is expected to have a UTXO diff child", current) + } + current, err = tc.UTXODiffStore().UTXODiffChild(tc.DatabaseContext(), stagingArea, current) + if err != nil { + t.Fatalf("Error while reading utxo diff store: %+v", err) + } + } +} + +func verifyUtxoDiffPaths(t *testing.T, tc testapi.TestConsensus, hashes []*externalapi.DomainHash) { + stagingArea := model.NewStagingArea() + + virtualGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, model.VirtualBlockHash, false) + if err != nil { + t.Fatal(err) + } + + utxoDiffRoot := virtualGHOSTDAGData.SelectedParent() + hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, utxoDiffRoot) + if err != nil { + t.Fatalf("Error while reading utxo diff store: %+v", err) + } + if hasUTXODiffChild { + t.Fatalf("Virtual selected parent is not expected to have an explicit diff child") + } + _, err = tc.UTXODiffStore().UTXODiff(tc.DatabaseContext(), stagingArea, utxoDiffRoot) + if err != nil { + t.Fatalf("Virtual selected parent is expected to have a utxo diff: %+v", err) + } + + for _, block := range hashes { + hasUTXODiffChild, err = tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, block) + if err != nil { + t.Fatalf("Error while reading utxo diff store: %+v", err) + } + isOnVirtualSelectedChain, err := 
tc.DAGTopologyManager().IsInSelectedParentChainOf(stagingArea, block, utxoDiffRoot) + if err != nil { + t.Fatal(err) + } + // We expect a valid path to root in both cases: (i) block has a diff child, (ii) block is on the virtual selected chain + if hasUTXODiffChild || isOnVirtualSelectedChain { + verifyUtxoDiffPathToRoot(t, tc, stagingArea, block, utxoDiffRoot) + } + } +} + +func printfDebug(format string, a ...any) { + // Uncomment below when debugging the test + //fmt.Printf(format, a...) +} + +func printUtxoDiffChildren(t *testing.T, tc testapi.TestConsensus, hashes []*externalapi.DomainHash, blocks map[externalapi.DomainHash]string) { + printfDebug("\n===============================\nBlock\t\tDiff child\n") + stagingArea := model.NewStagingArea() + for _, block := range hashes { + hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, block) + if err != nil { + t.Fatalf("Error while reading utxo diff store: %+v", err) + } + if hasUTXODiffChild { + utxoDiffChild, err := tc.UTXODiffStore().UTXODiffChild(tc.DatabaseContext(), stagingArea, block) + if err != nil { + t.Fatalf("Error while reading utxo diff store: %+v", err) + } + printfDebug("%s\t\t\t%s\n", blocks[*block], blocks[*utxoDiffChild]) + } else { + printfDebug("%s\n", blocks[*block]) + } + } + printfDebug("\n===============================\n") +} diff --git a/domain/consensus/processes/consensusstatemanager/reverse_utxo_diffs.go b/domain/consensus/processes/consensusstatemanager/reverse_utxo_diffs.go new file mode 100644 index 0000000..059b9ef --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/reverse_utxo_diffs.go @@ -0,0 +1,95 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/staging" +) + +func (csm 
*consensusStateManager) ReverseUTXODiffs(tipHash *externalapi.DomainHash, + reversalData *model.UTXODiffReversalData) error { + + // During the process of resolving a chain of blocks, we temporarily set all blocks' (except the tip) + // UTXODiffChild to be the selected parent. + // Once the process is complete, we can reverse said chain, to now go directly to virtual through the relevant tip + onEnd := logger.LogAndMeasureExecutionTime(log, "reverseUTXODiffs") + defer onEnd() + + readStagingArea := model.NewStagingArea() + + log.Debugf("Reversing utxoDiffs") + + // Set previousUTXODiff and previousBlock to tip.SelectedParent before we start touching them, + // since previousBlock's UTXODiff is going to be over-written in the next step + previousBlock := reversalData.SelectedParentHash + previousUTXODiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, readStagingArea, previousBlock) + if err != nil { + return err + } + + // tip.selectedParent is special in the sense that we don't have its diff available in reverse, however, + // we were able to calculate it when the tip's and tip.selectedParent's UTXOSets were known during resolveBlockStatus.
+ // Therefore - we treat it separately + err = csm.commitUTXODiffInSeparateStagingArea(previousBlock, reversalData.SelectedParentUTXODiff, tipHash) + if err != nil { + return err + } + + log.Trace("Reversed 1 utxoDiff") + + previousBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, readStagingArea, previousBlock, false) + if err != nil { + return err + } + // Now go over the rest of the blocks and assign for every block Bi.UTXODiff = Bi+1.UTXODiff.Reversed() + for i := 1; ; i++ { + currentBlock := previousBlockGHOSTDAGData.SelectedParent() + log.Debugf("Reversing UTXO diff for %s", currentBlock) + + currentBlockUTXODiffChild, err := csm.utxoDiffStore.UTXODiffChild(csm.databaseContext, readStagingArea, currentBlock) + if err != nil { + return err + } + currentBlockGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, readStagingArea, currentBlock, false) + if err != nil { + return err + } + + currentUTXODiff := previousUTXODiff.Reversed() + + // retrieve current utxoDiff for Bi, to be used by next block + previousUTXODiff, err = csm.utxoDiffStore.UTXODiff(csm.databaseContext, readStagingArea, currentBlock) + if err != nil { + return err + } + + err = csm.commitUTXODiffInSeparateStagingArea(currentBlock, currentUTXODiff, previousBlock) + if err != nil { + return err + } + + // We stop reversing when current's UTXODiffChild is not current's SelectedParent + if !currentBlockGHOSTDAGData.SelectedParent().Equal(currentBlockUTXODiffChild) { + log.Debugf("Block %s's UTXODiffChild is not it's selected parent - finish reversing", currentBlock) + break + } + + previousBlock = currentBlock + previousBlockGHOSTDAGData = currentBlockGHOSTDAGData + + log.Tracef("Reversed %d utxoDiffs", i) + } + + return nil +} + +func (csm *consensusStateManager) commitUTXODiffInSeparateStagingArea( + blockHash *externalapi.DomainHash, utxoDiff externalapi.UTXODiff, utxoDiffChild *externalapi.DomainHash) error { + + stagingAreaForCurrentBlock := 
model.NewStagingArea() + + csm.utxoDiffStore.Stage(stagingAreaForCurrentBlock, blockHash, utxoDiff, utxoDiffChild) + + return staging.CommitAllChanges(csm.databaseContext, stagingAreaForCurrentBlock) +} diff --git a/domain/consensus/processes/consensusstatemanager/reverse_utxo_diffs_test.go b/domain/consensus/processes/consensusstatemanager/reverse_utxo_diffs_test.go new file mode 100644 index 0000000..5e77d95 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/reverse_utxo_diffs_test.go @@ -0,0 +1,115 @@ +package consensusstatemanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestReverseUTXODiffs(t *testing.T) { + // This test doesn't check ReverseUTXODiffs directly, since that would be quite complicated, + // instead, it creates a situation where a reversal would definitely happen - a reorg of 5 blocks, + // then verifies that the resulting utxo-diffs and utxo-diff-children are all correct. + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestUTXOCommitment") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Create a chain of 5 blocks + const initialChainLength = 5 + previousBlockHash := consensusConfig.GenesisHash + for i := 0; i < initialChainLength; i++ { + previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + if err != nil { + t.Fatalf("Error mining block no.
%d in initial chain: %+v", i, err) + } + } + + // Mine a chain of 6 blocks, to re-organize the DAG + const reorgChainLength = initialChainLength + 1 + reorgChain := make([]*externalapi.DomainHash, reorgChainLength) + previousBlockHash = consensusConfig.GenesisHash + for i := 0; i < reorgChainLength; i++ { + previousBlockHash, _, err = tc.AddBlock([]*externalapi.DomainHash{previousBlockHash}, nil, nil) + reorgChain[i] = previousBlockHash + if err != nil { + t.Fatalf("Error mining block no. %d in re-org chain: %+v", i, err) + } + } + + stagingArea := model.NewStagingArea() + // Check that every block in the reorg chain has the next block as its UTXODiffChild, + // except the tip that has virtual, and that the diff is only `{ toRemove: { coinbase } }` + for i, currentBlockHash := range reorgChain { + if i == reorgChainLength-1 { + hasUTXODiffChild, err := tc.UTXODiffStore().HasUTXODiffChild(tc.DatabaseContext(), stagingArea, currentBlockHash) + if err != nil { + t.Fatalf("Error getting HasUTXODiffChild of %s: %+v", currentBlockHash, err) + } + if hasUTXODiffChild { + t.Errorf("Block %s expected utxoDiffChild is virtual, but HasUTXODiffChild returned true", + currentBlockHash) + } + } else { + utxoDiffChild, err := tc.UTXODiffStore().UTXODiffChild(tc.DatabaseContext(), stagingArea, currentBlockHash) + if err != nil { + t.Fatalf("Error getting utxoDiffChild of block No.
%d, %s: %+v", i, currentBlockHash, err) + } + expectedUTXODiffChild := reorgChain[i+1] + if !expectedUTXODiffChild.Equal(utxoDiffChild) { + t.Errorf("Block %s expected utxoDiffChild is %s, but got %s instead", + currentBlockHash, expectedUTXODiffChild, utxoDiffChild) + continue + } + } + + // skip the first block, since its coinbase doesn't create outputs + if i == 0 { + continue + } + + currentBlock, err := tc.BlockStore().Block(tc.DatabaseContext(), stagingArea, currentBlockHash) + if err != nil { + t.Fatalf("Error getting block %s: %+v", currentBlockHash, err) + } + utxoDiff, err := tc.UTXODiffStore().UTXODiff(tc.DatabaseContext(), stagingArea, currentBlockHash) + if err != nil { + t.Fatalf("Error getting utxoDiffChild of %s: %+v", currentBlockHash, err) + } + if !checkIsUTXODiffOnlyRemoveCoinbase(t, utxoDiff, currentBlock) { + t.Errorf("Expected %s to only have toRemove: {%s}, but got %s instead", + currentBlockHash, consensushashing.TransactionID(currentBlock.Transactions[0]), utxoDiff) + } + } + }) +} + +func checkIsUTXODiffOnlyRemoveCoinbase(t *testing.T, utxoDiff externalapi.UTXODiff, currentBlock *externalapi.DomainBlock) bool { + if utxoDiff.ToAdd().Len() > 0 || utxoDiff.ToRemove().Len() > 1 { + return false + } + + iterator := utxoDiff.ToRemove().Iterator() + iterator.First() + outpoint, _, err := iterator.Get() + if err != nil { + t.Fatalf("Error getting from UTXODiff's iterator: %+v", err) + } + if !outpoint.TransactionID.Equal(consensushashing.TransactionID(currentBlock.Transactions[0])) { + return false + } + + return true +} diff --git a/domain/consensus/processes/consensusstatemanager/test_consensus_state_manager.go b/domain/consensus/processes/consensusstatemanager/test_consensus_state_manager.go new file mode 100644 index 0000000..ff7ffc8 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/test_consensus_state_manager.go @@ -0,0 +1,29 @@ +package consensusstatemanager + +import ( +
"github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" +) + +type testConsensusStateManager struct { + *consensusStateManager +} + +// NewTestConsensusStateManager creates an instance of a TestConsensusStateManager +func NewTestConsensusStateManager(baseConsensusStateManager model.ConsensusStateManager) testapi.TestConsensusStateManager { + return &testConsensusStateManager{consensusStateManager: baseConsensusStateManager.(*consensusStateManager)} +} + +func (csm *testConsensusStateManager) AddUTXOToMultiset( + multiset model.Multiset, entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint) error { + + return addUTXOToMultiset(multiset, entry, outpoint) +} + +func (csm *testConsensusStateManager) ResolveBlockStatus(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + useSeparateStagingAreaPerBlock bool) (externalapi.BlockStatus, error) { + + status, _, err := csm.resolveBlockStatus(stagingArea, blockHash, useSeparateStagingAreaPerBlock) + return status, err +} diff --git a/domain/consensus/processes/consensusstatemanager/update_virtual.go b/domain/consensus/processes/consensusstatemanager/update_virtual.go new file mode 100644 index 0000000..c7e8e3b --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/update_virtual.go @@ -0,0 +1,132 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func (csm *consensusStateManager) updateVirtual(stagingArea *model.StagingArea, newBlockHash *externalapi.DomainHash, + tips []*externalapi.DomainHash) (*externalapi.SelectedChainPath, externalapi.UTXODiff, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "updateVirtual") + defer 
onEnd() + + log.Debugf("updateVirtual start for block %s", newBlockHash) + + log.Debugf("Saving a reference to the GHOSTDAG data of the old virtual") + var oldVirtualSelectedParent *externalapi.DomainHash + if !newBlockHash.Equal(csm.genesisHash) { + oldVirtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, nil, err + } + oldVirtualSelectedParent = oldVirtualGHOSTDAGData.SelectedParent() + } + + log.Debugf("Picking virtual parents from tips len: %d", len(tips)) + virtualParents, err := csm.pickVirtualParents(stagingArea, tips) + if err != nil { + return nil, nil, err + } + log.Debugf("Picked virtual parents: %s", virtualParents) + + virtualUTXODiff, err := csm.updateVirtualWithParents(stagingArea, virtualParents) + if err != nil { + return nil, nil, err + } + + log.Debugf("Calculating selected parent chain changes") + var selectedParentChainChanges *externalapi.SelectedChainPath + if !newBlockHash.Equal(csm.genesisHash) { + newVirtualGHOSTDAGData, err := csm.ghostdagDataStore.Get(csm.databaseContext, stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, nil, err + } + newVirtualSelectedParent := newVirtualGHOSTDAGData.SelectedParent() + selectedParentChainChanges, err = csm.dagTraversalManager. 
+ CalculateChainPath(stagingArea, oldVirtualSelectedParent, newVirtualSelectedParent) + if err != nil { + return nil, nil, err + } + log.Debugf("Selected parent chain changes: %d blocks were removed and %d blocks were added", + len(selectedParentChainChanges.Removed), len(selectedParentChainChanges.Added)) + } + + return selectedParentChainChanges, virtualUTXODiff, nil +} + +func (csm *consensusStateManager) updateVirtualWithParents( + stagingArea *model.StagingArea, virtualParents []*externalapi.DomainHash) (externalapi.UTXODiff, error) { + err := csm.dagTopologyManager.SetParents(stagingArea, model.VirtualBlockHash, virtualParents) + if err != nil { + return nil, err + } + log.Debugf("Set new parents for the virtual block hash") + + err = csm.ghostdagManager.GHOSTDAG(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + // This is needed for `csm.CalculatePastUTXOAndAcceptanceData` + _, err = csm.difficultyManager.StageDAADataAndReturnRequiredDifficulty(stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + + log.Debugf("Calculating past UTXO, acceptance data, and multiset for the new virtual block") + virtualUTXODiff, virtualAcceptanceData, virtualMultiset, err := + csm.CalculatePastUTXOAndAcceptanceData(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + log.Debugf("Calculated the past UTXO of the new virtual. 
"+ + "Diff toAdd length: %d, toRemove length: %d", + virtualUTXODiff.ToAdd().Len(), virtualUTXODiff.ToRemove().Len()) + + log.Debugf("Staging new acceptance data for the virtual block") + csm.acceptanceDataStore.Stage(stagingArea, model.VirtualBlockHash, virtualAcceptanceData) + + log.Debugf("Staging new multiset for the virtual block") + csm.multisetStore.Stage(stagingArea, model.VirtualBlockHash, virtualMultiset) + + log.Debugf("Staging new UTXO diff for the virtual block") + csm.consensusStateStore.StageVirtualUTXODiff(stagingArea, virtualUTXODiff) + + log.Debugf("Updating the selected tip's utxo-diff") + err = csm.updateSelectedTipUTXODiff(stagingArea, virtualUTXODiff) + if err != nil { + return nil, err + } + + return virtualUTXODiff, nil +} + +func (csm *consensusStateManager) updateSelectedTipUTXODiff( + stagingArea *model.StagingArea, virtualUTXODiff externalapi.UTXODiff) error { + + onEnd := logger.LogAndMeasureExecutionTime(log, "updateSelectedTipUTXODiff") + defer onEnd() + + selectedTip, err := csm.virtualSelectedParent(stagingArea) + if err != nil { + return err + } + + log.Debugf("Calculating new UTXO diff for virtual diff parent %s", selectedTip) + selectedTipUTXODiff, err := csm.utxoDiffStore.UTXODiff(csm.databaseContext, stagingArea, selectedTip) + if err != nil { + return err + } + newDiff, err := virtualUTXODiff.DiffFrom(selectedTipUTXODiff) + if err != nil { + return err + } + + log.Debugf("Staging new UTXO diff for virtual diff parent %s", selectedTip) + csm.stageDiff(stagingArea, selectedTip, newDiff, nil) + + return nil +} diff --git a/domain/consensus/processes/consensusstatemanager/utxo_diffs.go b/domain/consensus/processes/consensusstatemanager/utxo_diffs.go new file mode 100644 index 0000000..ddd3108 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/utxo_diffs.go @@ -0,0 +1,16 @@ +package consensusstatemanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (csm *consensusStateManager) stageDiff(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, + utxoDiff externalapi.UTXODiff, utxoDiffChild *externalapi.DomainHash) { + + log.Tracef("stageDiff start for block %s", blockHash) + defer log.Tracef("stageDiff end for block %s", blockHash) + + log.Debugf("Staging block %s as the diff child of %s", utxoDiffChild, blockHash) + csm.utxoDiffStore.Stage(stagingArea, blockHash, utxoDiff, utxoDiffChild) +} diff --git a/domain/consensus/processes/consensusstatemanager/verify_and_build_utxo.go b/domain/consensus/processes/consensusstatemanager/verify_and_build_utxo.go new file mode 100644 index 0000000..4d6cf22 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/verify_and_build_utxo.go @@ -0,0 +1,185 @@ +package consensusstatemanager + +import ( + "sort" + + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/utils/merkle" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" +) + +func (csm *consensusStateManager) verifyUTXO(stagingArea *model.StagingArea, block *externalapi.DomainBlock, + blockHash *externalapi.DomainHash, pastUTXODiff externalapi.UTXODiff, acceptanceData externalapi.AcceptanceData, + multiset model.Multiset) error { + + log.Tracef("verifyUTXO start for block %s", blockHash) + defer log.Tracef("verifyUTXO end for block %s", blockHash) + + log.Debugf("Validating UTXO commitment for block %s", blockHash) + err := csm.validateUTXOCommitment(block, blockHash, multiset) + if err != nil { + return err + } + log.Debugf("UTXO commitment validation passed for block 
%s", blockHash) + + log.Debugf("Validating acceptedIDMerkleRoot for block %s", blockHash) + err = csm.validateAcceptedIDMerkleRoot(block, blockHash, acceptanceData) + if err != nil { + return err + } + log.Debugf("AcceptedIDMerkleRoot validation passed for block %s", blockHash) + + coinbaseTransaction := block.Transactions[0] + log.Debugf("Validating coinbase transaction %s for block %s", + consensushashing.TransactionID(coinbaseTransaction), blockHash) + err = csm.validateCoinbaseTransaction(stagingArea, blockHash, coinbaseTransaction) + if err != nil { + return err + } + log.Debugf("Coinbase transaction validation passed for block %s", blockHash) + + log.Debugf("Validating transactions against past UTXO for block %s", blockHash) + err = csm.validateBlockTransactionsAgainstPastUTXO(stagingArea, block, pastUTXODiff) + if err != nil { + return err + } + log.Tracef("Transactions against past UTXO validation passed for block %s", blockHash) + + return nil +} + +func (csm *consensusStateManager) validateBlockTransactionsAgainstPastUTXO(stagingArea *model.StagingArea, + block *externalapi.DomainBlock, pastUTXODiff externalapi.UTXODiff) error { + + blockHash := consensushashing.BlockHash(block) + log.Tracef("validateBlockTransactionsAgainstPastUTXO start for block %s", blockHash) + defer log.Tracef("validateBlockTransactionsAgainstPastUTXO end for block %s", blockHash) + + selectedParentMedianTime, err := csm.pastMedianTimeManager.PastMedianTime(stagingArea, blockHash) + if err != nil { + return err + } + log.Tracef("The past median time of %s is %d", blockHash, selectedParentMedianTime) + + for i, transaction := range block.Transactions { + transactionID := consensushashing.TransactionID(transaction) + log.Tracef("Validating transaction %s in block %s against "+ + "the block's past UTXO", transactionID, blockHash) + if i == transactionhelper.CoinbaseTransactionIndex { + log.Tracef("Skipping transaction %s because it is the coinbase", transactionID) + continue + } + + 
log.Tracef("Populating transaction %s with UTXO entries", transactionID) + err = csm.populateTransactionWithUTXOEntriesFromVirtualOrDiff(stagingArea, transaction, pastUTXODiff) + if err != nil { + return err + } + + log.Tracef("Validating transaction %s and populating it with fee", transactionID) + err = csm.transactionValidator.ValidateTransactionInContextAndPopulateFee( + stagingArea, transaction, blockHash) + if err != nil { + return err + } + log.Tracef("Validation against the block's past UTXO "+ + "passed for transaction %s in block %s", transactionID, blockHash) + } + return nil +} + +func (csm *consensusStateManager) validateAcceptedIDMerkleRoot(block *externalapi.DomainBlock, + blockHash *externalapi.DomainHash, acceptanceData externalapi.AcceptanceData) error { + + log.Tracef("validateAcceptedIDMerkleRoot start for block %s", blockHash) + defer log.Tracef("validateAcceptedIDMerkleRoot end for block %s", blockHash) + + calculatedAcceptedIDMerkleRoot := calculateAcceptedIDMerkleRoot(acceptanceData) + if !block.Header.AcceptedIDMerkleRoot().Equal(calculatedAcceptedIDMerkleRoot) { + // Report the header's own acceptedIDMerkleRoot (previously printed the UTXO commitment by mistake) + return errors.Wrapf(ruleerrors.ErrBadMerkleRoot, "block %s accepted ID merkle root is invalid - block "+ + "header indicates %s, but calculated value is %s", + blockHash, block.Header.AcceptedIDMerkleRoot(), calculatedAcceptedIDMerkleRoot) + } + + return nil +} + +func (csm *consensusStateManager) validateUTXOCommitment( + block *externalapi.DomainBlock, blockHash *externalapi.DomainHash, multiset model.Multiset) error { + + log.Tracef("validateUTXOCommitment start for block %s", blockHash) + defer log.Tracef("validateUTXOCommitment end for block %s", blockHash) + + if blockHash.Equal(csm.genesisHash) { + return nil + } + + multisetHash := multiset.Hash() + if !block.Header.UTXOCommitment().Equal(multisetHash) { + return errors.Wrapf(ruleerrors.ErrBadUTXOCommitment, "block %s UTXO commitment is invalid - block "+ + "header indicates %s, but calculated value is %s", blockHash,
block.Header.UTXOCommitment(), multisetHash) + } + + return nil +} + +func calculateAcceptedIDMerkleRoot(multiblockAcceptanceData externalapi.AcceptanceData) *externalapi.DomainHash { + log.Tracef("calculateAcceptedIDMerkleRoot start") + defer log.Tracef("calculateAcceptedIDMerkleRoot end") + + var acceptedTransactions []*externalapi.DomainTransaction + + for _, blockAcceptanceData := range multiblockAcceptanceData { + for _, transactionAcceptance := range blockAcceptanceData.TransactionAcceptanceData { + if !transactionAcceptance.IsAccepted { + continue + } + acceptedTransactions = append(acceptedTransactions, transactionAcceptance.Transaction) + } + } + sort.Slice(acceptedTransactions, func(i, j int) bool { + return consensushashing.TransactionID(acceptedTransactions[i]).Less( + consensushashing.TransactionID(acceptedTransactions[j])) + }) + + return merkle.CalculateIDMerkleRoot(acceptedTransactions) +} + +func (csm *consensusStateManager) validateCoinbaseTransaction(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, coinbaseTransaction *externalapi.DomainTransaction) error { + + log.Tracef("validateCoinbaseTransaction start for block %s", blockHash) + defer log.Tracef("validateCoinbaseTransaction end for block %s", blockHash) + + log.Tracef("Extracting coinbase data for coinbase transaction %s in block %s", + consensushashing.TransactionID(coinbaseTransaction), blockHash) + _, coinbaseData, _, err := csm.coinbaseManager.ExtractCoinbaseDataBlueScoreAndSubsidy(coinbaseTransaction) + if err != nil { + return err + } + + log.Tracef("Calculating the expected coinbase transaction for the given coinbase data and block %s", blockHash) + expectedCoinbaseTransaction, _, err := + csm.coinbaseManager.ExpectedCoinbaseTransaction(stagingArea, blockHash, coinbaseData) + if err != nil { + return err + } + + coinbaseTransactionHash := consensushashing.TransactionHash(coinbaseTransaction) + expectedCoinbaseTransactionHash := 
consensushashing.TransactionHash(expectedCoinbaseTransaction) + log.Tracef("given coinbase hash: %s, expected coinbase hash: %s", + coinbaseTransactionHash, expectedCoinbaseTransactionHash) + + if !coinbaseTransactionHash.Equal(expectedCoinbaseTransactionHash) { + return errors.Wrap(ruleerrors.ErrBadCoinbaseTransaction, "coinbase transaction is not built as expected") + } + + return nil +} diff --git a/domain/consensus/processes/consensusstatemanager/virtual_parents_test.go b/domain/consensus/processes/consensusstatemanager/virtual_parents_test.go new file mode 100644 index 0000000..673eaf1 --- /dev/null +++ b/domain/consensus/processes/consensusstatemanager/virtual_parents_test.go @@ -0,0 +1,112 @@ +package consensusstatemanager_test + +import ( + "sort" + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestConsensusStateManager_pickVirtualParents(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + + tc, teardown, err := consensus.NewFactory().NewTestConsensus(consensusConfig, "TestConsensusStateManager_pickVirtualParents") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + defer teardown(false) + + getSortedVirtualParents := func(tc testapi.TestConsensus) []*externalapi.DomainHash { + virtualRelations, err := tc.BlockRelationStore().BlockRelation(tc.DatabaseContext(), stagingArea, model.VirtualBlockHash) + if err != nil { + t.Fatalf("Failed getting virtual block virtualRelations: %v", err) + } + + block, err := 
tc.BuildBlock(&externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}}, nil) + if err != nil { + t.Fatalf("Consensus failed building a block: %v", err) + } + blockParents := block.Header.DirectParents() + sort.Sort(testutils.NewTestGhostDAGSorter(stagingArea, virtualRelations.Parents, tc, t)) + sort.Sort(testutils.NewTestGhostDAGSorter(stagingArea, blockParents, tc, t)) + if !externalapi.HashesEqual(virtualRelations.Parents, blockParents) { + t.Fatalf("Block relations and BuildBlock return different parents for virtual, %s != %s", virtualRelations.Parents, blockParents) + } + return virtualRelations.Parents + } + + // We build 2*consensusConfig.MaxBlockParents each one with blueWork higher than the other. + parents := make([]*externalapi.DomainHash, 0, consensusConfig.MaxBlockParents) + for i := 0; i < 2*int(consensusConfig.MaxBlockParents); i++ { + lastBlock := consensusConfig.GenesisHash + for j := 0; j <= i; j++ { + lastBlock, _, err = tc.AddBlock([]*externalapi.DomainHash{lastBlock}, nil, nil) + if err != nil { + t.Fatalf("Failed Adding block to tc: %+v", err) + } + } + parents = append(parents, lastBlock) + } + + virtualParents := getSortedVirtualParents(tc) + sort.Sort(testutils.NewTestGhostDAGSorter(stagingArea, parents, tc, t)) + + // Make sure the first half of the blocks are with highest blueWork + // we use (max+1)/2 because the first "half" is rounded up, so `(dividend + (divisor - 1)) / divisor` = `(max + (2-1))/2` = `(max+1)/2` + for i := 0; i < int(consensusConfig.MaxBlockParents+1)/2; i++ { + if !virtualParents[i].Equal(parents[i]) { + t.Fatalf("Expected block at %d to be equal, instead found %s != %s", i, virtualParents[i], parents[i]) + } + } + + // Make sure the second half is the candidates with lowest blueWork + end := len(parents) - int(consensusConfig.MaxBlockParents)/2 + for i := (consensusConfig.MaxBlockParents + 1) / 2; i < consensusConfig.MaxBlockParents; i++ { + if 
!virtualParents[i].Equal(parents[end]) { + t.Fatalf("Expected block at %d to be equal, instead found %s != %s", i, virtualParents[i], parents[end]) + } + end++ + } + if end != len(parents) { + t.Fatalf("Expected %d==%d", end, len(parents)) + } + + // Clear all tips. + var virtualSelectedParent *externalapi.DomainHash + for { + block, err := tc.BuildBlock(&externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, ExtraData: nil}, nil) + if err != nil { + t.Fatalf("Failed building a block: %v", err) + } + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("Failed Inserting block to tc: %v", err) + } + virtualSelectedParent = consensushashing.BlockHash(block) + if len(block.Header.DirectParents()) == 1 { + break + } + } + // build exactly consensusConfig.MaxBlockParents + parents = make([]*externalapi.DomainHash, 0, consensusConfig.MaxBlockParents) + for i := 0; i < int(consensusConfig.MaxBlockParents); i++ { + block, _, err := tc.AddBlock([]*externalapi.DomainHash{virtualSelectedParent}, nil, nil) + if err != nil { + t.Fatalf("Failed Adding block to tc: %+v", err) + } + parents = append(parents, block) + } + + sort.Sort(testutils.NewTestGhostDAGSorter(stagingArea, parents, tc, t)) + virtualParents = getSortedVirtualParents(tc) + if !externalapi.HashesEqual(virtualParents, parents) { + t.Fatalf("Expected VirtualParents and parents to be equal, instead: %s != %s", virtualParents, parents) + } + }) +} diff --git a/domain/consensus/processes/dagtopologymanager/dagtopologymanager.go b/domain/consensus/processes/dagtopologymanager/dagtopologymanager.go new file mode 100644 index 0000000..4808d05 --- /dev/null +++ b/domain/consensus/processes/dagtopologymanager/dagtopologymanager.go @@ -0,0 +1,220 @@ +package dagtopologymanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + 
+// dagTopologyManager exposes methods for querying relationships +// between blocks in the DAG +type dagTopologyManager struct { + reachabilityManager model.ReachabilityManager + blockRelationStore model.BlockRelationStore + ghostdagStore model.GHOSTDAGDataStore + databaseContext model.DBReader +} + +// New instantiates a new DAGTopologyManager +func New( + databaseContext model.DBReader, + reachabilityManager model.ReachabilityManager, + blockRelationStore model.BlockRelationStore, + ghostdagStore model.GHOSTDAGDataStore) model.DAGTopologyManager { + + return &dagTopologyManager{ + databaseContext: databaseContext, + reachabilityManager: reachabilityManager, + blockRelationStore: blockRelationStore, + ghostdagStore: ghostdagStore, + } +} + +// Parents returns the DAG parents of the given blockHash +func (dtm *dagTopologyManager) Parents(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + blockRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + return blockRelations.Parents, nil +} + +// Children returns the DAG children of the given blockHash +func (dtm *dagTopologyManager) Children(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + blockRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + return blockRelations.Children, nil +} + +// IsParentOf returns true if blockHashA is a direct DAG parent of blockHashB +func (dtm *dagTopologyManager) IsParentOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + blockRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, blockHashB) + if err != nil { + return false, err + } + return isHashInSlice(blockHashA, blockRelations.Parents), nil +} 
+ +// IsChildOf returns true if blockHashA is a direct DAG child of blockHashB +func (dtm *dagTopologyManager) IsChildOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + blockRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, blockHashB) + if err != nil { + return false, err + } + return isHashInSlice(blockHashA, blockRelations.Children), nil +} + +// IsAncestorOf returns true if blockHashA is a DAG ancestor of blockHashB +func (dtm *dagTopologyManager) IsAncestorOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + return dtm.reachabilityManager.IsDAGAncestorOf(stagingArea, blockHashA, blockHashB) +} + +// IsAncestorOfAny returns true if `blockHash` is an ancestor of at least one of `potentialDescendants` +func (dtm *dagTopologyManager) IsAncestorOfAny(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, potentialDescendants []*externalapi.DomainHash) (bool, error) { + for _, potentialDescendant := range potentialDescendants { + isAncestorOf, err := dtm.IsAncestorOf(stagingArea, blockHash, potentialDescendant) + if err != nil { + return false, err + } + + if isAncestorOf { + return true, nil + } + } + + return false, nil +} + +// IsAnyAncestorOf returns true if at least one of `potentialAncestors` is an ancestor of `blockHash` +func (dtm *dagTopologyManager) IsAnyAncestorOf(stagingArea *model.StagingArea, potentialAncestors []*externalapi.DomainHash, blockHash *externalapi.DomainHash) (bool, error) { + for _, potentialAncestor := range potentialAncestors { + isAncestorOf, err := dtm.IsAncestorOf(stagingArea, potentialAncestor, blockHash) + if err != nil { + return false, err + } + + if isAncestorOf { + return true, nil + } + } + + return false, nil +} + +// IsInSelectedParentChainOf returns true if blockHashA is in the selected parent chain of blockHashB +func (dtm 
*dagTopologyManager) IsInSelectedParentChainOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + + // Virtual doesn't have reachability data, therefore, it should be treated as a special case - + // use its selected parent as blockHashB. + if blockHashB == model.VirtualBlockHash { + ghostdagData, err := dtm.ghostdagStore.Get(dtm.databaseContext, stagingArea, blockHashB, false) + if err != nil { + return false, err + } + blockHashB = ghostdagData.SelectedParent() + } + + return dtm.reachabilityManager.IsReachabilityTreeAncestorOf(stagingArea, blockHashA, blockHashB) +} + +func isHashInSlice(hash *externalapi.DomainHash, hashes []*externalapi.DomainHash) bool { + for _, h := range hashes { + if h.Equal(hash) { + return true + } + } + return false +} + +func (dtm *dagTopologyManager) SetParents(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, parentHashes []*externalapi.DomainHash) error { + hasRelations, err := dtm.blockRelationStore.Has(dtm.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + if hasRelations { + // Go over the block's current relations (if they exist), and remove the block from all its current parents + // Note: In theory we should also remove the block from all its children, however, in practice no block + // ever has its relations updated after getting any children, therefore we skip this step + + currentRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, blockHash) + if err != nil { + return err + } + + for _, currentParent := range currentRelations.Parents { + parentRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, currentParent) + if err != nil { + return err + } + for i, parentChild := range parentRelations.Children { + if parentChild.Equal(blockHash) { + parentRelations.Children = append(parentRelations.Children[:i], parentRelations.Children[i+1:]...) 
+ dtm.blockRelationStore.StageBlockRelation(stagingArea, currentParent, parentRelations) + break + } + } + } + } + + // Go over all new parents and add block as their child + for _, parent := range parentHashes { + parentRelations, err := dtm.blockRelationStore.BlockRelation(dtm.databaseContext, stagingArea, parent) + if err != nil { + return err + } + isBlockAlreadyInChildren := false + for _, parentChild := range parentRelations.Children { + if parentChild.Equal(blockHash) { + isBlockAlreadyInChildren = true + break + } + } + if !isBlockAlreadyInChildren { + parentRelations.Children = append(parentRelations.Children, blockHash) + dtm.blockRelationStore.StageBlockRelation(stagingArea, parent, parentRelations) + } + } + + // Finally - create the relations for the block itself + dtm.blockRelationStore.StageBlockRelation(stagingArea, blockHash, &model.BlockRelations{ + Parents: parentHashes, + Children: []*externalapi.DomainHash{}, + }) + + return nil +} + +// ChildInSelectedParentChainOf returns the child of `lowHash` that is in the selected-parent-chain of `highHash` +func (dtm *dagTopologyManager) ChildInSelectedParentChainOf(stagingArea *model.StagingArea, lowHash, highHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + + // Virtual doesn't have reachability data, therefore, it should be treated as a special case - + // use its selected parent as highHash. 
+ specifiedHighHash := highHash + if highHash == model.VirtualBlockHash { + ghostdagData, err := dtm.ghostdagStore.Get(dtm.databaseContext, stagingArea, highHash, false) + if err != nil { + return nil, err + } + selectedParent := ghostdagData.SelectedParent() + + // In the case where `lowHash` is an immediate parent of `highHash` + if lowHash.Equal(selectedParent) { + return highHash, nil + } + highHash = selectedParent + } + + isInSelectedParentChain, err := dtm.IsInSelectedParentChainOf(stagingArea, lowHash, highHash) + if err != nil { + return nil, err + } + if !isInSelectedParentChain { + return nil, errors.Errorf("Claimed chain ancestor (%s) is not in the selected-parent-chain of highHash (%s)", + lowHash, specifiedHighHash) + } + + return dtm.reachabilityManager.FindNextAncestor(stagingArea, highHash, lowHash) +} diff --git a/domain/consensus/processes/dagtopologymanager/dagtopologymanager_external_test.go b/domain/consensus/processes/dagtopologymanager/dagtopologymanager_external_test.go new file mode 100644 index 0000000..94d553e --- /dev/null +++ b/domain/consensus/processes/dagtopologymanager/dagtopologymanager_external_test.go @@ -0,0 +1,54 @@ +package dagtopologymanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestIsAncestorOf(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestIsAncestorOf") + if err != nil { + t.Fatalf("NewTestConsensus: %s", err) + } + defer tearDown(false) + + // Add a chain of two blocks above the genesis. This will be the + // selected parent chain.
+ blockA, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + blockB, _, err := tc.AddBlock([]*externalapi.DomainHash{blockA}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %s", err) + } + + // Add another block above the genesis + blockC, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %s", err) + } + + // Add a block whose parents are the two tips + blockD, _, err := tc.AddBlock([]*externalapi.DomainHash{blockB, blockC}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %s", err) + } + + // blockC is a direct parent of blockD, so it must be in blockD's past + isAncestorOf, err := tc.DAGTopologyManager().IsAncestorOf(model.NewStagingArea(), blockC, blockD) + if err != nil { + t.Fatalf("IsAncestorOf: %s", err) + } + if !isAncestorOf { + t.Fatalf("TestIsAncestorOf: node C is unexpectedly not in the past of node D") + } + }) +} diff --git a/domain/consensus/processes/dagtraversalmanager/anticone.go b/domain/consensus/processes/dagtraversalmanager/anticone.go new file mode 100644 index 0000000..dd099a3 --- /dev/null +++ b/domain/consensus/processes/dagtraversalmanager/anticone.go @@ -0,0 +1,77 @@ +package dagtraversalmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" +) + +func (dtm *dagTraversalManager) AnticoneFromVirtualPOV(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ( + []*externalapi.DomainHash, error) { + + virtualParents, err := dtm.dagTopologyManager.Parents(stagingArea, model.VirtualBlockHash) + if err != nil { + return nil, err + } + + return dtm.AnticoneFromBlocks(stagingArea, virtualParents, blockHash, 0) +} + +func (dtm *dagTraversalManager) AnticoneFromBlocks(stagingArea *model.StagingArea, tips []*externalapi.DomainHash, + blockHash
*externalapi.DomainHash, maxTraversalAllowed uint64) ( + []*externalapi.DomainHash, error) { + + anticone := []*externalapi.DomainHash{} + queue := tips + visited := hashset.New() + + traversalCounter := uint64(0) + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + + if visited.Contains(current) { + continue + } + + visited.Add(current) + + currentIsAncestorOfBlock, err := dtm.dagTopologyManager.IsAncestorOf(stagingArea, current, blockHash) + if err != nil { + return nil, err + } + + if currentIsAncestorOfBlock { + continue + } + + blockIsAncestorOfCurrent, err := dtm.dagTopologyManager.IsAncestorOf(stagingArea, blockHash, current) + if err != nil { + return nil, err + } + + // We count the number of blocks in past(tips) \setminus past(blockHash). + // We don't use `len(visited)` since it includes some maximal blocks in past(blockHash) as well. + traversalCounter++ + if maxTraversalAllowed > 0 && traversalCounter > maxTraversalAllowed { + return nil, errors.Wrapf(model.ErrReachedMaxTraversalAllowed, + "Passed max allowed traversal (%d > %d)", traversalCounter, maxTraversalAllowed) + } + + if !blockIsAncestorOfCurrent { + anticone = append(anticone, current) + } + + currentParents, err := dtm.dagTopologyManager.Parents(stagingArea, current) + if err != nil { + return nil, err + } + + for _, parent := range currentParents { + queue = append(queue, parent) + } + } + + return anticone, nil +} diff --git a/domain/consensus/processes/dagtraversalmanager/block_heap.go b/domain/consensus/processes/dagtraversalmanager/block_heap.go new file mode 100644 index 0000000..4a9ed36 --- /dev/null +++ b/domain/consensus/processes/dagtraversalmanager/block_heap.go @@ -0,0 +1,204 @@ +package dagtraversalmanager + +import ( + "container/heap" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func blockGHOSTDAGDataHashPairLess(left, right 
*externalapi.BlockGHOSTDAGDataHashPair, gm model.GHOSTDAGManager) bool { + return gm.Less(left.Hash, left.GHOSTDAGData, right.Hash, right.GHOSTDAGData) +} + +// baseHeap is an implementation for heap.Interface that sorts blocks by their blueWork+hash +type baseHeap struct { + slice []*externalapi.BlockGHOSTDAGDataHashPair + ghostdagManager model.GHOSTDAGManager +} + +func (h *baseHeap) Len() int { return len(h.slice) } +func (h *baseHeap) Swap(i, j int) { h.slice[i], h.slice[j] = h.slice[j], h.slice[i] } + +func (h *baseHeap) Push(x interface{}) { + h.slice = append(h.slice, x.(*externalapi.BlockGHOSTDAGDataHashPair)) +} + +func (h *baseHeap) Pop() interface{} { + oldSlice := h.slice + oldLength := len(oldSlice) + popped := oldSlice[oldLength-1] + h.slice = oldSlice[0 : oldLength-1] + return popped +} + +// peek returns the block with lowest blueWork+hash from this heap without removing it +func (h *baseHeap) peek() *externalapi.BlockGHOSTDAGDataHashPair { + return h.slice[0] +} + +// upHeap extends baseHeap to include Less operation that traverses from bottom to top +type upHeap struct{ baseHeap } + +func (h *upHeap) Less(i, j int) bool { + heapNodeI := h.slice[i] + heapNodeJ := h.slice[j] + return blockGHOSTDAGDataHashPairLess(heapNodeI, heapNodeJ, h.ghostdagManager) +} + +// downHeap extends baseHeap to include Less operation that traverses from top to bottom +type downHeap struct{ baseHeap } + +func (h *downHeap) Less(i, j int) bool { + heapNodeI := h.slice[i] + heapNodeJ := h.slice[j] + return !blockGHOSTDAGDataHashPairLess(heapNodeI, heapNodeJ, h.ghostdagManager) +} + +// blockHeap represents a mutable heap of blocks, sorted by their blueWork+hash +type blockHeap struct { + impl heap.Interface + ghostdagStore model.GHOSTDAGDataStore + dbContext model.DBReader + stagingArea *model.StagingArea +} + +// NewDownHeap initializes and returns a new blockHeap +func (dtm *dagTraversalManager) NewDownHeap(stagingArea *model.StagingArea) model.BlockHeap { + h := 
blockHeap{ + impl: &downHeap{baseHeap{ghostdagManager: dtm.ghostdagManager}}, + ghostdagStore: dtm.ghostdagDataStore, + dbContext: dtm.databaseContext, + stagingArea: stagingArea, + } + heap.Init(h.impl) + return &h +} + +// NewUpHeap initializes and returns a new blockHeap +func (dtm *dagTraversalManager) NewUpHeap(stagingArea *model.StagingArea) model.BlockHeap { + h := blockHeap{ + impl: &upHeap{baseHeap{ghostdagManager: dtm.ghostdagManager}}, + ghostdagStore: dtm.ghostdagDataStore, + dbContext: dtm.databaseContext, + stagingArea: stagingArea, + } + heap.Init(h.impl) + return &h +} + +// Pop removes the block with lowest blueWork+hash from this heap and returns it +func (bh *blockHeap) Pop() *externalapi.DomainHash { + return heap.Pop(bh.impl).(*externalapi.BlockGHOSTDAGDataHashPair).Hash +} + +// Push pushes the block onto the heap +func (bh *blockHeap) Push(blockHash *externalapi.DomainHash) error { + ghostdagData, err := bh.ghostdagStore.Get(bh.dbContext, bh.stagingArea, blockHash, false) + if err != nil { + return err + } + + heap.Push(bh.impl, &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: blockHash, + GHOSTDAGData: ghostdagData, + }) + + return nil +} + +func (bh *blockHeap) PushSlice(blockHashes []*externalapi.DomainHash) error { + for _, blockHash := range blockHashes { + err := bh.Push(blockHash) + if err != nil { + return err + } + } + return nil +} + +// Len returns the length of this heap +func (bh *blockHeap) Len() int { + return bh.impl.Len() +} + +// ToSlice copies this heap to a slice +func (bh *blockHeap) ToSlice() []*externalapi.DomainHash { + length := bh.Len() + hashes := make([]*externalapi.DomainHash, length) + for i := 0; i < length; i++ { + hashes[i] = bh.Pop() + } + return hashes +} + +// sizedUpBlockHeap represents a mutable heap of Blocks, sorted by their blueWork+hash, capped by a specific size. 
+type sizedUpBlockHeap struct { + impl upHeap + ghostdagStore model.GHOSTDAGDataStore + dbContext model.DBReader + stagingArea *model.StagingArea +} + +// newSizedUpHeap initializes and returns a new sizedUpBlockHeap +func (dtm *dagTraversalManager) newSizedUpHeap(stagingArea *model.StagingArea, cap int) *sizedUpBlockHeap { + h := sizedUpBlockHeap{ + impl: upHeap{baseHeap{slice: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, cap), ghostdagManager: dtm.ghostdagManager}}, + ghostdagStore: dtm.ghostdagDataStore, + dbContext: dtm.databaseContext, + stagingArea: stagingArea, + } + heap.Init(&h.impl) + return &h +} + +func (dtm *dagTraversalManager) newSizedUpHeapFromSlice(stagingArea *model.StagingArea, slice []*externalapi.BlockGHOSTDAGDataHashPair) *sizedUpBlockHeap { + sliceClone := make([]*externalapi.BlockGHOSTDAGDataHashPair, len(slice), cap(slice)) + copy(sliceClone, slice) + h := sizedUpBlockHeap{ + impl: upHeap{baseHeap{slice: sliceClone, ghostdagManager: dtm.ghostdagManager}}, + ghostdagStore: dtm.ghostdagDataStore, + dbContext: dtm.databaseContext, + stagingArea: stagingArea, + } + return &h +} + +// len returns the length of this heap +func (sbh *sizedUpBlockHeap) len() int { + return sbh.impl.Len() +} + +// pop removes the block with lowest blueWork+hash from this heap and returns it +func (sbh *sizedUpBlockHeap) pop() *externalapi.DomainHash { + return heap.Pop(&sbh.impl).(*externalapi.BlockGHOSTDAGDataHashPair).Hash +} + +// tryPushWithGHOSTDAGData is just like tryPush but the caller provides the ghostdagData of the block. 
+func (sbh *sizedUpBlockHeap) tryPushWithGHOSTDAGData(blockHash *externalapi.DomainHash, + ghostdagData *externalapi.BlockGHOSTDAGData) (bool, error) { + + node := &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: blockHash, + GHOSTDAGData: ghostdagData, + } + if len(sbh.impl.slice) == cap(sbh.impl.slice) { + min := sbh.impl.peek() + // if the heap is full, and the new block is less than the minimum, return false + if blockGHOSTDAGDataHashPairLess(node, min, sbh.impl.ghostdagManager) { + return false, nil + } + sbh.pop() + } + heap.Push(&sbh.impl, node) + return true, nil +} + +// tryPush tries to push the block onto the heap, if the heap is full and it's less than the minimum it rejects it +func (sbh *sizedUpBlockHeap) tryPush(blockHash *externalapi.DomainHash) (bool, error) { + ghostdagData, err := sbh.ghostdagStore.Get(sbh.dbContext, sbh.stagingArea, blockHash, false) + if err != nil { + return false, err + } + return sbh.tryPushWithGHOSTDAGData(blockHash, ghostdagData) +} diff --git a/domain/consensus/processes/dagtraversalmanager/dagtraversalmanager.go b/domain/consensus/processes/dagtraversalmanager/dagtraversalmanager.go new file mode 100644 index 0000000..9bcc503 --- /dev/null +++ b/domain/consensus/processes/dagtraversalmanager/dagtraversalmanager.go @@ -0,0 +1,129 @@ +package dagtraversalmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// dagTraversalManager exposes methods for traversing blocks +// in the DAG +type dagTraversalManager struct { + databaseContext model.DBReader + + dagTopologyManager model.DAGTopologyManager + ghostdagManager model.GHOSTDAGManager + ghostdagDataStore model.GHOSTDAGDataStore + reachabilityManager model.ReachabilityManager + daaWindowStore model.BlocksWithTrustedDataDAAWindowStore + genesisHash *externalapi.DomainHash + difficultyAdjustmentWindowSize int + windowHeapSliceStore 
model.WindowHeapSliceStore +} + +// New instantiates a new DAGTraversalManager +func New( + databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + ghostdagDataStore model.GHOSTDAGDataStore, + reachabilityManager model.ReachabilityManager, + ghostdagManager model.GHOSTDAGManager, + daaWindowStore model.BlocksWithTrustedDataDAAWindowStore, + windowHeapSliceStore model.WindowHeapSliceStore, + genesisHash *externalapi.DomainHash, + difficultyAdjustmentWindowSize int) model.DAGTraversalManager { + return &dagTraversalManager{ + databaseContext: databaseContext, + dagTopologyManager: dagTopologyManager, + ghostdagDataStore: ghostdagDataStore, + reachabilityManager: reachabilityManager, + ghostdagManager: ghostdagManager, + daaWindowStore: daaWindowStore, + + genesisHash: genesisHash, + difficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, + windowHeapSliceStore: windowHeapSliceStore, + } +} + +func (dtm *dagTraversalManager) LowestChainBlockAboveOrEqualToBlueScore(stagingArea *model.StagingArea, highHash *externalapi.DomainHash, blueScore uint64) (*externalapi.DomainHash, error) { + highBlockGHOSTDAGData, err := dtm.ghostdagDataStore.Get(dtm.databaseContext, stagingArea, highHash, false) + if err != nil { + return nil, err + } + + if highBlockGHOSTDAGData.BlueScore() < blueScore { + return nil, errors.Errorf("the given blue score %d is higher than block %s blue score of %d", + blueScore, highHash, highBlockGHOSTDAGData.BlueScore()) + } + + currentHash := highHash + currentBlockGHOSTDAGData := highBlockGHOSTDAGData + + for !currentHash.Equal(dtm.genesisHash) { + selectedParentBlockGHOSTDAGData, err := dtm.ghostdagDataStore.Get(dtm.databaseContext, stagingArea, + currentBlockGHOSTDAGData.SelectedParent(), false) + if err != nil { + return nil, err + } + + if selectedParentBlockGHOSTDAGData.BlueScore() < blueScore { + break + } + currentHash = currentBlockGHOSTDAGData.SelectedParent() + currentBlockGHOSTDAGData = 
selectedParentBlockGHOSTDAGData + } + + return currentHash, nil +} + +func (dtm *dagTraversalManager) CalculateChainPath(stagingArea *model.StagingArea, + fromBlockHash, toBlockHash *externalapi.DomainHash) (*externalapi.SelectedChainPath, error) { + + // Walk down from fromBlockHash until we reach the common selected + // parent chain ancestor of fromBlockHash and toBlockHash. Note + // that this slice will be empty if fromBlockHash is the selected + // parent of toBlockHash + var removed []*externalapi.DomainHash + current := fromBlockHash + for { + isCurrentInTheSelectedParentChainOfNewVirtualSelectedParent, err := + dtm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, current, toBlockHash) + if err != nil { + return nil, err + } + if isCurrentInTheSelectedParentChainOfNewVirtualSelectedParent { + break + } + removed = append(removed, current) + + currentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(dtm.databaseContext, stagingArea, current, false) + if err != nil { + return nil, err + } + current = currentGHOSTDAGData.SelectedParent() + } + commonAncestor := current + + // Walk down from the toBlockHash to the common ancestor + var added []*externalapi.DomainHash + current = toBlockHash + for !current.Equal(commonAncestor) { + added = append(added, current) + currentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(dtm.databaseContext, stagingArea, current, false) + if err != nil { + return nil, err + } + current = currentGHOSTDAGData.SelectedParent() + } + + // Reverse the order of `added` so that it's sorted from low hash to high hash + for i, j := 0, len(added)-1; i < j; i, j = i+1, j-1 { + added[i], added[j] = added[j], added[i] + } + + return &externalapi.SelectedChainPath{ + Added: added, + Removed: removed, + }, nil +} diff --git a/domain/consensus/processes/dagtraversalmanager/dagtraversalmanager_test.go b/domain/consensus/processes/dagtraversalmanager/dagtraversalmanager_test.go new file mode 100644 index 0000000..6aaf655 --- /dev/null +++ 
b/domain/consensus/processes/dagtraversalmanager/dagtraversalmanager_test.go @@ -0,0 +1,117 @@ +package dagtraversalmanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestLowestChainBlockAboveOrEqualToBlueScore(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.FinalityDuration = 10 * consensusConfig.TargetTimePerBlock + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, + "TestLowestChainBlockAboveOrEqualToBlueScore") + if err != nil { + t.Fatalf("NewTestConsensus: %s", err) + } + defer tearDown(false) + + stagingArea := model.NewStagingArea() + + checkExpectedBlock := func(highHash *externalapi.DomainHash, blueScore uint64, expected *externalapi.DomainHash) { + blockHash, err := tc.DAGTraversalManager().LowestChainBlockAboveOrEqualToBlueScore(stagingArea, highHash, blueScore) + if err != nil { + t.Fatalf("LowestChainBlockAboveOrEqualToBlueScore: %+v", err) + } + + if !blockHash.Equal(expected) { + t.Fatalf("Expected block %s but got %s", expected, blockHash) + } + } + + checkBlueScore := func(blockHash *externalapi.DomainHash, expectedBlueScore uint64) { + ghostdagData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, blockHash, false) + if err != nil { + t.Fatalf("GHOSTDAGDataStore().Get: %+v", err) + } + + if ghostdagData.BlueScore() != expectedBlueScore { + t.Fatalf("Expected blue score %d but got %d", expectedBlueScore, ghostdagData.BlueScore()) + } + } + + chain := []*externalapi.DomainHash{consensusConfig.GenesisHash} + tipHash := consensusConfig.GenesisHash + for i := 0; i < 9; i++ { + var err error + tipHash, _, err = 
tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain = append(chain, tipHash) + } + + sideChain1TipHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{sideChain1TipHash, tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain = append(chain, tipHash) + blueScore11BlockHash := tipHash + checkBlueScore(blueScore11BlockHash, 11) + + for i := 0; i < 5; i++ { + var err error + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain = append(chain, tipHash) + } + + sideChain2TipHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{sideChain2TipHash, tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + chain = append(chain, tipHash) + + blueScore18BlockHash := tipHash + checkBlueScore(blueScore18BlockHash, 18) + + for i := 0; i < 3; i++ { + var err error + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain = append(chain, tipHash) + } + + // Check by exact blue score + checkExpectedBlock(tipHash, 0, consensusConfig.GenesisHash) + checkExpectedBlock(tipHash, 5, chain[5]) + checkExpectedBlock(tipHash, 19, chain[len(chain)-3]) + + // Check by non exact blue score + checkExpectedBlock(tipHash, 17, blueScore18BlockHash) + checkExpectedBlock(tipHash, 10, blueScore11BlockHash) + }) +} diff --git a/domain/consensus/processes/dagtraversalmanager/selected_child_iterator.go b/domain/consensus/processes/dagtraversalmanager/selected_child_iterator.go new file mode 100644 index 
0000000..33e8326 --- /dev/null +++ b/domain/consensus/processes/dagtraversalmanager/selected_child_iterator.go @@ -0,0 +1,109 @@ +package dagtraversalmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type selectedChildIterator struct { + dagTraversalManager model.DAGTraversalManager + + includeLowHash bool + highHash, lowHash *externalapi.DomainHash + current *externalapi.DomainHash + err error + isClosed bool + stagingArea *model.StagingArea +} + +func (s *selectedChildIterator) First() bool { + if s.isClosed { + panic("Tried using a closed SelectedChildIterator") + } + s.current = s.lowHash + if s.includeLowHash { + return true + } + + return s.Next() +} + +func (s *selectedChildIterator) Next() bool { + if s.isClosed { + panic("Tried using a closed SelectedChildIterator") + } + if s.err != nil { + return true + } + + selectedChild, err := s.dagTraversalManager.SelectedChild(s.stagingArea, s.highHash, s.current) + if errors.Is(err, errNoSelectedChild) { + return false + } + if err != nil { + s.current = nil + s.err = err + return true + } + + s.current = selectedChild + return true +} + +func (s *selectedChildIterator) Get() (*externalapi.DomainHash, error) { + if s.isClosed { + return nil, errors.New("Tried using a closed SelectedChildIterator") + } + return s.current, s.err +} + +func (s *selectedChildIterator) Close() error { + if s.isClosed { + return errors.New("Tried using a closed SelectedChildIterator") + } + s.isClosed = true + s.highHash = nil + s.lowHash = nil + s.current = nil + s.err = nil + return nil +} + +// SelectedChildIterator returns a BlockIterator that iterates from lowHash (exclusive) to highHash (inclusive) over +// highHash's selected parent chain +func (dtm *dagTraversalManager) SelectedChildIterator(stagingArea *model.StagingArea, + highHash, lowHash *externalapi.DomainHash, includeLowHash bool) 
(model.BlockIterator, error) { + + isLowHashInSelectedParentChainOfHighHash, err := dtm.dagTopologyManager.IsInSelectedParentChainOf( + stagingArea, lowHash, highHash) + if err != nil { + return nil, err + } + + if !isLowHashInSelectedParentChainOfHighHash { + return nil, errors.Errorf("%s is not in the selected parent chain of %s", highHash, lowHash) + } + return &selectedChildIterator{ + dagTraversalManager: dtm, + includeLowHash: includeLowHash, + highHash: highHash, + lowHash: lowHash, + current: lowHash, + stagingArea: stagingArea, + }, nil +} + +var errNoSelectedChild = errors.New("errNoSelectedChild") + +func (dtm *dagTraversalManager) SelectedChild(stagingArea *model.StagingArea, + highHash, lowHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + + // The selected child is in fact the next reachability tree nextAncestor + nextAncestor, err := dtm.reachabilityManager.FindNextAncestor(stagingArea, highHash, lowHash) + if err != nil { + return nil, errors.Wrapf(errNoSelectedChild, "no selected child for %s from the point of view of %s", + lowHash, highHash) + } + return nextAncestor, nil +} diff --git a/domain/consensus/processes/dagtraversalmanager/window.go b/domain/consensus/processes/dagtraversalmanager/window.go new file mode 100644 index 0000000..742c1c9 --- /dev/null +++ b/domain/consensus/processes/dagtraversalmanager/window.go @@ -0,0 +1,200 @@ +package dagtraversalmanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func (dtm *dagTraversalManager) DAABlockWindow(stagingArea *model.StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + return dtm.BlockWindow(stagingArea, highHash, dtm.difficultyAdjustmentWindowSize) +} + +// BlockWindow returns a blockWindow of the given size that contains the +// blocks in the past of highHash, the 
// BlockWindow returns a blockWindow of the given size that contains the
// blocks in the past of highHash, the sorting is unspecified.
// If the number of blocks in the past of highHash is smaller than windowSize,
// the returned window simply contains fewer blocks (no padding is applied).
func (dtm *dagTraversalManager) BlockWindow(stagingArea *model.StagingArea, highHash *externalapi.DomainHash,
	windowSize int) ([]*externalapi.DomainHash, error) {

	windowHeap, err := dtm.blockWindowHeap(stagingArea, highHash, windowSize)
	if err != nil {
		return nil, err
	}

	// Flatten the heap's backing slice into plain hashes; heap order is
	// intentionally not normalized (see doc comment above).
	window := make([]*externalapi.DomainHash, 0, len(windowHeap.impl.slice))
	for _, b := range windowHeap.impl.slice {
		window = append(window, b.Hash)
	}
	return window, nil
}

// blockWindowHeap returns the sized-up heap for highHash's window, serving it
// from the windowHeapSliceStore cache when possible and computing (then
// caching) it otherwise.
func (dtm *dagTraversalManager) blockWindowHeap(stagingArea *model.StagingArea,
	highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {
	windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, highHash, windowSize)
	sliceNotCached := database.IsNotFoundError(err)
	// A not-found error just means a cache miss; any other error is fatal.
	if !sliceNotCached && err != nil {
		return nil, err
	}
	if !sliceNotCached {
		return dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice), nil
	}

	heap, err := dtm.calculateBlockWindowHeap(stagingArea, highHash, windowSize)
	if err != nil {
		return nil, err
	}

	// The virtual block's window changes constantly, so caching it would
	// only pollute the store.
	if !highHash.Equal(model.VirtualBlockHash) {
		dtm.windowHeapSliceStore.Stage(stagingArea, highHash, windowSize, heap.impl.slice)
	}
	return heap, nil
}

// calculateBlockWindowHeap builds highHash's window heap from scratch by
// walking down the selected parent chain and folding each step's merge set
// into a size-capped min-heap.
func (dtm *dagTraversalManager) calculateBlockWindowHeap(stagingArea *model.StagingArea,
	highHash *externalapi.DomainHash, windowSize int) (*sizedUpBlockHeap, error) {

	windowHeap := dtm.newSizedUpHeap(stagingArea, windowSize)
	// Genesis has an empty past, and a zero-sized window is trivially empty.
	if highHash.Equal(dtm.genesisHash) {
		return windowHeap, nil
	}
	if windowSize == 0 {
		return windowHeap, nil
	}

	current := highHash
	currentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(dtm.databaseContext, stagingArea, highHash, false)
	if err != nil {
		return nil, err
	}

	// If the block has a trusted DAA window attached, we just take it as is and don't use cache of selected parent to
	// build the window. This is because tryPushMergeSet might not be able to find all the GHOSTDAG data that is
	// associated with the block merge set.
	_, err = dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
	isNonTrustedBlock := database.IsNotFoundError(err)
	if !isNonTrustedBlock && err != nil {
		return nil, err
	}

	if isNonTrustedBlock && currentGHOSTDAGData.SelectedParent() != nil {
		// Fast path: reuse the selected parent's cached window and merge in
		// only the current block's own merge set.
		windowHeapSlice, err := dtm.windowHeapSliceStore.Get(stagingArea, currentGHOSTDAGData.SelectedParent(), windowSize)
		selectedParentNotCached := database.IsNotFoundError(err)
		if !selectedParentNotCached && err != nil {
			return nil, err
		}
		if !selectedParentNotCached {
			// Note: this shadows the outer windowHeap on purpose — the cached
			// slice replaces the freshly allocated (empty) heap.
			windowHeap := dtm.newSizedUpHeapFromSlice(stagingArea, windowHeapSlice)
			if !currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
				selectedParentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(
					dtm.databaseContext, stagingArea, currentGHOSTDAGData.SelectedParent(), false)
				if err != nil {
					return nil, err
				}

				_, err = dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
				if err != nil {
					return nil, err
				}
			}

			return windowHeap, nil
		}
	}

	// Walk down the chain until you finish or find a trusted block and then take complete the rest
	// of the window with the trusted window.
	for {
		if currentGHOSTDAGData.SelectedParent().Equal(dtm.genesisHash) {
			break
		}

		_, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, 0)
		currentIsNonTrustedBlock := database.IsNotFoundError(err)
		if !currentIsNonTrustedBlock && err != nil {
			return nil, err
		}

		if !currentIsNonTrustedBlock {
			// Trusted block: its pre-computed DAA window completes ours.
			// Window entries are indexed 0..n until not-found.
			for i := uint64(0); ; i++ {
				daaBlock, err := dtm.daaWindowStore.DAAWindowBlock(dtm.databaseContext, stagingArea, current, i)
				if database.IsNotFoundError(err) {
					break
				}
				if err != nil {
					return nil, err
				}

				_, err = windowHeap.tryPushWithGHOSTDAGData(daaBlock.Hash, daaBlock.GHOSTDAGData)
				if err != nil {
					return nil, err
				}

				// Right now we go over all of the window of `current` and filter blocks on the fly.
				// We can optimize it if we make sure that daaWindowStore stores sorted windows, and
				// then return from this function once one block was not added to the heap.
			}
			break
		}

		selectedParentGHOSTDAGData, err := dtm.ghostdagDataStore.Get(
			dtm.databaseContext, stagingArea, currentGHOSTDAGData.SelectedParent(), false)
		if err != nil {
			return nil, err
		}

		done, err := dtm.tryPushMergeSet(windowHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData)
		if err != nil {
			return nil, err
		}
		// done means the heap is full and everything further down the chain
		// has lower blueWork, so the window is complete.
		if done {
			break
		}

		current = currentGHOSTDAGData.SelectedParent()
		currentGHOSTDAGData = selectedParentGHOSTDAGData
	}

	return windowHeap, nil
}

// tryPushMergeSet pushes currentGHOSTDAGData's selected parent and merge set
// (blues then reds, each in descending blueWork order) into windowHeap. It
// returns true when the heap rejected the selected parent, meaning no older
// chain block can enter the window.
func (dtm *dagTraversalManager) tryPushMergeSet(windowHeap *sizedUpBlockHeap, currentGHOSTDAGData, selectedParentGHOSTDAGData *externalapi.BlockGHOSTDAGData) (bool, error) {
	added, err := windowHeap.tryPushWithGHOSTDAGData(currentGHOSTDAGData.SelectedParent(), selectedParentGHOSTDAGData)
	if err != nil {
		return false, err
	}

	// If the window is full and the selected parent is less than the minimum then we break
	// because this means that there cannot be any more blocks in the past with higher blueWork
	if !added {
		return true, nil
	}

	// Now we go over the merge set.
	// Remove the SP from the blue merge set because we already added it.
	mergeSetBlues := currentGHOSTDAGData.MergeSetBlues()[1:]
	// Go over the merge set in reverse because it's ordered in reverse by blueWork.
	for i := len(mergeSetBlues) - 1; i >= 0; i-- {
		added, err := windowHeap.tryPush(mergeSetBlues[i])
		if err != nil {
			return false, err
		}
		// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
		if !added {
			break
		}
	}

	mergeSetReds := currentGHOSTDAGData.MergeSetReds()
	for i := len(mergeSetReds) - 1; i >= 0; i-- {
		added, err := windowHeap.tryPush(mergeSetReds[i])
		if err != nil {
			return false, err
		}
		// If it's smaller than minimum then we won't be able to add the rest because they're even smaller.
		if !added {
			break
		}
	}

	return false, nil
}
id: "B", + expectedWindow: []string{}, + }, + { + parents: []string{"B"}, + id: "C", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"B"}, + id: "D", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"C", "D"}, + id: "E", + expectedWindow: []string{"C", "D", "B"}, + }, + { + parents: []string{"C", "D"}, + id: "F", + expectedWindow: []string{"C", "D", "B"}, + }, + { + parents: []string{"A"}, + id: "G", + expectedWindow: []string{}, + }, + { + parents: []string{"G"}, + id: "H", + expectedWindow: []string{"G"}, + }, + { + parents: []string{"H", "F"}, + id: "I", + expectedWindow: []string{"F", "C", "H", "D", "B", "G"}, + }, + { + parents: []string{"I"}, + id: "J", + expectedWindow: []string{"I", "F", "C", "H", "D", "B", "G"}, + }, + { + parents: []string{"J"}, + id: "K", + expectedWindow: []string{"J", "I", "F", "C", "H", "D", "B", "G"}, + }, + { + parents: []string{"K"}, + id: "L", + expectedWindow: []string{"K", "J", "I", "F", "C", "H", "D", "B", "G"}, + }, + { + parents: []string{"L"}, + id: "M", + expectedWindow: []string{"L", "K", "J", "I", "F", "C", "H", "D", "B", "G"}, + }, + { + parents: []string{"M"}, + id: "N", + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "H", "D", "B"}, + }, + { + parents: []string{"N"}, + id: "O", + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "H", "D"}, + }, + }, + dagconfig.TestnetParams.Name: { + { + parents: []string{"A"}, + id: "B", + expectedWindow: []string{}, + }, + { + parents: []string{"B"}, + id: "C", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"B"}, + id: "D", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"C", "D"}, + id: "E", + expectedWindow: []string{"D", "C", "B"}, + }, + { + parents: []string{"C", "D"}, + id: "F", + expectedWindow: []string{"D", "C", "B"}, + }, + { + parents: []string{"A"}, + id: "G", + expectedWindow: []string{}, + }, + { + parents: []string{"G"}, + id: "H", + expectedWindow: []string{"G"}, + }, + { + parents: 
[]string{"H", "F"}, + id: "I", + expectedWindow: []string{"F", "D", "H", "C", "B", "G"}, + }, + { + parents: []string{"I"}, + id: "J", + expectedWindow: []string{"I", "F", "D", "H", "C", "B", "G"}, + }, + { + parents: []string{"J"}, + id: "K", + expectedWindow: []string{"J", "I", "F", "D", "H", "C", "B", "G"}, + }, + { + parents: []string{"K"}, + id: "L", + expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "B", "G"}, + }, + { + parents: []string{"L"}, + id: "M", + expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "B", "G"}, + }, + { + parents: []string{"M"}, + id: "N", + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "B"}, + }, + { + parents: []string{"N"}, + id: "O", + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"}, + }, + }, + dagconfig.DevnetParams.Name: { + { + parents: []string{"A"}, + id: "B", + expectedWindow: []string{}, + }, + { + parents: []string{"B"}, + id: "C", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"B"}, + id: "D", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"C", "D"}, + id: "E", + expectedWindow: []string{"D", "C", "B"}, + }, + { + parents: []string{"C", "D"}, + id: "F", + expectedWindow: []string{"D", "C", "B"}, + }, + { + parents: []string{"A"}, + id: "G", + expectedWindow: []string{}, + }, + { + parents: []string{"G"}, + id: "H", + expectedWindow: []string{"G"}, + }, + { + parents: []string{"H", "F"}, + id: "I", + expectedWindow: []string{"F", "D", "C", "H", "G", "B"}, + }, + { + parents: []string{"I"}, + id: "J", + expectedWindow: []string{"I", "F", "D", "C", "H", "G", "B"}, + }, + { + parents: []string{"J"}, + id: "K", + expectedWindow: []string{"J", "I", "F", "D", "C", "H", "G", "B"}, + }, + { + parents: []string{"K"}, + id: "L", + expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "G", "B"}, + }, + { + parents: []string{"L"}, + id: "M", + expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "G", "B"}, + }, + 
{ + parents: []string{"M"}, + id: "N", + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "G"}, + }, + { + parents: []string{"N"}, + id: "O", + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"}, + }, + }, + dagconfig.SimnetParams.Name: { + { + parents: []string{"A"}, + id: "B", + expectedWindow: []string{}, + }, + { + parents: []string{"B"}, + id: "C", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"B"}, + id: "D", + expectedWindow: []string{"B"}, + }, + { + parents: []string{"C", "D"}, + id: "E", + expectedWindow: []string{"C", "D", "B"}, + }, + { + parents: []string{"C", "D"}, + id: "F", + expectedWindow: []string{"C", "D", "B"}, + }, + { + parents: []string{"A"}, + id: "G", + expectedWindow: []string{}, + }, + { + parents: []string{"G"}, + id: "H", + expectedWindow: []string{"G"}, + }, + { + parents: []string{"H", "F"}, + id: "I", + expectedWindow: []string{"F", "C", "D", "H", "B", "G"}, + }, + { + parents: []string{"I"}, + id: "J", + expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"}, + }, + { + parents: []string{"J"}, + id: "K", + expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"}, + }, + { + parents: []string{"K"}, + id: "L", + expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"}, + }, + { + parents: []string{"L"}, + id: "M", + expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"}, + }, + { + parents: []string{"M"}, + id: "N", + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"}, + }, + { + parents: []string{"N"}, + id: "O", + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"}, + }, + }, + } + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.K = 1 + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestBlockWindow") + if err != nil { + t.Fatalf("NewTestConsensus: %s", err) + } + defer 
tearDown(false) + + windowSize := 10 + blockByIDMap := make(map[string]*externalapi.DomainHash) + idByBlockMap := make(map[externalapi.DomainHash]string) + blockByIDMap["A"] = consensusConfig.GenesisHash + idByBlockMap[*consensusConfig.GenesisHash] = "A" + + blocksData := tests[consensusConfig.Name] + + for _, blockData := range blocksData { + parents := hashset.New() + for _, parentID := range blockData.parents { + parent := blockByIDMap[parentID] + parents.Add(parent) + } + + block, _, err := tc.AddBlock(parents.ToSlice(), nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + blockByIDMap[blockData.id] = block + idByBlockMap[*block] = blockData.id + + stagingArea := model.NewStagingArea() + + window, err := tc.DAGTraversalManager().BlockWindow(stagingArea, block, windowSize) + if err != nil { + t.Fatalf("BlockWindow: %s", err) + } + sort.Sort(testutils.NewTestGhostDAGSorter(stagingArea, window, tc, t)) + if err := checkWindowIDs(window, blockData.expectedWindow, idByBlockMap); err != nil { + t.Errorf("Unexpected values for window for block %s: %s", blockData.id, err) + } + } + }) +} + +func checkWindowIDs(window []*externalapi.DomainHash, expectedIDs []string, idByBlockMap map[externalapi.DomainHash]string) error { + ids := make([]string, len(window)) + for i, node := range window { + ids[i] = idByBlockMap[*node] + } + if !reflect.DeepEqual(ids, expectedIDs) { + return errors.Errorf("window expected to have blocks %s but got %s", expectedIDs, ids) + } + return nil +} diff --git a/domain/consensus/processes/difficultymanager/blockwindow.go b/domain/consensus/processes/difficultymanager/blockwindow.go new file mode 100644 index 0000000..c3451c2 --- /dev/null +++ b/domain/consensus/processes/difficultymanager/blockwindow.go @@ -0,0 +1,103 @@ +package difficultymanager + +import ( + "math" + "math/big" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + 
"github.com/spectre-project/spectred/util/difficulty" +) + +type difficultyBlock struct { + timeInMilliseconds int64 + Bits uint32 + hash *externalapi.DomainHash + blueWork *big.Int +} + +type blockWindow []difficultyBlock + +func (dm *difficultyManager) getDifficultyBlock( + stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (difficultyBlock, error) { + + header, err := dm.headerStore.BlockHeader(dm.databaseContext, stagingArea, blockHash) + if err != nil { + return difficultyBlock{}, err + } + return difficultyBlock{ + timeInMilliseconds: header.TimeInMilliseconds(), + Bits: header.Bits(), + hash: blockHash, + blueWork: header.BlueWork(), + }, nil +} + +// blockWindow returns a blockWindow of the given size that contains the +// blocks in the past of startingNode, the sorting is unspecified. +// If the number of blocks in the past of startingNode is less then windowSize, +// the window will be padded by genesis blocks to achieve a size of windowSize. +func (dm *difficultyManager) blockWindow(stagingArea *model.StagingArea, startingNode *externalapi.DomainHash, windowSize int) (blockWindow, + []*externalapi.DomainHash, error) { + + window := make(blockWindow, 0, windowSize) + windowHashes, err := dm.dagTraversalManager.BlockWindow(stagingArea, startingNode, windowSize) + if err != nil { + return nil, nil, err + } + + for _, hash := range windowHashes { + block, err := dm.getDifficultyBlock(stagingArea, hash) + if err != nil { + return nil, nil, err + } + window = append(window, block) + } + return window, windowHashes, nil +} + +func ghostdagLess(blockA *difficultyBlock, blockB *difficultyBlock) bool { + switch blockA.blueWork.Cmp(blockB.blueWork) { + case -1: + return true + case 1: + return false + case 0: + return blockA.hash.Less(blockB.hash) + default: + panic("big.Int.Cmp is defined to always return -1/1/0 and nothing else") + } +} + +func (window blockWindow) minMaxTimestamps() (min, max int64, minIndex int) { + min = math.MaxInt64 + 
minIndex = 0 + max = 0 + for i, block := range window { + // If timestamps are equal we ghostdag compare in order to reach consensus on `minIndex` + if block.timeInMilliseconds < min || + (block.timeInMilliseconds == min && ghostdagLess(&block, &window[minIndex])) { + min = block.timeInMilliseconds + minIndex = i + } + if block.timeInMilliseconds > max { + max = block.timeInMilliseconds + } + } + return +} + +func (window *blockWindow) remove(n int) { + (*window)[n] = (*window)[len(*window)-1] + *window = (*window)[:len(*window)-1] +} + +func (window blockWindow) averageTarget() *big.Int { + averageTarget := new(big.Int) + targetTmp := new(big.Int) + for _, block := range window { + difficulty.CompactToBigWithDestination(block.Bits, targetTmp) + averageTarget.Add(averageTarget, targetTmp) + } + return averageTarget.Div(averageTarget, big.NewInt(int64(len(window)))) +} diff --git a/domain/consensus/processes/difficultymanager/difficultymanager.go b/domain/consensus/processes/difficultymanager/difficultymanager.go new file mode 100644 index 0000000..b52baa3 --- /dev/null +++ b/domain/consensus/processes/difficultymanager/difficultymanager.go @@ -0,0 +1,213 @@ +package difficultymanager + +import ( + "math/big" + "time" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/math" + + "github.com/spectre-project/spectred/util/difficulty" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// DifficultyManager provides a method to resolve the +// difficulty value of a block +type difficultyManager struct { + databaseContext model.DBReader + ghostdagManager model.GHOSTDAGManager + ghostdagStore model.GHOSTDAGDataStore + headerStore model.BlockHeaderStore + daaBlocksStore model.DAABlocksStore + dagTopologyManager model.DAGTopologyManager + dagTraversalManager model.DAGTraversalManager + genesisHash *externalapi.DomainHash + 
powMax *big.Int + difficultyAdjustmentWindowSize int + disableDifficultyAdjustment bool + targetTimePerBlock time.Duration + genesisBits uint32 +} + +// New instantiates a new DifficultyManager +func New(databaseContext model.DBReader, + ghostdagManager model.GHOSTDAGManager, + ghostdagStore model.GHOSTDAGDataStore, + headerStore model.BlockHeaderStore, + daaBlocksStore model.DAABlocksStore, + dagTopologyManager model.DAGTopologyManager, + dagTraversalManager model.DAGTraversalManager, + powMax *big.Int, + difficultyAdjustmentWindowSize int, + disableDifficultyAdjustment bool, + targetTimePerBlock time.Duration, + genesisHash *externalapi.DomainHash, + genesisBits uint32) model.DifficultyManager { + return &difficultyManager{ + databaseContext: databaseContext, + ghostdagManager: ghostdagManager, + ghostdagStore: ghostdagStore, + headerStore: headerStore, + daaBlocksStore: daaBlocksStore, + dagTopologyManager: dagTopologyManager, + dagTraversalManager: dagTraversalManager, + powMax: powMax, + difficultyAdjustmentWindowSize: difficultyAdjustmentWindowSize, + disableDifficultyAdjustment: disableDifficultyAdjustment, + targetTimePerBlock: targetTimePerBlock, + genesisHash: genesisHash, + genesisBits: genesisBits, + } +} + +// StageDAADataAndReturnRequiredDifficulty calculates the DAA window, stages the DAA score and DAA added +// blocks, and returns the required difficulty for the given block. +// The reason this function both stages DAA data and returns the difficulty is because in order to calculate +// both of them we need to calculate the DAA window, which is a relatively heavy operation, so we reuse the +// block window instead of recalculating it for the two purposes. +// For cases where no staging should happen and the caller only needs to know the difficulty he should +// use RequiredDifficulty. 
+func (dm *difficultyManager) StageDAADataAndReturnRequiredDifficulty( + stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, + isBlockWithTrustedData bool) (uint32, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "StageDAADataAndReturnRequiredDifficulty") + defer onEnd() + + targetsWindow, windowHashes, err := dm.blockWindow(stagingArea, blockHash, dm.difficultyAdjustmentWindowSize) + if err != nil { + return 0, err + } + + err = dm.stageDAAScoreAndAddedBlocks(stagingArea, blockHash, windowHashes, isBlockWithTrustedData) + if err != nil { + return 0, err + } + + return dm.requiredDifficultyFromTargetsWindow(targetsWindow) +} + +// RequiredDifficulty returns the difficulty required for some block +func (dm *difficultyManager) RequiredDifficulty(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (uint32, error) { + targetsWindow, _, err := dm.blockWindow(stagingArea, blockHash, dm.difficultyAdjustmentWindowSize) + if err != nil { + return 0, err + } + + return dm.requiredDifficultyFromTargetsWindow(targetsWindow) +} + +func (dm *difficultyManager) requiredDifficultyFromTargetsWindow(targetsWindow blockWindow) (uint32, error) { + if dm.disableDifficultyAdjustment { + return dm.genesisBits, nil + } + + // in the past this was < 2 as the comment explains, we changed it to under the window size to + // make the hashrate(which is ~1.5GH/s) constant in the first 2641 blocks so that we won't have a lot of tips + + // We need at least 2 blocks to get a timestamp interval + // We could instead clamp the timestamp difference to `targetTimePerBlock`, + // but then everything will cancel out and we'll get the target from the last block, which will be the same as genesis. 
+ // We add 64 as a safety margin + if len(targetsWindow) < 2 || len(targetsWindow) < dm.difficultyAdjustmentWindowSize { + return dm.genesisBits, nil + } + + windowMinTimestamp, windowMaxTimeStamp, windowMinIndex := targetsWindow.minMaxTimestamps() + // Remove the block with the minimum timestamp from the window so we calculate the average target of dag.difficultyAdjustmentWindowSize blocks + targetsWindow.remove(windowMinIndex) + + // Calculate new target difficulty as: + // averageWindowTarget * ((windowMaxTimestamp - windowMinTimestamp) / (targetTimePerBlock * windowSize)) + // The result uses integer division which means it will be slightly + // rounded down. + div := new(big.Int) + newTarget := targetsWindow.averageTarget() + newTarget. + // We need to clamp the timestamp difference to 1 so that we'll never get a 0 target. + Mul(newTarget, div.SetInt64(math.MaxInt64(windowMaxTimeStamp-windowMinTimestamp, 1))). + Div(newTarget, div.SetInt64(dm.targetTimePerBlock.Milliseconds())). + Div(newTarget, div.SetUint64(uint64(len(targetsWindow)))) + if newTarget.Cmp(dm.powMax) > 0 { + return difficulty.BigToCompact(dm.powMax), nil + } + newTargetBits := difficulty.BigToCompact(newTarget) + return newTargetBits, nil + } + + func (dm *difficultyManager) stageDAAScoreAndAddedBlocks(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, + windowHashes []*externalapi.DomainHash, + isBlockWithTrustedData bool) error { + + onEnd := logger.LogAndMeasureExecutionTime(log, "stageDAAScoreAndAddedBlocks") + defer onEnd() + + daaScore, addedBlocks, err := dm.calculateDaaScoreAndAddedBlocks(stagingArea, blockHash, windowHashes, isBlockWithTrustedData) + if err != nil { + return err + } + + dm.daaBlocksStore.StageDAAScore(stagingArea, blockHash, daaScore) + dm.daaBlocksStore.StageBlockDAAAddedBlocks(stagingArea, blockHash, addedBlocks) + return nil + } + + func (dm *difficultyManager) calculateDaaScoreAndAddedBlocks(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash, + windowHashes 
[]*externalapi.DomainHash, + isBlockWithTrustedData bool) (uint64, []*externalapi.DomainHash, error) { + + if blockHash.Equal(dm.genesisHash) { + genesisHeader, err := dm.headerStore.BlockHeader(dm.databaseContext, stagingArea, dm.genesisHash) + if err != nil { + return 0, nil, err + } + return genesisHeader.DAAScore(), nil, nil + } + + ghostdagData, err := dm.ghostdagStore.Get(dm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return 0, nil, err + } + mergeSetLength := len(ghostdagData.MergeSetBlues()) + len(ghostdagData.MergeSetReds()) + mergeSet := make(map[externalapi.DomainHash]struct{}, mergeSetLength) + for _, hash := range ghostdagData.MergeSetBlues() { + mergeSet[*hash] = struct{}{} + } + + for _, hash := range ghostdagData.MergeSetReds() { + mergeSet[*hash] = struct{}{} + } + + // TODO: Consider optimizing by breaking the loop once you arrive to the + // window block with blue work higher than all non-added merge set blocks. + daaAddedBlocks := make([]*externalapi.DomainHash, 0, len(mergeSet)) + for _, hash := range windowHashes { + if _, exists := mergeSet[*hash]; exists { + daaAddedBlocks = append(daaAddedBlocks, hash) + if len(daaAddedBlocks) == len(mergeSet) { + break + } + } + } + + var daaScore uint64 + if isBlockWithTrustedData { + daaScore, err = dm.daaBlocksStore.DAAScore(dm.databaseContext, stagingArea, blockHash) + if err != nil { + return 0, nil, err + } + } else { + selectedParentDAAScore, err := dm.daaBlocksStore.DAAScore(dm.databaseContext, stagingArea, ghostdagData.SelectedParent()) + if err != nil { + return 0, nil, err + } + daaScore = selectedParentDAAScore + uint64(len(daaAddedBlocks)) + } + + return daaScore, daaAddedBlocks, nil +} diff --git a/domain/consensus/processes/difficultymanager/difficultymanager_test.go b/domain/consensus/processes/difficultymanager/difficultymanager_test.go new file mode 100644 index 0000000..66debed --- /dev/null +++ 
b/domain/consensus/processes/difficultymanager/difficultymanager_test.go @@ -0,0 +1,358 @@ +package difficultymanager_test + +import ( + "testing" + "time" + + "github.com/spectre-project/spectred/util/difficulty" + + "github.com/spectre-project/spectred/util/mstime" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +func TestDifficulty(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + if consensusConfig.DisableDifficultyAdjustment { + return + } + // This test generates 3066 blocks above genesis with at least 1 second between each block, amounting to + // a bit less then an hour of timestamps. + // To prevent rejected blocks due to timestamps in the future, the following safeguard makes sure + // the genesis block is at least 1 hour in the past. + if consensusConfig.GenesisBlock.Header.TimeInMilliseconds() > mstime.ToMSTime(time.Now().Add(-time.Hour)).UnixMilliseconds() { + t.Fatalf("TestDifficulty requires the GenesisBlock to be at least 1 hour old to pass") + } + + consensusConfig.K = 1 + consensusConfig.DifficultyAdjustmentWindowSize = 140 + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDifficulty") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + stagingArea := model.NewStagingArea() + + addBlock := func(blockTime int64, parents ...*externalapi.DomainHash) (*externalapi.DomainBlock, *externalapi.DomainHash) { + bluestParent, err := tc.GHOSTDAGManager().ChooseSelectedParent(stagingArea, parents...) 
+ if err != nil { + t.Fatalf("ChooseSelectedParent: %+v", err) + } + + if blockTime == 0 { + header, err := tc.BlockHeaderStore().BlockHeader(tc.DatabaseContext(), stagingArea, bluestParent) + if err != nil { + t.Fatalf("BlockHeader: %+v", err) + } + + blockTime = header.TimeInMilliseconds() + consensusConfig.TargetTimePerBlock.Milliseconds() + } + + block, _, err := tc.BuildBlockWithParents(parents, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %+v", err) + } + + newHeader := block.Header.ToMutable() + newHeader.SetTimeInMilliseconds(blockTime) + block.Header = newHeader.ToImmutable() + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + return block, consensushashing.BlockHash(block) + } + + minimumTime := func(parents ...*externalapi.DomainHash) int64 { + var tempHash externalapi.DomainHash + stagingArea := model.NewStagingArea() + tc.BlockRelationStore().StageBlockRelation(stagingArea, &tempHash, &model.BlockRelations{ + Parents: parents, + Children: nil, + }) + + err = tc.GHOSTDAGManager().GHOSTDAG(stagingArea, &tempHash) + if err != nil { + t.Fatalf("GHOSTDAG: %+v", err) + } + + pastMedianTime, err := tc.PastMedianTimeManager().PastMedianTime(stagingArea, &tempHash) + if err != nil { + t.Fatalf("PastMedianTime: %+v", err) + } + + return pastMedianTime + 1 + } + + addBlockWithMinimumTime := func(parents ...*externalapi.DomainHash) (*externalapi.DomainBlock, *externalapi.DomainHash) { + minTime := minimumTime(parents...) + return addBlock(minTime, parents...) 
+ } + + tipHash := consensusConfig.GenesisHash + tip := consensusConfig.GenesisBlock + for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize; i++ { + tip, tipHash = addBlock(0, tipHash) + if tip.Header.Bits() != consensusConfig.GenesisBlock.Header.Bits() { + t.Fatalf("As long as the block blue score is less then the difficulty adjustment " + + "window size, the difficulty should be the same as genesis'") + } + } + for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+10; i++ { + tip, tipHash = addBlock(0, tipHash) + if tip.Header.Bits() != consensusConfig.GenesisBlock.Header.Bits() { + t.Fatalf("As long as the block rate remains the same, the difficulty shouldn't change") + } + } + + blockInThePast, tipHash := addBlockWithMinimumTime(tipHash) + if blockInThePast.Header.Bits() != tip.Header.Bits() { + t.Fatalf("The difficulty should only change when blockInThePast is in the past of a block") + } + tip = blockInThePast + + tip, tipHash = addBlock(0, tipHash) + if compareBits(tip.Header.Bits(), blockInThePast.Header.Bits()) >= 0 { + t.Fatalf("tip.bits should be smaller than blockInThePast.bits because blockInThePast increased the " + + "block rate, so the difficulty should increase as well") + } + + var expectedBits uint32 + switch consensusConfig.Name { + case dagconfig.TestnetParams.Name: + expectedBits = uint32(0x1e7f007a) + case dagconfig.DevnetParams.Name: + expectedBits = uint32(0x1f4e54ab) + case dagconfig.MainnetParams.Name: + expectedBits = uint32(0x1e7465aa) + } + + if tip.Header.Bits() != expectedBits { + t.Errorf("tip.bits was expected to be %x but got %x", expectedBits, tip.Header.Bits()) + } + + // Increase block rate to increase difficulty + for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize; i++ { + tip, tipHash = addBlockWithMinimumTime(tipHash) + tipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, tipHash, false) + if err != nil { + t.Fatalf("GHOSTDAGDataStore: %+v", err) + } + + 
selectedParentHeader, err := + tc.BlockHeaderStore().BlockHeader(tc.DatabaseContext(), stagingArea, tipGHOSTDAGData.SelectedParent()) + if err != nil { + t.Fatalf("BlockHeader: %+v", err) + } + + if compareBits(tip.Header.Bits(), selectedParentHeader.Bits()) > 0 { + t.Fatalf("Because we're increasing the block rate, the difficulty can't decrease") + } + } + + // Add blocks until difficulty stabilizes + lastBits := tip.Header.Bits() + sameBitsCount := 0 + for sameBitsCount < consensusConfig.DifficultyAdjustmentWindowSize+1 { + tip, tipHash = addBlock(0, tipHash) + if tip.Header.Bits() == lastBits { + sameBitsCount++ + } else { + lastBits = tip.Header.Bits() + sameBitsCount = 0 + } + } + + slowBlockTime := tip.Header.TimeInMilliseconds() + consensusConfig.TargetTimePerBlock.Milliseconds() + 1000 + slowBlock, tipHash := addBlock(slowBlockTime, tipHash) + if slowBlock.Header.Bits() != tip.Header.Bits() { + t.Fatalf("The difficulty should only change when slowBlock is in the past of a block") + } + + tip = slowBlock + + tip, tipHash = addBlock(0, tipHash) + if compareBits(tip.Header.Bits(), slowBlock.Header.Bits()) <= 0 { + t.Fatalf("tip.bits should be smaller than slowBlock.bits because slowBlock decreased the block" + + " rate, so the difficulty should decrease as well") + } + + // Here we create two chains: a chain of blue blocks, and a chain of red blocks with + // very low timestamps. Because the red blocks should be part of the difficulty + // window, their low timestamps should lower the difficulty, and we check it by + // comparing the bits of two blocks with the same blue score, one with the red + // blocks in its past and one without. 
+ splitBlockHash := tipHash + blueTipHash := splitBlockHash + for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize; i++ { + _, blueTipHash = addBlock(0, blueTipHash) + } + + redChainTipHash := splitBlockHash + const redChainLength = 10 + for i := 0; i < redChainLength; i++ { + _, redChainTipHash = addBlockWithMinimumTime(redChainTipHash) + } + tipWithRedPast, _ := addBlock(0, redChainTipHash, blueTipHash) + tipWithoutRedPast, _ := addBlock(0, blueTipHash) + if tipWithRedPast.Header.Bits() <= tipWithoutRedPast.Header.Bits() { + t.Fatalf("tipWithRedPast.bits should be greater than tipWithoutRedPast.bits because the red blocks" + + " blocks have very low timestamp and should lower the difficulty") + } + + // We repeat the test, but now we make the blue chain longer in order to filter + // out the red blocks from the window, and check that the red blocks don't + // affect the difficulty. + blueTipHash = splitBlockHash + for i := 0; i < consensusConfig.DifficultyAdjustmentWindowSize+redChainLength+1; i++ { + _, blueTipHash = addBlock(0, blueTipHash) + } + + redChainTipHash = splitBlockHash + for i := 0; i < redChainLength; i++ { + _, redChainTipHash = addBlockWithMinimumTime(redChainTipHash) + } + tipWithRedPast, _ = addBlock(0, redChainTipHash, blueTipHash) + tipWithoutRedPast, _ = addBlock(0, blueTipHash) + if tipWithRedPast.Header.Bits() != tipWithoutRedPast.Header.Bits() { + t.Fatalf("tipWithoutRedPast.bits should be the same as tipWithRedPast.bits because the red blocks" + + " are not part of the difficulty window") + } + }) +} + +func TestDAAScore(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.DifficultyAdjustmentWindowSize = 86 + + stagingArea := model.NewStagingArea() + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDAAScore") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer 
teardown(false) + + // We create a small DAG in order to skip from block with blue score of 1 directly to 3 + split1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + block, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + blockBlueScore3, _, err := tc.AddBlock([]*externalapi.DomainHash{split1Hash, block}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + tipHash := blockBlueScore3 + blockBlueScore3DAAScore, err := tc.DAABlocksStore().DAAScore(tc.DatabaseContext(), stagingArea, tipHash) + if err != nil { + t.Fatalf("DAAScore: %+v", err) + } + + blockBlueScore3ExpectedDAAScore := uint64(2) + consensusConfig.GenesisBlock.Header.DAAScore() + if blockBlueScore3DAAScore != blockBlueScore3ExpectedDAAScore { + t.Fatalf("DAA score is expected to be %d but got %d", blockBlueScore3ExpectedDAAScore, blockBlueScore3DAAScore) + } + tipDAAScore := blockBlueScore3ExpectedDAAScore + + for i := uint64(0); i < 10; i++ { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + tipDAAScore, err = tc.DAABlocksStore().DAAScore(tc.DatabaseContext(), stagingArea, tipHash) + if err != nil { + t.Fatalf("DAAScore: %+v", err) + } + + expectedDAAScore := blockBlueScore3ExpectedDAAScore + i + 1 + if tipDAAScore != expectedDAAScore { + t.Fatalf("DAA score is expected to be %d but got %d", expectedDAAScore, tipDAAScore) + } + } + + split2Hash := tipHash + split2DAAScore := tipDAAScore + for i := uint64(0); i < uint64(consensusConfig.DifficultyAdjustmentWindowSize)-1; i++ { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + tipDAAScore, err = tc.DAABlocksStore().DAAScore(tc.DatabaseContext(), stagingArea, tipHash) + if err != 
nil { + t.Fatalf("DAAScore: %+v", err) + } + + expectedDAAScore := split2DAAScore + i + 1 + if tipDAAScore != expectedDAAScore { + t.Fatalf("DAA score is expected to be %d but got %d", expectedDAAScore, split2DAAScore) + } + } + + // This block should have blue score of 2 so it shouldn't be added to the DAA window of a merging block + blockAboveSplit1, _, err := tc.AddBlock([]*externalapi.DomainHash{split1Hash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // This block is in the anticone of consensusConfig.DifficultyAdjustmentWindowSize-1 blocks, so it must be part + // of the DAA window of a merging block + blockAboveSplit2, _, err := tc.AddBlock([]*externalapi.DomainHash{split2Hash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + currentSelectedTipDAAScore := tipDAAScore + currentSelectedTip := tipHash + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{blockAboveSplit1, blockAboveSplit2, tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + tipDAAScore, err = tc.DAABlocksStore().DAAScore(tc.DatabaseContext(), stagingArea, tipHash) + if err != nil { + t.Fatalf("DAAScore: %+v", err) + } + + // The DAA score should be increased only by 2, because 1 of the 3 merged blocks + // is not in the DAA window + expectedDAAScore := currentSelectedTipDAAScore + 2 + if tipDAAScore != expectedDAAScore { + t.Fatalf("DAA score is expected to be %d but got %d", expectedDAAScore, tipDAAScore) + } + + tipDAAAddedBlocks, err := tc.DAABlocksStore().DAAAddedBlocks(tc.DatabaseContext(), stagingArea, tipHash) + if err != nil { + t.Fatalf("DAAScore: %+v", err) + } + + // blockAboveSplit1 should be excluded from the DAA added blocks because it's not in the tip's + // DAA window. 
+ expectedDAABlocks := []*externalapi.DomainHash{blockAboveSplit2, currentSelectedTip} + if !externalapi.HashesEqual(tipDAAAddedBlocks, expectedDAABlocks) { + t.Fatalf("DAA added blocks are expected to be %s but got %s", expectedDAABlocks, tipDAAAddedBlocks) + } + }) +} + +func compareBits(a uint32, b uint32) int { + aTarget := difficulty.CompactToBig(a) + bTarget := difficulty.CompactToBig(b) + return aTarget.Cmp(bTarget) +} diff --git a/domain/consensus/processes/difficultymanager/hashrate.go b/domain/consensus/processes/difficultymanager/hashrate.go new file mode 100644 index 0000000..f7aedd8 --- /dev/null +++ b/domain/consensus/processes/difficultymanager/hashrate.go @@ -0,0 +1,74 @@ +package difficultymanager + +import ( + "math/big" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func (dm *difficultyManager) EstimateNetworkHashesPerSecond(startHash *externalapi.DomainHash, windowSize int) (uint64, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "EstimateNetworkHashesPerSecond") + defer onEnd() + + stagingArea := model.NewStagingArea() + return dm.estimateNetworkHashesPerSecond(stagingArea, startHash, windowSize) +} + +func (dm *difficultyManager) estimateNetworkHashesPerSecond(stagingArea *model.StagingArea, + startHash *externalapi.DomainHash, windowSize int) (uint64, error) { + + const minWindowSize = 1000 + if windowSize < minWindowSize { + return 0, errors.Errorf("windowSize must be equal to or greater than %d", minWindowSize) + } + + blockWindow, windowHashes, err := dm.blockWindow(stagingArea, startHash, windowSize) + if err != nil { + return 0, err + } + + // return 0 if no blocks had been mined yet + if len(windowHashes) == 0 { + return 0, nil + } + + minWindowTimestamp, maxWindowTimestamp, _ := blockWindow.minMaxTimestamps() + if minWindowTimestamp == 
maxWindowTimestamp { + return 0, errors.Errorf("min window timestamp is equal to the max window timestamp") + } + + firstHash := windowHashes[0] + firstBlockGHOSTDAGData, err := dm.ghostdagStore.Get(dm.databaseContext, stagingArea, firstHash, false) + if err != nil { + return 0, err + } + firstBlockBlueWork := firstBlockGHOSTDAGData.BlueWork() + minWindowBlueWork := firstBlockBlueWork + maxWindowBlueWork := firstBlockBlueWork + for _, hash := range windowHashes[1:] { + blockGHOSTDAGData, err := dm.ghostdagStore.Get(dm.databaseContext, stagingArea, hash, false) + if err != nil { + return 0, err + } + blockBlueWork := blockGHOSTDAGData.BlueWork() + if blockBlueWork.Cmp(minWindowBlueWork) < 0 { + minWindowBlueWork = blockBlueWork + } + if blockBlueWork.Cmp(maxWindowBlueWork) > 0 { + maxWindowBlueWork = blockBlueWork + } + } + + windowsDiff := (maxWindowTimestamp - minWindowTimestamp) / 1000 // Divided by 1000 to convert milliseconds to seconds + if windowsDiff == 0 { + return 0, nil + } + + nominator := new(big.Int).Sub(maxWindowBlueWork, minWindowBlueWork) + denominator := big.NewInt(windowsDiff) + networkHashesPerSecondBigInt := new(big.Int).Div(nominator, denominator) + return networkHashesPerSecondBigInt.Uint64(), nil +} diff --git a/domain/consensus/processes/difficultymanager/log.go b/domain/consensus/processes/difficultymanager/log.go new file mode 100644 index 0000000..25f4f9f --- /dev/null +++ b/domain/consensus/processes/difficultymanager/log.go @@ -0,0 +1,7 @@ +package difficultymanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("DAA") diff --git a/domain/consensus/processes/finalitymanager/finality_manager.go b/domain/consensus/processes/finalitymanager/finality_manager.go new file mode 100644 index 0000000..3007e04 --- /dev/null +++ b/domain/consensus/processes/finalitymanager/finality_manager.go @@ -0,0 +1,158 @@ +package finalitymanager + +import ( + "errors" + + 
"github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type finalityManager struct { + databaseContext model.DBReader + dagTopologyManager model.DAGTopologyManager + finalityStore model.FinalityStore + ghostdagDataStore model.GHOSTDAGDataStore + pruningStore model.PruningStore + genesisHash *externalapi.DomainHash + finalityDepth uint64 +} + +// New instantiates a new FinalityManager +func New(databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + finalityStore model.FinalityStore, + ghostdagDataStore model.GHOSTDAGDataStore, + pruningStore model.PruningStore, + genesisHash *externalapi.DomainHash, + finalityDepth uint64) model.FinalityManager { + + return &finalityManager{ + databaseContext: databaseContext, + genesisHash: genesisHash, + dagTopologyManager: dagTopologyManager, + finalityStore: finalityStore, + ghostdagDataStore: ghostdagDataStore, + pruningStore: pruningStore, + finalityDepth: finalityDepth, + } +} + +func (fm *finalityManager) VirtualFinalityPoint(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + log.Tracef("virtualFinalityPoint start") + defer log.Tracef("virtualFinalityPoint end") + + virtualFinalityPoint, err := fm.calculateFinalityPoint(stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + log.Debugf("The current virtual finality block is: %s", virtualFinalityPoint) + + return virtualFinalityPoint, nil +} + +func (fm *finalityManager) FinalityPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (*externalapi.DomainHash, error) { + log.Tracef("FinalityPoint start") + defer log.Tracef("FinalityPoint end") + if blockHash.Equal(model.VirtualBlockHash) { + return fm.VirtualFinalityPoint(stagingArea) + } + finalityPoint, err := 
fm.finalityStore.FinalityPoint(fm.databaseContext, stagingArea, blockHash) + if err != nil { + log.Debugf("%s finality point not found in store - calculating", blockHash) + if errors.Is(err, database.ErrNotFound) { + return fm.calculateAndStageFinalityPoint(stagingArea, blockHash, isBlockWithTrustedData) + } + return nil, err + } + return finalityPoint, nil +} + +func (fm *finalityManager) calculateAndStageFinalityPoint( + stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (*externalapi.DomainHash, error) { + + finalityPoint, err := fm.calculateFinalityPoint(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return nil, err + } + fm.finalityStore.StageFinalityPoint(stagingArea, blockHash, finalityPoint) + return finalityPoint, nil +} + +func (fm *finalityManager) calculateFinalityPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) ( + *externalapi.DomainHash, error) { + + log.Tracef("calculateFinalityPoint start") + defer log.Tracef("calculateFinalityPoint end") + + if isBlockWithTrustedData { + return model.VirtualGenesisBlockHash, nil + } + + ghostdagData, err := fm.ghostdagDataStore.Get(fm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, err + } + + if ghostdagData.BlueScore() < fm.finalityDepth { + log.Debugf("%s blue score lower then finality depth - returning genesis as finality point", blockHash) + return fm.genesisHash, nil + } + + pruningPoint, err := fm.pruningStore.PruningPoint(fm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + pruningPointGhostdagData, err := fm.ghostdagDataStore.Get(fm.databaseContext, stagingArea, pruningPoint, false) + if err != nil { + return nil, err + } + if ghostdagData.BlueScore() < pruningPointGhostdagData.BlueScore()+fm.finalityDepth { + log.Debugf("%s blue score less than finality distance over pruning point - returning virtual genesis as finality point", 
blockHash) + return model.VirtualGenesisBlockHash, nil + } + isPruningPointOnChain, err := fm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, pruningPoint, blockHash) + if err != nil { + return nil, err + } + if !isPruningPointOnChain { + log.Debugf("pruning point not in selected chain of %s - returning virtual genesis as finality point", blockHash) + return model.VirtualGenesisBlockHash, nil + } + + selectedParent := ghostdagData.SelectedParent() + if selectedParent.Equal(fm.genesisHash) { + return fm.genesisHash, nil + } + + current, err := fm.finalityStore.FinalityPoint(fm.databaseContext, stagingArea, ghostdagData.SelectedParent()) + if err != nil { + return nil, err + } + // In this case we expect the pruning point or a block above it to be the finality point. + // Note that above we already verified the chain and distance conditions for this + if current.Equal(model.VirtualGenesisBlockHash) { + current = pruningPoint + } + + requiredBlueScore := ghostdagData.BlueScore() - fm.finalityDepth + log.Debugf("%s's finality point is the one having the highest blue score lower then %d", blockHash, requiredBlueScore) + + var next *externalapi.DomainHash + for { + next, err = fm.dagTopologyManager.ChildInSelectedParentChainOf(stagingArea, current, blockHash) + if err != nil { + return nil, err + } + nextGHOSTDAGData, err := fm.ghostdagDataStore.Get(fm.databaseContext, stagingArea, next, false) + if err != nil { + return nil, err + } + if nextGHOSTDAGData.BlueScore() >= requiredBlueScore { + log.Debugf("%s's finality point is %s", blockHash, current) + return current, nil + } + + current = next + } +} diff --git a/domain/consensus/processes/finalitymanager/log.go b/domain/consensus/processes/finalitymanager/log.go new file mode 100644 index 0000000..2372bb8 --- /dev/null +++ b/domain/consensus/processes/finalitymanager/log.go @@ -0,0 +1,7 @@ +package finalitymanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = 
logger.RegisterSubSystem("BDAG") diff --git a/domain/consensus/processes/ghostdag2/ghostdagimpl.go b/domain/consensus/processes/ghostdag2/ghostdagimpl.go new file mode 100644 index 0000000..926f6d6 --- /dev/null +++ b/domain/consensus/processes/ghostdag2/ghostdagimpl.go @@ -0,0 +1,416 @@ +package ghostdag2 + +import ( + "sort" + + "github.com/spectre-project/spectred/util/difficulty" + + "math/big" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type ghostdagHelper struct { + k externalapi.KType + dataStore model.GHOSTDAGDataStore + dbAccess model.DBReader + dagTopologyManager model.DAGTopologyManager + headerStore model.BlockHeaderStore +} + +// New creates a new instance of this alternative ghostdag impl +func New( + databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + ghostdagDataStore model.GHOSTDAGDataStore, + headerStore model.BlockHeaderStore, + k externalapi.KType, + genesisHash *externalapi.DomainHash) model.GHOSTDAGManager { + + return &ghostdagHelper{ + dbAccess: databaseContext, + dagTopologyManager: dagTopologyManager, + dataStore: ghostdagDataStore, + headerStore: headerStore, + k: k, + } +} + +/* --------------------------------------------- */ + +func (gh *ghostdagHelper) GHOSTDAG(stagingArea *model.StagingArea, blockCandidate *externalapi.DomainHash) error { + myWork := new(big.Int) + maxWork := new(big.Int) + var myScore uint64 + var spScore uint64 + /* find the selectedParent */ + blockParents, err := gh.dagTopologyManager.Parents(stagingArea, blockCandidate) + if err != nil { + return err + } + var selectedParent = blockParents[0] + for _, parent := range blockParents { + blockData, err := gh.dataStore.Get(gh.dbAccess, stagingArea, parent, false) + if err != nil { + return err + } + blockWork := blockData.BlueWork() + blockScore := blockData.BlueScore() + if blockWork.Cmp(maxWork) == 1 { + selectedParent = parent + 
maxWork = blockWork + spScore = blockScore + } + if blockWork.Cmp(maxWork) == 0 && ismoreHash(parent, selectedParent) { + selectedParent = parent + maxWork = blockWork + spScore = blockScore + } + } + myWork.Set(maxWork) + myScore = spScore + + /* Goal: iterate blockCandidate's mergeSet and divide it to : blue, blues, reds. */ + var mergeSetBlues = make([]*externalapi.DomainHash, 0) + var mergeSetReds = make([]*externalapi.DomainHash, 0) + var blueSet = make([]*externalapi.DomainHash, 0) + + mergeSetBlues = append(mergeSetBlues, selectedParent) + + mergeSetArr, err := gh.findMergeSet(stagingArea, blockParents, selectedParent) + if err != nil { + return err + } + + err = gh.sortByBlueWork(stagingArea, mergeSetArr) + if err != nil { + return err + } + err = gh.findBlueSet(stagingArea, &blueSet, selectedParent) + if err != nil { + return err + } + + for _, mergeSetBlock := range mergeSetArr { + if mergeSetBlock.Equal(selectedParent) { + if !contains(selectedParent, mergeSetBlues) { + mergeSetBlues = append(mergeSetBlues, selectedParent) + blueSet = append(blueSet, selectedParent) + } + continue + } + err := gh.divideBlueRed(stagingArea, selectedParent, mergeSetBlock, &mergeSetBlues, &mergeSetReds, &blueSet) + if err != nil { + return err + } + } + myScore += uint64(len(mergeSetBlues)) + + // We add up all the *work*(not blueWork) that all our blues and selected parent did + for _, blue := range mergeSetBlues { + header, err := gh.headerStore.BlockHeader(gh.dbAccess, stagingArea, blue) + if err != nil { + return err + } + myWork.Add(myWork, difficulty.CalcWork(header.Bits())) + } + + e := externalapi.NewBlockGHOSTDAGData(myScore, myWork, selectedParent, mergeSetBlues, mergeSetReds, nil) + gh.dataStore.Stage(stagingArea, blockCandidate, e, false) + return nil +} + +/* --------isMoreHash(w, selectedParent)----------------*/ +func ismoreHash(parent *externalapi.DomainHash, selectedParent *externalapi.DomainHash) bool { + parentByteArray := parent.ByteArray() + 
selectedParentByteArray := selectedParent.ByteArray() + //Check if parentHash is more then selectedParentHash + for i := 0; i < len(parentByteArray); i++ { + switch { + case parentByteArray[i] < selectedParentByteArray[i]: + return false + case parentByteArray[i] > selectedParentByteArray[i]: + return true + } + } + return false +} + +/* 1. blue = selectedParent.blue + blues + 2. not connected to at most K blocks (from the blue group) + 3. for each block at blue , check if not destroy +*/ + +/* ---------------divideBluesReds--------------------- */ +func (gh *ghostdagHelper) divideBlueRed(stagingArea *model.StagingArea, + selectedParent *externalapi.DomainHash, desiredBlock *externalapi.DomainHash, + blues *[]*externalapi.DomainHash, reds *[]*externalapi.DomainHash, blueSet *[]*externalapi.DomainHash) error { + + var k = int(gh.k) + counter := 0 + + var suspectsBlues = make([]*externalapi.DomainHash, 0) + isMergeBlue := true + //check that not-connected to at most k. + for _, block := range *blueSet { + isAnticone, err := gh.isAnticone(stagingArea, block, desiredBlock) + if err != nil { + return err + } + if isAnticone { + counter++ + suspectsBlues = append(suspectsBlues, block) + } + if counter > k { + isMergeBlue = false + break + } + } + if !isMergeBlue { + if !contains(desiredBlock, *reds) { + *reds = append(*reds, desiredBlock) + } + return nil + } + + // check that the k-cluster of each blue is still valid. 
+ for _, blue := range suspectsBlues { + isDestroyed, err := gh.checkIfDestroy(stagingArea, blue, blueSet) + if err != nil { + return err + } + if isDestroyed { + isMergeBlue = false + break + } + } + if !isMergeBlue { + if !contains(desiredBlock, *reds) { + *reds = append(*reds, desiredBlock) + } + return nil + } + if !contains(desiredBlock, *blues) { + *blues = append(*blues, desiredBlock) + } + if !contains(desiredBlock, *blueSet) { + *blueSet = append(*blueSet, desiredBlock) + } + return nil +} + +/* ---------------isAnticone-------------------------- */ +func (gh *ghostdagHelper) isAnticone(stagingArea *model.StagingArea, blockA, blockB *externalapi.DomainHash) (bool, error) { + isAAncestorOfAB, err := gh.dagTopologyManager.IsAncestorOf(stagingArea, blockA, blockB) + if err != nil { + return false, err + } + isBAncestorOfA, err := gh.dagTopologyManager.IsAncestorOf(stagingArea, blockB, blockA) + if err != nil { + return false, err + } + return !isAAncestorOfAB && !isBAncestorOfA, nil + +} + +/* ----------------validateKCluster------------------- */ +func (gh *ghostdagHelper) validateKCluster(stagingArea *model.StagingArea, chain *externalapi.DomainHash, + checkedBlock *externalapi.DomainHash, counter *int, blueSet *[]*externalapi.DomainHash) (bool, error) { + + var k = int(gh.k) + isAnticone, err := gh.isAnticone(stagingArea, chain, checkedBlock) + if err != nil { + return false, err + } + if isAnticone { + if *counter > k { + return false, nil + } + ifDestroy, err := gh.checkIfDestroy(stagingArea, chain, blueSet) + if err != nil { + return false, err + } + if ifDestroy { + return false, nil + } + *counter++ + return true, nil + } + isAncestorOf, err := gh.dagTopologyManager.IsAncestorOf(stagingArea, checkedBlock, chain) + if err != nil { + return false, err + } + if isAncestorOf { + dataStore, err := gh.BlockData(stagingArea, chain) + if err != nil { + return false, err + } + if mergeSetReds := dataStore.MergeSetReds(); contains(checkedBlock, mergeSetReds) { 
+ return false, nil + } + } else { + return true, nil + } + + return false, nil +} + +/*----------------contains-------------------------- */ +func contains(item *externalapi.DomainHash, items []*externalapi.DomainHash) bool { + for _, r := range items { + if r.Equal(item) { + return true + } + } + return false +} + +/* ----------------checkIfDestroy------------------- */ +/* find number of not-connected in his blue*/ +func (gh *ghostdagHelper) checkIfDestroy(stagingArea *model.StagingArea, blockBlue *externalapi.DomainHash, + blueSet *[]*externalapi.DomainHash) (bool, error) { + + // Goal: check that the K-cluster of each block in the blueSet is not destroyed when adding the block to the mergeSet. + var k = int(gh.k) + counter := 0 + for _, blue := range *blueSet { + isAnticone, err := gh.isAnticone(stagingArea, blue, blockBlue) + if err != nil { + return true, err + } + if isAnticone { + counter++ + } + if counter > k { + return true, nil + } + } + return false, nil +} + +/* ----------------findMergeSet------------------- */ +func (gh *ghostdagHelper) findMergeSet(stagingArea *model.StagingArea, parents []*externalapi.DomainHash, + selectedParent *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + allMergeSet := make([]*externalapi.DomainHash, 0) + blockQueue := make([]*externalapi.DomainHash, 0) + for _, parent := range parents { + if !contains(parent, blockQueue) { + blockQueue = append(blockQueue, parent) + } + + } + for len(blockQueue) > 0 { + block := blockQueue[0] + blockQueue = blockQueue[1:] + if selectedParent.Equal(block) { + if !contains(block, allMergeSet) { + allMergeSet = append(allMergeSet, block) + } + continue + } + isancestorOf, err := gh.dagTopologyManager.IsAncestorOf(stagingArea, block, selectedParent) + if err != nil { + return nil, err + } + if isancestorOf { + continue + } + if !contains(block, allMergeSet) { + allMergeSet = append(allMergeSet, block) + } + err = gh.insertParent(stagingArea, block, &blockQueue) + if err != 
nil { + return nil, err + } + + } + return allMergeSet, nil +} + +/* ----------------insertParent------------------- */ +/* Insert all parents to the queue*/ +func (gh *ghostdagHelper) insertParent(stagingArea *model.StagingArea, child *externalapi.DomainHash, + queue *[]*externalapi.DomainHash) error { + + parents, err := gh.dagTopologyManager.Parents(stagingArea, child) + if err != nil { + return err + } + for _, parent := range parents { + if contains(parent, *queue) { + continue + } + *queue = append(*queue, parent) + } + return nil +} + +/* ----------------findBlueSet------------------- */ +func (gh *ghostdagHelper) findBlueSet(stagingArea *model.StagingArea, blueSet *[]*externalapi.DomainHash, selectedParent *externalapi.DomainHash) error { + for selectedParent != nil { + if !contains(selectedParent, *blueSet) { + *blueSet = append(*blueSet, selectedParent) + } + blockData, err := gh.dataStore.Get(gh.dbAccess, stagingArea, selectedParent, false) + if err != nil { + return err + } + mergeSetBlue := blockData.MergeSetBlues() + for _, blue := range mergeSetBlue { + if contains(blue, *blueSet) { + continue + } + *blueSet = append(*blueSet, blue) + } + selectedParent = blockData.SelectedParent() + } + return nil +} + +/* ----------------sortByBlueScore------------------- */ +func (gh *ghostdagHelper) sortByBlueWork(stagingArea *model.StagingArea, arr []*externalapi.DomainHash) error { + + var err error = nil + sort.Slice(arr, func(i, j int) bool { + + blockLeft, error := gh.dataStore.Get(gh.dbAccess, stagingArea, arr[i], false) + if error != nil { + err = error + return false + } + + blockRight, error := gh.dataStore.Get(gh.dbAccess, stagingArea, arr[j], false) + if error != nil { + err = error + return false + } + + if blockLeft.BlueWork().Cmp(blockRight.BlueWork()) == -1 { + return true + } + if blockLeft.BlueWork().Cmp(blockRight.BlueWork()) == 0 { + return ismoreHash(arr[j], arr[i]) + } + return false + }) + return err +} + +/* 
--------------------------------------------- */ + +func (gh *ghostdagHelper) BlockData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.BlockGHOSTDAGData, error) { + return gh.dataStore.Get(gh.dbAccess, stagingArea, blockHash, false) +} +func (gh *ghostdagHelper) ChooseSelectedParent(stagingArea *model.StagingArea, blockHashes ...*externalapi.DomainHash) (*externalapi.DomainHash, error) { + panic("implement me") +} + +func (gh *ghostdagHelper) Less(blockHashA *externalapi.DomainHash, ghostdagDataA *externalapi.BlockGHOSTDAGData, blockHashB *externalapi.DomainHash, ghostdagDataB *externalapi.BlockGHOSTDAGData) bool { + panic("implement me") +} + +func (gh *ghostdagHelper) GetSortedMergeSet(*model.StagingArea, *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + panic("implement me") +} diff --git a/domain/consensus/processes/ghostdagmanager/compare.go b/domain/consensus/processes/ghostdagmanager/compare.go new file mode 100644 index 0000000..2cb13a2 --- /dev/null +++ b/domain/consensus/processes/ghostdagmanager/compare.go @@ -0,0 +1,69 @@ +package ghostdagmanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (gm *ghostdagManager) findSelectedParent(stagingArea *model.StagingArea, parentHashes []*externalapi.DomainHash) ( + *externalapi.DomainHash, error) { + + var selectedParent *externalapi.DomainHash + for _, hash := range parentHashes { + if selectedParent == nil { + selectedParent = hash + continue + } + isHashBiggerThanSelectedParent, err := gm.less(stagingArea, selectedParent, hash) + if err != nil { + return nil, err + } + if isHashBiggerThanSelectedParent { + selectedParent = hash + } + } + return selectedParent, nil +} + +func (gm *ghostdagManager) less(stagingArea *model.StagingArea, blockHashA, blockHashB *externalapi.DomainHash) (bool, error) { + chosenSelectedParent, err := 
gm.ChooseSelectedParent(stagingArea, blockHashA, blockHashB) + if err != nil { + return false, err + } + return chosenSelectedParent == blockHashB, nil +} + +func (gm *ghostdagManager) ChooseSelectedParent(stagingArea *model.StagingArea, blockHashes ...*externalapi.DomainHash) (*externalapi.DomainHash, error) { + selectedParent := blockHashes[0] + selectedParentGHOSTDAGData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, selectedParent, false) + if err != nil { + return nil, err + } + for _, blockHash := range blockHashes { + blockGHOSTDAGData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, err + } + + if gm.Less(selectedParent, selectedParentGHOSTDAGData, blockHash, blockGHOSTDAGData) { + selectedParent = blockHash + selectedParentGHOSTDAGData = blockGHOSTDAGData + } + } + + return selectedParent, nil +} + +func (gm *ghostdagManager) Less(blockHashA *externalapi.DomainHash, ghostdagDataA *externalapi.BlockGHOSTDAGData, + blockHashB *externalapi.DomainHash, ghostdagDataB *externalapi.BlockGHOSTDAGData) bool { + switch ghostdagDataA.BlueWork().Cmp(ghostdagDataB.BlueWork()) { + case -1: + return true + case 1: + return false + case 0: + return blockHashA.Less(blockHashB) + default: + panic("big.Int.Cmp is defined to always return -1/1/0 and nothing else") + } +} diff --git a/domain/consensus/processes/ghostdagmanager/ghostdag.go b/domain/consensus/processes/ghostdagmanager/ghostdag.go new file mode 100644 index 0000000..48a9993 --- /dev/null +++ b/domain/consensus/processes/ghostdagmanager/ghostdag.go @@ -0,0 +1,269 @@ +package ghostdagmanager + +import ( + "math/big" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/difficulty" +) + +type blockGHOSTDAGData struct { + blueScore uint64 + blueWork *big.Int + selectedParent 
*externalapi.DomainHash + mergeSetBlues []*externalapi.DomainHash + mergeSetReds []*externalapi.DomainHash + bluesAnticoneSizes map[externalapi.DomainHash]externalapi.KType +} + +func (bg *blockGHOSTDAGData) toModel() *externalapi.BlockGHOSTDAGData { + return externalapi.NewBlockGHOSTDAGData(bg.blueScore, bg.blueWork, bg.selectedParent, bg.mergeSetBlues, bg.mergeSetReds, bg.bluesAnticoneSizes) +} + +// GHOSTDAG runs the GHOSTDAG protocol and calculates the block BlockGHOSTDAGData by the given parents. +// The function calculates MergeSetBlues by iterating over the blocks in +// the anticone of the new block selected parent (which is the parent with the +// highest blue score) and adds any block to newNode.blues if by adding +// it to MergeSetBlues these conditions will not be violated: +// +// 1) |anticone-of-candidate-block ∩ blue-set-of-newBlock| ≤ K +// +// 2. For every blue block in blue-set-of-newBlock: +// |(anticone-of-blue-block ∩ blue-set-newBlock) ∪ {candidate-block}| ≤ K. +// We validate this condition by maintaining a map BluesAnticoneSizes for +// each block which holds all the blue anticone sizes that were affected by +// the new added blue blocks. +// So to find out what is |anticone-of-blue ∩ blue-set-of-newBlock| we just iterate in +// the selected parent chain of the new block until we find an existing entry in +// BluesAnticoneSizes. 
+// +// For further details see the article https://eprint.iacr.org/2018/104.pdf +func (gm *ghostdagManager) GHOSTDAG(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error { + newBlockData := &blockGHOSTDAGData{ + blueWork: new(big.Int), + mergeSetBlues: make([]*externalapi.DomainHash, 0), + mergeSetReds: make([]*externalapi.DomainHash, 0), + bluesAnticoneSizes: make(map[externalapi.DomainHash]externalapi.KType), + } + + blockParents, err := gm.dagTopologyManager.Parents(stagingArea, blockHash) + if err != nil { + return err + } + + isGenesis := len(blockParents) == 0 + if !isGenesis { + selectedParent, err := gm.findSelectedParent(stagingArea, blockParents) + if err != nil { + return err + } + + newBlockData.selectedParent = selectedParent + newBlockData.mergeSetBlues = append(newBlockData.mergeSetBlues, selectedParent) + newBlockData.bluesAnticoneSizes[*selectedParent] = 0 + } + + mergeSetWithoutSelectedParent, err := gm.mergeSetWithoutSelectedParent( + stagingArea, newBlockData.selectedParent, blockParents) + if err != nil { + return err + } + + for _, blueCandidate := range mergeSetWithoutSelectedParent { + isBlue, candidateAnticoneSize, candidateBluesAnticoneSizes, err := gm.checkBlueCandidate( + stagingArea, newBlockData.toModel(), blueCandidate) + if err != nil { + return err + } + + if isBlue { + // No k-cluster violation found, we can now set the candidate block as blue + newBlockData.mergeSetBlues = append(newBlockData.mergeSetBlues, blueCandidate) + newBlockData.bluesAnticoneSizes[*blueCandidate] = candidateAnticoneSize + for blue, blueAnticoneSize := range candidateBluesAnticoneSizes { + newBlockData.bluesAnticoneSizes[blue] = blueAnticoneSize + 1 + } + } else { + newBlockData.mergeSetReds = append(newBlockData.mergeSetReds, blueCandidate) + } + } + + if !isGenesis { + selectedParentGHOSTDAGData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, newBlockData.selectedParent, false) + if err != nil { + return err + } + 
newBlockData.blueScore = selectedParentGHOSTDAGData.BlueScore() + uint64(len(newBlockData.mergeSetBlues)) + // We inherit the bluework from the selected parent + newBlockData.blueWork.Set(selectedParentGHOSTDAGData.BlueWork()) + // Then we add up all the *work*(not blueWork) that all of newBlock merge set blues and selected parent did + for _, blue := range newBlockData.mergeSetBlues { + // We don't count the work of the virtual genesis + if blue.Equal(model.VirtualGenesisBlockHash) { + continue + } + + header, err := gm.headerStore.BlockHeader(gm.databaseContext, stagingArea, blue) + if err != nil { + return err + } + newBlockData.blueWork.Add(newBlockData.blueWork, difficulty.CalcWork(header.Bits())) + } + } else { + // Genesis's blue score is defined to be 0. + newBlockData.blueScore = 0 + newBlockData.blueWork.SetUint64(0) + } + + gm.ghostdagDataStore.Stage(stagingArea, blockHash, newBlockData.toModel(), false) + + return nil +} + +type chainBlockData struct { + hash *externalapi.DomainHash + blockData *externalapi.BlockGHOSTDAGData +} + +func (gm *ghostdagManager) checkBlueCandidate(stagingArea *model.StagingArea, newBlockData *externalapi.BlockGHOSTDAGData, + blueCandidate *externalapi.DomainHash) (isBlue bool, candidateAnticoneSize externalapi.KType, + candidateBluesAnticoneSizes map[externalapi.DomainHash]externalapi.KType, err error) { + + // The maximum length of node.blues can be K+1 because + // it contains the selected parent. + if externalapi.KType(len(newBlockData.MergeSetBlues())) == gm.k+1 { + return false, 0, nil, nil + } + + candidateBluesAnticoneSizes = make(map[externalapi.DomainHash]externalapi.KType, gm.k) + + // Iterate over all blocks in the blue set of newNode that are not in the past + // of blueCandidate, and check for each one of them if blueCandidate potentially + // enlarges their blue anticone to be over K, or that they enlarge the blue anticone + // of blueCandidate to be over K. 
+ chainBlock := chainBlockData{ + blockData: newBlockData, + } + + for { + isBlue, isRed, err := gm.checkBlueCandidateWithChainBlock(stagingArea, newBlockData, chainBlock, blueCandidate, + candidateBluesAnticoneSizes, &candidateAnticoneSize) + if err != nil { + return false, 0, nil, err + } + + if isBlue { + break + } + + if isRed { + return false, 0, nil, nil + } + + selectedParentGHOSTDAGData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, chainBlock.blockData.SelectedParent(), false) + if err != nil { + return false, 0, nil, err + } + + chainBlock = chainBlockData{hash: chainBlock.blockData.SelectedParent(), + blockData: selectedParentGHOSTDAGData, + } + } + + return true, candidateAnticoneSize, candidateBluesAnticoneSizes, nil +} + +func (gm *ghostdagManager) checkBlueCandidateWithChainBlock(stagingArea *model.StagingArea, + newBlockData *externalapi.BlockGHOSTDAGData, chainBlock chainBlockData, blueCandidate *externalapi.DomainHash, + candidateBluesAnticoneSizes map[externalapi.DomainHash]externalapi.KType, + candidateAnticoneSize *externalapi.KType) (isBlue, isRed bool, err error) { + + // If blueCandidate is in the future of chainBlock, it means + // that all remaining blues are in the past of chainBlock and thus + // in the past of blueCandidate. In this case we know for sure that + // the anticone of blueCandidate will not exceed K, and we can mark + // it as blue. + // + // The new block is always in the future of blueCandidate, so there's + // no point in checking it. + + // We check if chainBlock is not the new block by checking if it has a hash. + if chainBlock.hash != nil { + isAncestorOfBlueCandidate, err := gm.dagTopologyManager.IsAncestorOf(stagingArea, chainBlock.hash, blueCandidate) + if err != nil { + return false, false, err + } + if isAncestorOfBlueCandidate { + return true, false, nil + } + } + + for _, block := range chainBlock.blockData.MergeSetBlues() { + // Skip blocks that exist in the past of blueCandidate. 
+ isAncestorOfBlueCandidate, err := gm.dagTopologyManager.IsAncestorOf(stagingArea, block, blueCandidate) + if err != nil { + return false, false, err + } + + if isAncestorOfBlueCandidate { + continue + } + + candidateBluesAnticoneSizes[*block], err = gm.blueAnticoneSize(stagingArea, block, newBlockData) + if err != nil { + return false, false, err + } + *candidateAnticoneSize++ + + if *candidateAnticoneSize > gm.k { + // k-cluster violation: The candidate's blue anticone exceeded k + return false, true, nil + } + + if candidateBluesAnticoneSizes[*block] == gm.k { + // k-cluster violation: A block in candidate's blue anticone already + // has k blue blocks in its own anticone + return false, true, nil + } + + // This is a sanity check that validates that a blue + // block's blue anticone is not already larger than K. + if candidateBluesAnticoneSizes[*block] > gm.k { + return false, false, errors.New("found blue anticone size larger than k") + } + } + + return false, false, nil +} + +// blueAnticoneSize returns the blue anticone size of 'block' from the worldview of 'context'. 
+// Expects 'block' to be in the blue set of 'context' +func (gm *ghostdagManager) blueAnticoneSize(stagingArea *model.StagingArea, + block *externalapi.DomainHash, context *externalapi.BlockGHOSTDAGData) (externalapi.KType, error) { + + isTrustedData := false + for current := context; current != nil; { + if blueAnticoneSize, ok := current.BluesAnticoneSizes()[*block]; ok { + return blueAnticoneSize, nil + } + if current.SelectedParent().Equal(gm.genesisHash) { + break + } + + var err error + current, err = gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, current.SelectedParent(), isTrustedData) + if err != nil { + return 0, err + } + if current.SelectedParent().Equal(model.VirtualGenesisBlockHash) { + isTrustedData = true + current, err = gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, current.SelectedParent(), isTrustedData) + if err != nil { + return 0, err + } + } + } + return 0, errors.Errorf("block %s is not in blue set of the given context", block) +} diff --git a/domain/consensus/processes/ghostdagmanager/ghostdag_test.go b/domain/consensus/processes/ghostdagmanager/ghostdag_test.go new file mode 100644 index 0000000..95501c3 --- /dev/null +++ b/domain/consensus/processes/ghostdagmanager/ghostdag_test.go @@ -0,0 +1,460 @@ +package ghostdagmanager_test + +import ( + "encoding/json" + "math" + "math/big" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/processes/ghostdag2" + "github.com/spectre-project/spectred/domain/consensus/processes/ghostdagmanager" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + 
"github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/util/difficulty" +) + +type block struct { + ID string `json:"ID"` + Score uint64 `json:"ExpectedScore"` + SelectedParent string `json:"ExpectedSelectedParent"` + MergeSetReds []string `json:"ExpectedReds"` + MergeSetBlues []string `json:"ExpectedBlues"` + Parents []string `json:"Parents"` +} + +// json struct: +type testDag struct { + K externalapi.KType `json:"K"` + GenesisID string `json:"GenesisID"` + ExpectedMergeSetReds []string `json:"ExpectedReds"` + Blocks []block `json:"Blocks"` +} + +type implManager struct { + function func( + databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + ghostdagDataStore model.GHOSTDAGDataStore, + headerStore model.BlockHeaderStore, + k externalapi.KType, + genesisHash *externalapi.DomainHash) model.GHOSTDAGManager + implName string +} + +// TestGHOSTDAG iterates over several dag simulations, and checks +// that the blue score, blue set and selected parent of each +// block are calculated as expected. 
+func TestGHOSTDAG(t *testing.T) { + //NOTE: FOR ADDING/REMOVING AN IMPLEMENTATION CHANGE BELOW: + implementationFactories := []implManager{ + {ghostdagmanager.New, "Original"}, + {ghostdag2.New, "Tal's impl"}, + } + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + dagTopology := &DAGTopologyManagerImpl{ + parentsMap: make(map[externalapi.DomainHash][]*externalapi.DomainHash), + } + + ghostdagDataStore := &GHOSTDAGDataStoreImpl{ + dagMap: make(map[externalapi.DomainHash]*externalapi.BlockGHOSTDAGData), + } + + blockHeadersStore := &blockHeadersStore{ + dagMap: make(map[externalapi.DomainHash]externalapi.BlockHeader), + } + + blockGHOSTDAGDataGenesis := externalapi.NewBlockGHOSTDAGData(0, new(big.Int), nil, nil, nil, nil) + genesisHeader := consensusConfig.GenesisBlock.Header + genesisWork := difficulty.CalcWork(genesisHeader.Bits()) + + var testsCounter int + err := filepath.Walk("../../testdata/dags", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + jsonFile, err := os.Open(path) + if err != nil { + t.Fatalf("TestGHOSTDAG : failed opening the json file %s: %v", info.Name(), err) + } + defer jsonFile.Close() + var test testDag + decoder := json.NewDecoder(jsonFile) + decoder.DisallowUnknownFields() + err = decoder.Decode(&test) + if err != nil { + t.Fatalf("TestGHOSTDAG:failed decoding json: %v", err) + } + consensusConfig.K = test.K + + genesisHash := *StringToDomainHash(test.GenesisID) + + dagTopology.parentsMap[genesisHash] = nil + + ghostdagDataStore.dagMap[genesisHash] = blockGHOSTDAGDataGenesis + blockHeadersStore.dagMap[genesisHash] = genesisHeader + + for _, factory := range implementationFactories { + g := factory.function(nil, dagTopology, ghostdagDataStore, blockHeadersStore, test.K, &genesisHash) + + for _, testBlockData := range test.Blocks { + blockID := StringToDomainHash(testBlockData.ID) + dagTopology.parentsMap[*blockID] = 
StringToDomainHashSlice(testBlockData.Parents) + blockHeadersStore.dagMap[*blockID] = blockheader.NewImmutableBlockHeader( + constants.BlockVersion, + []externalapi.BlockLevelParents{StringToDomainHashSlice(testBlockData.Parents)}, + nil, + nil, + nil, + 0, + genesisHeader.Bits(), + 0, + 0, + 0, + big.NewInt(0), + nil, + ) + + err := g.GHOSTDAG(nil, blockID) + if err != nil { + t.Fatalf("Test failed: \n Impl: %s,FileName: %s \n error on GHOSTDAG - block %s: %s.", + factory.implName, info.Name(), testBlockData.ID, err) + } + ghostdagData, err := ghostdagDataStore.Get(nil, nil, blockID, false) + if err != nil { + t.Fatalf("\nTEST FAILED:\n Impl: %s, FileName: %s \nBlock: %s, \nError: ghostdagDataStore error: %v.", + factory.implName, info.Name(), testBlockData.ID, err) + } + + // because the difficulty is constant and equal to genesis the work should be blueScore*genesisWork. + expectedWork := new(big.Int).Mul(genesisWork, new(big.Int).SetUint64(testBlockData.Score)) + if expectedWork.Cmp(ghostdagData.BlueWork()) != 0 { + t.Fatalf("\nTEST FAILED:\n Impl: %s, FileName: %s \nBlock: %s, \nError: expected blue work %d but got %d.", + factory.implName, info.Name(), testBlockData.ID, expectedWork, ghostdagData.BlueWork()) + } + if testBlockData.Score != (ghostdagData.BlueScore()) { + t.Fatalf("\nTEST FAILED:\n Impl: %s, FileName: %s \nBlock: %s, \nError: expected blue score %d but got %d.", + factory.implName, info.Name(), testBlockData.ID, testBlockData.Score, ghostdagData.BlueScore()) + } + + if !StringToDomainHash(testBlockData.SelectedParent).Equal(ghostdagData.SelectedParent()) { + t.Fatalf("\nTEST FAILED:\n Impl: %s, FileName: %s \nBlock: %s, \nError: expected selected parent %v but got %s.", + factory.implName, info.Name(), testBlockData.ID, testBlockData.SelectedParent, ghostdagData.SelectedParent()) + } + + if !reflect.DeepEqual(StringToDomainHashSlice(testBlockData.MergeSetBlues), ghostdagData.MergeSetBlues()) { + t.Fatalf("\nTEST FAILED:\n Impl: %s, FileName: %s 
\nBlock: %s, \nError: expected merge set blues %v but got %v.", + factory.implName, info.Name(), testBlockData.ID, testBlockData.MergeSetBlues, hashesToStrings(ghostdagData.MergeSetBlues())) + } + + if !reflect.DeepEqual(StringToDomainHashSlice(testBlockData.MergeSetReds), ghostdagData.MergeSetReds()) { + t.Fatalf("\nTEST FAILED:\n Impl: %s, FileName: %s \nBlock: %s, \nError: expected merge set reds %v but got %v.", + factory.implName, info.Name(), testBlockData.ID, testBlockData.MergeSetReds, hashesToStrings(ghostdagData.MergeSetReds())) + } + } + dagTopology.parentsMap = make(map[externalapi.DomainHash][]*externalapi.DomainHash) + dagTopology.parentsMap[genesisHash] = nil + ghostdagDataStore.dagMap = make(map[externalapi.DomainHash]*externalapi.BlockGHOSTDAGData) + ghostdagDataStore.dagMap[genesisHash] = blockGHOSTDAGDataGenesis + blockHeadersStore.dagMap = make(map[externalapi.DomainHash]externalapi.BlockHeader) + blockHeadersStore.dagMap[genesisHash] = genesisHeader + } + + testsCounter++ + return nil + }) + if err != nil { + t.Fatal(err) + } + if testsCounter != 6 { + t.Fatalf("Expected 6 test files, ran %d instead", testsCounter) + } + }) +} + +// TestBlueWork tests if GHOSTDAG picks as selected parent the parent +// with the most blue work, even if its blue score is not the greatest. +// To do that it creates one chain of 3 blocks over genesis, and another +// chain of 2 blocks with more blue work than the 3 blocks chain, and +// checks that a block that points to both chain tips will have the +// 2 blocks chain tip as its selected parent. 
+func TestBlueWork(t *testing.T) { + dagTopology := &DAGTopologyManagerImpl{ + parentsMap: make(map[externalapi.DomainHash][]*externalapi.DomainHash), + } + + ghostdagDataStore := &GHOSTDAGDataStoreImpl{ + dagMap: make(map[externalapi.DomainHash]*externalapi.BlockGHOSTDAGData), + } + + blockHeadersStore := &blockHeadersStore{ + dagMap: make(map[externalapi.DomainHash]externalapi.BlockHeader), + } + + fakeGenesisHash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0}) + longestChainBlock1Hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}) + longestChainBlock2Hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}) + longestChainBlock3Hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}) + heaviestChainBlock1Hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}) + heaviestChainBlock2Hash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{5}) + tipHash := externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{6}) + + lowDifficultyHeader := blockheader.NewImmutableBlockHeader( + 0, + nil, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + 0, + 0, + 0, + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ) + + dagTopology.parentsMap[*fakeGenesisHash] = nil + ghostdagDataStore.dagMap[*fakeGenesisHash] = externalapi.NewBlockGHOSTDAGData(0, new(big.Int), nil, nil, nil, nil) + blockHeadersStore.dagMap[*fakeGenesisHash] = lowDifficultyHeader + + dagTopology.parentsMap[*longestChainBlock1Hash] = []*externalapi.DomainHash{fakeGenesisHash} + blockHeadersStore.dagMap[*longestChainBlock1Hash] = lowDifficultyHeader + + dagTopology.parentsMap[*longestChainBlock2Hash] = []*externalapi.DomainHash{longestChainBlock1Hash} + blockHeadersStore.dagMap[*longestChainBlock2Hash] = lowDifficultyHeader + + dagTopology.parentsMap[*longestChainBlock3Hash] = 
[]*externalapi.DomainHash{longestChainBlock2Hash} + blockHeadersStore.dagMap[*longestChainBlock3Hash] = lowDifficultyHeader + + dagTopology.parentsMap[*heaviestChainBlock1Hash] = []*externalapi.DomainHash{fakeGenesisHash} + blockHeadersStore.dagMap[*heaviestChainBlock1Hash] = blockheader.NewImmutableBlockHeader( + 0, + nil, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &externalapi.DomainHash{}, + 0, + math.MaxUint32, // Put a very high difficulty so the chain that contains this block will have a very high blue work + 0, + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ) + + dagTopology.parentsMap[*heaviestChainBlock2Hash] = []*externalapi.DomainHash{heaviestChainBlock1Hash} + blockHeadersStore.dagMap[*heaviestChainBlock2Hash] = lowDifficultyHeader + + dagTopology.parentsMap[*tipHash] = []*externalapi.DomainHash{heaviestChainBlock2Hash, longestChainBlock3Hash} + blockHeadersStore.dagMap[*tipHash] = lowDifficultyHeader + + manager := ghostdagmanager.New(nil, dagTopology, ghostdagDataStore, blockHeadersStore, 18, fakeGenesisHash) + blocksForGHOSTDAG := []*externalapi.DomainHash{ + longestChainBlock1Hash, + longestChainBlock2Hash, + longestChainBlock3Hash, + heaviestChainBlock1Hash, + heaviestChainBlock2Hash, + tipHash, + } + + for _, blockHash := range blocksForGHOSTDAG { + err := manager.GHOSTDAG(nil, blockHash) + if err != nil { + t.Fatalf("GHOSTDAG: %+v", err) + } + } + + if ghostdagDataStore.dagMap[*longestChainBlock3Hash].BlueScore() <= ghostdagDataStore.dagMap[*heaviestChainBlock2Hash].BlueScore() { + t.Fatalf("Expected longestChainBlock3Hash to have greater blue score than heaviestChainBlock2Hash") + } + + if !ghostdagDataStore.dagMap[*tipHash].SelectedParent().Equal(heaviestChainBlock2Hash) { + t.Fatalf("Expected the block with the most blue work to be the selected parent of the tip") + } +} + +func hashesToStrings(arr []*externalapi.DomainHash) []string { + var strArr = make([]string, len(arr)) + for i, hash := range arr { + strArr[i] 
= string(hash.ByteSlice()) + } + return strArr +} + +func StringToDomainHash(strID string) *externalapi.DomainHash { + var genesisHashArray [externalapi.DomainHashSize]byte + copy(genesisHashArray[:], strID) + return externalapi.NewDomainHashFromByteArray(&genesisHashArray) +} + +func StringToDomainHashSlice(stringIDArr []string) []*externalapi.DomainHash { + domainHashArr := make([]*externalapi.DomainHash, len(stringIDArr)) + for i, strID := range stringIDArr { + domainHashArr[i] = StringToDomainHash(strID) + } + return domainHashArr +} + +/* ---------------------- */ +type GHOSTDAGDataStoreImpl struct { + dagMap map[externalapi.DomainHash]*externalapi.BlockGHOSTDAGData +} + +func (ds *GHOSTDAGDataStoreImpl) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockGHOSTDAGData *externalapi.BlockGHOSTDAGData, isTrustedData bool) { + ds.dagMap[*blockHash] = blockGHOSTDAGData +} + +func (ds *GHOSTDAGDataStoreImpl) IsStaged(*model.StagingArea) bool { + panic("implement me") +} + +func (ds *GHOSTDAGDataStoreImpl) Commit(dbTx model.DBTransaction) error { + panic("implement me") +} + +func (ds *GHOSTDAGDataStoreImpl) Get(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isTrustedData bool) (*externalapi.BlockGHOSTDAGData, error) { + v, ok := ds.dagMap[*blockHash] + if ok { + return v, nil + } + return nil, nil +} + +func (ds *GHOSTDAGDataStoreImpl) UnstageAll(stagingArea *model.StagingArea) { + panic("implement me") +} + +type DAGTopologyManagerImpl struct { + parentsMap map[externalapi.DomainHash][]*externalapi.DomainHash +} + +func (dt *DAGTopologyManagerImpl) ChildInSelectedParentChainOf(stagingArea *model.StagingArea, lowHash, highHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + panic("implement me") +} + +func (dt *DAGTopologyManagerImpl) Tips() ([]*externalapi.DomainHash, error) { + panic("implement me") +} + +func (dt *DAGTopologyManagerImpl) AddTip(tipHash *externalapi.DomainHash) 
error { + panic("implement me") +} + +func (dt *DAGTopologyManagerImpl) Parents(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + v, ok := dt.parentsMap[*blockHash] + if !ok { + return []*externalapi.DomainHash{}, nil + } + + return v, nil +} + +func (dt *DAGTopologyManagerImpl) Children(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + panic("unimplemented") +} + +func (dt *DAGTopologyManagerImpl) IsParentOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + panic("unimplemented") +} + +func (dt *DAGTopologyManagerImpl) IsChildOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + panic("unimplemented") +} + +func (dt *DAGTopologyManagerImpl) IsAncestorOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + blockBParents, isOk := dt.parentsMap[*blockHashB] + if !isOk { + return false, nil + } + + for _, parentOfB := range blockBParents { + if parentOfB.Equal(blockHashA) { + return true, nil + } + } + + for _, parentOfB := range blockBParents { + isAncestorOf, err := dt.IsAncestorOf(stagingArea, blockHashA, parentOfB) + if err != nil { + return false, err + } + if isAncestorOf { + return true, nil + } + } + return false, nil + +} + +func (dt *DAGTopologyManagerImpl) IsAncestorOfAny(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, potentialDescendants []*externalapi.DomainHash) (bool, error) { + panic("unimplemented") +} +func (dt *DAGTopologyManagerImpl) IsAnyAncestorOf(*model.StagingArea, []*externalapi.DomainHash, *externalapi.DomainHash) (bool, error) { + panic("unimplemented") +} +func (dt *DAGTopologyManagerImpl) IsInSelectedParentChainOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB 
*externalapi.DomainHash) (bool, error) { + panic("unimplemented") +} + +func (dt *DAGTopologyManagerImpl) SetParents(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, parentHashes []*externalapi.DomainHash) error { + panic("unimplemented") +} + +type blockHeadersStore struct { + dagMap map[externalapi.DomainHash]externalapi.BlockHeader +} + +func (b *blockHeadersStore) Discard() { panic("unimplemented") } + +func (b *blockHeadersStore) Commit(_ model.DBTransaction) error { panic("unimplemented") } + +func (b *blockHeadersStore) Stage(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, blockHeader externalapi.BlockHeader) { + b.dagMap[*blockHash] = blockHeader +} + +func (b *blockHeadersStore) IsStaged(*model.StagingArea) bool { panic("unimplemented") } + +func (b *blockHeadersStore) BlockHeader(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (externalapi.BlockHeader, error) { + header, ok := b.dagMap[*blockHash] + if ok { + return header, nil + } + return nil, errors.New("Header isn't in the store") +} + +func (b *blockHeadersStore) HasBlockHeader(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + _, ok := b.dagMap[*blockHash] + return ok, nil +} + +func (b *blockHeadersStore) BlockHeaders(dbContext model.DBReader, stagingArea *model.StagingArea, blockHashes []*externalapi.DomainHash) ([]externalapi.BlockHeader, error) { + res := make([]externalapi.BlockHeader, 0, len(blockHashes)) + for _, hash := range blockHashes { + header, err := b.BlockHeader(nil, nil, hash) + if err != nil { + return nil, err + } + res = append(res, header) + } + return res, nil +} + +func (b *blockHeadersStore) Delete(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) { + delete(b.dagMap, *blockHash) +} + +func (b *blockHeadersStore) Count(*model.StagingArea) uint64 { + return uint64(len(b.dagMap)) +} diff --git 
a/domain/consensus/processes/ghostdagmanager/ghostdagmanager.go b/domain/consensus/processes/ghostdagmanager/ghostdagmanager.go new file mode 100644 index 0000000..96a3905 --- /dev/null +++ b/domain/consensus/processes/ghostdagmanager/ghostdagmanager.go @@ -0,0 +1,36 @@ +package ghostdagmanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// ghostdagManager resolves and manages GHOSTDAG block data +type ghostdagManager struct { + databaseContext model.DBReader + dagTopologyManager model.DAGTopologyManager + ghostdagDataStore model.GHOSTDAGDataStore + headerStore model.BlockHeaderStore + + k externalapi.KType + genesisHash *externalapi.DomainHash +} + +// New instantiates a new GHOSTDAGManager +func New( + databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + ghostdagDataStore model.GHOSTDAGDataStore, + headerStore model.BlockHeaderStore, + k externalapi.KType, + genesisHash *externalapi.DomainHash) model.GHOSTDAGManager { + + return &ghostdagManager{ + databaseContext: databaseContext, + dagTopologyManager: dagTopologyManager, + ghostdagDataStore: ghostdagDataStore, + headerStore: headerStore, + k: k, + genesisHash: genesisHash, + } +} diff --git a/domain/consensus/processes/ghostdagmanager/mergeset.go b/domain/consensus/processes/ghostdagmanager/mergeset.go new file mode 100644 index 0000000..c4b6932 --- /dev/null +++ b/domain/consensus/processes/ghostdagmanager/mergeset.go @@ -0,0 +1,128 @@ +package ghostdagmanager + +import ( + "sort" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (gm *ghostdagManager) mergeSetWithoutSelectedParent(stagingArea *model.StagingArea, + selectedParent *externalapi.DomainHash, blockParents []*externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + mergeSetMap := 
make(map[externalapi.DomainHash]struct{}, gm.k) + mergeSetSlice := make([]*externalapi.DomainHash, 0, gm.k) + selectedParentPast := make(map[externalapi.DomainHash]struct{}) + queue := []*externalapi.DomainHash{} + // Queueing all parents (other than the selected parent itself) for processing. + for _, parent := range blockParents { + if parent.Equal(selectedParent) { + continue + } + mergeSetMap[*parent] = struct{}{} + mergeSetSlice = append(mergeSetSlice, parent) + queue = append(queue, parent) + } + + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + // For each parent of the current block we check whether it is in the past of the selected parent. If not, + // we add the it to the resulting anticone-set and queue it for further processing. + currentParents, err := gm.dagTopologyManager.Parents(stagingArea, current) + if err != nil { + return nil, err + } + for _, parent := range currentParents { + if _, ok := mergeSetMap[*parent]; ok { + continue + } + + if _, ok := selectedParentPast[*parent]; ok { + continue + } + + isAncestorOfSelectedParent, err := gm.dagTopologyManager.IsAncestorOf(stagingArea, parent, selectedParent) + if err != nil { + return nil, err + } + + if isAncestorOfSelectedParent { + selectedParentPast[*parent] = struct{}{} + continue + } + + mergeSetMap[*parent] = struct{}{} + mergeSetSlice = append(mergeSetSlice, parent) + queue = append(queue, parent) + } + } + + err := gm.sortMergeSet(stagingArea, mergeSetSlice) + if err != nil { + return nil, err + } + + return mergeSetSlice, nil +} + +func (gm *ghostdagManager) sortMergeSet(stagingArea *model.StagingArea, mergeSetSlice []*externalapi.DomainHash) error { + var err error + sort.Slice(mergeSetSlice, func(i, j int) bool { + if err != nil { + return false + } + isLess, lessErr := gm.less(stagingArea, mergeSetSlice[i], mergeSetSlice[j]) + if lessErr != nil { + err = lessErr + return false + } + return isLess + }) + return err +} + +// 
GetSortedMergeSet return the merge set sorted in a toplogical order. +func (gm *ghostdagManager) GetSortedMergeSet(stagingArea *model.StagingArea, + current *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + currentGhostdagData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, current, false) + if err != nil { + return nil, err + } + + blueMergeSet := currentGhostdagData.MergeSetBlues() + redMergeSet := currentGhostdagData.MergeSetReds() + sortedMergeSet := make([]*externalapi.DomainHash, 0, len(blueMergeSet)+len(redMergeSet)) + // If the current block is the genesis block: + if len(blueMergeSet) == 0 { + return sortedMergeSet, nil + } + selectedParent, blueMergeSet := blueMergeSet[0], blueMergeSet[1:] + sortedMergeSet = append(sortedMergeSet, selectedParent) + i, j := 0, 0 + for i < len(blueMergeSet) && j < len(redMergeSet) { + currentBlue := blueMergeSet[i] + currentBlueGhostdagData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, currentBlue, false) + if err != nil { + return nil, err + } + currentRed := redMergeSet[j] + currentRedGhostdagData, err := gm.ghostdagDataStore.Get(gm.databaseContext, stagingArea, currentRed, false) + if err != nil { + return nil, err + } + if gm.Less(currentBlue, currentBlueGhostdagData, currentRed, currentRedGhostdagData) { + sortedMergeSet = append(sortedMergeSet, currentBlue) + i++ + } else { + sortedMergeSet = append(sortedMergeSet, currentRed) + j++ + } + } + sortedMergeSet = append(sortedMergeSet, blueMergeSet[i:]...) + sortedMergeSet = append(sortedMergeSet, redMergeSet[j:]...) 
+ + return sortedMergeSet, nil +} diff --git a/domain/consensus/processes/headersselectedtipmanager/headersselectedtipmanager.go b/domain/consensus/processes/headersselectedtipmanager/headersselectedtipmanager.go new file mode 100644 index 0000000..b35bf2e --- /dev/null +++ b/domain/consensus/processes/headersselectedtipmanager/headersselectedtipmanager.go @@ -0,0 +1,79 @@ +package headersselectedtipmanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type headerTipsManager struct { + databaseContext model.DBReader + + dagTopologyManager model.DAGTopologyManager + dagTraversalManager model.DAGTraversalManager + ghostdagManager model.GHOSTDAGManager + headersSelectedTipStore model.HeaderSelectedTipStore + headersSelectedChainStore model.HeadersSelectedChainStore +} + +// New instantiates a new HeadersSelectedTipManager +func New(databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + dagTraversalManager model.DAGTraversalManager, + ghostdagManager model.GHOSTDAGManager, + headersSelectedTipStore model.HeaderSelectedTipStore, + headersSelectedChainStore model.HeadersSelectedChainStore) model.HeadersSelectedTipManager { + + return &headerTipsManager{ + databaseContext: databaseContext, + dagTopologyManager: dagTopologyManager, + dagTraversalManager: dagTraversalManager, + ghostdagManager: ghostdagManager, + headersSelectedTipStore: headersSelectedTipStore, + headersSelectedChainStore: headersSelectedChainStore, + } +} + +func (h *headerTipsManager) AddHeaderTip(stagingArea *model.StagingArea, hash *externalapi.DomainHash) error { + hasSelectedTip, err := h.headersSelectedTipStore.Has(h.databaseContext, stagingArea) + if err != nil { + return err + } + + if !hasSelectedTip { + h.headersSelectedTipStore.Stage(stagingArea, hash) + + err := h.headersSelectedChainStore.Stage(h.databaseContext, stagingArea, &externalapi.SelectedChainPath{ + 
Added: []*externalapi.DomainHash{hash}, + Removed: nil, + }) + if err != nil { + return err + } + } else { + headersSelectedTip, err := h.headersSelectedTipStore.HeadersSelectedTip(h.databaseContext, stagingArea) + if err != nil { + return err + } + + newHeadersSelectedTip, err := h.ghostdagManager.ChooseSelectedParent(stagingArea, headersSelectedTip, hash) + if err != nil { + return err + } + + if !newHeadersSelectedTip.Equal(headersSelectedTip) { + h.headersSelectedTipStore.Stage(stagingArea, newHeadersSelectedTip) + + chainChanges, err := h.dagTraversalManager.CalculateChainPath(stagingArea, headersSelectedTip, newHeadersSelectedTip) + if err != nil { + return err + } + + err = h.headersSelectedChainStore.Stage(h.databaseContext, stagingArea, chainChanges) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/domain/consensus/processes/headersselectedtipmanager/headersselectedtipmanager_test.go b/domain/consensus/processes/headersselectedtipmanager/headersselectedtipmanager_test.go new file mode 100644 index 0000000..1bfc072 --- /dev/null +++ b/domain/consensus/processes/headersselectedtipmanager/headersselectedtipmanager_test.go @@ -0,0 +1,79 @@ +package headersselectedtipmanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func TestAddHeaderTip(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestAddHeaderTip") + if err != nil { + t.Fatalf("NewTestConsensus: %s", err) + } + defer tearDown(false) + + stagingArea := model.NewStagingArea() + 
checkExpectedSelectedChain := func(expectedSelectedChain []*externalapi.DomainHash) { + for i, blockHash := range expectedSelectedChain { + chainBlockHash, err := tc.HeadersSelectedChainStore().GetHashByIndex(tc.DatabaseContext(), stagingArea, uint64(i)) + if err != nil { + t.Fatalf("GetHashByIndex: %+v", err) + } + + if !blockHash.Equal(chainBlockHash) { + t.Fatalf("chain block %d is expected to be %s but got %s", i, blockHash, chainBlockHash) + } + + index, err := tc.HeadersSelectedChainStore().GetIndexByHash(tc.DatabaseContext(), stagingArea, blockHash) + if err != nil { + t.Fatalf("GetIndexByHash: %+v", err) + } + + if uint64(i) != index { + t.Fatalf("chain block %s is expected to be %d but got %d", blockHash, i, index) + } + } + + _, err := tc.HeadersSelectedChainStore().GetHashByIndex(tc.DatabaseContext(), stagingArea, uint64(len(expectedSelectedChain)+1)) + if !errors.Is(err, database.ErrNotFound) { + t.Fatalf("index %d is not expected to exist, but instead got error: %+v", + uint64(len(expectedSelectedChain)+1), err) + } + } + + expectedSelectedChain := []*externalapi.DomainHash{consensusConfig.GenesisHash} + tipHash := consensusConfig.GenesisHash + for i := 0; i < 10; i++ { + var err error + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + expectedSelectedChain = append(expectedSelectedChain, tipHash) + checkExpectedSelectedChain(expectedSelectedChain) + } + + expectedSelectedChain = []*externalapi.DomainHash{consensusConfig.GenesisHash} + tipHash = consensusConfig.GenesisHash + for i := 0; i < 11; i++ { + var err error + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + expectedSelectedChain = append(expectedSelectedChain, tipHash) + } + checkExpectedSelectedChain(expectedSelectedChain) + }) +} diff --git a/domain/consensus/processes/mergedepthmanager/log.go 
b/domain/consensus/processes/mergedepthmanager/log.go new file mode 100644 index 0000000..2d64874 --- /dev/null +++ b/domain/consensus/processes/mergedepthmanager/log.go @@ -0,0 +1,7 @@ +package mergedepthmanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("MDMN") diff --git a/domain/consensus/processes/mergedepthmanager/merge_depth_manager.go b/domain/consensus/processes/mergedepthmanager/merge_depth_manager.go new file mode 100644 index 0000000..2f5b96b --- /dev/null +++ b/domain/consensus/processes/mergedepthmanager/merge_depth_manager.go @@ -0,0 +1,262 @@ +package mergedepthmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +type mergeDepthManager struct { + databaseContext model.DBReader + dagTopologyManager model.DAGTopologyManager + dagTraversalManager model.DAGTraversalManager + finalityManager model.FinalityManager + + genesisHash *externalapi.DomainHash + mergeDepth uint64 + + ghostdagDataStore model.GHOSTDAGDataStore + mergeDepthRootStore model.MergeDepthRootStore + daaBlocksStore model.DAABlocksStore + pruningStore model.PruningStore + finalityStore model.FinalityStore +} + +// New instantiates a new MergeDepthManager +func New( + databaseContext model.DBReader, + dagTopologyManager model.DAGTopologyManager, + dagTraversalManager model.DAGTraversalManager, + finalityManager model.FinalityManager, + + genesisHash *externalapi.DomainHash, + mergeDepth uint64, + + ghostdagDataStore model.GHOSTDAGDataStore, + mergeDepthRootStore model.MergeDepthRootStore, + daaBlocksStore model.DAABlocksStore, + pruningStore model.PruningStore, + finalityStore model.FinalityStore) model.MergeDepthManager { + + return &mergeDepthManager{ 
+ databaseContext: databaseContext, + dagTopologyManager: dagTopologyManager, + dagTraversalManager: dagTraversalManager, + finalityManager: finalityManager, + genesisHash: genesisHash, + mergeDepth: mergeDepth, + ghostdagDataStore: ghostdagDataStore, + mergeDepthRootStore: mergeDepthRootStore, + daaBlocksStore: daaBlocksStore, + pruningStore: pruningStore, + finalityStore: finalityStore, + } + +} + +// CheckBoundedMergeDepth is used for validation, so must follow the HF1 DAA score for determining the correct depth to verify +func (mdm *mergeDepthManager) CheckBoundedMergeDepth(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) error { + ghostdagData, err := mdm.ghostdagDataStore.Get(mdm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return err + } + + // Return nil on genesis + if ghostdagData.SelectedParent() == nil { + return nil + } + + mergeDepthRoot, err := mdm.MergeDepthRoot(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + + // We call FinalityPoint in order to save it to storage. 
+ _, err = mdm.finalityManager.FinalityPoint(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return err + } + + nonBoundedMergeDepthViolatingBlues, err := mdm.NonBoundedMergeDepthViolatingBlues(stagingArea, blockHash, mergeDepthRoot) + if err != nil { + return err + } + + for _, red := range ghostdagData.MergeSetReds() { + doesRedHaveMergeRootInPast, err := mdm.dagTopologyManager.IsAncestorOf(stagingArea, mergeDepthRoot, red) + if err != nil { + return err + } + + if doesRedHaveMergeRootInPast { + continue + } + + isRedInPastOfAnyNonMergeDepthViolatingBlue, err := + mdm.dagTopologyManager.IsAncestorOfAny(stagingArea, red, nonBoundedMergeDepthViolatingBlues) + if err != nil { + return err + } + + if !isRedInPastOfAnyNonMergeDepthViolatingBlue { + return errors.Wrapf(ruleerrors.ErrViolatingBoundedMergeDepth, "block is violating bounded merge depth") + } + } + + return nil +} + +func (mdm *mergeDepthManager) NonBoundedMergeDepthViolatingBlues( + stagingArea *model.StagingArea, blockHash, mergeDepthRoot *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + + ghostdagData, err := mdm.ghostdagDataStore.Get(mdm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, err + } + + nonBoundedMergeDepthViolatingBlues := make([]*externalapi.DomainHash, 0, len(ghostdagData.MergeSetBlues())) + + if err != nil { + return nil, err + } + for _, blue := range ghostdagData.MergeSetBlues() { + isMergeDepthRootInSelectedChainOfBlue, err := mdm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, mergeDepthRoot, blue) + if err != nil { + return nil, err + } + + if isMergeDepthRootInSelectedChainOfBlue { + nonBoundedMergeDepthViolatingBlues = append(nonBoundedMergeDepthViolatingBlues, blue) + } + } + + return nonBoundedMergeDepthViolatingBlues, nil +} + +func (mdm *mergeDepthManager) VirtualMergeDepthRoot(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + log.Tracef("VirtualMergeDepthRoot start") + defer 
log.Tracef("VirtualMergeDepthRoot end") + + virtualMergeDepthRoot, err := mdm.calculateMergeDepthRoot(stagingArea, model.VirtualBlockHash, false) + if err != nil { + return nil, err + } + log.Debugf("The current virtual merge depth root is: %s", virtualMergeDepthRoot) + + return virtualMergeDepthRoot, nil +} + +func (mdm *mergeDepthManager) MergeDepthRoot(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (*externalapi.DomainHash, error) { + log.Tracef("MergeDepthRoot start") + defer log.Tracef("MergeDepthRoot end") + if blockHash.Equal(model.VirtualBlockHash) { + return mdm.VirtualMergeDepthRoot(stagingArea) + } + root, err := mdm.mergeDepthRootStore.MergeDepthRoot(mdm.databaseContext, stagingArea, blockHash) + if err != nil { + log.Debugf("%s merge depth root not found in store - calculating", blockHash) + if errors.Is(err, database.ErrNotFound) { + return mdm.calculateAndStageMergeDepthRoot(stagingArea, blockHash, isBlockWithTrustedData) + } + return nil, err + } + return root, nil +} + +func (mdm *mergeDepthManager) calculateAndStageMergeDepthRoot( + stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) (*externalapi.DomainHash, error) { + + root, err := mdm.calculateMergeDepthRoot(stagingArea, blockHash, isBlockWithTrustedData) + if err != nil { + return nil, err + } + mdm.mergeDepthRootStore.StageMergeDepthRoot(stagingArea, blockHash, root) + return root, nil +} + +func (mdm *mergeDepthManager) calculateMergeDepthRoot(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, isBlockWithTrustedData bool) ( + *externalapi.DomainHash, error) { + + log.Tracef("calculateMergeDepthRoot start") + defer log.Tracef("calculateMergeDepthRoot end") + + if isBlockWithTrustedData { + return model.VirtualGenesisBlockHash, nil + } + + ghostdagData, err := mdm.ghostdagDataStore.Get(mdm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return nil, err + } + + if 
ghostdagData.BlueScore() < mdm.mergeDepth { + log.Debugf("%s blue score lower then merge depth - returning genesis as merge depth root", blockHash) + return mdm.genesisHash, nil + } + + pruningPoint, err := mdm.pruningStore.PruningPoint(mdm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + pruningPointGhostdagData, err := mdm.ghostdagDataStore.Get(mdm.databaseContext, stagingArea, pruningPoint, false) + if err != nil { + return nil, err + } + if ghostdagData.BlueScore() < pruningPointGhostdagData.BlueScore()+mdm.mergeDepth { + log.Debugf("%s blue score less than merge depth over pruning point - returning virtual genesis as merge depth root", blockHash) + return model.VirtualGenesisBlockHash, nil + } + isPruningPointOnChain, err := mdm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, pruningPoint, blockHash) + if err != nil { + return nil, err + } + if !isPruningPointOnChain { + log.Debugf("pruning point not in selected chain of %s - returning virtual genesis as merge depth root", blockHash) + return model.VirtualGenesisBlockHash, nil + } + + selectedParent := ghostdagData.SelectedParent() + if selectedParent.Equal(mdm.genesisHash) { + return mdm.genesisHash, nil + } + + current, err := mdm.mergeDepthRootStore.MergeDepthRoot(mdm.databaseContext, stagingArea, ghostdagData.SelectedParent()) + if database.IsNotFoundError(err) { + // This should only occur for a few blocks following the upgrade + log.Debugf("merge point root not in store for %s, falling back to finality point", ghostdagData.SelectedParent()) + current, err = mdm.finalityStore.FinalityPoint(mdm.databaseContext, stagingArea, ghostdagData.SelectedParent()) + if err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + // In this case we expect the pruning point or a block above it to be the merge depth root. 
+ // Note that above we already verified the chain and distance conditions for this + if current.Equal(model.VirtualGenesisBlockHash) { + current = pruningPoint + } + + requiredBlueScore := ghostdagData.BlueScore() - mdm.mergeDepth + log.Debugf("%s's merge depth root is the one having the highest blue score lower then %d", blockHash, requiredBlueScore) + + var next *externalapi.DomainHash + for { + next, err = mdm.dagTopologyManager.ChildInSelectedParentChainOf(stagingArea, current, blockHash) + if err != nil { + return nil, err + } + nextGHOSTDAGData, err := mdm.ghostdagDataStore.Get(mdm.databaseContext, stagingArea, next, false) + if err != nil { + return nil, err + } + if nextGHOSTDAGData.BlueScore() >= requiredBlueScore { + log.Debugf("%s's merge depth root is %s", blockHash, current) + return current, nil + } + + current = next + } +} diff --git a/domain/consensus/processes/parentsmanager/parentsmanager.go b/domain/consensus/processes/parentsmanager/parentsmanager.go new file mode 100644 index 0000000..f50d9cb --- /dev/null +++ b/domain/consensus/processes/parentsmanager/parentsmanager.go @@ -0,0 +1,42 @@ +package parentssanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type parentsManager struct { + genesisHash *externalapi.DomainHash + maxBlockLevel int +} + +// New instantiates a new ParentsManager +func New(genesisHash *externalapi.DomainHash, maxBlockLevel int) model.ParentsManager { + return &parentsManager{ + genesisHash: genesisHash, + maxBlockLevel: maxBlockLevel, + } +} + +func (pm *parentsManager) ParentsAtLevel(blockHeader externalapi.BlockHeader, level int) externalapi.BlockLevelParents { + var parentsAtLevel externalapi.BlockLevelParents + if len(blockHeader.Parents()) > level { + parentsAtLevel = blockHeader.Parents()[level] + } + + if len(parentsAtLevel) == 0 && len(blockHeader.DirectParents()) > 0 { + return 
externalapi.BlockLevelParents{pm.genesisHash} + } + + return parentsAtLevel +} + +func (pm *parentsManager) Parents(blockHeader externalapi.BlockHeader) []externalapi.BlockLevelParents { + numParents := pm.maxBlockLevel + 1 + parents := make([]externalapi.BlockLevelParents, numParents) + for i := 0; i < numParents; i++ { + parents[i] = pm.ParentsAtLevel(blockHeader, i) + } + + return parents +} diff --git a/domain/consensus/processes/pastmediantimemanager/pastmediantimemanager.go b/domain/consensus/processes/pastmediantimemanager/pastmediantimemanager.go new file mode 100644 index 0000000..3e0d32f --- /dev/null +++ b/domain/consensus/processes/pastmediantimemanager/pastmediantimemanager.go @@ -0,0 +1,102 @@ +package pastmediantimemanager + +import ( + "sort" + + "github.com/spectre-project/spectred/domain/consensus/utils/sorters" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// pastMedianTimeManager provides a method to resolve the +// past median time of a block +type pastMedianTimeManager struct { + timestampDeviationTolerance int + + databaseContext model.DBReader + + dagTraversalManager model.DAGTraversalManager + + blockHeaderStore model.BlockHeaderStore + ghostdagDataStore model.GHOSTDAGDataStore + + genesisHash *externalapi.DomainHash + + virtualPastMedianTimeCache int64 +} + +// New instantiates a new PastMedianTimeManager +func New(timestampDeviationTolerance int, + databaseContext model.DBReader, + dagTraversalManager model.DAGTraversalManager, + blockHeaderStore model.BlockHeaderStore, + ghostdagDataStore model.GHOSTDAGDataStore, + genesisHash *externalapi.DomainHash) model.PastMedianTimeManager { + + return &pastMedianTimeManager{ + timestampDeviationTolerance: timestampDeviationTolerance, + databaseContext: databaseContext, + + dagTraversalManager: dagTraversalManager, + + blockHeaderStore: blockHeaderStore, + ghostdagDataStore: 
ghostdagDataStore, + genesisHash: genesisHash, + } +} + +// PastMedianTime returns the past median time for some block +func (pmtm *pastMedianTimeManager) PastMedianTime(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (int64, error) { + if blockHash == model.VirtualBlockHash && pmtm.virtualPastMedianTimeCache != 0 { + return pmtm.virtualPastMedianTimeCache, nil + } + window, err := pmtm.dagTraversalManager.BlockWindow(stagingArea, blockHash, 2*pmtm.timestampDeviationTolerance-1) + if err != nil { + return 0, err + } + if len(window) == 0 { + header, err := pmtm.blockHeaderStore.BlockHeader(pmtm.databaseContext, stagingArea, pmtm.genesisHash) + if err != nil { + return 0, err + } + return header.TimeInMilliseconds(), nil + } + + pastMedianTime, err := pmtm.windowMedianTimestamp(stagingArea, window) + if err != nil { + return 0, err + } + + if blockHash == model.VirtualBlockHash { + pmtm.virtualPastMedianTimeCache = pastMedianTime + } + + return pastMedianTime, nil +} + +func (pmtm *pastMedianTimeManager) windowMedianTimestamp( + stagingArea *model.StagingArea, window []*externalapi.DomainHash) (int64, error) { + + if len(window) == 0 { + return 0, errors.New("Cannot calculate median timestamp for an empty block window") + } + + timestamps := make([]int64, len(window)) + for i, blockHash := range window { + blockHeader, err := pmtm.blockHeaderStore.BlockHeader(pmtm.databaseContext, stagingArea, blockHash) + if err != nil { + return 0, err + } + timestamps[i] = blockHeader.TimeInMilliseconds() + } + + sort.Sort(sorters.Int64Slice(timestamps)) + + return timestamps[len(timestamps)/2], nil +} + +func (pmtm *pastMedianTimeManager) InvalidateVirtualPastMedianTimeCache() { + pmtm.virtualPastMedianTimeCache = 0 +} diff --git a/domain/consensus/processes/pastmediantimemanager/pastmediantimemanager_test.go b/domain/consensus/processes/pastmediantimemanager/pastmediantimemanager_test.go new file mode 100644 index 0000000..a45b38e --- /dev/null +++ 
b/domain/consensus/processes/pastmediantimemanager/pastmediantimemanager_test.go @@ -0,0 +1,85 @@ +package pastmediantimemanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestPastMedianTime(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestUpdateReindexRoot") + if err != nil { + t.Fatalf("NewTestConsensus: %+v", err) + } + defer tearDown(false) + + numBlocks := uint32(300) + blockHashes := make([]*externalapi.DomainHash, numBlocks) + blockHashes[0] = consensusConfig.GenesisHash + blockTime := consensusConfig.GenesisBlock.Header.TimeInMilliseconds() + for i := uint32(1); i < numBlocks; i++ { + blockTime += 1000 + block, _, err := tc.BuildBlockWithParents([]*externalapi.DomainHash{blockHashes[i-1]}, nil, nil) + if err != nil { + t.Fatalf("BuildBlockWithParents: %s", err) + } + + newHeader := block.Header.ToMutable() + newHeader.SetTimeInMilliseconds(blockTime) + block.Header = newHeader.ToImmutable() + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + blockHashes[i] = consensushashing.BlockHash(block) + } + + tests := []struct { + blockNumber uint32 + expectedMillisecondsSinceGenesis int64 + }{ + { + blockNumber: 263, + expectedMillisecondsSinceGenesis: 132000, + }, + { + blockNumber: 271, + expectedMillisecondsSinceGenesis: 139000, + }, + { + blockNumber: 241, + expectedMillisecondsSinceGenesis: 121000, + }, + { + blockNumber: 5, + expectedMillisecondsSinceGenesis: 3000, + }, + } + + stagingArea := 
model.NewStagingArea() + for _, test := range tests { + pastMedianTime, err := tc.PastMedianTimeManager().PastMedianTime(stagingArea, blockHashes[test.blockNumber]) + if err != nil { + t.Fatalf("PastMedianTime: %s", err) + } + + millisecondsSinceGenesis := pastMedianTime - + consensusConfig.GenesisBlock.Header.TimeInMilliseconds() + + if millisecondsSinceGenesis != test.expectedMillisecondsSinceGenesis { + t.Errorf("TestCalcPastMedianTime: expected past median time of block %v to be %v milliseconds "+ + "from genesis but got %v", + test.blockNumber, test.expectedMillisecondsSinceGenesis, millisecondsSinceGenesis) + } + } + }) + +} diff --git a/domain/consensus/processes/pruningmanager/log.go b/domain/consensus/processes/pruningmanager/log.go new file mode 100644 index 0000000..370725c --- /dev/null +++ b/domain/consensus/processes/pruningmanager/log.go @@ -0,0 +1,5 @@ +package pruningmanager + +import "github.com/spectre-project/spectred/infrastructure/logger" + +var log = logger.RegisterSubSystem("PRNM") diff --git a/domain/consensus/processes/pruningmanager/pruning_test.go b/domain/consensus/processes/pruningmanager/pruning_test.go new file mode 100644 index 0000000..5d60028 --- /dev/null +++ b/domain/consensus/processes/pruningmanager/pruning_test.go @@ -0,0 +1,226 @@ +package pruningmanager_test + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/spectre-project/spectred/infrastructure/db/database" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +type jsonBlock struct { + ID string `json:"ID"` + Parents []string `json:"Parents"` +} + +type testJSON struct { + MergeSetSizeLimit uint64 `json:"mergeSetSizeLimit"` + FinalityDepth uint64 
`json:"finalityDepth"`
	Blocks []*jsonBlock `json:"blocks"`
}

// TestPruning replays the DAGs described by the JSON files under ./testdata,
// asserts the resulting pruning point per network, and then verifies exactly
// which blocks survived pruning.
func TestPruning(t *testing.T) {
	// Expected pruning point ID, keyed first by testdata file name and then
	// by network name.
	expectedPruningPointByNet := map[string]map[string]string{
		"chain-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "1582",
			dagconfig.TestnetParams.Name: "1582",
			dagconfig.DevnetParams.Name:  "1582",
			dagconfig.SimnetParams.Name:  "1582",
		},
		"dag-for-test-pruning.json": {
			dagconfig.MainnetParams.Name: "503",
			dagconfig.TestnetParams.Name: "502",
			dagconfig.DevnetParams.Name:  "502",
			dagconfig.SimnetParams.Name:  "502",
		},
	}

	testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) {
		// Improve the performance of the test a little
		consensusConfig.DisableDifficultyAdjustment = true
		err := filepath.Walk("./testdata", func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if info.IsDir() {
				return nil
			}

			jsonFile, err := os.Open(path)
			if err != nil {
				t.Fatalf("TestPruning : failed opening json file %s: %s", path, err)
			}
			defer jsonFile.Close()

			test := &testJSON{}
			decoder := json.NewDecoder(jsonFile)
			decoder.DisallowUnknownFields()
			err = decoder.Decode(&test)
			if err != nil {
				t.Fatalf("TestPruning: failed decoding json: %v", err)
			}

			// Derive consensus parameters from the testdata file so each file
			// controls its own finality depth and merge set size.
			consensusConfig.FinalityDuration = time.Duration(test.FinalityDepth) * consensusConfig.TargetTimePerBlock
			consensusConfig.MergeSetSizeLimit = test.MergeSetSizeLimit
			consensusConfig.DifficultyAdjustmentWindowSize = 400

			factory := consensus.NewFactory()
			factory.SetTestLevelDBCacheSize(128)
			tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestPruning")
			if err != nil {
				t.Fatalf("Error setting up consensus: %+v", err)
			}
			defer teardown(false)

			// Bidirectional ID<->hash maps; the JSON refers to blocks by ID.
			blockIDToHash := map[string]*externalapi.DomainHash{
				"0": consensusConfig.GenesisHash,
			}

			blockHashToID := map[externalapi.DomainHash]string{
				*consensusConfig.GenesisHash: "0",
			}

			stagingArea := model.NewStagingArea()

			for _, dagBlock := range test.Blocks {
				// ID "0" is genesis and is pre-seeded above.
				if dagBlock.ID == "0" {
					continue
				}
				parentHashes := make([]*externalapi.DomainHash, 0, len(dagBlock.Parents))
				for _, parentID := range dagBlock.Parents {
					parentHash, ok := blockIDToHash[parentID]
					if !ok {
						t.Fatalf("No hash was found for block with ID %s", parentID)
					}
					parentHashes = append(parentHashes, parentHash)
				}

				blockHash, _, err := tc.AddBlock(parentHashes, nil, nil)
				if err != nil {
					t.Fatalf("AddBlock: %+v", err)
				}

				blockIDToHash[dagBlock.ID] = blockHash
				blockHashToID[*blockHash] = dagBlock.ID

				// The candidate may not exist before the pruning point first
				// moves; treat "not found" as genesis.
				pruningPointCandidate, err := tc.PruningStore().PruningPointCandidate(tc.DatabaseContext(), stagingArea)
				if database.IsNotFoundError(err) {
					pruningPointCandidate = consensusConfig.GenesisHash
				} else if err != nil {
					return err
				}

				// Invariant check after every inserted block: the current
				// candidate must always be a valid pruning point.
				isValidPruningPoint, err := tc.IsValidPruningPoint(pruningPointCandidate)
				if err != nil {
					return err
				}

				if !isValidPruningPoint {
					t.Fatalf("isValidPruningPoint is %t while expected %t", isValidPruningPoint, true)
				}
			}

			pruningPoint, err := tc.PruningPoint()
			if err != nil {
				t.Fatalf("PruningPoint: %+v", err)
			}

			pruningPointID := blockHashToID[*pruningPoint]
			expectedPruningPoint := expectedPruningPointByNet[info.Name()][consensusConfig.Name]
			if expectedPruningPoint != pruningPointID {
				t.Fatalf("%s: Expected pruning point to be %s but got %s", info.Name(), expectedPruningPoint, pruningPointID)
			}

			// We expect blocks that are within the difficulty adjustment window size of
			// the pruning point and its anticone to not get pruned
			unprunedBlockHashesBelowPruningPoint := make(map[externalapi.DomainHash]struct{})
			pruningPointAndItsAnticone, err := tc.PruningPointAndItsAnticone()
			if err != nil {
				t.Fatalf("pruningPointAndItsAnticone: %+v", err)
			}
			for _, blockHash := range pruningPointAndItsAnticone {
				unprunedBlockHashesBelowPruningPoint[*blockHash] = struct{}{}
				blockWindow, err := tc.DAGTraversalManager().BlockWindow(stagingArea, blockHash, consensusConfig.DifficultyAdjustmentWindowSize)
				if err != nil {
					t.Fatalf("BlockWindow: %+v", err)
				}
				for _, windowBlockHash := range blockWindow {
					unprunedBlockHashesBelowPruningPoint[*windowBlockHash] = struct{}{}
				}
			}

			for _, jsonBlock := range test.Blocks {
				id := jsonBlock.ID
				blockHash := blockIDToHash[id]

				isPruningPointAncestorOfBlock, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, pruningPoint, blockHash)
				if err != nil {
					t.Fatalf("IsAncestorOf: %+v", err)
				}

				// A block is expected to survive pruning unless it is below
				// the pruning point (or outside virtual's past) and not in the
				// protected window set computed above.
				expectsBlock := true
				if !isPruningPointAncestorOfBlock {
					isBlockAncestorOfPruningPoint, err := tc.DAGTopologyManager().IsAncestorOf(stagingArea, blockHash, pruningPoint)
					if err != nil {
						t.Fatalf("IsAncestorOf: %+v", err)
					}

					if isBlockAncestorOfPruningPoint {
						if _, ok := unprunedBlockHashesBelowPruningPoint[*blockHash]; !ok {
							expectsBlock = false
						}
					} else {
						// Blocks in the pruning point's anticone: prunable
						// only when also outside virtual's past.
						virtualInfo, err := tc.GetVirtualInfo()
						if err != nil {
							t.Fatalf("GetVirtualInfo: %+v", err)
						}

						isInPastOfVirtual := false
						for _, virtualParent := range virtualInfo.ParentHashes {
							isAncestorOfVirtualParent, err := tc.DAGTopologyManager().IsAncestorOf(
								stagingArea, blockHash, virtualParent)
							if err != nil {
								t.Fatalf("IsAncestorOf: %+v", err)
							}

							if isAncestorOfVirtualParent {
								isInPastOfVirtual = true
								break
							}
						}

						if !isInPastOfVirtual {
							if _, ok := unprunedBlockHashesBelowPruningPoint[*blockHash]; !ok {
								expectsBlock = false
							}
						}
					}

				}

				hasBlock, err := tc.BlockStore().HasBlock(tc.DatabaseContext(), stagingArea, blockHash)
				if err != nil {
					t.Fatalf("HasBlock: %+v", err)
				}

				if expectsBlock != hasBlock {
					t.Fatalf("expected hasBlock to be %t for block %s but got %t", expectsBlock, id, hasBlock)
				}
			}

			return nil
		})
		if err != nil {
			t.Fatalf("Walk: %+v", err)
		}
	})
}
diff --git a/domain/consensus/processes/pruningmanager/pruningmanager.go
b/domain/consensus/processes/pruningmanager/pruningmanager.go
new file mode 100644
index 0000000..2e93069
--- /dev/null
+++ b/domain/consensus/processes/pruningmanager/pruningmanager.go
@@ -0,0 +1,1265 @@
package pruningmanager

import (
	"sort"

	"github.com/pkg/errors"
	"github.com/spectre-project/spectred/domain/consensus/model"
	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
	"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing"
	"github.com/spectre-project/spectred/domain/consensus/utils/multiset"
	"github.com/spectre-project/spectred/domain/consensus/utils/utxo"
	"github.com/spectre-project/spectred/domain/consensus/utils/virtual"
	"github.com/spectre-project/spectred/infrastructure/db/database"
	"github.com/spectre-project/spectred/infrastructure/logger"
	"github.com/spectre-project/spectred/util/staging"
)

// pruningManager resolves and manages the current pruning point
type pruningManager struct {
	databaseContext model.DBManager

	dagTraversalManager   model.DAGTraversalManager
	dagTopologyManager    model.DAGTopologyManager
	consensusStateManager model.ConsensusStateManager
	finalityManager       model.FinalityManager

	consensusStateStore                 model.ConsensusStateStore
	ghostdagDataStore                   model.GHOSTDAGDataStore
	pruningStore                        model.PruningStore
	blockStatusStore                    model.BlockStatusStore
	headerSelectedTipStore              model.HeaderSelectedTipStore
	blocksWithTrustedDataDAAWindowStore model.BlocksWithTrustedDataDAAWindowStore
	multiSetStore                       model.MultisetStore
	acceptanceDataStore                 model.AcceptanceDataStore
	blocksStore                         model.BlockStore
	blockHeaderStore                    model.BlockHeaderStore
	utxoDiffStore                       model.UTXODiffStore
	daaBlocksStore                      model.DAABlocksStore
	reachabilityDataStore               model.ReachabilityDataStore

	isArchivalNode                  bool
	genesisHash                     *externalapi.DomainHash
	finalityInterval                uint64
	pruningDepth                    uint64
	shouldSanityCheckPruningUTXOSet bool
	k                               externalapi.KType
	difficultyAdjustmentWindowSize  int

	// Memoized pruning point and its anticone (invalidation/population
	// happens elsewhere in this file).
	cachedPruningPoint         *externalapi.DomainHash
	cachedPruningPointAnticone []*externalapi.DomainHash
}

// New instantiates a new PruningManager
func New(
	databaseContext model.DBManager,

	dagTraversalManager model.DAGTraversalManager,
	dagTopologyManager model.DAGTopologyManager,
	consensusStateManager model.ConsensusStateManager,
	finalityManager model.FinalityManager,

	consensusStateStore model.ConsensusStateStore,
	ghostdagDataStore model.GHOSTDAGDataStore,
	pruningStore model.PruningStore,
	blockStatusStore model.BlockStatusStore,
	headerSelectedTipStore model.HeaderSelectedTipStore,
	multiSetStore model.MultisetStore,
	acceptanceDataStore model.AcceptanceDataStore,
	blocksStore model.BlockStore,
	blockHeaderStore model.BlockHeaderStore,
	utxoDiffStore model.UTXODiffStore,
	daaBlocksStore model.DAABlocksStore,
	reachabilityDataStore model.ReachabilityDataStore,
	blocksWithTrustedDataDAAWindowStore model.BlocksWithTrustedDataDAAWindowStore,

	isArchivalNode bool,
	genesisHash *externalapi.DomainHash,
	finalityInterval uint64,
	pruningDepth uint64,
	shouldSanityCheckPruningUTXOSet bool,
	k externalapi.KType,
	difficultyAdjustmentWindowSize int,
) model.PruningManager {

	return &pruningManager{
		databaseContext:       databaseContext,
		dagTraversalManager:   dagTraversalManager,
		dagTopologyManager:    dagTopologyManager,
		consensusStateManager: consensusStateManager,
		finalityManager:       finalityManager,

		consensusStateStore:                 consensusStateStore,
		ghostdagDataStore:                   ghostdagDataStore,
		pruningStore:                        pruningStore,
		blockStatusStore:                    blockStatusStore,
		multiSetStore:                       multiSetStore,
		acceptanceDataStore:                 acceptanceDataStore,
		blocksStore:                         blocksStore,
		blockHeaderStore:                    blockHeaderStore,
		utxoDiffStore:                       utxoDiffStore,
		headerSelectedTipStore:              headerSelectedTipStore,
		daaBlocksStore:                      daaBlocksStore,
		reachabilityDataStore:               reachabilityDataStore,
		blocksWithTrustedDataDAAWindowStore: blocksWithTrustedDataDAAWindowStore,

		isArchivalNode:                  isArchivalNode,
		genesisHash:                     genesisHash,
		pruningDepth:                    pruningDepth,
		finalityInterval:                finalityInterval,
		shouldSanityCheckPruningUTXOSet: shouldSanityCheckPruningUTXOSet,
		k:                               k,
		difficultyAdjustmentWindowSize:  difficultyAdjustmentWindowSize,
	}
}

// UpdatePruningPointByVirtual recomputes the pruning point and candidate from
// the virtual's selected parent and stages any movement. The pruning point may
// advance by at most one finality interval per call.
func (pm *pruningManager) UpdatePruningPointByVirtual(stagingArea *model.StagingArea) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.UpdatePruningPointByVirtual")
	defer onEnd()
	hasPruningPoint, err := pm.pruningStore.HasPruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return err
	}

	if !hasPruningPoint {
		hasGenesis, err := pm.blocksStore.HasBlock(pm.databaseContext, stagingArea, pm.genesisHash)
		if err != nil {
			return err
		}

		if hasGenesis {
			err = pm.savePruningPoint(stagingArea, pm.genesisHash)
			if err != nil {
				return err
			}
		}

		// The pruning point should initially be set manually on a pruned-headers node.
		return nil
	}

	virtualGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, model.VirtualBlockHash, false)
	if err != nil {
		return err
	}

	// Nothing can move while the virtual still selects genesis.
	if virtualGHOSTDAGData.SelectedParent().Equal(pm.genesisHash) {
		return nil
	}

	newPruningPoint, newCandidate, err := pm.nextPruningPointAndCandidateByBlockHash(stagingArea, virtualGHOSTDAGData.SelectedParent(), nil)
	if err != nil {
		return err
	}

	currentCandidate, err := pm.pruningPointCandidate(stagingArea)
	if err != nil {
		return err
	}

	if !newCandidate.Equal(currentCandidate) {
		log.Debugf("Staged a new pruning candidate, old: %s, new: %s", currentCandidate, newCandidate)
		pm.pruningStore.StagePruningPointCandidate(stagingArea, newCandidate)
	}

	currentPruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return err
	}

	if !newPruningPoint.Equal(currentPruningPoint) {
		currentPruningPointGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningPoint, false)
		if err != nil {
			return err
		}

		newPruningPointGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, newPruningPoint, false)
		if err != nil {
			return err
		}

		// Safety check: refuse to jump more than one finality interval.
		if pm.finalityScore(newPruningPointGHOSTDAGData.BlueScore()) > pm.finalityScore(currentPruningPointGHOSTDAGData.BlueScore())+1 {
			return errors.Errorf("cannot advance pruning point by more than one finality interval at once")
		}

		log.Debugf("Moving pruning point from %s to %s", currentPruningPoint, newPruningPoint)
		err = pm.savePruningPoint(stagingArea, newPruningPoint)
		if err != nil {
			return err
		}
	}

	return nil
}

// blockIteratorFromOneBlock is a model.BlockIterator that yields exactly one
// block hash. Used when the iteration low hash equals the target block.
type blockIteratorFromOneBlock struct {
	done, isClosed bool
	hash           *externalapi.DomainHash
}

func (b *blockIteratorFromOneBlock) First() bool {
	if b.isClosed {
		panic("Tried using a closed blockIteratorFromOneBlock")
	}

	b.done = false
	return true
}

func (b *blockIteratorFromOneBlock) Next() bool {
	if b.isClosed {
		panic("Tried using a closed blockIteratorFromOneBlock")
	}

	b.done = true
	return false
}

func (b *blockIteratorFromOneBlock) Get() (*externalapi.DomainHash, error) {
	if b.isClosed {
		panic("Tried using a closed blockIteratorFromOneBlock")
	}

	return b.hash, nil
}

func (b *blockIteratorFromOneBlock) Close() error {
	if b.isClosed {
		panic("Tried using a closed blockIteratorFromOneBlock")
	}

	b.isClosed = true
	return nil
}

// nextPruningPointAndCandidateByBlockHash returns the next pruning point and
// pruning point candidate relative to blockHash (typically the virtual
// selected parent). suggestedLowHash, when non-nil, narrows the chain walk.
func (pm *pruningManager) nextPruningPointAndCandidateByBlockHash(stagingArea *model.StagingArea,
	blockHash, suggestedLowHash *externalapi.DomainHash) (*externalapi.DomainHash, *externalapi.DomainHash, error) {

	onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.nextPruningPointAndCandidateByBlockHash")
	defer onEnd()

	currentCandidate, err := pm.pruningPointCandidate(stagingArea)
	if err != nil {
		return nil, nil, err
	}

	lowHash := currentCandidate
	if suggestedLowHash != nil {
		// The suggested low hash is only usable if it shares a selected chain
		// with the current candidate; otherwise it is a programming error.
		isSuggestedLowHashInSelectedParentChainOfCurrentCandidate, err := pm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, suggestedLowHash, currentCandidate)
		if err != nil {
			return nil, nil, err
		}

		if !isSuggestedLowHashInSelectedParentChainOfCurrentCandidate {
			isCurrentCandidateInSelectedParentChainOfSuggestedLowHash, err := pm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, currentCandidate, suggestedLowHash)
			if err != nil {
				return nil, nil, err
			}

			if !isCurrentCandidateInSelectedParentChainOfSuggestedLowHash {
				panic(errors.Errorf("suggested low hash %s is not on the same selected chain as the pruning candidate %s", suggestedLowHash, currentCandidate))
			}
			lowHash = suggestedLowHash
		}
	}

	currentPruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return nil, nil, err
	}

	ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
	if err != nil {
		return nil, nil, err
	}

	currentPruningPointGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningPoint, false)
	if err != nil {
		return nil, nil, err
	}

	// We iterate until the selected parent of the given block, in order to allow a situation where the given block hash
	// belongs to the virtual. This shouldn't change anything since the max blue score difference between a block and its
	// selected parent is K, and K << pm.pruningDepth.
	var iterator model.BlockIterator
	if blockHash.Equal(lowHash) {
		iterator = &blockIteratorFromOneBlock{hash: lowHash}
	} else {
		iterator, err = pm.dagTraversalManager.SelectedChildIterator(stagingArea, ghostdagData.SelectedParent(), lowHash, true)
		if err != nil {
			return nil, nil, err
		}
	}
	defer iterator.Close()

	// Finding the next pruning point candidate: look for the latest
	// selected child of the current candidate that is in depth of at
	// least pm.pruningDepth blocks from the virtual selected parent.
	//
	// Note: Sometimes the current candidate is less than pm.pruningDepth
	// from the virtual. This can happen only if the virtual blue score
	// got smaller, because virtual blue score is not guaranteed to always
	// increase (because sometimes a block with higher blue work can have
	// lower blue score).
	// In such cases we still keep the same candidate because it's guaranteed
	// that a block that was once in depth of pm.pruningDepth cannot be
	// reorged without causing a finality conflict first.
	newCandidate := currentCandidate

	newPruningPoint := currentPruningPoint
	newPruningPointGHOSTDAGData := currentPruningPointGHOSTDAGData
	for ok := iterator.First(); ok; ok = iterator.Next() {
		selectedChild, err := iterator.Get()
		if err != nil {
			return nil, nil, err
		}
		selectedChildGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, selectedChild, false)
		if err != nil {
			return nil, nil, err
		}

		if ghostdagData.BlueScore()-selectedChildGHOSTDAGData.BlueScore() < pm.pruningDepth {
			break
		}

		newCandidate = selectedChild
		newCandidateGHOSTDAGData := selectedChildGHOSTDAGData

		// We move the pruning point every time the candidate's finality score is
		// bigger than the current pruning point finality score.
		if pm.finalityScore(newCandidateGHOSTDAGData.BlueScore()) > pm.finalityScore(newPruningPointGHOSTDAGData.BlueScore()) {
			newPruningPoint = newCandidate
			newPruningPointGHOSTDAGData = newCandidateGHOSTDAGData
		}
	}

	return newPruningPoint, newCandidate, nil
}

// isInPruningFutureOrInVirtualPast reports whether block is in the future of
// the pruning point or in the past of the virtual (checked via its parents).
func (pm *pruningManager) isInPruningFutureOrInVirtualPast(stagingArea *model.StagingArea, block *externalapi.DomainHash,
	pruningPoint *externalapi.DomainHash, virtualParents []*externalapi.DomainHash) (bool, error) {

	hasPruningPointInPast, err := pm.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, block)
	if err != nil {
		return false, err
	}
	if hasPruningPointInPast {
		return true, nil
	}
	// Because the virtual doesn't have reachability data, we need to check
	// reachability using its parents.
	isInVirtualPast, err := pm.dagTopologyManager.IsAncestorOfAny(stagingArea, block, virtualParents)
	if err != nil {
		return false, err
	}
	if isInVirtualPast {
		return true, nil
	}

	return false, nil
}

// deletePastBlocks prunes blocks in the past and the non-virtual-past anticone
// of pruningPoint, except those protected by calculateBlocksToKeep.
func (pm *pruningManager) deletePastBlocks(stagingArea *model.StagingArea, pruningPoint *externalapi.DomainHash) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.deletePastBlocks")
	defer onEnd()

	// Go over all pruningPoint.Past and pruningPoint.Anticone that's not in virtual.Past
	queue := pm.dagTraversalManager.NewDownHeap(stagingArea)
	virtualParents, err := pm.dagTopologyManager.Parents(stagingArea, model.VirtualBlockHash)
	if err != nil {
		return err
	}

	// Start queue with all tips that are below the pruning point (and on the way remove them from list of tips)
	prunedTips, err := pm.pruneTips(stagingArea, pruningPoint, virtualParents)
	if err != nil {
		return err
	}
	err = queue.PushSlice(prunedTips)
	if err != nil {
		return err
	}

	// Add pruningPoint.Parents to queue
	parents, err := pm.dagTopologyManager.Parents(stagingArea, pruningPoint)
	if err != nil {
		return err
	}

	if !virtual.ContainsOnlyVirtualGenesis(parents) {
		err = queue.PushSlice(parents)
		if err != nil {
			return err
		}
	}

	blocksToKeep, err := pm.calculateBlocksToKeep(stagingArea, pruningPoint)
	if err != nil {
		return err
	}
	err = pm.deleteBlocksDownward(stagingArea, queue, blocksToKeep)
	if err != nil {
		return err
	}

	return nil
}

// calculateBlocksToKeep returns the set of blocks protected from pruning: the
// pruning point, its anticone, and the difficulty-adjustment block window of
// each of those blocks.
func (pm *pruningManager) calculateBlocksToKeep(stagingArea *model.StagingArea,
	pruningPoint *externalapi.DomainHash) (map[externalapi.DomainHash]struct{}, error) {

	pruningPointAnticone, err := pm.dagTraversalManager.AnticoneFromVirtualPOV(stagingArea, pruningPoint)
	if err != nil {
		return nil, err
	}
	pruningPointAndItsAnticone := append(pruningPointAnticone, pruningPoint)
	blocksToKeep := make(map[externalapi.DomainHash]struct{})
	for _, blockHash := range pruningPointAndItsAnticone {
		blocksToKeep[*blockHash] = struct{}{}
		blockWindow, err := pm.dagTraversalManager.BlockWindow(stagingArea, blockHash, pm.difficultyAdjustmentWindowSize)
		if err != nil {
			return nil, err
		}
		for _, windowBlockHash := range blockWindow {
			blocksToKeep[*windowBlockHash] = struct{}{}
		}
	}
	return blocksToKeep, nil
}

// deleteBlocksDownward deletes every block reachable downward from the queue
// (highest blue score first), skipping blocksToKeep and stopping descent at
// blocks that are already pruned.
func (pm *pruningManager) deleteBlocksDownward(stagingArea *model.StagingArea,
	queue model.BlockHeap, blocksToKeep map[externalapi.DomainHash]struct{}) error {

	visited := map[externalapi.DomainHash]struct{}{}
	// Prune everything in the queue including its past, unless it's in `blocksToKeep`
	for queue.Len() > 0 {
		current := queue.Pop()
		if _, ok := visited[*current]; ok {
			continue
		}
		visited[*current] = struct{}{}

		shouldAddParents := true
		if _, ok := blocksToKeep[*current]; !ok {
			alreadyPruned, err := pm.deleteBlock(stagingArea, current)
			if err != nil {
				return err
			}
			// An already-pruned block implies its past is pruned too.
			shouldAddParents = !alreadyPruned
		}

		if shouldAddParents {
			parents, err := pm.dagTopologyManager.Parents(stagingArea, current)
			if err != nil {
				return err
			}

			if !virtual.ContainsOnlyVirtualGenesis(parents) {
				err = queue.PushSlice(parents)
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}

// pruneTips removes from the tip set every tip that is neither in the pruning
// point's future nor in the virtual's past, and returns the removed tips.
func (pm *pruningManager) pruneTips(stagingArea *model.StagingArea, pruningPoint *externalapi.DomainHash,
	virtualParents []*externalapi.DomainHash) (prunedTips []*externalapi.DomainHash, err error) {

	// Find P.AC that's not in V.Past
	dagTips, err := pm.consensusStateStore.Tips(stagingArea, pm.databaseContext)
	if err != nil {
		return nil, err
	}
	newTips := make([]*externalapi.DomainHash, 0, len(dagTips))
	for _, tip := range dagTips {
		isInPruningFutureOrInVirtualPast, err :=
			pm.isInPruningFutureOrInVirtualPast(stagingArea, tip, pruningPoint, virtualParents)
		if err != nil {
			return nil, err
		}
		if !isInPruningFutureOrInVirtualPast {
			prunedTips = append(prunedTips, tip)
		} else {
			newTips = append(newTips, tip)
		}
	}
	pm.consensusStateStore.StageTips(stagingArea, newTips)

	return prunedTips, nil
}

// savePruningPoint stages the new pruning point and marks the pruning point
// UTXO set as requiring an update.
func (pm *pruningManager) savePruningPoint(stagingArea *model.StagingArea, pruningPointHash *externalapi.DomainHash) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.savePruningPoint")
	defer onEnd()
	err := pm.pruningStore.StagePruningPoint(pm.databaseContext, stagingArea, pruningPointHash)
	if err != nil {
		return err
	}
	pm.pruningStore.StageStartUpdatingPruningPointUTXOSet(stagingArea)

	return nil
}

// deleteBlock demotes a block to header-only status and, on non-archival
// nodes, deletes its associated data. Returns alreadyPruned=true when the
// block was already header-only.
func (pm *pruningManager) deleteBlock(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (
	alreadyPruned bool, err error) {

	status, err := pm.blockStatusStore.Get(pm.databaseContext, stagingArea, blockHash)
	if err != nil {
		return false, err
	}
	if status == externalapi.StatusHeaderOnly {
		return true, nil
	}

	pm.blockStatusStore.Stage(stagingArea, blockHash, externalapi.StatusHeaderOnly)
	// Archival nodes keep block data; only the status is downgraded.
	if pm.isArchivalNode {
		return false, nil
	}

	pm.multiSetStore.Delete(stagingArea, blockHash)
	pm.acceptanceDataStore.Delete(stagingArea, blockHash)
	pm.blocksStore.Delete(stagingArea, blockHash)
	pm.utxoDiffStore.Delete(stagingArea, blockHash)
	pm.daaBlocksStore.Delete(stagingArea, blockHash)

	return false, nil
}

// IsValidPruningPoint reports whether blockHash can serve as a pruning point:
// it must be genesis, or lie on the headers-selected-tip selected chain at a
// depth of at least pm.pruningDepth.
func (pm *pruningManager) IsValidPruningPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) {
	if *pm.genesisHash == *blockHash {
		return true, nil
	}

	headersSelectedTip, err := pm.headerSelectedTipStore.HeadersSelectedTip(pm.databaseContext, stagingArea)
	if err != nil {
		return false, err
	}

	// A pruning point has to be in the selected chain of the headers selected tip.
	headersSelectedTipGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, headersSelectedTip, false)
	if err != nil {
		return false, err
	}

	isInSelectedParentChainOfHeadersSelectedTip, err :=
		pm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, blockHash, headersSelectedTip)
	if err != nil {
		return false, err
	}

	if !isInSelectedParentChainOfHeadersSelectedTip {
		return false, nil
	}

	ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
	if err != nil {
		return false, err
	}

	// A pruning point has to be at depth of at least pm.pruningDepth
	if headersSelectedTipGHOSTDAGData.BlueScore()-ghostdagData.BlueScore() < pm.pruningDepth {
		return false, nil
	}

	return true, nil
}

// ArePruningPointsViolatingFinality reports whether the given pruning point
// headers (ordered oldest-to-newest) conflict with the virtual finality point.
func (pm *pruningManager) ArePruningPointsViolatingFinality(stagingArea *model.StagingArea,
	pruningPoints []externalapi.BlockHeader) (bool, error) {

	virtualFinalityPoint, err := pm.finalityManager.VirtualFinalityPoint(stagingArea)
	if err != nil {
		return false, err
	}

	virtualFinalityPointFinalityPoint, err := pm.finalityManager.FinalityPoint(stagingArea, virtualFinalityPoint, false)
	if err != nil {
		return false, err
	}

	// We need to check if virtualFinalityPointFinalityPoint is in the selected chain of
	// the most recent known pruning point, so we iterate the pruning points from the most
	// recent one until we find a known pruning point.
	for i := len(pruningPoints) - 1; i >= 0; i-- {
		blockHash := consensushashing.HeaderHash(pruningPoints[i])
		exists, err := pm.blockStatusStore.Exists(pm.databaseContext, stagingArea, blockHash)
		if err != nil {
			return false, err
		}

		if !exists {
			continue
		}

		isInSelectedParentChainOfVirtualFinalityPointFinalityPoint, err := pm.dagTopologyManager.
			IsInSelectedParentChainOf(stagingArea, virtualFinalityPointFinalityPoint, blockHash)
		if err != nil {
			return false, err
		}

		return !isInSelectedParentChainOfVirtualFinalityPointFinalityPoint, nil
	}

	// If no pruning point is known, there's definitely a finality violation
	return true, nil
}

// ArePruningPointsInValidChain verifies the stored pruning points against the
// pruning points referenced by headers along the headers-selected-tip chain.
func (pm *pruningManager) ArePruningPointsInValidChain(stagingArea *model.StagingArea) (bool, error) {
	lastPruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return false, err
	}

	expectedPruningPoints := make([]*externalapi.DomainHash, 0)
	headersSelectedTip, err := pm.headerSelectedTipStore.HeadersSelectedTip(pm.databaseContext, stagingArea)
	if err != nil {
		return false, err
	}

	// Walk down the selected chain, collecting each distinct pruning point
	// referenced by the traversed headers.
	current := headersSelectedTip
	for !current.Equal(lastPruningPoint) {
		header, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, current)
		if err != nil {
			return false, err
		}

		if len(expectedPruningPoints) == 0 ||
			!expectedPruningPoints[len(expectedPruningPoints)-1].Equal(header.PruningPoint()) {

			expectedPruningPoints = append(expectedPruningPoints, header.PruningPoint())
		}

		currentGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, current, false)
		if err != nil {
			return false, err
		}

		current = currentGHOSTDAGData.SelectedParent()
	}

	lastPruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea)
	if err != nil
{ + return false, err + } + + for i := lastPruningPointIndex; ; i-- { + pruningPoint, err := pm.pruningStore.PruningPointByIndex(pm.databaseContext, stagingArea, i) + if err != nil { + return false, err + } + + header, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPoint) + if err != nil { + return false, err + } + + var expectedPruningPoint *externalapi.DomainHash + expectedPruningPoint, expectedPruningPoints = expectedPruningPoints[0], expectedPruningPoints[1:] + if !pruningPoint.Equal(expectedPruningPoint) { + return false, nil + } + + if i == 0 { + if len(expectedPruningPoints) != 0 { + return false, nil + } + if !pruningPoint.Equal(pm.genesisHash) { + return false, nil + } + break + } + + if !expectedPruningPoints[len(expectedPruningPoints)-1].Equal(header.PruningPoint()) { + expectedPruningPoints = append(expectedPruningPoints, header.PruningPoint()) + } + } + + return true, nil +} + +func (pm *pruningManager) pruningPointCandidate(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + hasPruningPointCandidate, err := pm.pruningStore.HasPruningPointCandidate(pm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if !hasPruningPointCandidate { + return pm.genesisHash, nil + } + + return pm.pruningStore.PruningPointCandidate(pm.databaseContext, stagingArea) +} + +// validateUTXOSetFitsCommitment makes sure that the calculated UTXOSet of the new pruning point fits the commitment. +// This is a sanity test, to make sure that spectred doesn't store, and subsequently sends syncing peers the wrong UTXOSet. 
func (pm *pruningManager) validateUTXOSetFitsCommitment(stagingArea *model.StagingArea, pruningPointHash *externalapi.DomainHash) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.validateUTXOSetFitsCommitment")
	defer onEnd()

	utxoSetIterator, err := pm.pruningStore.PruningPointUTXOIterator(pm.databaseContext)
	if err != nil {
		return err
	}
	defer utxoSetIterator.Close()

	// Fold every serialized UTXO of the stored pruning point UTXO set into a
	// multiset; its hash must equal the commitment in the pruning point's header.
	utxoSetMultiset := multiset.New()
	for ok := utxoSetIterator.First(); ok; ok = utxoSetIterator.Next() {
		outpoint, entry, err := utxoSetIterator.Get()
		if err != nil {
			return err
		}
		serializedUTXO, err := utxo.SerializeUTXO(entry, outpoint)
		if err != nil {
			return err
		}
		utxoSetMultiset.Add(serializedUTXO)
	}
	utxoSetHash := utxoSetMultiset.Hash()

	header, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointHash)
	if err != nil {
		return err
	}
	expectedUTXOCommitment := header.UTXOCommitment()

	if !expectedUTXOCommitment.Equal(utxoSetHash) {
		return errors.Errorf("Calculated UTXOSet for next pruning point %s doesn't match it's UTXO commitment\n"+
			"Calculated UTXOSet hash: %s. Commitment: %s",
			pruningPointHash, utxoSetHash, expectedUTXOCommitment)
	}

	log.Debugf("Validated the pruning point %s UTXO commitment: %s", pruningPointHash, utxoSetHash)

	return nil
}

// This function takes 2 points (currentPruningHash, previousPruningHash) and traverses the UTXO diff children DAG
// until it finds a common descendant; in the worst case this descendant will be the current SelectedTip.
// It then creates 2 diffs, one from that descendant to previousPruningHash and another from that descendant to currentPruningHash,
// then using `DiffFrom` it converts these 2 diffs to a single diff from previousPruningHash to currentPruningHash.
// This should be the fastest way to get the difference between the 2 points, and should perform much better than restoring the full UTXO set.
+func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPoints(stagingArea *model.StagingArea, currentPruningHash *externalapi.DomainHash) (externalapi.UTXODiff, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.calculateDiffBetweenPreviousAndCurrentPruningPoints") + defer onEnd() + if currentPruningHash.Equal(pm.genesisHash) { + iter, err := pm.consensusStateManager.RestorePastUTXOSetIterator(stagingArea, currentPruningHash) + if err != nil { + return nil, err + } + set := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry) + for ok := iter.First(); ok; ok = iter.Next() { + outpoint, entry, err := iter.Get() + if err != nil { + return nil, err + } + set[*outpoint] = entry + } + return utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(set), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry))) + } + + pruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if pruningPointIndex == 0 { + return nil, errors.Errorf("previous pruning point doesn't exist") + } + + previousPruningHash, err := pm.pruningStore.PruningPointByIndex(pm.databaseContext, stagingArea, pruningPointIndex-1) + if err != nil { + return nil, err + } + currentPruningGhostDAG, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningHash, false) + if err != nil { + return nil, err + } + previousPruningGhostDAG, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, previousPruningHash, false) + if err != nil { + return nil, err + } + + currentPruningCurrentDiffChild := currentPruningHash + previousPruningCurrentDiffChild := previousPruningHash + // We need to use BlueWork because it's the only thing that's monotonic in the whole DAG + // We use the BlueWork to know which point is currently lower on the DAG so we can keep climbing its children, + // that way we keep climbing on the lowest point until they 
both reach the exact same descendant + currentPruningCurrentDiffChildBlueWork := currentPruningGhostDAG.BlueWork() + previousPruningCurrentDiffChildBlueWork := previousPruningGhostDAG.BlueWork() + + var diffHashesFromPrevious []*externalapi.DomainHash + var diffHashesFromCurrent []*externalapi.DomainHash + for { + // if currentPruningCurrentDiffChildBlueWork > previousPruningCurrentDiffChildBlueWork + if currentPruningCurrentDiffChildBlueWork.Cmp(previousPruningCurrentDiffChildBlueWork) == 1 { + diffHashesFromPrevious = append(diffHashesFromPrevious, previousPruningCurrentDiffChild) + previousPruningCurrentDiffChild, err = pm.utxoDiffStore.UTXODiffChild(pm.databaseContext, stagingArea, previousPruningCurrentDiffChild) + if err != nil { + return nil, err + } + diffChildGhostDag, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, previousPruningCurrentDiffChild, false) + if err != nil { + return nil, err + } + previousPruningCurrentDiffChildBlueWork = diffChildGhostDag.BlueWork() + } else if currentPruningCurrentDiffChild.Equal(previousPruningCurrentDiffChild) { + break + } else { + diffHashesFromCurrent = append(diffHashesFromCurrent, currentPruningCurrentDiffChild) + currentPruningCurrentDiffChild, err = pm.utxoDiffStore.UTXODiffChild(pm.databaseContext, stagingArea, currentPruningCurrentDiffChild) + if err != nil { + return nil, err + } + diffChildGhostDag, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, currentPruningCurrentDiffChild, false) + if err != nil { + return nil, err + } + currentPruningCurrentDiffChildBlueWork = diffChildGhostDag.BlueWork() + } + } + // The order in which we apply the diffs should be from top to bottom, but we traversed from bottom to top + // so we apply the diffs in reverse order. 
+ oldDiff := utxo.NewMutableUTXODiff() + for i := len(diffHashesFromPrevious) - 1; i >= 0; i-- { + utxoDiff, err := pm.utxoDiffStore.UTXODiff(pm.databaseContext, stagingArea, diffHashesFromPrevious[i]) + if err != nil { + return nil, err + } + err = oldDiff.WithDiffInPlace(utxoDiff) + if err != nil { + return nil, err + } + } + newDiff := utxo.NewMutableUTXODiff() + for i := len(diffHashesFromCurrent) - 1; i >= 0; i-- { + utxoDiff, err := pm.utxoDiffStore.UTXODiff(pm.databaseContext, stagingArea, diffHashesFromCurrent[i]) + if err != nil { + return nil, err + } + err = newDiff.WithDiffInPlace(utxoDiff) + if err != nil { + return nil, err + } + } + return oldDiff.DiffFrom(newDiff.ToImmutable()) +} + +// This function takes 2 chain blocks (currentPruningHash, previousPruningHash) and finds +// the UTXO diff between them by iterating over acceptance data of the chain blocks in between. +func (pm *pruningManager) calculateDiffBetweenPreviousAndCurrentPruningPointsUsingAcceptanceData(stagingArea *model.StagingArea, currentPruningHash *externalapi.DomainHash) (externalapi.UTXODiff, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "pruningManager.calculateDiffBetweenPreviousAndCurrentPruningPoints__UsingAcceptanceData") + defer onEnd() + if currentPruningHash.Equal(pm.genesisHash) { + iter, err := pm.consensusStateManager.RestorePastUTXOSetIterator(stagingArea, currentPruningHash) + if err != nil { + return nil, err + } + set := make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry) + for ok := iter.First(); ok; ok = iter.Next() { + outpoint, entry, err := iter.Get() + if err != nil { + return nil, err + } + set[*outpoint] = entry + } + return utxo.NewUTXODiffFromCollections(utxo.NewUTXOCollection(set), utxo.NewUTXOCollection(make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry))) + } + + pruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if pruningPointIndex == 
0 { + return nil, errors.Errorf("previous pruning point doesn't exist") + } + + previousPruningHash, err := pm.pruningStore.PruningPointByIndex(pm.databaseContext, stagingArea, pruningPointIndex-1) + if err != nil { + return nil, err + } + + utxoDiff := utxo.NewMutableUTXODiff() + + iterator, err := pm.dagTraversalManager.SelectedChildIterator(stagingArea, currentPruningHash, previousPruningHash, false) + if err != nil { + return nil, err + } + defer iterator.Close() + + for ok := iterator.First(); ok; ok = iterator.Next() { + child, err := iterator.Get() + if err != nil { + return nil, err + } + chainBlockAcceptanceData, err := pm.acceptanceDataStore.Get(pm.databaseContext, stagingArea, child) + if err != nil { + return nil, err + } + chainBlockHeader, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, child) + if err != nil { + return nil, err + } + for _, blockAcceptanceData := range chainBlockAcceptanceData { + for _, transactionAcceptanceData := range blockAcceptanceData.TransactionAcceptanceData { + if transactionAcceptanceData.IsAccepted { + err = utxoDiff.AddTransaction(transactionAcceptanceData.Transaction, chainBlockHeader.DAAScore()) + if err != nil { + return nil, err + } + } + } + } + } + + return utxoDiff.ToImmutable(), err +} + +// finalityScore is the number of finality intervals passed since +// the given block. 
func (pm *pruningManager) finalityScore(blueScore uint64) uint64 {
	return blueScore / pm.finalityInterval
}

// ClearImportedPruningPointData removes the imported pruning point's multiset and
// its stored UTXOs.
func (pm *pruningManager) ClearImportedPruningPointData() error {
	err := pm.pruningStore.ClearImportedPruningPointMultiset(pm.databaseContext)
	if err != nil {
		return err
	}
	return pm.pruningStore.ClearImportedPruningPointUTXOs(pm.databaseContext)
}

// AppendImportedPruningPointUTXOs appends the given outpoint/UTXO-entry pairs to
// the imported pruning point UTXO set and folds them into its multiset, all inside
// a single database transaction.
func (pm *pruningManager) AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs []*externalapi.OutpointAndUTXOEntryPair) error {
	dbTx, err := pm.databaseContext.Begin()
	if err != nil {
		return err
	}
	defer dbTx.RollbackUnlessClosed()

	importedMultiset, err := pm.pruningStore.ImportedPruningPointMultiset(dbTx)
	if err != nil {
		// A not-found error simply means we start from an empty multiset.
		if !database.IsNotFoundError(err) {
			return err
		}
		importedMultiset = multiset.New()
	}
	for _, outpointAndUTXOEntryPair := range outpointAndUTXOEntryPairs {
		serializedUTXO, err := utxo.SerializeUTXO(outpointAndUTXOEntryPair.UTXOEntry, outpointAndUTXOEntryPair.Outpoint)
		if err != nil {
			return err
		}
		importedMultiset.Add(serializedUTXO)
	}
	err = pm.pruningStore.UpdateImportedPruningPointMultiset(dbTx, importedMultiset)
	if err != nil {
		return err
	}

	err = pm.pruningStore.AppendImportedPruningPointUTXOs(dbTx, outpointAndUTXOEntryPairs)
	if err != nil {
		return err
	}

	return dbTx.Commit()
}

// UpdatePruningPointIfRequired performs the pruning point UTXO set update if one
// was previously marked as started (see StageStartUpdatingPruningPointUTXOSet);
// otherwise it's a no-op. This makes an interrupted update resumable.
func (pm *pruningManager) UpdatePruningPointIfRequired() error {
	hadStartedUpdatingPruningPointUTXOSet, err := pm.pruningStore.HadStartedUpdatingPruningPointUTXOSet(pm.databaseContext)
	if err != nil {
		return err
	}
	if !hadStartedUpdatingPruningPointUTXOSet {
		return nil
	}

	log.Debugf("Pruning point UTXO set update is required")
	err = pm.updatePruningPoint()
	if err != nil {
		return err
	}
	log.Debugf("Pruning point UTXO set updated")

	return nil
}

// updatePruningPoint computes the UTXO diff up to the current pruning point
// (falling back to the acceptance-data method if the diff-child method fails),
// applies it to the stored pruning point UTXO set, optionally sanity-checks the
// result against the header commitment, deletes now-prunable past blocks, and
// commits all staged changes before clearing the "update started" flag.
func (pm *pruningManager) updatePruningPoint() error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "updatePruningPoint")
	defer onEnd()

	logger.LogMemoryStats(log, "updatePruningPoint start")
	defer logger.LogMemoryStats(log, "updatePruningPoint end")

	stagingArea := model.NewStagingArea()
	log.Debugf("Getting the pruning point")
	pruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return err
	}

	log.Debugf("Restoring the pruning point UTXO set")
	utxoSetDiff, err := pm.calculateDiffBetweenPreviousAndCurrentPruningPoints(stagingArea, pruningPoint)
	if err != nil {
		log.Infof("Calculating pruning points diff through utxo diff children failed %s. Falling back to calculation "+
			"through acceptance data", err)

		utxoSetDiff, err = pm.calculateDiffBetweenPreviousAndCurrentPruningPointsUsingAcceptanceData(stagingArea, pruningPoint)
		if err != nil {
			return err
		}
	}
	log.Debugf("Updating the pruning point UTXO set")
	err = pm.pruningStore.UpdatePruningPointUTXOSet(pm.databaseContext, utxoSetDiff)
	if err != nil {
		return err
	}
	if pm.shouldSanityCheckPruningUTXOSet && !pruningPoint.Equal(pm.genesisHash) {
		err = pm.validateUTXOSetFitsCommitment(stagingArea, pruningPoint)
		if err != nil {
			return err
		}
	}
	err = pm.deletePastBlocks(stagingArea, pruningPoint)
	if err != nil {
		return err
	}

	err = staging.CommitAllChanges(pm.databaseContext, stagingArea)
	if err != nil {
		return err
	}

	log.Debugf("Finishing updating the pruning point UTXO set")
	return pm.pruningStore.FinishUpdatingPruningPointUTXOSet(pm.databaseContext)
}

// PruneAllBlocksBelow iterates all stored block hashes and deletes the data of
// those selected by the IsAncestorOf check below.
func (pm *pruningManager) PruneAllBlocksBelow(stagingArea *model.StagingArea, pruningPointHash *externalapi.DomainHash) error {
	onEnd := logger.LogAndMeasureExecutionTime(log, "PruneAllBlocksBelow")
	defer onEnd()

	iterator, err := pm.blocksStore.AllBlockHashesIterator(pm.databaseContext)
	if err != nil {
		return err
	}
	defer iterator.Close()

	for ok := iterator.First(); ok; ok = iterator.Next() {
		blockHash, err := iterator.Get()
		if err != nil {
			return err
		}
		// NOTE(review): pruningPointHash is passed as the first (ancestor) argument,
		// which — if IsAncestorOf(a, b) means "a is an ancestor of b" — tests that
		// blockHash is in the *future* of the pruning point, contradicting the
		// variable name and the function name. Verify against the
		// DAGTopologyManager.IsAncestorOf contract before changing anything.
		isInPastOfPruningPoint, err := pm.dagTopologyManager.IsAncestorOf(stagingArea, pruningPointHash, blockHash)
		if err != nil {
			return err
		}
		if !isInPastOfPruningPoint {
			continue
		}
		_, err = pm.deleteBlock(stagingArea, blockHash)
		if err != nil {
			return err
		}
	}

	return nil
}

// PruningPointAndItsAnticone returns the current pruning point followed by its
// anticone (from the virtual's POV), the anticone sorted by ascending blue work.
// The anticone is cached per pruning point.
func (pm *pruningManager) PruningPointAndItsAnticone() ([]*externalapi.DomainHash, error) {
	onEnd := logger.LogAndMeasureExecutionTime(log, "PruningPointAndItsAnticone")
	defer onEnd()

	stagingArea := model.NewStagingArea()
	pruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return nil, err
	}

	// By the Prunality proof, the pruning point anticone is a closed set (i.e., guaranteed not to change),
	// so we can safely cache it.
	if pm.cachedPruningPoint != nil && pm.cachedPruningPoint.Equal(pruningPoint) {
		return append([]*externalapi.DomainHash{pruningPoint}, pm.cachedPruningPointAnticone...), nil
	}

	pruningPointAnticone, err := pm.dagTraversalManager.AnticoneFromVirtualPOV(stagingArea, pruningPoint)
	if err != nil {
		return nil, err
	}

	// Sorting the blocks in topological order (by blue work). Errors inside the
	// less-function cannot be returned directly, so they are captured in sortErr.
	var sortErr error
	sort.Slice(pruningPointAnticone, func(i, j int) bool {
		headerI, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[i])
		if err != nil {
			sortErr = err
			return false
		}

		headerJ, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPointAnticone[j])
		if err != nil {
			sortErr = err
			return false
		}

		return headerI.BlueWork().Cmp(headerJ.BlueWork()) < 0
	})
	if sortErr != nil {
		return nil, sortErr
	}

	pm.cachedPruningPoint = pruningPoint
	pm.cachedPruningPointAnticone = pruningPointAnticone

	// The pruning point should always come first
	return append([]*externalapi.DomainHash{pruningPoint}, pruningPointAnticone...), nil
}

// ExpectedHeaderPruningPoint returns the pruning point that the header of blockHash
// is expected to commit to: either the next/current pruning point from the block's
// own point of view, or — when that one is not deep enough — the most recent stored
// pruning point that is at pruning depth below the block, falling back to genesis.
func (pm *pruningManager) ExpectedHeaderPruningPoint(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*externalapi.DomainHash, error) {
	ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
	if err != nil {
		return nil, err
	}

	if ghostdagData.SelectedParent().Equal(pm.genesisHash) {
		return pm.genesisHash, nil
	}

	selectedParentHeader, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, ghostdagData.SelectedParent())
	if err != nil {
		return nil, err
	}

	selectedParentPruningPointHeader, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, selectedParentHeader.PruningPoint())
	if err != nil {
		return nil, err
	}

	nextOrCurrentPruningPoint := selectedParentHeader.PruningPoint()
	pruningPoint, err := pm.pruningStore.PruningPoint(pm.databaseContext, stagingArea)
	if err != nil {
		return nil, err
	}

	// If the block doesn't have the pruning in its selected chain we know for sure that it can't trigger a pruning point
	// change (we check the selected parent to take care of the case where the block is the virtual which doesn't have reachability data).
	hasPruningPointInItsSelectedChain, err := pm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, pruningPoint, ghostdagData.SelectedParent())
	if err != nil {
		return nil, err
	}

	// Note: the pruning point from the POV of the current block is the first block in its chain that is in depth of pm.pruningDepth and
	// its finality score is greater than the previous pruning point. This is why the diff between finalityScore(selectedParent.blueScore + 1) * finalityInterval
	// and the current block blue score is less than pm.pruningDepth we can know for sure that this block didn't trigger a pruning point change.
	minRequiredBlueScoreForNextPruningPoint := (pm.finalityScore(selectedParentPruningPointHeader.BlueScore()) + 1) * pm.finalityInterval

	if hasPruningPointInItsSelectedChain &&
		minRequiredBlueScoreForNextPruningPoint+pm.pruningDepth <= ghostdagData.BlueScore() {
		var suggestedLowHash *externalapi.DomainHash
		hasReachabilityData, err := pm.reachabilityDataStore.HasReachabilityData(pm.databaseContext, stagingArea, selectedParentHeader.PruningPoint())
		if err != nil {
			return nil, err
		}

		if hasReachabilityData {
			// nextPruningPointAndCandidateByBlockHash needs suggestedLowHash to be in the future of the pruning point because
			// otherwise reachability selected chain data is unreliable.
			isInFutureOfCurrentPruningPoint, err := pm.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, selectedParentHeader.PruningPoint())
			if err != nil {
				return nil, err
			}
			if isInFutureOfCurrentPruningPoint {
				suggestedLowHash = selectedParentHeader.PruningPoint()
			}
		}

		nextOrCurrentPruningPoint, _, err = pm.nextPruningPointAndCandidateByBlockHash(stagingArea, blockHash, suggestedLowHash)
		if err != nil {
			return nil, err
		}
	}

	isHeaderPruningPoint, err := pm.isPruningPointInPruningDepth(stagingArea, blockHash, nextOrCurrentPruningPoint)
	if err != nil {
		return nil, err
	}

	if isHeaderPruningPoint {
		return nextOrCurrentPruningPoint, nil
	}

	// Fall back: scan stored pruning points from most recent to oldest for one
	// that is deep enough below the block.
	pruningPointIndex, err := pm.pruningStore.CurrentPruningPointIndex(pm.databaseContext, stagingArea)
	if err != nil {
		return nil, err
	}

	for i := pruningPointIndex; ; i-- {
		currentPruningPoint, err := pm.pruningStore.PruningPointByIndex(pm.databaseContext, stagingArea, i)
		if err != nil {
			return nil, err
		}

		isHeaderPruningPoint, err := pm.isPruningPointInPruningDepth(stagingArea, blockHash, currentPruningPoint)
		if err != nil {
			return nil, err
		}

		if isHeaderPruningPoint {
			return currentPruningPoint, nil
		}

		if i == 0 {
			break
		}
	}

	return pm.genesisHash, nil
}

// isPruningPointInPruningDepth reports whether pruningPoint's blue score is at
// least pm.pruningDepth below blockHash's blue score.
func (pm *pruningManager) isPruningPointInPruningDepth(stagingArea *model.StagingArea, blockHash, pruningPoint *externalapi.DomainHash) (bool, error) {
	pruningPointHeader, err := pm.blockHeaderStore.BlockHeader(pm.databaseContext, stagingArea, pruningPoint)
	if err != nil {
		return false, err
	}

	blockGHOSTDAGData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, blockHash, false)
	if err != nil {
		return false, err
	}

	return blockGHOSTDAGData.BlueScore() >= pruningPointHeader.BlueScore()+pm.pruningDepth, nil
}

// TrustedBlockAssociatedGHOSTDAGDataBlockHashes returns blockHash followed by up to
// pm.k selected-parent ancestors, switching permanently to trusted-data GHOSTDAG
// data once the regular data is missing or its selected parent is the virtual
// genesis. The walk stops at genesis.
func (pm *pruningManager) TrustedBlockAssociatedGHOSTDAGDataBlockHashes(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) {
	blockHashes := make([]*externalapi.DomainHash, 0, pm.k)
	current := blockHash
	isTrustedData := false
	for i := externalapi.KType(0); i <= pm.k; i++ {
		ghostdagData, err := pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, current, isTrustedData)
		isNotFoundError := database.IsNotFoundError(err)
		if !isNotFoundError && err != nil {
			return nil, err
		}
		if isNotFoundError || ghostdagData.SelectedParent().Equal(model.VirtualGenesisBlockHash) {
			isTrustedData = true
			ghostdagData, err = pm.ghostdagDataStore.Get(pm.databaseContext, stagingArea, current, true)
			if err != nil {
				return nil, err
			}
		}

		blockHashes = append(blockHashes, current)

		if ghostdagData.SelectedParent().Equal(pm.genesisHash) {
			break
		}

		if current.Equal(pm.genesisHash) {
			break
		}

		current = ghostdagData.SelectedParent()
	}

	return blockHashes, nil
}
diff --git a/domain/consensus/processes/pruningmanager/testdata/chain-for-test-pruning.json b/domain/consensus/processes/pruningmanager/testdata/chain-for-test-pruning.json
new file mode 100644
index 0000000..9fe6961
--- /dev/null
+++ b/domain/consensus/processes/pruningmanager/testdata/chain-for-test-pruning.json
@@ -0,0 +1,12004 @@
+{
+  "mergeSetSizeLimit": 5,
+  "finalityDepth": 7,
"blocks": [ + { + "id": "0", + "parents": [] + }, + { + "id": "1", + "parents": [ + "0" + ] + }, + { + "id": "2", + "parents": [ + "1" + ] + }, + { + "id": "3", + "parents": [ + "2" + ] + }, + { + "id": "4", + "parents": [ + "3" + ] + }, + { + "id": "5", + "parents": [ + "4" + ] + }, + { + "id": "6", + "parents": [ + "5" + ] + }, + { + "id": "7", + "parents": [ + "6" + ] + }, + { + "id": "8", + "parents": [ + "7" + ] + }, + { + "id": "9", + "parents": [ + "8" + ] + }, + { + "id": "10", + "parents": [ + "9" + ] + }, + { + "id": "11", + "parents": [ + "10" + ] + }, + { + "id": "12", + "parents": [ + "11" + ] + }, + { + "id": "13", + "parents": [ + "12" + ] + }, + { + "id": "14", + "parents": [ + "13" + ] + }, + { + "id": "15", + "parents": [ + "14" + ] + }, + { + "id": "16", + "parents": [ + "15" + ] + }, + { + "id": "17", + "parents": [ + "16" + ] + }, + { + "id": "18", + "parents": [ + "17" + ] + }, + { + "id": "19", + "parents": [ + "18" + ] + }, + { + "id": "20", + "parents": [ + "19" + ] + }, + { + "id": "21", + "parents": [ + "20" + ] + }, + { + "id": "22", + "parents": [ + "21" + ] + }, + { + "id": "23", + "parents": [ + "22" + ] + }, + { + "id": "24", + "parents": [ + "23" + ] + }, + { + "id": "25", + "parents": [ + "24" + ] + }, + { + "id": "26", + "parents": [ + "25" + ] + }, + { + "id": "27", + "parents": [ + "26" + ] + }, + { + "id": "28", + "parents": [ + "27" + ] + }, + { + "id": "29", + "parents": [ + "28" + ] + }, + { + "id": "30", + "parents": [ + "29" + ] + }, + { + "id": "31", + "parents": [ + "30" + ] + }, + { + "id": "32", + "parents": [ + "31" + ] + }, + { + "id": "33", + "parents": [ + "32" + ] + }, + { + "id": "34", + "parents": [ + "33" + ] + }, + { + "id": "35", + "parents": [ + "34" + ] + }, + { + "id": "36", + "parents": [ + "35" + ] + }, + { + "id": "37", + "parents": [ + "36" + ] + }, + { + "id": "38", + "parents": [ + "37" + ] + }, + { + "id": "39", + "parents": [ + "38" + ] + }, + { + "id": "40", + "parents": [ + "39" + ] + }, + { + 
"id": "41", + "parents": [ + "40" + ] + }, + { + "id": "42", + "parents": [ + "41" + ] + }, + { + "id": "43", + "parents": [ + "42" + ] + }, + { + "id": "44", + "parents": [ + "43" + ] + }, + { + "id": "45", + "parents": [ + "44" + ] + }, + { + "id": "46", + "parents": [ + "45" + ] + }, + { + "id": "47", + "parents": [ + "46" + ] + }, + { + "id": "48", + "parents": [ + "47" + ] + }, + { + "id": "49", + "parents": [ + "48" + ] + }, + { + "id": "50", + "parents": [ + "49" + ] + }, + { + "id": "51", + "parents": [ + "50" + ] + }, + { + "id": "52", + "parents": [ + "51" + ] + }, + { + "id": "53", + "parents": [ + "52" + ] + }, + { + "id": "54", + "parents": [ + "53" + ] + }, + { + "id": "55", + "parents": [ + "54" + ] + }, + { + "id": "56", + "parents": [ + "55" + ] + }, + { + "id": "57", + "parents": [ + "56" + ] + }, + { + "id": "58", + "parents": [ + "57" + ] + }, + { + "id": "59", + "parents": [ + "58" + ] + }, + { + "id": "60", + "parents": [ + "59" + ] + }, + { + "id": "61", + "parents": [ + "60" + ] + }, + { + "id": "62", + "parents": [ + "61" + ] + }, + { + "id": "63", + "parents": [ + "62" + ] + }, + { + "id": "64", + "parents": [ + "63" + ] + }, + { + "id": "65", + "parents": [ + "64" + ] + }, + { + "id": "66", + "parents": [ + "65" + ] + }, + { + "id": "67", + "parents": [ + "66" + ] + }, + { + "id": "68", + "parents": [ + "67" + ] + }, + { + "id": "69", + "parents": [ + "68" + ] + }, + { + "id": "70", + "parents": [ + "69" + ] + }, + { + "id": "71", + "parents": [ + "70" + ] + }, + { + "id": "72", + "parents": [ + "71" + ] + }, + { + "id": "73", + "parents": [ + "72" + ] + }, + { + "id": "74", + "parents": [ + "73" + ] + }, + { + "id": "75", + "parents": [ + "74" + ] + }, + { + "id": "76", + "parents": [ + "75" + ] + }, + { + "id": "77", + "parents": [ + "76" + ] + }, + { + "id": "78", + "parents": [ + "77" + ] + }, + { + "id": "79", + "parents": [ + "78" + ] + }, + { + "id": "80", + "parents": [ + "79" + ] + }, + { + "id": "81", + "parents": [ + "80" + ] + 
}, + { + "id": "82", + "parents": [ + "81" + ] + }, + { + "id": "83", + "parents": [ + "82" + ] + }, + { + "id": "84", + "parents": [ + "83" + ] + }, + { + "id": "85", + "parents": [ + "84" + ] + }, + { + "id": "86", + "parents": [ + "85" + ] + }, + { + "id": "87", + "parents": [ + "86" + ] + }, + { + "id": "88", + "parents": [ + "87" + ] + }, + { + "id": "89", + "parents": [ + "88" + ] + }, + { + "id": "90", + "parents": [ + "89" + ] + }, + { + "id": "91", + "parents": [ + "90" + ] + }, + { + "id": "92", + "parents": [ + "91" + ] + }, + { + "id": "93", + "parents": [ + "92" + ] + }, + { + "id": "94", + "parents": [ + "93" + ] + }, + { + "id": "95", + "parents": [ + "94" + ] + }, + { + "id": "96", + "parents": [ + "95" + ] + }, + { + "id": "97", + "parents": [ + "96" + ] + }, + { + "id": "98", + "parents": [ + "97" + ] + }, + { + "id": "99", + "parents": [ + "98" + ] + }, + { + "id": "100", + "parents": [ + "99" + ] + }, + { + "id": "101", + "parents": [ + "100" + ] + }, + { + "id": "102", + "parents": [ + "101" + ] + }, + { + "id": "103", + "parents": [ + "102" + ] + }, + { + "id": "104", + "parents": [ + "103" + ] + }, + { + "id": "105", + "parents": [ + "104" + ] + }, + { + "id": "106", + "parents": [ + "105" + ] + }, + { + "id": "107", + "parents": [ + "106" + ] + }, + { + "id": "108", + "parents": [ + "107" + ] + }, + { + "id": "109", + "parents": [ + "108" + ] + }, + { + "id": "110", + "parents": [ + "109" + ] + }, + { + "id": "111", + "parents": [ + "110" + ] + }, + { + "id": "112", + "parents": [ + "111" + ] + }, + { + "id": "113", + "parents": [ + "112" + ] + }, + { + "id": "114", + "parents": [ + "113" + ] + }, + { + "id": "115", + "parents": [ + "114" + ] + }, + { + "id": "116", + "parents": [ + "115" + ] + }, + { + "id": "117", + "parents": [ + "116" + ] + }, + { + "id": "118", + "parents": [ + "117" + ] + }, + { + "id": "119", + "parents": [ + "118" + ] + }, + { + "id": "120", + "parents": [ + "119" + ] + }, + { + "id": "121", + "parents": [ + "120" + 
] + }, + { + "id": "122", + "parents": [ + "121" + ] + }, + { + "id": "123", + "parents": [ + "122" + ] + }, + { + "id": "124", + "parents": [ + "123" + ] + }, + { + "id": "125", + "parents": [ + "124" + ] + }, + { + "id": "126", + "parents": [ + "125" + ] + }, + { + "id": "127", + "parents": [ + "126" + ] + }, + { + "id": "128", + "parents": [ + "127" + ] + }, + { + "id": "129", + "parents": [ + "128" + ] + }, + { + "id": "130", + "parents": [ + "129" + ] + }, + { + "id": "131", + "parents": [ + "130" + ] + }, + { + "id": "132", + "parents": [ + "131" + ] + }, + { + "id": "133", + "parents": [ + "132" + ] + }, + { + "id": "134", + "parents": [ + "133" + ] + }, + { + "id": "135", + "parents": [ + "134" + ] + }, + { + "id": "136", + "parents": [ + "135" + ] + }, + { + "id": "137", + "parents": [ + "136" + ] + }, + { + "id": "138", + "parents": [ + "137" + ] + }, + { + "id": "139", + "parents": [ + "138" + ] + }, + { + "id": "140", + "parents": [ + "139" + ] + }, + { + "id": "141", + "parents": [ + "140" + ] + }, + { + "id": "142", + "parents": [ + "141" + ] + }, + { + "id": "143", + "parents": [ + "142" + ] + }, + { + "id": "144", + "parents": [ + "143" + ] + }, + { + "id": "145", + "parents": [ + "144" + ] + }, + { + "id": "146", + "parents": [ + "145" + ] + }, + { + "id": "147", + "parents": [ + "146" + ] + }, + { + "id": "148", + "parents": [ + "147" + ] + }, + { + "id": "149", + "parents": [ + "148" + ] + }, + { + "id": "150", + "parents": [ + "149" + ] + }, + { + "id": "151", + "parents": [ + "150" + ] + }, + { + "id": "152", + "parents": [ + "151" + ] + }, + { + "id": "153", + "parents": [ + "152" + ] + }, + { + "id": "154", + "parents": [ + "153" + ] + }, + { + "id": "155", + "parents": [ + "154" + ] + }, + { + "id": "156", + "parents": [ + "155" + ] + }, + { + "id": "157", + "parents": [ + "156" + ] + }, + { + "id": "158", + "parents": [ + "157" + ] + }, + { + "id": "159", + "parents": [ + "158" + ] + }, + { + "id": "160", + "parents": [ + "159" + ] + }, + { 
+ "id": "161", + "parents": [ + "160" + ] + }, + { + "id": "162", + "parents": [ + "161" + ] + }, + { + "id": "163", + "parents": [ + "162" + ] + }, + { + "id": "164", + "parents": [ + "163" + ] + }, + { + "id": "165", + "parents": [ + "164" + ] + }, + { + "id": "166", + "parents": [ + "165" + ] + }, + { + "id": "167", + "parents": [ + "166" + ] + }, + { + "id": "168", + "parents": [ + "167" + ] + }, + { + "id": "169", + "parents": [ + "168" + ] + }, + { + "id": "170", + "parents": [ + "169" + ] + }, + { + "id": "171", + "parents": [ + "170" + ] + }, + { + "id": "172", + "parents": [ + "171" + ] + }, + { + "id": "173", + "parents": [ + "172" + ] + }, + { + "id": "174", + "parents": [ + "173" + ] + }, + { + "id": "175", + "parents": [ + "174" + ] + }, + { + "id": "176", + "parents": [ + "175" + ] + }, + { + "id": "177", + "parents": [ + "176" + ] + }, + { + "id": "178", + "parents": [ + "177" + ] + }, + { + "id": "179", + "parents": [ + "178" + ] + }, + { + "id": "180", + "parents": [ + "179" + ] + }, + { + "id": "181", + "parents": [ + "180" + ] + }, + { + "id": "182", + "parents": [ + "181" + ] + }, + { + "id": "183", + "parents": [ + "182" + ] + }, + { + "id": "184", + "parents": [ + "183" + ] + }, + { + "id": "185", + "parents": [ + "184" + ] + }, + { + "id": "186", + "parents": [ + "185" + ] + }, + { + "id": "187", + "parents": [ + "186" + ] + }, + { + "id": "188", + "parents": [ + "187" + ] + }, + { + "id": "189", + "parents": [ + "188" + ] + }, + { + "id": "190", + "parents": [ + "189" + ] + }, + { + "id": "191", + "parents": [ + "190" + ] + }, + { + "id": "192", + "parents": [ + "191" + ] + }, + { + "id": "193", + "parents": [ + "192" + ] + }, + { + "id": "194", + "parents": [ + "193" + ] + }, + { + "id": "195", + "parents": [ + "194" + ] + }, + { + "id": "196", + "parents": [ + "195" + ] + }, + { + "id": "197", + "parents": [ + "196" + ] + }, + { + "id": "198", + "parents": [ + "197" + ] + }, + { + "id": "199", + "parents": [ + "198" + ] + }, + { + "id": 
"200", + "parents": [ + "199" + ] + }, + { + "id": "201", + "parents": [ + "200" + ] + }, + { + "id": "202", + "parents": [ + "201" + ] + }, + { + "id": "203", + "parents": [ + "202" + ] + }, + { + "id": "204", + "parents": [ + "203" + ] + }, + { + "id": "205", + "parents": [ + "204" + ] + }, + { + "id": "206", + "parents": [ + "205" + ] + }, + { + "id": "207", + "parents": [ + "206" + ] + }, + { + "id": "208", + "parents": [ + "207" + ] + }, + { + "id": "209", + "parents": [ + "208" + ] + }, + { + "id": "210", + "parents": [ + "209" + ] + }, + { + "id": "211", + "parents": [ + "210" + ] + }, + { + "id": "212", + "parents": [ + "211" + ] + }, + { + "id": "213", + "parents": [ + "212" + ] + }, + { + "id": "214", + "parents": [ + "213" + ] + }, + { + "id": "215", + "parents": [ + "214" + ] + }, + { + "id": "216", + "parents": [ + "215" + ] + }, + { + "id": "217", + "parents": [ + "216" + ] + }, + { + "id": "218", + "parents": [ + "217" + ] + }, + { + "id": "219", + "parents": [ + "218" + ] + }, + { + "id": "220", + "parents": [ + "219" + ] + }, + { + "id": "221", + "parents": [ + "220" + ] + }, + { + "id": "222", + "parents": [ + "221" + ] + }, + { + "id": "223", + "parents": [ + "222" + ] + }, + { + "id": "224", + "parents": [ + "223" + ] + }, + { + "id": "225", + "parents": [ + "224" + ] + }, + { + "id": "226", + "parents": [ + "225" + ] + }, + { + "id": "227", + "parents": [ + "226" + ] + }, + { + "id": "228", + "parents": [ + "227" + ] + }, + { + "id": "229", + "parents": [ + "228" + ] + }, + { + "id": "230", + "parents": [ + "229" + ] + }, + { + "id": "231", + "parents": [ + "230" + ] + }, + { + "id": "232", + "parents": [ + "231" + ] + }, + { + "id": "233", + "parents": [ + "232" + ] + }, + { + "id": "234", + "parents": [ + "233" + ] + }, + { + "id": "235", + "parents": [ + "234" + ] + }, + { + "id": "236", + "parents": [ + "235" + ] + }, + { + "id": "237", + "parents": [ + "236" + ] + }, + { + "id": "238", + "parents": [ + "237" + ] + }, + { + "id": "239", + 
"parents": [ + "238" + ] + }, + { + "id": "240", + "parents": [ + "239" + ] + }, + { + "id": "241", + "parents": [ + "240" + ] + }, + { + "id": "242", + "parents": [ + "241" + ] + }, + { + "id": "243", + "parents": [ + "242" + ] + }, + { + "id": "244", + "parents": [ + "243" + ] + }, + { + "id": "245", + "parents": [ + "244" + ] + }, + { + "id": "246", + "parents": [ + "245" + ] + }, + { + "id": "247", + "parents": [ + "246" + ] + }, + { + "id": "248", + "parents": [ + "247" + ] + }, + { + "id": "249", + "parents": [ + "248" + ] + }, + { + "id": "250", + "parents": [ + "249" + ] + }, + { + "id": "251", + "parents": [ + "250" + ] + }, + { + "id": "252", + "parents": [ + "251" + ] + }, + { + "id": "253", + "parents": [ + "252" + ] + }, + { + "id": "254", + "parents": [ + "253" + ] + }, + { + "id": "255", + "parents": [ + "254" + ] + }, + { + "id": "256", + "parents": [ + "255" + ] + }, + { + "id": "257", + "parents": [ + "256" + ] + }, + { + "id": "258", + "parents": [ + "257" + ] + }, + { + "id": "259", + "parents": [ + "258" + ] + }, + { + "id": "260", + "parents": [ + "259" + ] + }, + { + "id": "261", + "parents": [ + "260" + ] + }, + { + "id": "262", + "parents": [ + "261" + ] + }, + { + "id": "263", + "parents": [ + "262" + ] + }, + { + "id": "264", + "parents": [ + "263" + ] + }, + { + "id": "265", + "parents": [ + "264" + ] + }, + { + "id": "266", + "parents": [ + "265" + ] + }, + { + "id": "267", + "parents": [ + "266" + ] + }, + { + "id": "268", + "parents": [ + "267" + ] + }, + { + "id": "269", + "parents": [ + "268" + ] + }, + { + "id": "270", + "parents": [ + "269" + ] + }, + { + "id": "271", + "parents": [ + "270" + ] + }, + { + "id": "272", + "parents": [ + "271" + ] + }, + { + "id": "273", + "parents": [ + "272" + ] + }, + { + "id": "274", + "parents": [ + "273" + ] + }, + { + "id": "275", + "parents": [ + "274" + ] + }, + { + "id": "276", + "parents": [ + "275" + ] + }, + { + "id": "277", + "parents": [ + "276" + ] + }, + { + "id": "278", + "parents": 
[ + "277" + ] + }, + { + "id": "279", + "parents": [ + "278" + ] + }, + { + "id": "280", + "parents": [ + "279" + ] + }, + { + "id": "281", + "parents": [ + "280" + ] + }, + { + "id": "282", + "parents": [ + "281" + ] + }, + { + "id": "283", + "parents": [ + "282" + ] + }, + { + "id": "284", + "parents": [ + "283" + ] + }, + { + "id": "285", + "parents": [ + "284" + ] + }, + { + "id": "286", + "parents": [ + "285" + ] + }, + { + "id": "287", + "parents": [ + "286" + ] + }, + { + "id": "288", + "parents": [ + "287" + ] + }, + { + "id": "289", + "parents": [ + "288" + ] + }, + { + "id": "290", + "parents": [ + "289" + ] + }, + { + "id": "291", + "parents": [ + "290" + ] + }, + { + "id": "292", + "parents": [ + "291" + ] + }, + { + "id": "293", + "parents": [ + "292" + ] + }, + { + "id": "294", + "parents": [ + "293" + ] + }, + { + "id": "295", + "parents": [ + "294" + ] + }, + { + "id": "296", + "parents": [ + "295" + ] + }, + { + "id": "297", + "parents": [ + "296" + ] + }, + { + "id": "298", + "parents": [ + "297" + ] + }, + { + "id": "299", + "parents": [ + "298" + ] + }, + { + "id": "300", + "parents": [ + "299" + ] + }, + { + "id": "301", + "parents": [ + "300" + ] + }, + { + "id": "302", + "parents": [ + "301" + ] + }, + { + "id": "303", + "parents": [ + "302" + ] + }, + { + "id": "304", + "parents": [ + "303" + ] + }, + { + "id": "305", + "parents": [ + "304" + ] + }, + { + "id": "306", + "parents": [ + "305" + ] + }, + { + "id": "307", + "parents": [ + "306" + ] + }, + { + "id": "308", + "parents": [ + "307" + ] + }, + { + "id": "309", + "parents": [ + "308" + ] + }, + { + "id": "310", + "parents": [ + "309" + ] + }, + { + "id": "311", + "parents": [ + "310" + ] + }, + { + "id": "312", + "parents": [ + "311" + ] + }, + { + "id": "313", + "parents": [ + "312" + ] + }, + { + "id": "314", + "parents": [ + "313" + ] + }, + { + "id": "315", + "parents": [ + "314" + ] + }, + { + "id": "316", + "parents": [ + "315" + ] + }, + { + "id": "317", + "parents": [ + "316" 
+ ] + }, + { + "id": "318", + "parents": [ + "317" + ] + }, + { + "id": "319", + "parents": [ + "318" + ] + }, + { + "id": "320", + "parents": [ + "319" + ] + }, + { + "id": "321", + "parents": [ + "320" + ] + }, + { + "id": "322", + "parents": [ + "321" + ] + }, + { + "id": "323", + "parents": [ + "322" + ] + }, + { + "id": "324", + "parents": [ + "323" + ] + }, + { + "id": "325", + "parents": [ + "324" + ] + }, + { + "id": "326", + "parents": [ + "325" + ] + }, + { + "id": "327", + "parents": [ + "326" + ] + }, + { + "id": "328", + "parents": [ + "327" + ] + }, + { + "id": "329", + "parents": [ + "328" + ] + }, + { + "id": "330", + "parents": [ + "329" + ] + }, + { + "id": "331", + "parents": [ + "330" + ] + }, + { + "id": "332", + "parents": [ + "331" + ] + }, + { + "id": "333", + "parents": [ + "332" + ] + }, + { + "id": "334", + "parents": [ + "333" + ] + }, + { + "id": "335", + "parents": [ + "334" + ] + }, + { + "id": "336", + "parents": [ + "335" + ] + }, + { + "id": "337", + "parents": [ + "336" + ] + }, + { + "id": "338", + "parents": [ + "337" + ] + }, + { + "id": "339", + "parents": [ + "338" + ] + }, + { + "id": "340", + "parents": [ + "339" + ] + }, + { + "id": "341", + "parents": [ + "340" + ] + }, + { + "id": "342", + "parents": [ + "341" + ] + }, + { + "id": "343", + "parents": [ + "342" + ] + }, + { + "id": "344", + "parents": [ + "343" + ] + }, + { + "id": "345", + "parents": [ + "344" + ] + }, + { + "id": "346", + "parents": [ + "345" + ] + }, + { + "id": "347", + "parents": [ + "346" + ] + }, + { + "id": "348", + "parents": [ + "347" + ] + }, + { + "id": "349", + "parents": [ + "348" + ] + }, + { + "id": "350", + "parents": [ + "349" + ] + }, + { + "id": "351", + "parents": [ + "350" + ] + }, + { + "id": "352", + "parents": [ + "351" + ] + }, + { + "id": "353", + "parents": [ + "352" + ] + }, + { + "id": "354", + "parents": [ + "353" + ] + }, + { + "id": "355", + "parents": [ + "354" + ] + }, + { + "id": "356", + "parents": [ + "355" + ] + }, + 
{ + "id": "357", + "parents": [ + "356" + ] + }, + { + "id": "358", + "parents": [ + "357" + ] + }, + { + "id": "359", + "parents": [ + "358" + ] + }, + { + "id": "360", + "parents": [ + "359" + ] + }, + { + "id": "361", + "parents": [ + "360" + ] + }, + { + "id": "362", + "parents": [ + "361" + ] + }, + { + "id": "363", + "parents": [ + "362" + ] + }, + { + "id": "364", + "parents": [ + "363" + ] + }, + { + "id": "365", + "parents": [ + "364" + ] + }, + { + "id": "366", + "parents": [ + "365" + ] + }, + { + "id": "367", + "parents": [ + "366" + ] + }, + { + "id": "368", + "parents": [ + "367" + ] + }, + { + "id": "369", + "parents": [ + "368" + ] + }, + { + "id": "370", + "parents": [ + "369" + ] + }, + { + "id": "371", + "parents": [ + "370" + ] + }, + { + "id": "372", + "parents": [ + "371" + ] + }, + { + "id": "373", + "parents": [ + "372" + ] + }, + { + "id": "374", + "parents": [ + "373" + ] + }, + { + "id": "375", + "parents": [ + "374" + ] + }, + { + "id": "376", + "parents": [ + "375" + ] + }, + { + "id": "377", + "parents": [ + "376" + ] + }, + { + "id": "378", + "parents": [ + "377" + ] + }, + { + "id": "379", + "parents": [ + "378" + ] + }, + { + "id": "380", + "parents": [ + "379" + ] + }, + { + "id": "381", + "parents": [ + "380" + ] + }, + { + "id": "382", + "parents": [ + "381" + ] + }, + { + "id": "383", + "parents": [ + "382" + ] + }, + { + "id": "384", + "parents": [ + "383" + ] + }, + { + "id": "385", + "parents": [ + "384" + ] + }, + { + "id": "386", + "parents": [ + "385" + ] + }, + { + "id": "387", + "parents": [ + "386" + ] + }, + { + "id": "388", + "parents": [ + "387" + ] + }, + { + "id": "389", + "parents": [ + "388" + ] + }, + { + "id": "390", + "parents": [ + "389" + ] + }, + { + "id": "391", + "parents": [ + "390" + ] + }, + { + "id": "392", + "parents": [ + "391" + ] + }, + { + "id": "393", + "parents": [ + "392" + ] + }, + { + "id": "394", + "parents": [ + "393" + ] + }, + { + "id": "395", + "parents": [ + "394" + ] + }, + { + "id": 
"396", + "parents": [ + "395" + ] + }, + { + "id": "397", + "parents": [ + "396" + ] + }, + { + "id": "398", + "parents": [ + "397" + ] + }, + { + "id": "399", + "parents": [ + "398" + ] + }, + { + "id": "400", + "parents": [ + "399" + ] + }, + { + "id": "401", + "parents": [ + "400" + ] + }, + { + "id": "402", + "parents": [ + "401" + ] + }, + { + "id": "403", + "parents": [ + "402" + ] + }, + { + "id": "404", + "parents": [ + "403" + ] + }, + { + "id": "405", + "parents": [ + "404" + ] + }, + { + "id": "406", + "parents": [ + "405" + ] + }, + { + "id": "407", + "parents": [ + "406" + ] + }, + { + "id": "408", + "parents": [ + "407" + ] + }, + { + "id": "409", + "parents": [ + "408" + ] + }, + { + "id": "410", + "parents": [ + "409" + ] + }, + { + "id": "411", + "parents": [ + "410" + ] + }, + { + "id": "412", + "parents": [ + "411" + ] + }, + { + "id": "413", + "parents": [ + "412" + ] + }, + { + "id": "414", + "parents": [ + "413" + ] + }, + { + "id": "415", + "parents": [ + "414" + ] + }, + { + "id": "416", + "parents": [ + "415" + ] + }, + { + "id": "417", + "parents": [ + "416" + ] + }, + { + "id": "418", + "parents": [ + "417" + ] + }, + { + "id": "419", + "parents": [ + "418" + ] + }, + { + "id": "420", + "parents": [ + "419" + ] + }, + { + "id": "421", + "parents": [ + "420" + ] + }, + { + "id": "422", + "parents": [ + "421" + ] + }, + { + "id": "423", + "parents": [ + "422" + ] + }, + { + "id": "424", + "parents": [ + "423" + ] + }, + { + "id": "425", + "parents": [ + "424" + ] + }, + { + "id": "426", + "parents": [ + "425" + ] + }, + { + "id": "427", + "parents": [ + "426" + ] + }, + { + "id": "428", + "parents": [ + "427" + ] + }, + { + "id": "429", + "parents": [ + "428" + ] + }, + { + "id": "430", + "parents": [ + "429" + ] + }, + { + "id": "431", + "parents": [ + "430" + ] + }, + { + "id": "432", + "parents": [ + "431" + ] + }, + { + "id": "433", + "parents": [ + "432" + ] + }, + { + "id": "434", + "parents": [ + "433" + ] + }, + { + "id": "435", + 
"parents": [ + "434" + ] + }, + { + "id": "436", + "parents": [ + "435" + ] + }, + { + "id": "437", + "parents": [ + "436" + ] + }, + { + "id": "438", + "parents": [ + "437" + ] + }, + { + "id": "439", + "parents": [ + "438" + ] + }, + { + "id": "440", + "parents": [ + "439" + ] + }, + { + "id": "441", + "parents": [ + "440" + ] + }, + { + "id": "442", + "parents": [ + "441" + ] + }, + { + "id": "443", + "parents": [ + "442" + ] + }, + { + "id": "444", + "parents": [ + "443" + ] + }, + { + "id": "445", + "parents": [ + "444" + ] + }, + { + "id": "446", + "parents": [ + "445" + ] + }, + { + "id": "447", + "parents": [ + "446" + ] + }, + { + "id": "448", + "parents": [ + "447" + ] + }, + { + "id": "449", + "parents": [ + "448" + ] + }, + { + "id": "450", + "parents": [ + "449" + ] + }, + { + "id": "451", + "parents": [ + "450" + ] + }, + { + "id": "452", + "parents": [ + "451" + ] + }, + { + "id": "453", + "parents": [ + "452" + ] + }, + { + "id": "454", + "parents": [ + "453" + ] + }, + { + "id": "455", + "parents": [ + "454" + ] + }, + { + "id": "456", + "parents": [ + "455" + ] + }, + { + "id": "457", + "parents": [ + "456" + ] + }, + { + "id": "458", + "parents": [ + "457" + ] + }, + { + "id": "459", + "parents": [ + "458" + ] + }, + { + "id": "460", + "parents": [ + "459" + ] + }, + { + "id": "461", + "parents": [ + "460" + ] + }, + { + "id": "462", + "parents": [ + "461" + ] + }, + { + "id": "463", + "parents": [ + "462" + ] + }, + { + "id": "464", + "parents": [ + "463" + ] + }, + { + "id": "465", + "parents": [ + "464" + ] + }, + { + "id": "466", + "parents": [ + "465" + ] + }, + { + "id": "467", + "parents": [ + "466" + ] + }, + { + "id": "468", + "parents": [ + "467" + ] + }, + { + "id": "469", + "parents": [ + "468" + ] + }, + { + "id": "470", + "parents": [ + "469" + ] + }, + { + "id": "471", + "parents": [ + "470" + ] + }, + { + "id": "472", + "parents": [ + "471" + ] + }, + { + "id": "473", + "parents": [ + "472" + ] + }, + { + "id": "474", + "parents": 
[ + "473" + ] + }, + { + "id": "475", + "parents": [ + "474" + ] + }, + { + "id": "476", + "parents": [ + "475" + ] + }, + { + "id": "477", + "parents": [ + "476" + ] + }, + { + "id": "478", + "parents": [ + "477" + ] + }, + { + "id": "479", + "parents": [ + "478" + ] + }, + { + "id": "480", + "parents": [ + "479" + ] + }, + { + "id": "481", + "parents": [ + "480" + ] + }, + { + "id": "482", + "parents": [ + "481" + ] + }, + { + "id": "483", + "parents": [ + "482" + ] + }, + { + "id": "484", + "parents": [ + "483" + ] + }, + { + "id": "485", + "parents": [ + "484" + ] + }, + { + "id": "486", + "parents": [ + "485" + ] + }, + { + "id": "487", + "parents": [ + "486" + ] + }, + { + "id": "488", + "parents": [ + "487" + ] + }, + { + "id": "489", + "parents": [ + "488" + ] + }, + { + "id": "490", + "parents": [ + "489" + ] + }, + { + "id": "491", + "parents": [ + "490" + ] + }, + { + "id": "492", + "parents": [ + "491" + ] + }, + { + "id": "493", + "parents": [ + "492" + ] + }, + { + "id": "494", + "parents": [ + "493" + ] + }, + { + "id": "495", + "parents": [ + "494" + ] + }, + { + "id": "496", + "parents": [ + "495" + ] + }, + { + "id": "497", + "parents": [ + "496" + ] + }, + { + "id": "498", + "parents": [ + "497" + ] + }, + { + "id": "499", + "parents": [ + "498" + ] + }, + { + "id": "500", + "parents": [ + "499" + ] + }, + { + "id": "501", + "parents": [ + "500" + ] + }, + { + "id": "502", + "parents": [ + "501" + ] + }, + { + "id": "503", + "parents": [ + "502" + ] + }, + { + "id": "504", + "parents": [ + "503" + ] + }, + { + "id": "505", + "parents": [ + "504" + ] + }, + { + "id": "506", + "parents": [ + "505" + ] + }, + { + "id": "507", + "parents": [ + "506" + ] + }, + { + "id": "508", + "parents": [ + "507" + ] + }, + { + "id": "509", + "parents": [ + "508" + ] + }, + { + "id": "510", + "parents": [ + "509" + ] + }, + { + "id": "511", + "parents": [ + "510" + ] + }, + { + "id": "512", + "parents": [ + "511" + ] + }, + { + "id": "513", + "parents": [ + "512" 
+ ] + }, + { + "id": "514", + "parents": [ + "513" + ] + }, + { + "id": "515", + "parents": [ + "514" + ] + }, + { + "id": "516", + "parents": [ + "515" + ] + }, + { + "id": "517", + "parents": [ + "516" + ] + }, + { + "id": "518", + "parents": [ + "517" + ] + }, + { + "id": "519", + "parents": [ + "518" + ] + }, + { + "id": "520", + "parents": [ + "519" + ] + }, + { + "id": "521", + "parents": [ + "520" + ] + }, + { + "id": "522", + "parents": [ + "521" + ] + }, + { + "id": "523", + "parents": [ + "522" + ] + }, + { + "id": "524", + "parents": [ + "523" + ] + }, + { + "id": "525", + "parents": [ + "524" + ] + }, + { + "id": "526", + "parents": [ + "525" + ] + }, + { + "id": "527", + "parents": [ + "526" + ] + }, + { + "id": "528", + "parents": [ + "527" + ] + }, + { + "id": "529", + "parents": [ + "528" + ] + }, + { + "id": "530", + "parents": [ + "529" + ] + }, + { + "id": "531", + "parents": [ + "530" + ] + }, + { + "id": "532", + "parents": [ + "531" + ] + }, + { + "id": "533", + "parents": [ + "532" + ] + }, + { + "id": "534", + "parents": [ + "533" + ] + }, + { + "id": "535", + "parents": [ + "534" + ] + }, + { + "id": "536", + "parents": [ + "535" + ] + }, + { + "id": "537", + "parents": [ + "536" + ] + }, + { + "id": "538", + "parents": [ + "537" + ] + }, + { + "id": "539", + "parents": [ + "538" + ] + }, + { + "id": "540", + "parents": [ + "539" + ] + }, + { + "id": "541", + "parents": [ + "540" + ] + }, + { + "id": "542", + "parents": [ + "541" + ] + }, + { + "id": "543", + "parents": [ + "542" + ] + }, + { + "id": "544", + "parents": [ + "543" + ] + }, + { + "id": "545", + "parents": [ + "544" + ] + }, + { + "id": "546", + "parents": [ + "545" + ] + }, + { + "id": "547", + "parents": [ + "546" + ] + }, + { + "id": "548", + "parents": [ + "547" + ] + }, + { + "id": "549", + "parents": [ + "548" + ] + }, + { + "id": "550", + "parents": [ + "549" + ] + }, + { + "id": "551", + "parents": [ + "550" + ] + }, + { + "id": "552", + "parents": [ + "551" + ] + }, + 
{ + "id": "553", + "parents": [ + "552" + ] + }, + { + "id": "554", + "parents": [ + "553" + ] + }, + { + "id": "555", + "parents": [ + "554" + ] + }, + { + "id": "556", + "parents": [ + "555" + ] + }, + { + "id": "557", + "parents": [ + "556" + ] + }, + { + "id": "558", + "parents": [ + "557" + ] + }, + { + "id": "559", + "parents": [ + "558" + ] + }, + { + "id": "560", + "parents": [ + "559" + ] + }, + { + "id": "561", + "parents": [ + "560" + ] + }, + { + "id": "562", + "parents": [ + "561" + ] + }, + { + "id": "563", + "parents": [ + "562" + ] + }, + { + "id": "564", + "parents": [ + "563" + ] + }, + { + "id": "565", + "parents": [ + "564" + ] + }, + { + "id": "566", + "parents": [ + "565" + ] + }, + { + "id": "567", + "parents": [ + "566" + ] + }, + { + "id": "568", + "parents": [ + "567" + ] + }, + { + "id": "569", + "parents": [ + "568" + ] + }, + { + "id": "570", + "parents": [ + "569" + ] + }, + { + "id": "571", + "parents": [ + "570" + ] + }, + { + "id": "572", + "parents": [ + "571" + ] + }, + { + "id": "573", + "parents": [ + "572" + ] + }, + { + "id": "574", + "parents": [ + "573" + ] + }, + { + "id": "575", + "parents": [ + "574" + ] + }, + { + "id": "576", + "parents": [ + "575" + ] + }, + { + "id": "577", + "parents": [ + "576" + ] + }, + { + "id": "578", + "parents": [ + "577" + ] + }, + { + "id": "579", + "parents": [ + "578" + ] + }, + { + "id": "580", + "parents": [ + "579" + ] + }, + { + "id": "581", + "parents": [ + "580" + ] + }, + { + "id": "582", + "parents": [ + "581" + ] + }, + { + "id": "583", + "parents": [ + "582" + ] + }, + { + "id": "584", + "parents": [ + "583" + ] + }, + { + "id": "585", + "parents": [ + "584" + ] + }, + { + "id": "586", + "parents": [ + "585" + ] + }, + { + "id": "587", + "parents": [ + "586" + ] + }, + { + "id": "588", + "parents": [ + "587" + ] + }, + { + "id": "589", + "parents": [ + "588" + ] + }, + { + "id": "590", + "parents": [ + "589" + ] + }, + { + "id": "591", + "parents": [ + "590" + ] + }, + { + "id": 
"592", + "parents": [ + "591" + ] + }, + { + "id": "593", + "parents": [ + "592" + ] + }, + { + "id": "594", + "parents": [ + "593" + ] + }, + { + "id": "595", + "parents": [ + "594" + ] + }, + { + "id": "596", + "parents": [ + "595" + ] + }, + { + "id": "597", + "parents": [ + "596" + ] + }, + { + "id": "598", + "parents": [ + "597" + ] + }, + { + "id": "599", + "parents": [ + "598" + ] + }, + { + "id": "600", + "parents": [ + "599" + ] + }, + { + "id": "601", + "parents": [ + "600" + ] + }, + { + "id": "602", + "parents": [ + "601" + ] + }, + { + "id": "603", + "parents": [ + "602" + ] + }, + { + "id": "604", + "parents": [ + "603" + ] + }, + { + "id": "605", + "parents": [ + "604" + ] + }, + { + "id": "606", + "parents": [ + "605" + ] + }, + { + "id": "607", + "parents": [ + "606" + ] + }, + { + "id": "608", + "parents": [ + "607" + ] + }, + { + "id": "609", + "parents": [ + "608" + ] + }, + { + "id": "610", + "parents": [ + "609" + ] + }, + { + "id": "611", + "parents": [ + "610" + ] + }, + { + "id": "612", + "parents": [ + "611" + ] + }, + { + "id": "613", + "parents": [ + "612" + ] + }, + { + "id": "614", + "parents": [ + "613" + ] + }, + { + "id": "615", + "parents": [ + "614" + ] + }, + { + "id": "616", + "parents": [ + "615" + ] + }, + { + "id": "617", + "parents": [ + "616" + ] + }, + { + "id": "618", + "parents": [ + "617" + ] + }, + { + "id": "619", + "parents": [ + "618" + ] + }, + { + "id": "620", + "parents": [ + "619" + ] + }, + { + "id": "621", + "parents": [ + "620" + ] + }, + { + "id": "622", + "parents": [ + "621" + ] + }, + { + "id": "623", + "parents": [ + "622" + ] + }, + { + "id": "624", + "parents": [ + "623" + ] + }, + { + "id": "625", + "parents": [ + "624" + ] + }, + { + "id": "626", + "parents": [ + "625" + ] + }, + { + "id": "627", + "parents": [ + "626" + ] + }, + { + "id": "628", + "parents": [ + "627" + ] + }, + { + "id": "629", + "parents": [ + "628" + ] + }, + { + "id": "630", + "parents": [ + "629" + ] + }, + { + "id": "631", + 
"parents": [ + "630" + ] + }, + { + "id": "632", + "parents": [ + "631" + ] + }, + { + "id": "633", + "parents": [ + "632" + ] + }, + { + "id": "634", + "parents": [ + "633" + ] + }, + { + "id": "635", + "parents": [ + "634" + ] + }, + { + "id": "636", + "parents": [ + "635" + ] + }, + { + "id": "637", + "parents": [ + "636" + ] + }, + { + "id": "638", + "parents": [ + "637" + ] + }, + { + "id": "639", + "parents": [ + "638" + ] + }, + { + "id": "640", + "parents": [ + "639" + ] + }, + { + "id": "641", + "parents": [ + "640" + ] + }, + { + "id": "642", + "parents": [ + "641" + ] + }, + { + "id": "643", + "parents": [ + "642" + ] + }, + { + "id": "644", + "parents": [ + "643" + ] + }, + { + "id": "645", + "parents": [ + "644" + ] + }, + { + "id": "646", + "parents": [ + "645" + ] + }, + { + "id": "647", + "parents": [ + "646" + ] + }, + { + "id": "648", + "parents": [ + "647" + ] + }, + { + "id": "649", + "parents": [ + "648" + ] + }, + { + "id": "650", + "parents": [ + "649" + ] + }, + { + "id": "651", + "parents": [ + "650" + ] + }, + { + "id": "652", + "parents": [ + "651" + ] + }, + { + "id": "653", + "parents": [ + "652" + ] + }, + { + "id": "654", + "parents": [ + "653" + ] + }, + { + "id": "655", + "parents": [ + "654" + ] + }, + { + "id": "656", + "parents": [ + "655" + ] + }, + { + "id": "657", + "parents": [ + "656" + ] + }, + { + "id": "658", + "parents": [ + "657" + ] + }, + { + "id": "659", + "parents": [ + "658" + ] + }, + { + "id": "660", + "parents": [ + "659" + ] + }, + { + "id": "661", + "parents": [ + "660" + ] + }, + { + "id": "662", + "parents": [ + "661" + ] + }, + { + "id": "663", + "parents": [ + "662" + ] + }, + { + "id": "664", + "parents": [ + "663" + ] + }, + { + "id": "665", + "parents": [ + "664" + ] + }, + { + "id": "666", + "parents": [ + "665" + ] + }, + { + "id": "667", + "parents": [ + "666" + ] + }, + { + "id": "668", + "parents": [ + "667" + ] + }, + { + "id": "669", + "parents": [ + "668" + ] + }, + { + "id": "670", + "parents": 
[ + "669" + ] + }, + { + "id": "671", + "parents": [ + "670" + ] + }, + { + "id": "672", + "parents": [ + "671" + ] + }, + { + "id": "673", + "parents": [ + "672" + ] + }, + { + "id": "674", + "parents": [ + "673" + ] + }, + { + "id": "675", + "parents": [ + "674" + ] + }, + { + "id": "676", + "parents": [ + "675" + ] + }, + { + "id": "677", + "parents": [ + "676" + ] + }, + { + "id": "678", + "parents": [ + "677" + ] + }, + { + "id": "679", + "parents": [ + "678" + ] + }, + { + "id": "680", + "parents": [ + "679" + ] + }, + { + "id": "681", + "parents": [ + "680" + ] + }, + { + "id": "682", + "parents": [ + "681" + ] + }, + { + "id": "683", + "parents": [ + "682" + ] + }, + { + "id": "684", + "parents": [ + "683" + ] + }, + { + "id": "685", + "parents": [ + "684" + ] + }, + { + "id": "686", + "parents": [ + "685" + ] + }, + { + "id": "687", + "parents": [ + "686" + ] + }, + { + "id": "688", + "parents": [ + "687" + ] + }, + { + "id": "689", + "parents": [ + "688" + ] + }, + { + "id": "690", + "parents": [ + "689" + ] + }, + { + "id": "691", + "parents": [ + "690" + ] + }, + { + "id": "692", + "parents": [ + "691" + ] + }, + { + "id": "693", + "parents": [ + "692" + ] + }, + { + "id": "694", + "parents": [ + "693" + ] + }, + { + "id": "695", + "parents": [ + "694" + ] + }, + { + "id": "696", + "parents": [ + "695" + ] + }, + { + "id": "697", + "parents": [ + "696" + ] + }, + { + "id": "698", + "parents": [ + "697" + ] + }, + { + "id": "699", + "parents": [ + "698" + ] + }, + { + "id": "700", + "parents": [ + "699" + ] + }, + { + "id": "701", + "parents": [ + "700" + ] + }, + { + "id": "702", + "parents": [ + "701" + ] + }, + { + "id": "703", + "parents": [ + "702" + ] + }, + { + "id": "704", + "parents": [ + "703" + ] + }, + { + "id": "705", + "parents": [ + "704" + ] + }, + { + "id": "706", + "parents": [ + "705" + ] + }, + { + "id": "707", + "parents": [ + "706" + ] + }, + { + "id": "708", + "parents": [ + "707" + ] + }, + { + "id": "709", + "parents": [ + "708" 
+ ] + }, + { + "id": "710", + "parents": [ + "709" + ] + }, + { + "id": "711", + "parents": [ + "710" + ] + }, + { + "id": "712", + "parents": [ + "711" + ] + }, + { + "id": "713", + "parents": [ + "712" + ] + }, + { + "id": "714", + "parents": [ + "713" + ] + }, + { + "id": "715", + "parents": [ + "714" + ] + }, + { + "id": "716", + "parents": [ + "715" + ] + }, + { + "id": "717", + "parents": [ + "716" + ] + }, + { + "id": "718", + "parents": [ + "717" + ] + }, + { + "id": "719", + "parents": [ + "718" + ] + }, + { + "id": "720", + "parents": [ + "719" + ] + }, + { + "id": "721", + "parents": [ + "720" + ] + }, + { + "id": "722", + "parents": [ + "721" + ] + }, + { + "id": "723", + "parents": [ + "722" + ] + }, + { + "id": "724", + "parents": [ + "723" + ] + }, + { + "id": "725", + "parents": [ + "724" + ] + }, + { + "id": "726", + "parents": [ + "725" + ] + }, + { + "id": "727", + "parents": [ + "726" + ] + }, + { + "id": "728", + "parents": [ + "727" + ] + }, + { + "id": "729", + "parents": [ + "728" + ] + }, + { + "id": "730", + "parents": [ + "729" + ] + }, + { + "id": "731", + "parents": [ + "730" + ] + }, + { + "id": "732", + "parents": [ + "731" + ] + }, + { + "id": "733", + "parents": [ + "732" + ] + }, + { + "id": "734", + "parents": [ + "733" + ] + }, + { + "id": "735", + "parents": [ + "734" + ] + }, + { + "id": "736", + "parents": [ + "735" + ] + }, + { + "id": "737", + "parents": [ + "736" + ] + }, + { + "id": "738", + "parents": [ + "737" + ] + }, + { + "id": "739", + "parents": [ + "738" + ] + }, + { + "id": "740", + "parents": [ + "739" + ] + }, + { + "id": "741", + "parents": [ + "740" + ] + }, + { + "id": "742", + "parents": [ + "741" + ] + }, + { + "id": "743", + "parents": [ + "742" + ] + }, + { + "id": "744", + "parents": [ + "743" + ] + }, + { + "id": "745", + "parents": [ + "744" + ] + }, + { + "id": "746", + "parents": [ + "745" + ] + }, + { + "id": "747", + "parents": [ + "746" + ] + }, + { + "id": "748", + "parents": [ + "747" + ] + }, + 
{ + "id": "749", + "parents": [ + "748" + ] + }, + { + "id": "750", + "parents": [ + "749" + ] + }, + { + "id": "751", + "parents": [ + "750" + ] + }, + { + "id": "752", + "parents": [ + "751" + ] + }, + { + "id": "753", + "parents": [ + "752" + ] + }, + { + "id": "754", + "parents": [ + "753" + ] + }, + { + "id": "755", + "parents": [ + "754" + ] + }, + { + "id": "756", + "parents": [ + "755" + ] + }, + { + "id": "757", + "parents": [ + "756" + ] + }, + { + "id": "758", + "parents": [ + "757" + ] + }, + { + "id": "759", + "parents": [ + "758" + ] + }, + { + "id": "760", + "parents": [ + "759" + ] + }, + { + "id": "761", + "parents": [ + "760" + ] + }, + { + "id": "762", + "parents": [ + "761" + ] + }, + { + "id": "763", + "parents": [ + "762" + ] + }, + { + "id": "764", + "parents": [ + "763" + ] + }, + { + "id": "765", + "parents": [ + "764" + ] + }, + { + "id": "766", + "parents": [ + "765" + ] + }, + { + "id": "767", + "parents": [ + "766" + ] + }, + { + "id": "768", + "parents": [ + "767" + ] + }, + { + "id": "769", + "parents": [ + "768" + ] + }, + { + "id": "770", + "parents": [ + "769" + ] + }, + { + "id": "771", + "parents": [ + "770" + ] + }, + { + "id": "772", + "parents": [ + "771" + ] + }, + { + "id": "773", + "parents": [ + "772" + ] + }, + { + "id": "774", + "parents": [ + "773" + ] + }, + { + "id": "775", + "parents": [ + "774" + ] + }, + { + "id": "776", + "parents": [ + "775" + ] + }, + { + "id": "777", + "parents": [ + "776" + ] + }, + { + "id": "778", + "parents": [ + "777" + ] + }, + { + "id": "779", + "parents": [ + "778" + ] + }, + { + "id": "780", + "parents": [ + "779" + ] + }, + { + "id": "781", + "parents": [ + "780" + ] + }, + { + "id": "782", + "parents": [ + "781" + ] + }, + { + "id": "783", + "parents": [ + "782" + ] + }, + { + "id": "784", + "parents": [ + "783" + ] + }, + { + "id": "785", + "parents": [ + "784" + ] + }, + { + "id": "786", + "parents": [ + "785" + ] + }, + { + "id": "787", + "parents": [ + "786" + ] + }, + { + "id": 
"788", + "parents": [ + "787" + ] + }, + { + "id": "789", + "parents": [ + "788" + ] + }, + { + "id": "790", + "parents": [ + "789" + ] + }, + { + "id": "791", + "parents": [ + "790" + ] + }, + { + "id": "792", + "parents": [ + "791" + ] + }, + { + "id": "793", + "parents": [ + "792" + ] + }, + { + "id": "794", + "parents": [ + "793" + ] + }, + { + "id": "795", + "parents": [ + "794" + ] + }, + { + "id": "796", + "parents": [ + "795" + ] + }, + { + "id": "797", + "parents": [ + "796" + ] + }, + { + "id": "798", + "parents": [ + "797" + ] + }, + { + "id": "799", + "parents": [ + "798" + ] + }, + { + "id": "800", + "parents": [ + "799" + ] + }, + { + "id": "801", + "parents": [ + "800" + ] + }, + { + "id": "802", + "parents": [ + "801" + ] + }, + { + "id": "803", + "parents": [ + "802" + ] + }, + { + "id": "804", + "parents": [ + "803" + ] + }, + { + "id": "805", + "parents": [ + "804" + ] + }, + { + "id": "806", + "parents": [ + "805" + ] + }, + { + "id": "807", + "parents": [ + "806" + ] + }, + { + "id": "808", + "parents": [ + "807" + ] + }, + { + "id": "809", + "parents": [ + "808" + ] + }, + { + "id": "810", + "parents": [ + "809" + ] + }, + { + "id": "811", + "parents": [ + "810" + ] + }, + { + "id": "812", + "parents": [ + "811" + ] + }, + { + "id": "813", + "parents": [ + "812" + ] + }, + { + "id": "814", + "parents": [ + "813" + ] + }, + { + "id": "815", + "parents": [ + "814" + ] + }, + { + "id": "816", + "parents": [ + "815" + ] + }, + { + "id": "817", + "parents": [ + "816" + ] + }, + { + "id": "818", + "parents": [ + "817" + ] + }, + { + "id": "819", + "parents": [ + "818" + ] + }, + { + "id": "820", + "parents": [ + "819" + ] + }, + { + "id": "821", + "parents": [ + "820" + ] + }, + { + "id": "822", + "parents": [ + "821" + ] + }, + { + "id": "823", + "parents": [ + "822" + ] + }, + { + "id": "824", + "parents": [ + "823" + ] + }, + { + "id": "825", + "parents": [ + "824" + ] + }, + { + "id": "826", + "parents": [ + "825" + ] + }, + { + "id": "827", + 
"parents": [ + "826" + ] + }, + { + "id": "828", + "parents": [ + "827" + ] + }, + { + "id": "829", + "parents": [ + "828" + ] + }, + { + "id": "830", + "parents": [ + "829" + ] + }, + { + "id": "831", + "parents": [ + "830" + ] + }, + { + "id": "832", + "parents": [ + "831" + ] + }, + { + "id": "833", + "parents": [ + "832" + ] + }, + { + "id": "834", + "parents": [ + "833" + ] + }, + { + "id": "835", + "parents": [ + "834" + ] + }, + { + "id": "836", + "parents": [ + "835" + ] + }, + { + "id": "837", + "parents": [ + "836" + ] + }, + { + "id": "838", + "parents": [ + "837" + ] + }, + { + "id": "839", + "parents": [ + "838" + ] + }, + { + "id": "840", + "parents": [ + "839" + ] + }, + { + "id": "841", + "parents": [ + "840" + ] + }, + { + "id": "842", + "parents": [ + "841" + ] + }, + { + "id": "843", + "parents": [ + "842" + ] + }, + { + "id": "844", + "parents": [ + "843" + ] + }, + { + "id": "845", + "parents": [ + "844" + ] + }, + { + "id": "846", + "parents": [ + "845" + ] + }, + { + "id": "847", + "parents": [ + "846" + ] + }, + { + "id": "848", + "parents": [ + "847" + ] + }, + { + "id": "849", + "parents": [ + "848" + ] + }, + { + "id": "850", + "parents": [ + "849" + ] + }, + { + "id": "851", + "parents": [ + "850" + ] + }, + { + "id": "852", + "parents": [ + "851" + ] + }, + { + "id": "853", + "parents": [ + "852" + ] + }, + { + "id": "854", + "parents": [ + "853" + ] + }, + { + "id": "855", + "parents": [ + "854" + ] + }, + { + "id": "856", + "parents": [ + "855" + ] + }, + { + "id": "857", + "parents": [ + "856" + ] + }, + { + "id": "858", + "parents": [ + "857" + ] + }, + { + "id": "859", + "parents": [ + "858" + ] + }, + { + "id": "860", + "parents": [ + "859" + ] + }, + { + "id": "861", + "parents": [ + "860" + ] + }, + { + "id": "862", + "parents": [ + "861" + ] + }, + { + "id": "863", + "parents": [ + "862" + ] + }, + { + "id": "864", + "parents": [ + "863" + ] + }, + { + "id": "865", + "parents": [ + "864" + ] + }, + { + "id": "866", + "parents": 
[ + "865" + ] + }, + { + "id": "867", + "parents": [ + "866" + ] + }, + { + "id": "868", + "parents": [ + "867" + ] + }, + { + "id": "869", + "parents": [ + "868" + ] + }, + { + "id": "870", + "parents": [ + "869" + ] + }, + { + "id": "871", + "parents": [ + "870" + ] + }, + { + "id": "872", + "parents": [ + "871" + ] + }, + { + "id": "873", + "parents": [ + "872" + ] + }, + { + "id": "874", + "parents": [ + "873" + ] + }, + { + "id": "875", + "parents": [ + "874" + ] + }, + { + "id": "876", + "parents": [ + "875" + ] + }, + { + "id": "877", + "parents": [ + "876" + ] + }, + { + "id": "878", + "parents": [ + "877" + ] + }, + { + "id": "879", + "parents": [ + "878" + ] + }, + { + "id": "880", + "parents": [ + "879" + ] + }, + { + "id": "881", + "parents": [ + "880" + ] + }, + { + "id": "882", + "parents": [ + "881" + ] + }, + { + "id": "883", + "parents": [ + "882" + ] + }, + { + "id": "884", + "parents": [ + "883" + ] + }, + { + "id": "885", + "parents": [ + "884" + ] + }, + { + "id": "886", + "parents": [ + "885" + ] + }, + { + "id": "887", + "parents": [ + "886" + ] + }, + { + "id": "888", + "parents": [ + "887" + ] + }, + { + "id": "889", + "parents": [ + "888" + ] + }, + { + "id": "890", + "parents": [ + "889" + ] + }, + { + "id": "891", + "parents": [ + "890" + ] + }, + { + "id": "892", + "parents": [ + "891" + ] + }, + { + "id": "893", + "parents": [ + "892" + ] + }, + { + "id": "894", + "parents": [ + "893" + ] + }, + { + "id": "895", + "parents": [ + "894" + ] + }, + { + "id": "896", + "parents": [ + "895" + ] + }, + { + "id": "897", + "parents": [ + "896" + ] + }, + { + "id": "898", + "parents": [ + "897" + ] + }, + { + "id": "899", + "parents": [ + "898" + ] + }, + { + "id": "900", + "parents": [ + "899" + ] + }, + { + "id": "901", + "parents": [ + "900" + ] + }, + { + "id": "902", + "parents": [ + "901" + ] + }, + { + "id": "903", + "parents": [ + "902" + ] + }, + { + "id": "904", + "parents": [ + "903" + ] + }, + { + "id": "905", + "parents": [ + "904" 
+ ] + }, + { + "id": "906", + "parents": [ + "905" + ] + }, + { + "id": "907", + "parents": [ + "906" + ] + }, + { + "id": "908", + "parents": [ + "907" + ] + }, + { + "id": "909", + "parents": [ + "908" + ] + }, + { + "id": "910", + "parents": [ + "909" + ] + }, + { + "id": "911", + "parents": [ + "910" + ] + }, + { + "id": "912", + "parents": [ + "911" + ] + }, + { + "id": "913", + "parents": [ + "912" + ] + }, + { + "id": "914", + "parents": [ + "913" + ] + }, + { + "id": "915", + "parents": [ + "914" + ] + }, + { + "id": "916", + "parents": [ + "915" + ] + }, + { + "id": "917", + "parents": [ + "916" + ] + }, + { + "id": "918", + "parents": [ + "917" + ] + }, + { + "id": "919", + "parents": [ + "918" + ] + }, + { + "id": "920", + "parents": [ + "919" + ] + }, + { + "id": "921", + "parents": [ + "920" + ] + }, + { + "id": "922", + "parents": [ + "921" + ] + }, + { + "id": "923", + "parents": [ + "922" + ] + }, + { + "id": "924", + "parents": [ + "923" + ] + }, + { + "id": "925", + "parents": [ + "924" + ] + }, + { + "id": "926", + "parents": [ + "925" + ] + }, + { + "id": "927", + "parents": [ + "926" + ] + }, + { + "id": "928", + "parents": [ + "927" + ] + }, + { + "id": "929", + "parents": [ + "928" + ] + }, + { + "id": "930", + "parents": [ + "929" + ] + }, + { + "id": "931", + "parents": [ + "930" + ] + }, + { + "id": "932", + "parents": [ + "931" + ] + }, + { + "id": "933", + "parents": [ + "932" + ] + }, + { + "id": "934", + "parents": [ + "933" + ] + }, + { + "id": "935", + "parents": [ + "934" + ] + }, + { + "id": "936", + "parents": [ + "935" + ] + }, + { + "id": "937", + "parents": [ + "936" + ] + }, + { + "id": "938", + "parents": [ + "937" + ] + }, + { + "id": "939", + "parents": [ + "938" + ] + }, + { + "id": "940", + "parents": [ + "939" + ] + }, + { + "id": "941", + "parents": [ + "940" + ] + }, + { + "id": "942", + "parents": [ + "941" + ] + }, + { + "id": "943", + "parents": [ + "942" + ] + }, + { + "id": "944", + "parents": [ + "943" + ] + }, + 
{ + "id": "945", + "parents": [ + "944" + ] + }, + { + "id": "946", + "parents": [ + "945" + ] + }, + { + "id": "947", + "parents": [ + "946" + ] + }, + { + "id": "948", + "parents": [ + "947" + ] + }, + { + "id": "949", + "parents": [ + "948" + ] + }, + { + "id": "950", + "parents": [ + "949" + ] + }, + { + "id": "951", + "parents": [ + "950" + ] + }, + { + "id": "952", + "parents": [ + "951" + ] + }, + { + "id": "953", + "parents": [ + "952" + ] + }, + { + "id": "954", + "parents": [ + "953" + ] + }, + { + "id": "955", + "parents": [ + "954" + ] + }, + { + "id": "956", + "parents": [ + "955" + ] + }, + { + "id": "957", + "parents": [ + "956" + ] + }, + { + "id": "958", + "parents": [ + "957" + ] + }, + { + "id": "959", + "parents": [ + "958" + ] + }, + { + "id": "960", + "parents": [ + "959" + ] + }, + { + "id": "961", + "parents": [ + "960" + ] + }, + { + "id": "962", + "parents": [ + "961" + ] + }, + { + "id": "963", + "parents": [ + "962" + ] + }, + { + "id": "964", + "parents": [ + "963" + ] + }, + { + "id": "965", + "parents": [ + "964" + ] + }, + { + "id": "966", + "parents": [ + "965" + ] + }, + { + "id": "967", + "parents": [ + "966" + ] + }, + { + "id": "968", + "parents": [ + "967" + ] + }, + { + "id": "969", + "parents": [ + "968" + ] + }, + { + "id": "970", + "parents": [ + "969" + ] + }, + { + "id": "971", + "parents": [ + "970" + ] + }, + { + "id": "972", + "parents": [ + "971" + ] + }, + { + "id": "973", + "parents": [ + "972" + ] + }, + { + "id": "974", + "parents": [ + "973" + ] + }, + { + "id": "975", + "parents": [ + "974" + ] + }, + { + "id": "976", + "parents": [ + "975" + ] + }, + { + "id": "977", + "parents": [ + "976" + ] + }, + { + "id": "978", + "parents": [ + "977" + ] + }, + { + "id": "979", + "parents": [ + "978" + ] + }, + { + "id": "980", + "parents": [ + "979" + ] + }, + { + "id": "981", + "parents": [ + "980" + ] + }, + { + "id": "982", + "parents": [ + "981" + ] + }, + { + "id": "983", + "parents": [ + "982" + ] + }, + { + "id": 
"984", + "parents": [ + "983" + ] + }, + { + "id": "985", + "parents": [ + "984" + ] + }, + { + "id": "986", + "parents": [ + "985" + ] + }, + { + "id": "987", + "parents": [ + "986" + ] + }, + { + "id": "988", + "parents": [ + "987" + ] + }, + { + "id": "989", + "parents": [ + "988" + ] + }, + { + "id": "990", + "parents": [ + "989" + ] + }, + { + "id": "991", + "parents": [ + "990" + ] + }, + { + "id": "992", + "parents": [ + "991" + ] + }, + { + "id": "993", + "parents": [ + "992" + ] + }, + { + "id": "994", + "parents": [ + "993" + ] + }, + { + "id": "995", + "parents": [ + "994" + ] + }, + { + "id": "996", + "parents": [ + "995" + ] + }, + { + "id": "997", + "parents": [ + "996" + ] + }, + { + "id": "998", + "parents": [ + "997" + ] + }, + { + "id": "999", + "parents": [ + "998" + ] + }, + { + "id": "1000", + "parents": [ + "999" + ] + }, + { + "id": "1001", + "parents": [ + "1000" + ] + }, + { + "id": "1002", + "parents": [ + "1001" + ] + }, + { + "id": "1003", + "parents": [ + "1002" + ] + }, + { + "id": "1004", + "parents": [ + "1003" + ] + }, + { + "id": "1005", + "parents": [ + "1004" + ] + }, + { + "id": "1006", + "parents": [ + "1005" + ] + }, + { + "id": "1007", + "parents": [ + "1006" + ] + }, + { + "id": "1008", + "parents": [ + "1007" + ] + }, + { + "id": "1009", + "parents": [ + "1008" + ] + }, + { + "id": "1010", + "parents": [ + "1009" + ] + }, + { + "id": "1011", + "parents": [ + "1010" + ] + }, + { + "id": "1012", + "parents": [ + "1011" + ] + }, + { + "id": "1013", + "parents": [ + "1012" + ] + }, + { + "id": "1014", + "parents": [ + "1013" + ] + }, + { + "id": "1015", + "parents": [ + "1014" + ] + }, + { + "id": "1016", + "parents": [ + "1015" + ] + }, + { + "id": "1017", + "parents": [ + "1016" + ] + }, + { + "id": "1018", + "parents": [ + "1017" + ] + }, + { + "id": "1019", + "parents": [ + "1018" + ] + }, + { + "id": "1020", + "parents": [ + "1019" + ] + }, + { + "id": "1021", + "parents": [ + "1020" + ] + }, + { + "id": "1022", + 
"parents": [ + "1021" + ] + }, + { + "id": "1023", + "parents": [ + "1022" + ] + }, + { + "id": "1024", + "parents": [ + "1023" + ] + }, + { + "id": "1025", + "parents": [ + "1024" + ] + }, + { + "id": "1026", + "parents": [ + "1025" + ] + }, + { + "id": "1027", + "parents": [ + "1026" + ] + }, + { + "id": "1028", + "parents": [ + "1027" + ] + }, + { + "id": "1029", + "parents": [ + "1028" + ] + }, + { + "id": "1030", + "parents": [ + "1029" + ] + }, + { + "id": "1031", + "parents": [ + "1030" + ] + }, + { + "id": "1032", + "parents": [ + "1031" + ] + }, + { + "id": "1033", + "parents": [ + "1032" + ] + }, + { + "id": "1034", + "parents": [ + "1033" + ] + }, + { + "id": "1035", + "parents": [ + "1034" + ] + }, + { + "id": "1036", + "parents": [ + "1035" + ] + }, + { + "id": "1037", + "parents": [ + "1036" + ] + }, + { + "id": "1038", + "parents": [ + "1037" + ] + }, + { + "id": "1039", + "parents": [ + "1038" + ] + }, + { + "id": "1040", + "parents": [ + "1039" + ] + }, + { + "id": "1041", + "parents": [ + "1040" + ] + }, + { + "id": "1042", + "parents": [ + "1041" + ] + }, + { + "id": "1043", + "parents": [ + "1042" + ] + }, + { + "id": "1044", + "parents": [ + "1043" + ] + }, + { + "id": "1045", + "parents": [ + "1044" + ] + }, + { + "id": "1046", + "parents": [ + "1045" + ] + }, + { + "id": "1047", + "parents": [ + "1046" + ] + }, + { + "id": "1048", + "parents": [ + "1047" + ] + }, + { + "id": "1049", + "parents": [ + "1048" + ] + }, + { + "id": "1050", + "parents": [ + "1049" + ] + }, + { + "id": "1051", + "parents": [ + "1050" + ] + }, + { + "id": "1052", + "parents": [ + "1051" + ] + }, + { + "id": "1053", + "parents": [ + "1052" + ] + }, + { + "id": "1054", + "parents": [ + "1053" + ] + }, + { + "id": "1055", + "parents": [ + "1054" + ] + }, + { + "id": "1056", + "parents": [ + "1055" + ] + }, + { + "id": "1057", + "parents": [ + "1056" + ] + }, + { + "id": "1058", + "parents": [ + "1057" + ] + }, + { + "id": "1059", + "parents": [ + "1058" + ] + }, + { + 
"id": "1060", + "parents": [ + "1059" + ] + }, + { + "id": "1061", + "parents": [ + "1060" + ] + }, + { + "id": "1062", + "parents": [ + "1061" + ] + }, + { + "id": "1063", + "parents": [ + "1062" + ] + }, + { + "id": "1064", + "parents": [ + "1063" + ] + }, + { + "id": "1065", + "parents": [ + "1064" + ] + }, + { + "id": "1066", + "parents": [ + "1065" + ] + }, + { + "id": "1067", + "parents": [ + "1066" + ] + }, + { + "id": "1068", + "parents": [ + "1067" + ] + }, + { + "id": "1069", + "parents": [ + "1068" + ] + }, + { + "id": "1070", + "parents": [ + "1069" + ] + }, + { + "id": "1071", + "parents": [ + "1070" + ] + }, + { + "id": "1072", + "parents": [ + "1071" + ] + }, + { + "id": "1073", + "parents": [ + "1072" + ] + }, + { + "id": "1074", + "parents": [ + "1073" + ] + }, + { + "id": "1075", + "parents": [ + "1074" + ] + }, + { + "id": "1076", + "parents": [ + "1075" + ] + }, + { + "id": "1077", + "parents": [ + "1076" + ] + }, + { + "id": "1078", + "parents": [ + "1077" + ] + }, + { + "id": "1079", + "parents": [ + "1078" + ] + }, + { + "id": "1080", + "parents": [ + "1079" + ] + }, + { + "id": "1081", + "parents": [ + "1080" + ] + }, + { + "id": "1082", + "parents": [ + "1081" + ] + }, + { + "id": "1083", + "parents": [ + "1082" + ] + }, + { + "id": "1084", + "parents": [ + "1083" + ] + }, + { + "id": "1085", + "parents": [ + "1084" + ] + }, + { + "id": "1086", + "parents": [ + "1085" + ] + }, + { + "id": "1087", + "parents": [ + "1086" + ] + }, + { + "id": "1088", + "parents": [ + "1087" + ] + }, + { + "id": "1089", + "parents": [ + "1088" + ] + }, + { + "id": "1090", + "parents": [ + "1089" + ] + }, + { + "id": "1091", + "parents": [ + "1090" + ] + }, + { + "id": "1092", + "parents": [ + "1091" + ] + }, + { + "id": "1093", + "parents": [ + "1092" + ] + }, + { + "id": "1094", + "parents": [ + "1093" + ] + }, + { + "id": "1095", + "parents": [ + "1094" + ] + }, + { + "id": "1096", + "parents": [ + "1095" + ] + }, + { + "id": "1097", + "parents": [ + "1096" 
+ ] + }, + { + "id": "1098", + "parents": [ + "1097" + ] + }, + { + "id": "1099", + "parents": [ + "1098" + ] + }, + { + "id": "1100", + "parents": [ + "1099" + ] + }, + { + "id": "1101", + "parents": [ + "1100" + ] + }, + { + "id": "1102", + "parents": [ + "1101" + ] + }, + { + "id": "1103", + "parents": [ + "1102" + ] + }, + { + "id": "1104", + "parents": [ + "1103" + ] + }, + { + "id": "1105", + "parents": [ + "1104" + ] + }, + { + "id": "1106", + "parents": [ + "1105" + ] + }, + { + "id": "1107", + "parents": [ + "1106" + ] + }, + { + "id": "1108", + "parents": [ + "1107" + ] + }, + { + "id": "1109", + "parents": [ + "1108" + ] + }, + { + "id": "1110", + "parents": [ + "1109" + ] + }, + { + "id": "1111", + "parents": [ + "1110" + ] + }, + { + "id": "1112", + "parents": [ + "1111" + ] + }, + { + "id": "1113", + "parents": [ + "1112" + ] + }, + { + "id": "1114", + "parents": [ + "1113" + ] + }, + { + "id": "1115", + "parents": [ + "1114" + ] + }, + { + "id": "1116", + "parents": [ + "1115" + ] + }, + { + "id": "1117", + "parents": [ + "1116" + ] + }, + { + "id": "1118", + "parents": [ + "1117" + ] + }, + { + "id": "1119", + "parents": [ + "1118" + ] + }, + { + "id": "1120", + "parents": [ + "1119" + ] + }, + { + "id": "1121", + "parents": [ + "1120" + ] + }, + { + "id": "1122", + "parents": [ + "1121" + ] + }, + { + "id": "1123", + "parents": [ + "1122" + ] + }, + { + "id": "1124", + "parents": [ + "1123" + ] + }, + { + "id": "1125", + "parents": [ + "1124" + ] + }, + { + "id": "1126", + "parents": [ + "1125" + ] + }, + { + "id": "1127", + "parents": [ + "1126" + ] + }, + { + "id": "1128", + "parents": [ + "1127" + ] + }, + { + "id": "1129", + "parents": [ + "1128" + ] + }, + { + "id": "1130", + "parents": [ + "1129" + ] + }, + { + "id": "1131", + "parents": [ + "1130" + ] + }, + { + "id": "1132", + "parents": [ + "1131" + ] + }, + { + "id": "1133", + "parents": [ + "1132" + ] + }, + { + "id": "1134", + "parents": [ + "1133" + ] + }, + { + "id": "1135", + 
"parents": [ + "1134" + ] + }, + { + "id": "1136", + "parents": [ + "1135" + ] + }, + { + "id": "1137", + "parents": [ + "1136" + ] + }, + { + "id": "1138", + "parents": [ + "1137" + ] + }, + { + "id": "1139", + "parents": [ + "1138" + ] + }, + { + "id": "1140", + "parents": [ + "1139" + ] + }, + { + "id": "1141", + "parents": [ + "1140" + ] + }, + { + "id": "1142", + "parents": [ + "1141" + ] + }, + { + "id": "1143", + "parents": [ + "1142" + ] + }, + { + "id": "1144", + "parents": [ + "1143" + ] + }, + { + "id": "1145", + "parents": [ + "1144" + ] + }, + { + "id": "1146", + "parents": [ + "1145" + ] + }, + { + "id": "1147", + "parents": [ + "1146" + ] + }, + { + "id": "1148", + "parents": [ + "1147" + ] + }, + { + "id": "1149", + "parents": [ + "1148" + ] + }, + { + "id": "1150", + "parents": [ + "1149" + ] + }, + { + "id": "1151", + "parents": [ + "1150" + ] + }, + { + "id": "1152", + "parents": [ + "1151" + ] + }, + { + "id": "1153", + "parents": [ + "1152" + ] + }, + { + "id": "1154", + "parents": [ + "1153" + ] + }, + { + "id": "1155", + "parents": [ + "1154" + ] + }, + { + "id": "1156", + "parents": [ + "1155" + ] + }, + { + "id": "1157", + "parents": [ + "1156" + ] + }, + { + "id": "1158", + "parents": [ + "1157" + ] + }, + { + "id": "1159", + "parents": [ + "1158" + ] + }, + { + "id": "1160", + "parents": [ + "1159" + ] + }, + { + "id": "1161", + "parents": [ + "1160" + ] + }, + { + "id": "1162", + "parents": [ + "1161" + ] + }, + { + "id": "1163", + "parents": [ + "1162" + ] + }, + { + "id": "1164", + "parents": [ + "1163" + ] + }, + { + "id": "1165", + "parents": [ + "1164" + ] + }, + { + "id": "1166", + "parents": [ + "1165" + ] + }, + { + "id": "1167", + "parents": [ + "1166" + ] + }, + { + "id": "1168", + "parents": [ + "1167" + ] + }, + { + "id": "1169", + "parents": [ + "1168" + ] + }, + { + "id": "1170", + "parents": [ + "1169" + ] + }, + { + "id": "1171", + "parents": [ + "1170" + ] + }, + { + "id": "1172", + "parents": [ + "1171" + ] + }, + { + 
"id": "1173", + "parents": [ + "1172" + ] + }, + { + "id": "1174", + "parents": [ + "1173" + ] + }, + { + "id": "1175", + "parents": [ + "1174" + ] + }, + { + "id": "1176", + "parents": [ + "1175" + ] + }, + { + "id": "1177", + "parents": [ + "1176" + ] + }, + { + "id": "1178", + "parents": [ + "1177" + ] + }, + { + "id": "1179", + "parents": [ + "1178" + ] + }, + { + "id": "1180", + "parents": [ + "1179" + ] + }, + { + "id": "1181", + "parents": [ + "1180" + ] + }, + { + "id": "1182", + "parents": [ + "1181" + ] + }, + { + "id": "1183", + "parents": [ + "1182" + ] + }, + { + "id": "1184", + "parents": [ + "1183" + ] + }, + { + "id": "1185", + "parents": [ + "1184" + ] + }, + { + "id": "1186", + "parents": [ + "1185" + ] + }, + { + "id": "1187", + "parents": [ + "1186" + ] + }, + { + "id": "1188", + "parents": [ + "1187" + ] + }, + { + "id": "1189", + "parents": [ + "1188" + ] + }, + { + "id": "1190", + "parents": [ + "1189" + ] + }, + { + "id": "1191", + "parents": [ + "1190" + ] + }, + { + "id": "1192", + "parents": [ + "1191" + ] + }, + { + "id": "1193", + "parents": [ + "1192" + ] + }, + { + "id": "1194", + "parents": [ + "1193" + ] + }, + { + "id": "1195", + "parents": [ + "1194" + ] + }, + { + "id": "1196", + "parents": [ + "1195" + ] + }, + { + "id": "1197", + "parents": [ + "1196" + ] + }, + { + "id": "1198", + "parents": [ + "1197" + ] + }, + { + "id": "1199", + "parents": [ + "1198" + ] + }, + { + "id": "1200", + "parents": [ + "1199" + ] + }, + { + "id": "1201", + "parents": [ + "1200" + ] + }, + { + "id": "1202", + "parents": [ + "1201" + ] + }, + { + "id": "1203", + "parents": [ + "1202" + ] + }, + { + "id": "1204", + "parents": [ + "1203" + ] + }, + { + "id": "1205", + "parents": [ + "1204" + ] + }, + { + "id": "1206", + "parents": [ + "1205" + ] + }, + { + "id": "1207", + "parents": [ + "1206" + ] + }, + { + "id": "1208", + "parents": [ + "1207" + ] + }, + { + "id": "1209", + "parents": [ + "1208" + ] + }, + { + "id": "1210", + "parents": [ + "1209" 
+ ] + }, + { + "id": "1211", + "parents": [ + "1210" + ] + }, + { + "id": "1212", + "parents": [ + "1211" + ] + }, + { + "id": "1213", + "parents": [ + "1212" + ] + }, + { + "id": "1214", + "parents": [ + "1213" + ] + }, + { + "id": "1215", + "parents": [ + "1214" + ] + }, + { + "id": "1216", + "parents": [ + "1215" + ] + }, + { + "id": "1217", + "parents": [ + "1216" + ] + }, + { + "id": "1218", + "parents": [ + "1217" + ] + }, + { + "id": "1219", + "parents": [ + "1218" + ] + }, + { + "id": "1220", + "parents": [ + "1219" + ] + }, + { + "id": "1221", + "parents": [ + "1220" + ] + }, + { + "id": "1222", + "parents": [ + "1221" + ] + }, + { + "id": "1223", + "parents": [ + "1222" + ] + }, + { + "id": "1224", + "parents": [ + "1223" + ] + }, + { + "id": "1225", + "parents": [ + "1224" + ] + }, + { + "id": "1226", + "parents": [ + "1225" + ] + }, + { + "id": "1227", + "parents": [ + "1226" + ] + }, + { + "id": "1228", + "parents": [ + "1227" + ] + }, + { + "id": "1229", + "parents": [ + "1228" + ] + }, + { + "id": "1230", + "parents": [ + "1229" + ] + }, + { + "id": "1231", + "parents": [ + "1230" + ] + }, + { + "id": "1232", + "parents": [ + "1231" + ] + }, + { + "id": "1233", + "parents": [ + "1232" + ] + }, + { + "id": "1234", + "parents": [ + "1233" + ] + }, + { + "id": "1235", + "parents": [ + "1234" + ] + }, + { + "id": "1236", + "parents": [ + "1235" + ] + }, + { + "id": "1237", + "parents": [ + "1236" + ] + }, + { + "id": "1238", + "parents": [ + "1237" + ] + }, + { + "id": "1239", + "parents": [ + "1238" + ] + }, + { + "id": "1240", + "parents": [ + "1239" + ] + }, + { + "id": "1241", + "parents": [ + "1240" + ] + }, + { + "id": "1242", + "parents": [ + "1241" + ] + }, + { + "id": "1243", + "parents": [ + "1242" + ] + }, + { + "id": "1244", + "parents": [ + "1243" + ] + }, + { + "id": "1245", + "parents": [ + "1244" + ] + }, + { + "id": "1246", + "parents": [ + "1245" + ] + }, + { + "id": "1247", + "parents": [ + "1246" + ] + }, + { + "id": "1248", + 
"parents": [ + "1247" + ] + }, + { + "id": "1249", + "parents": [ + "1248" + ] + }, + { + "id": "1250", + "parents": [ + "1249" + ] + }, + { + "id": "1251", + "parents": [ + "1250" + ] + }, + { + "id": "1252", + "parents": [ + "1251" + ] + }, + { + "id": "1253", + "parents": [ + "1252" + ] + }, + { + "id": "1254", + "parents": [ + "1253" + ] + }, + { + "id": "1255", + "parents": [ + "1254" + ] + }, + { + "id": "1256", + "parents": [ + "1255" + ] + }, + { + "id": "1257", + "parents": [ + "1256" + ] + }, + { + "id": "1258", + "parents": [ + "1257" + ] + }, + { + "id": "1259", + "parents": [ + "1258" + ] + }, + { + "id": "1260", + "parents": [ + "1259" + ] + }, + { + "id": "1261", + "parents": [ + "1260" + ] + }, + { + "id": "1262", + "parents": [ + "1261" + ] + }, + { + "id": "1263", + "parents": [ + "1262" + ] + }, + { + "id": "1264", + "parents": [ + "1263" + ] + }, + { + "id": "1265", + "parents": [ + "1264" + ] + }, + { + "id": "1266", + "parents": [ + "1265" + ] + }, + { + "id": "1267", + "parents": [ + "1266" + ] + }, + { + "id": "1268", + "parents": [ + "1267" + ] + }, + { + "id": "1269", + "parents": [ + "1268" + ] + }, + { + "id": "1270", + "parents": [ + "1269" + ] + }, + { + "id": "1271", + "parents": [ + "1270" + ] + }, + { + "id": "1272", + "parents": [ + "1271" + ] + }, + { + "id": "1273", + "parents": [ + "1272" + ] + }, + { + "id": "1274", + "parents": [ + "1273" + ] + }, + { + "id": "1275", + "parents": [ + "1274" + ] + }, + { + "id": "1276", + "parents": [ + "1275" + ] + }, + { + "id": "1277", + "parents": [ + "1276" + ] + }, + { + "id": "1278", + "parents": [ + "1277" + ] + }, + { + "id": "1279", + "parents": [ + "1278" + ] + }, + { + "id": "1280", + "parents": [ + "1279" + ] + }, + { + "id": "1281", + "parents": [ + "1280" + ] + }, + { + "id": "1282", + "parents": [ + "1281" + ] + }, + { + "id": "1283", + "parents": [ + "1282" + ] + }, + { + "id": "1284", + "parents": [ + "1283" + ] + }, + { + "id": "1285", + "parents": [ + "1284" + ] + }, + { + 
"id": "1286", + "parents": [ + "1285" + ] + }, + { + "id": "1287", + "parents": [ + "1286" + ] + }, + { + "id": "1288", + "parents": [ + "1287" + ] + }, + { + "id": "1289", + "parents": [ + "1288" + ] + }, + { + "id": "1290", + "parents": [ + "1289" + ] + }, + { + "id": "1291", + "parents": [ + "1290" + ] + }, + { + "id": "1292", + "parents": [ + "1291" + ] + }, + { + "id": "1293", + "parents": [ + "1292" + ] + }, + { + "id": "1294", + "parents": [ + "1293" + ] + }, + { + "id": "1295", + "parents": [ + "1294" + ] + }, + { + "id": "1296", + "parents": [ + "1295" + ] + }, + { + "id": "1297", + "parents": [ + "1296" + ] + }, + { + "id": "1298", + "parents": [ + "1297" + ] + }, + { + "id": "1299", + "parents": [ + "1298" + ] + }, + { + "id": "1300", + "parents": [ + "1299" + ] + }, + { + "id": "1301", + "parents": [ + "1300" + ] + }, + { + "id": "1302", + "parents": [ + "1301" + ] + }, + { + "id": "1303", + "parents": [ + "1302" + ] + }, + { + "id": "1304", + "parents": [ + "1303" + ] + }, + { + "id": "1305", + "parents": [ + "1304" + ] + }, + { + "id": "1306", + "parents": [ + "1305" + ] + }, + { + "id": "1307", + "parents": [ + "1306" + ] + }, + { + "id": "1308", + "parents": [ + "1307" + ] + }, + { + "id": "1309", + "parents": [ + "1308" + ] + }, + { + "id": "1310", + "parents": [ + "1309" + ] + }, + { + "id": "1311", + "parents": [ + "1310" + ] + }, + { + "id": "1312", + "parents": [ + "1311" + ] + }, + { + "id": "1313", + "parents": [ + "1312" + ] + }, + { + "id": "1314", + "parents": [ + "1313" + ] + }, + { + "id": "1315", + "parents": [ + "1314" + ] + }, + { + "id": "1316", + "parents": [ + "1315" + ] + }, + { + "id": "1317", + "parents": [ + "1316" + ] + }, + { + "id": "1318", + "parents": [ + "1317" + ] + }, + { + "id": "1319", + "parents": [ + "1318" + ] + }, + { + "id": "1320", + "parents": [ + "1319" + ] + }, + { + "id": "1321", + "parents": [ + "1320" + ] + }, + { + "id": "1322", + "parents": [ + "1321" + ] + }, + { + "id": "1323", + "parents": [ + "1322" 
+ ] + }, + { + "id": "1324", + "parents": [ + "1323" + ] + }, + { + "id": "1325", + "parents": [ + "1324" + ] + }, + { + "id": "1326", + "parents": [ + "1325" + ] + }, + { + "id": "1327", + "parents": [ + "1326" + ] + }, + { + "id": "1328", + "parents": [ + "1327" + ] + }, + { + "id": "1329", + "parents": [ + "1328" + ] + }, + { + "id": "1330", + "parents": [ + "1329" + ] + }, + { + "id": "1331", + "parents": [ + "1330" + ] + }, + { + "id": "1332", + "parents": [ + "1331" + ] + }, + { + "id": "1333", + "parents": [ + "1332" + ] + }, + { + "id": "1334", + "parents": [ + "1333" + ] + }, + { + "id": "1335", + "parents": [ + "1334" + ] + }, + { + "id": "1336", + "parents": [ + "1335" + ] + }, + { + "id": "1337", + "parents": [ + "1336" + ] + }, + { + "id": "1338", + "parents": [ + "1337" + ] + }, + { + "id": "1339", + "parents": [ + "1338" + ] + }, + { + "id": "1340", + "parents": [ + "1339" + ] + }, + { + "id": "1341", + "parents": [ + "1340" + ] + }, + { + "id": "1342", + "parents": [ + "1341" + ] + }, + { + "id": "1343", + "parents": [ + "1342" + ] + }, + { + "id": "1344", + "parents": [ + "1343" + ] + }, + { + "id": "1345", + "parents": [ + "1344" + ] + }, + { + "id": "1346", + "parents": [ + "1345" + ] + }, + { + "id": "1347", + "parents": [ + "1346" + ] + }, + { + "id": "1348", + "parents": [ + "1347" + ] + }, + { + "id": "1349", + "parents": [ + "1348" + ] + }, + { + "id": "1350", + "parents": [ + "1349" + ] + }, + { + "id": "1351", + "parents": [ + "1350" + ] + }, + { + "id": "1352", + "parents": [ + "1351" + ] + }, + { + "id": "1353", + "parents": [ + "1352" + ] + }, + { + "id": "1354", + "parents": [ + "1353" + ] + }, + { + "id": "1355", + "parents": [ + "1354" + ] + }, + { + "id": "1356", + "parents": [ + "1355" + ] + }, + { + "id": "1357", + "parents": [ + "1356" + ] + }, + { + "id": "1358", + "parents": [ + "1357" + ] + }, + { + "id": "1359", + "parents": [ + "1358" + ] + }, + { + "id": "1360", + "parents": [ + "1359" + ] + }, + { + "id": "1361", + 
"parents": [ + "1360" + ] + }, + { + "id": "1362", + "parents": [ + "1361" + ] + }, + { + "id": "1363", + "parents": [ + "1362" + ] + }, + { + "id": "1364", + "parents": [ + "1363" + ] + }, + { + "id": "1365", + "parents": [ + "1364" + ] + }, + { + "id": "1366", + "parents": [ + "1365" + ] + }, + { + "id": "1367", + "parents": [ + "1366" + ] + }, + { + "id": "1368", + "parents": [ + "1367" + ] + }, + { + "id": "1369", + "parents": [ + "1368" + ] + }, + { + "id": "1370", + "parents": [ + "1369" + ] + }, + { + "id": "1371", + "parents": [ + "1370" + ] + }, + { + "id": "1372", + "parents": [ + "1371" + ] + }, + { + "id": "1373", + "parents": [ + "1372" + ] + }, + { + "id": "1374", + "parents": [ + "1373" + ] + }, + { + "id": "1375", + "parents": [ + "1374" + ] + }, + { + "id": "1376", + "parents": [ + "1375" + ] + }, + { + "id": "1377", + "parents": [ + "1376" + ] + }, + { + "id": "1378", + "parents": [ + "1377" + ] + }, + { + "id": "1379", + "parents": [ + "1378" + ] + }, + { + "id": "1380", + "parents": [ + "1379" + ] + }, + { + "id": "1381", + "parents": [ + "1380" + ] + }, + { + "id": "1382", + "parents": [ + "1381" + ] + }, + { + "id": "1383", + "parents": [ + "1382" + ] + }, + { + "id": "1384", + "parents": [ + "1383" + ] + }, + { + "id": "1385", + "parents": [ + "1384" + ] + }, + { + "id": "1386", + "parents": [ + "1385" + ] + }, + { + "id": "1387", + "parents": [ + "1386" + ] + }, + { + "id": "1388", + "parents": [ + "1387" + ] + }, + { + "id": "1389", + "parents": [ + "1388" + ] + }, + { + "id": "1390", + "parents": [ + "1389" + ] + }, + { + "id": "1391", + "parents": [ + "1390" + ] + }, + { + "id": "1392", + "parents": [ + "1391" + ] + }, + { + "id": "1393", + "parents": [ + "1392" + ] + }, + { + "id": "1394", + "parents": [ + "1393" + ] + }, + { + "id": "1395", + "parents": [ + "1394" + ] + }, + { + "id": "1396", + "parents": [ + "1395" + ] + }, + { + "id": "1397", + "parents": [ + "1396" + ] + }, + { + "id": "1398", + "parents": [ + "1397" + ] + }, + { + 
"id": "1399", + "parents": [ + "1398" + ] + }, + { + "id": "1400", + "parents": [ + "1399" + ] + }, + { + "id": "1401", + "parents": [ + "1400" + ] + }, + { + "id": "1402", + "parents": [ + "1401" + ] + }, + { + "id": "1403", + "parents": [ + "1402" + ] + }, + { + "id": "1404", + "parents": [ + "1403" + ] + }, + { + "id": "1405", + "parents": [ + "1404" + ] + }, + { + "id": "1406", + "parents": [ + "1405" + ] + }, + { + "id": "1407", + "parents": [ + "1406" + ] + }, + { + "id": "1408", + "parents": [ + "1407" + ] + }, + { + "id": "1409", + "parents": [ + "1408" + ] + }, + { + "id": "1410", + "parents": [ + "1409" + ] + }, + { + "id": "1411", + "parents": [ + "1410" + ] + }, + { + "id": "1412", + "parents": [ + "1411" + ] + }, + { + "id": "1413", + "parents": [ + "1412" + ] + }, + { + "id": "1414", + "parents": [ + "1413" + ] + }, + { + "id": "1415", + "parents": [ + "1414" + ] + }, + { + "id": "1416", + "parents": [ + "1415" + ] + }, + { + "id": "1417", + "parents": [ + "1416" + ] + }, + { + "id": "1418", + "parents": [ + "1417" + ] + }, + { + "id": "1419", + "parents": [ + "1418" + ] + }, + { + "id": "1420", + "parents": [ + "1419" + ] + }, + { + "id": "1421", + "parents": [ + "1420" + ] + }, + { + "id": "1422", + "parents": [ + "1421" + ] + }, + { + "id": "1423", + "parents": [ + "1422" + ] + }, + { + "id": "1424", + "parents": [ + "1423" + ] + }, + { + "id": "1425", + "parents": [ + "1424" + ] + }, + { + "id": "1426", + "parents": [ + "1425" + ] + }, + { + "id": "1427", + "parents": [ + "1426" + ] + }, + { + "id": "1428", + "parents": [ + "1427" + ] + }, + { + "id": "1429", + "parents": [ + "1428" + ] + }, + { + "id": "1430", + "parents": [ + "1429" + ] + }, + { + "id": "1431", + "parents": [ + "1430" + ] + }, + { + "id": "1432", + "parents": [ + "1431" + ] + }, + { + "id": "1433", + "parents": [ + "1432" + ] + }, + { + "id": "1434", + "parents": [ + "1433" + ] + }, + { + "id": "1435", + "parents": [ + "1434" + ] + }, + { + "id": "1436", + "parents": [ + "1435" 
+ ] + }, + { + "id": "1437", + "parents": [ + "1436" + ] + }, + { + "id": "1438", + "parents": [ + "1437" + ] + }, + { + "id": "1439", + "parents": [ + "1438" + ] + }, + { + "id": "1440", + "parents": [ + "1439" + ] + }, + { + "id": "1441", + "parents": [ + "1440" + ] + }, + { + "id": "1442", + "parents": [ + "1441" + ] + }, + { + "id": "1443", + "parents": [ + "1442" + ] + }, + { + "id": "1444", + "parents": [ + "1443" + ] + }, + { + "id": "1445", + "parents": [ + "1444" + ] + }, + { + "id": "1446", + "parents": [ + "1445" + ] + }, + { + "id": "1447", + "parents": [ + "1446" + ] + }, + { + "id": "1448", + "parents": [ + "1447" + ] + }, + { + "id": "1449", + "parents": [ + "1448" + ] + }, + { + "id": "1450", + "parents": [ + "1449" + ] + }, + { + "id": "1451", + "parents": [ + "1450" + ] + }, + { + "id": "1452", + "parents": [ + "1451" + ] + }, + { + "id": "1453", + "parents": [ + "1452" + ] + }, + { + "id": "1454", + "parents": [ + "1453" + ] + }, + { + "id": "1455", + "parents": [ + "1454" + ] + }, + { + "id": "1456", + "parents": [ + "1455" + ] + }, + { + "id": "1457", + "parents": [ + "1456" + ] + }, + { + "id": "1458", + "parents": [ + "1457" + ] + }, + { + "id": "1459", + "parents": [ + "1458" + ] + }, + { + "id": "1460", + "parents": [ + "1459" + ] + }, + { + "id": "1461", + "parents": [ + "1460" + ] + }, + { + "id": "1462", + "parents": [ + "1461" + ] + }, + { + "id": "1463", + "parents": [ + "1462" + ] + }, + { + "id": "1464", + "parents": [ + "1463" + ] + }, + { + "id": "1465", + "parents": [ + "1464" + ] + }, + { + "id": "1466", + "parents": [ + "1465" + ] + }, + { + "id": "1467", + "parents": [ + "1466" + ] + }, + { + "id": "1468", + "parents": [ + "1467" + ] + }, + { + "id": "1469", + "parents": [ + "1468" + ] + }, + { + "id": "1470", + "parents": [ + "1469" + ] + }, + { + "id": "1471", + "parents": [ + "1470" + ] + }, + { + "id": "1472", + "parents": [ + "1471" + ] + }, + { + "id": "1473", + "parents": [ + "1472" + ] + }, + { + "id": "1474", + 
"parents": [ + "1473" + ] + }, + { + "id": "1475", + "parents": [ + "1474" + ] + }, + { + "id": "1476", + "parents": [ + "1475" + ] + }, + { + "id": "1477", + "parents": [ + "1476" + ] + }, + { + "id": "1478", + "parents": [ + "1477" + ] + }, + { + "id": "1479", + "parents": [ + "1478" + ] + }, + { + "id": "1480", + "parents": [ + "1479" + ] + }, + { + "id": "1481", + "parents": [ + "1480" + ] + }, + { + "id": "1482", + "parents": [ + "1481" + ] + }, + { + "id": "1483", + "parents": [ + "1482" + ] + }, + { + "id": "1484", + "parents": [ + "1483" + ] + }, + { + "id": "1485", + "parents": [ + "1484" + ] + }, + { + "id": "1486", + "parents": [ + "1485" + ] + }, + { + "id": "1487", + "parents": [ + "1486" + ] + }, + { + "id": "1488", + "parents": [ + "1487" + ] + }, + { + "id": "1489", + "parents": [ + "1488" + ] + }, + { + "id": "1490", + "parents": [ + "1489" + ] + }, + { + "id": "1491", + "parents": [ + "1490" + ] + }, + { + "id": "1492", + "parents": [ + "1491" + ] + }, + { + "id": "1493", + "parents": [ + "1492" + ] + }, + { + "id": "1494", + "parents": [ + "1493" + ] + }, + { + "id": "1495", + "parents": [ + "1494" + ] + }, + { + "id": "1496", + "parents": [ + "1495" + ] + }, + { + "id": "1497", + "parents": [ + "1496" + ] + }, + { + "id": "1498", + "parents": [ + "1497" + ] + }, + { + "id": "1499", + "parents": [ + "1498" + ] + }, + { + "id": "1500", + "parents": [ + "1499" + ] + }, + { + "id": "1501", + "parents": [ + "1500" + ] + }, + { + "id": "1502", + "parents": [ + "1501" + ] + }, + { + "id": "1503", + "parents": [ + "1502" + ] + }, + { + "id": "1504", + "parents": [ + "1503" + ] + }, + { + "id": "1505", + "parents": [ + "1504" + ] + }, + { + "id": "1506", + "parents": [ + "1505" + ] + }, + { + "id": "1507", + "parents": [ + "1506" + ] + }, + { + "id": "1508", + "parents": [ + "1507" + ] + }, + { + "id": "1509", + "parents": [ + "1508" + ] + }, + { + "id": "1510", + "parents": [ + "1509" + ] + }, + { + "id": "1511", + "parents": [ + "1510" + ] + }, + { + 
"id": "1512", + "parents": [ + "1511" + ] + }, + { + "id": "1513", + "parents": [ + "1512" + ] + }, + { + "id": "1514", + "parents": [ + "1513" + ] + }, + { + "id": "1515", + "parents": [ + "1514" + ] + }, + { + "id": "1516", + "parents": [ + "1515" + ] + }, + { + "id": "1517", + "parents": [ + "1516" + ] + }, + { + "id": "1518", + "parents": [ + "1517" + ] + }, + { + "id": "1519", + "parents": [ + "1518" + ] + }, + { + "id": "1520", + "parents": [ + "1519" + ] + }, + { + "id": "1521", + "parents": [ + "1520" + ] + }, + { + "id": "1522", + "parents": [ + "1521" + ] + }, + { + "id": "1523", + "parents": [ + "1522" + ] + }, + { + "id": "1524", + "parents": [ + "1523" + ] + }, + { + "id": "1525", + "parents": [ + "1524" + ] + }, + { + "id": "1526", + "parents": [ + "1525" + ] + }, + { + "id": "1527", + "parents": [ + "1526" + ] + }, + { + "id": "1528", + "parents": [ + "1527" + ] + }, + { + "id": "1529", + "parents": [ + "1528" + ] + }, + { + "id": "1530", + "parents": [ + "1529" + ] + }, + { + "id": "1531", + "parents": [ + "1530" + ] + }, + { + "id": "1532", + "parents": [ + "1531" + ] + }, + { + "id": "1533", + "parents": [ + "1532" + ] + }, + { + "id": "1534", + "parents": [ + "1533" + ] + }, + { + "id": "1535", + "parents": [ + "1534" + ] + }, + { + "id": "1536", + "parents": [ + "1535" + ] + }, + { + "id": "1537", + "parents": [ + "1536" + ] + }, + { + "id": "1538", + "parents": [ + "1537" + ] + }, + { + "id": "1539", + "parents": [ + "1538" + ] + }, + { + "id": "1540", + "parents": [ + "1539" + ] + }, + { + "id": "1541", + "parents": [ + "1540" + ] + }, + { + "id": "1542", + "parents": [ + "1541" + ] + }, + { + "id": "1543", + "parents": [ + "1542" + ] + }, + { + "id": "1544", + "parents": [ + "1543" + ] + }, + { + "id": "1545", + "parents": [ + "1544" + ] + }, + { + "id": "1546", + "parents": [ + "1545" + ] + }, + { + "id": "1547", + "parents": [ + "1546" + ] + }, + { + "id": "1548", + "parents": [ + "1547" + ] + }, + { + "id": "1549", + "parents": [ + "1548" 
+ ] + }, + { + "id": "1550", + "parents": [ + "1549" + ] + }, + { + "id": "1551", + "parents": [ + "1550" + ] + }, + { + "id": "1552", + "parents": [ + "1551" + ] + }, + { + "id": "1553", + "parents": [ + "1552" + ] + }, + { + "id": "1554", + "parents": [ + "1553" + ] + }, + { + "id": "1555", + "parents": [ + "1554" + ] + }, + { + "id": "1556", + "parents": [ + "1555" + ] + }, + { + "id": "1557", + "parents": [ + "1556" + ] + }, + { + "id": "1558", + "parents": [ + "1557" + ] + }, + { + "id": "1559", + "parents": [ + "1558" + ] + }, + { + "id": "1560", + "parents": [ + "1559" + ] + }, + { + "id": "1561", + "parents": [ + "1560" + ] + }, + { + "id": "1562", + "parents": [ + "1561" + ] + }, + { + "id": "1563", + "parents": [ + "1562" + ] + }, + { + "id": "1564", + "parents": [ + "1563" + ] + }, + { + "id": "1565", + "parents": [ + "1564" + ] + }, + { + "id": "1566", + "parents": [ + "1565" + ] + }, + { + "id": "1567", + "parents": [ + "1566" + ] + }, + { + "id": "1568", + "parents": [ + "1567" + ] + }, + { + "id": "1569", + "parents": [ + "1568" + ] + }, + { + "id": "1570", + "parents": [ + "1569" + ] + }, + { + "id": "1571", + "parents": [ + "1570" + ] + }, + { + "id": "1572", + "parents": [ + "1571" + ] + }, + { + "id": "1573", + "parents": [ + "1572" + ] + }, + { + "id": "1574", + "parents": [ + "1573" + ] + }, + { + "id": "1575", + "parents": [ + "1574" + ] + }, + { + "id": "1576", + "parents": [ + "1575" + ] + }, + { + "id": "1577", + "parents": [ + "1576" + ] + }, + { + "id": "1578", + "parents": [ + "1577" + ] + }, + { + "id": "1579", + "parents": [ + "1578" + ] + }, + { + "id": "1580", + "parents": [ + "1579" + ] + }, + { + "id": "1581", + "parents": [ + "1580" + ] + }, + { + "id": "1582", + "parents": [ + "1581" + ] + }, + { + "id": "1583", + "parents": [ + "1582" + ] + }, + { + "id": "1584", + "parents": [ + "1583" + ] + }, + { + "id": "1585", + "parents": [ + "1584" + ] + }, + { + "id": "1586", + "parents": [ + "1585" + ] + }, + { + "id": "1587", + 
"parents": [ + "1586" + ] + }, + { + "id": "1588", + "parents": [ + "1587" + ] + }, + { + "id": "1589", + "parents": [ + "1588" + ] + }, + { + "id": "1590", + "parents": [ + "1589" + ] + }, + { + "id": "1591", + "parents": [ + "1590" + ] + }, + { + "id": "1592", + "parents": [ + "1591" + ] + }, + { + "id": "1593", + "parents": [ + "1592" + ] + }, + { + "id": "1594", + "parents": [ + "1593" + ] + }, + { + "id": "1595", + "parents": [ + "1594" + ] + }, + { + "id": "1596", + "parents": [ + "1595" + ] + }, + { + "id": "1597", + "parents": [ + "1596" + ] + }, + { + "id": "1598", + "parents": [ + "1597" + ] + }, + { + "id": "1599", + "parents": [ + "1598" + ] + }, + { + "id": "1600", + "parents": [ + "1599" + ] + }, + { + "id": "1601", + "parents": [ + "1600" + ] + }, + { + "id": "1602", + "parents": [ + "1601" + ] + }, + { + "id": "1603", + "parents": [ + "1602" + ] + }, + { + "id": "1604", + "parents": [ + "1603" + ] + }, + { + "id": "1605", + "parents": [ + "1604" + ] + }, + { + "id": "1606", + "parents": [ + "1605" + ] + }, + { + "id": "1607", + "parents": [ + "1606" + ] + }, + { + "id": "1608", + "parents": [ + "1607" + ] + }, + { + "id": "1609", + "parents": [ + "1608" + ] + }, + { + "id": "1610", + "parents": [ + "1609" + ] + }, + { + "id": "1611", + "parents": [ + "1610" + ] + }, + { + "id": "1612", + "parents": [ + "1611" + ] + }, + { + "id": "1613", + "parents": [ + "1612" + ] + }, + { + "id": "1614", + "parents": [ + "1613" + ] + }, + { + "id": "1615", + "parents": [ + "1614" + ] + }, + { + "id": "1616", + "parents": [ + "1615" + ] + }, + { + "id": "1617", + "parents": [ + "1616" + ] + }, + { + "id": "1618", + "parents": [ + "1617" + ] + }, + { + "id": "1619", + "parents": [ + "1618" + ] + }, + { + "id": "1620", + "parents": [ + "1619" + ] + }, + { + "id": "1621", + "parents": [ + "1620" + ] + }, + { + "id": "1622", + "parents": [ + "1621" + ] + }, + { + "id": "1623", + "parents": [ + "1622" + ] + }, + { + "id": "1624", + "parents": [ + "1623" + ] + }, + { + 
"id": "1625", + "parents": [ + "1624" + ] + }, + { + "id": "1626", + "parents": [ + "1625" + ] + }, + { + "id": "1627", + "parents": [ + "1626" + ] + }, + { + "id": "1628", + "parents": [ + "1627" + ] + }, + { + "id": "1629", + "parents": [ + "1628" + ] + }, + { + "id": "1630", + "parents": [ + "1629" + ] + }, + { + "id": "1631", + "parents": [ + "1630" + ] + }, + { + "id": "1632", + "parents": [ + "1631" + ] + }, + { + "id": "1633", + "parents": [ + "1632" + ] + }, + { + "id": "1634", + "parents": [ + "1633" + ] + }, + { + "id": "1635", + "parents": [ + "1634" + ] + }, + { + "id": "1636", + "parents": [ + "1635" + ] + }, + { + "id": "1637", + "parents": [ + "1636" + ] + }, + { + "id": "1638", + "parents": [ + "1637" + ] + }, + { + "id": "1639", + "parents": [ + "1638" + ] + }, + { + "id": "1640", + "parents": [ + "1639" + ] + }, + { + "id": "1641", + "parents": [ + "1640" + ] + }, + { + "id": "1642", + "parents": [ + "1641" + ] + }, + { + "id": "1643", + "parents": [ + "1642" + ] + }, + { + "id": "1644", + "parents": [ + "1643" + ] + }, + { + "id": "1645", + "parents": [ + "1644" + ] + }, + { + "id": "1646", + "parents": [ + "1645" + ] + }, + { + "id": "1647", + "parents": [ + "1646" + ] + }, + { + "id": "1648", + "parents": [ + "1647" + ] + }, + { + "id": "1649", + "parents": [ + "1648" + ] + }, + { + "id": "1650", + "parents": [ + "1649" + ] + }, + { + "id": "1651", + "parents": [ + "1650" + ] + }, + { + "id": "1652", + "parents": [ + "1651" + ] + }, + { + "id": "1653", + "parents": [ + "1652" + ] + }, + { + "id": "1654", + "parents": [ + "1653" + ] + }, + { + "id": "1655", + "parents": [ + "1654" + ] + }, + { + "id": "1656", + "parents": [ + "1655" + ] + }, + { + "id": "1657", + "parents": [ + "1656" + ] + }, + { + "id": "1658", + "parents": [ + "1657" + ] + }, + { + "id": "1659", + "parents": [ + "1658" + ] + }, + { + "id": "1660", + "parents": [ + "1659" + ] + }, + { + "id": "1661", + "parents": [ + "1660" + ] + }, + { + "id": "1662", + "parents": [ + "1661" 
+ ] + }, + { + "id": "1663", + "parents": [ + "1662" + ] + }, + { + "id": "1664", + "parents": [ + "1663" + ] + }, + { + "id": "1665", + "parents": [ + "1664" + ] + }, + { + "id": "1666", + "parents": [ + "1665" + ] + }, + { + "id": "1667", + "parents": [ + "1666" + ] + }, + { + "id": "1668", + "parents": [ + "1667" + ] + }, + { + "id": "1669", + "parents": [ + "1668" + ] + }, + { + "id": "1670", + "parents": [ + "1669" + ] + }, + { + "id": "1671", + "parents": [ + "1670" + ] + }, + { + "id": "1672", + "parents": [ + "1671" + ] + }, + { + "id": "1673", + "parents": [ + "1672" + ] + }, + { + "id": "1674", + "parents": [ + "1673" + ] + }, + { + "id": "1675", + "parents": [ + "1674" + ] + }, + { + "id": "1676", + "parents": [ + "1675" + ] + }, + { + "id": "1677", + "parents": [ + "1676" + ] + }, + { + "id": "1678", + "parents": [ + "1677" + ] + }, + { + "id": "1679", + "parents": [ + "1678" + ] + }, + { + "id": "1680", + "parents": [ + "1679" + ] + }, + { + "id": "1681", + "parents": [ + "1680" + ] + }, + { + "id": "1682", + "parents": [ + "1681" + ] + }, + { + "id": "1683", + "parents": [ + "1682" + ] + }, + { + "id": "1684", + "parents": [ + "1683" + ] + }, + { + "id": "1685", + "parents": [ + "1684" + ] + }, + { + "id": "1686", + "parents": [ + "1685" + ] + }, + { + "id": "1687", + "parents": [ + "1686" + ] + }, + { + "id": "1688", + "parents": [ + "1687" + ] + }, + { + "id": "1689", + "parents": [ + "1688" + ] + }, + { + "id": "1690", + "parents": [ + "1689" + ] + }, + { + "id": "1691", + "parents": [ + "1690" + ] + }, + { + "id": "1692", + "parents": [ + "1691" + ] + }, + { + "id": "1693", + "parents": [ + "1692" + ] + }, + { + "id": "1694", + "parents": [ + "1693" + ] + }, + { + "id": "1695", + "parents": [ + "1694" + ] + }, + { + "id": "1696", + "parents": [ + "1695" + ] + }, + { + "id": "1697", + "parents": [ + "1696" + ] + }, + { + "id": "1698", + "parents": [ + "1697" + ] + }, + { + "id": "1699", + "parents": [ + "1698" + ] + }, + { + "id": "1700", + 
"parents": [ + "1699" + ] + }, + { + "id": "1701", + "parents": [ + "1700" + ] + }, + { + "id": "1702", + "parents": [ + "1701" + ] + }, + { + "id": "1703", + "parents": [ + "1702" + ] + }, + { + "id": "1704", + "parents": [ + "1703" + ] + }, + { + "id": "1705", + "parents": [ + "1704" + ] + }, + { + "id": "1706", + "parents": [ + "1705" + ] + }, + { + "id": "1707", + "parents": [ + "1706" + ] + }, + { + "id": "1708", + "parents": [ + "1707" + ] + }, + { + "id": "1709", + "parents": [ + "1708" + ] + }, + { + "id": "1710", + "parents": [ + "1709" + ] + }, + { + "id": "1711", + "parents": [ + "1710" + ] + }, + { + "id": "1712", + "parents": [ + "1711" + ] + }, + { + "id": "1713", + "parents": [ + "1712" + ] + }, + { + "id": "1714", + "parents": [ + "1713" + ] + }, + { + "id": "1715", + "parents": [ + "1714" + ] + }, + { + "id": "1716", + "parents": [ + "1715" + ] + }, + { + "id": "1717", + "parents": [ + "1716" + ] + }, + { + "id": "1718", + "parents": [ + "1717" + ] + }, + { + "id": "1719", + "parents": [ + "1718" + ] + }, + { + "id": "1720", + "parents": [ + "1719" + ] + }, + { + "id": "1721", + "parents": [ + "1720" + ] + }, + { + "id": "1722", + "parents": [ + "1721" + ] + }, + { + "id": "1723", + "parents": [ + "1722" + ] + }, + { + "id": "1724", + "parents": [ + "1723" + ] + }, + { + "id": "1725", + "parents": [ + "1724" + ] + }, + { + "id": "1726", + "parents": [ + "1725" + ] + }, + { + "id": "1727", + "parents": [ + "1726" + ] + }, + { + "id": "1728", + "parents": [ + "1727" + ] + }, + { + "id": "1729", + "parents": [ + "1728" + ] + }, + { + "id": "1730", + "parents": [ + "1729" + ] + }, + { + "id": "1731", + "parents": [ + "1730" + ] + }, + { + "id": "1732", + "parents": [ + "1731" + ] + }, + { + "id": "1733", + "parents": [ + "1732" + ] + }, + { + "id": "1734", + "parents": [ + "1733" + ] + }, + { + "id": "1735", + "parents": [ + "1734" + ] + }, + { + "id": "1736", + "parents": [ + "1735" + ] + }, + { + "id": "1737", + "parents": [ + "1736" + ] + }, + { + 
"id": "1738", + "parents": [ + "1737" + ] + }, + { + "id": "1739", + "parents": [ + "1738" + ] + }, + { + "id": "1740", + "parents": [ + "1739" + ] + }, + { + "id": "1741", + "parents": [ + "1740" + ] + }, + { + "id": "1742", + "parents": [ + "1741" + ] + }, + { + "id": "1743", + "parents": [ + "1742" + ] + }, + { + "id": "1744", + "parents": [ + "1743" + ] + }, + { + "id": "1745", + "parents": [ + "1744" + ] + }, + { + "id": "1746", + "parents": [ + "1745" + ] + }, + { + "id": "1747", + "parents": [ + "1746" + ] + }, + { + "id": "1748", + "parents": [ + "1747" + ] + }, + { + "id": "1749", + "parents": [ + "1748" + ] + }, + { + "id": "1750", + "parents": [ + "1749" + ] + }, + { + "id": "1751", + "parents": [ + "1750" + ] + }, + { + "id": "1752", + "parents": [ + "1751" + ] + }, + { + "id": "1753", + "parents": [ + "1752" + ] + }, + { + "id": "1754", + "parents": [ + "1753" + ] + }, + { + "id": "1755", + "parents": [ + "1754" + ] + }, + { + "id": "1756", + "parents": [ + "1755" + ] + }, + { + "id": "1757", + "parents": [ + "1756" + ] + }, + { + "id": "1758", + "parents": [ + "1757" + ] + }, + { + "id": "1759", + "parents": [ + "1758" + ] + }, + { + "id": "1760", + "parents": [ + "1759" + ] + }, + { + "id": "1761", + "parents": [ + "1760" + ] + }, + { + "id": "1762", + "parents": [ + "1761" + ] + }, + { + "id": "1763", + "parents": [ + "1762" + ] + }, + { + "id": "1764", + "parents": [ + "1763" + ] + }, + { + "id": "1765", + "parents": [ + "1764" + ] + }, + { + "id": "1766", + "parents": [ + "1765" + ] + }, + { + "id": "1767", + "parents": [ + "1766" + ] + }, + { + "id": "1768", + "parents": [ + "1767" + ] + }, + { + "id": "1769", + "parents": [ + "1768" + ] + }, + { + "id": "1770", + "parents": [ + "1769" + ] + }, + { + "id": "1771", + "parents": [ + "1770" + ] + }, + { + "id": "1772", + "parents": [ + "1771" + ] + }, + { + "id": "1773", + "parents": [ + "1772" + ] + }, + { + "id": "1774", + "parents": [ + "1773" + ] + }, + { + "id": "1775", + "parents": [ + "1774" 
+ ] + }, + { + "id": "1776", + "parents": [ + "1775" + ] + }, + { + "id": "1777", + "parents": [ + "1776" + ] + }, + { + "id": "1778", + "parents": [ + "1777" + ] + }, + { + "id": "1779", + "parents": [ + "1778" + ] + }, + { + "id": "1780", + "parents": [ + "1779" + ] + }, + { + "id": "1781", + "parents": [ + "1780" + ] + }, + { + "id": "1782", + "parents": [ + "1781" + ] + }, + { + "id": "1783", + "parents": [ + "1782" + ] + }, + { + "id": "1784", + "parents": [ + "1783" + ] + }, + { + "id": "1785", + "parents": [ + "1784" + ] + }, + { + "id": "1786", + "parents": [ + "1785" + ] + }, + { + "id": "1787", + "parents": [ + "1786" + ] + }, + { + "id": "1788", + "parents": [ + "1787" + ] + }, + { + "id": "1789", + "parents": [ + "1788" + ] + }, + { + "id": "1790", + "parents": [ + "1789" + ] + }, + { + "id": "1791", + "parents": [ + "1790" + ] + }, + { + "id": "1792", + "parents": [ + "1791" + ] + }, + { + "id": "1793", + "parents": [ + "1792" + ] + }, + { + "id": "1794", + "parents": [ + "1793" + ] + }, + { + "id": "1795", + "parents": [ + "1794" + ] + }, + { + "id": "1796", + "parents": [ + "1795" + ] + }, + { + "id": "1797", + "parents": [ + "1796" + ] + }, + { + "id": "1798", + "parents": [ + "1797" + ] + }, + { + "id": "1799", + "parents": [ + "1798" + ] + }, + { + "id": "1800", + "parents": [ + "1799" + ] + }, + { + "id": "1801", + "parents": [ + "1800" + ] + }, + { + "id": "1802", + "parents": [ + "1801" + ] + }, + { + "id": "1803", + "parents": [ + "1802" + ] + }, + { + "id": "1804", + "parents": [ + "1803" + ] + }, + { + "id": "1805", + "parents": [ + "1804" + ] + }, + { + "id": "1806", + "parents": [ + "1805" + ] + }, + { + "id": "1807", + "parents": [ + "1806" + ] + }, + { + "id": "1808", + "parents": [ + "1807" + ] + }, + { + "id": "1809", + "parents": [ + "1808" + ] + }, + { + "id": "1810", + "parents": [ + "1809" + ] + }, + { + "id": "1811", + "parents": [ + "1810" + ] + }, + { + "id": "1812", + "parents": [ + "1811" + ] + }, + { + "id": "1813", + 
"parents": [ + "1812" + ] + }, + { + "id": "1814", + "parents": [ + "1813" + ] + }, + { + "id": "1815", + "parents": [ + "1814" + ] + }, + { + "id": "1816", + "parents": [ + "1815" + ] + }, + { + "id": "1817", + "parents": [ + "1816" + ] + }, + { + "id": "1818", + "parents": [ + "1817" + ] + }, + { + "id": "1819", + "parents": [ + "1818" + ] + }, + { + "id": "1820", + "parents": [ + "1819" + ] + }, + { + "id": "1821", + "parents": [ + "1820" + ] + }, + { + "id": "1822", + "parents": [ + "1821" + ] + }, + { + "id": "1823", + "parents": [ + "1822" + ] + }, + { + "id": "1824", + "parents": [ + "1823" + ] + }, + { + "id": "1825", + "parents": [ + "1824" + ] + }, + { + "id": "1826", + "parents": [ + "1825" + ] + }, + { + "id": "1827", + "parents": [ + "1826" + ] + }, + { + "id": "1828", + "parents": [ + "1827" + ] + }, + { + "id": "1829", + "parents": [ + "1828" + ] + }, + { + "id": "1830", + "parents": [ + "1829" + ] + }, + { + "id": "1831", + "parents": [ + "1830" + ] + }, + { + "id": "1832", + "parents": [ + "1831" + ] + }, + { + "id": "1833", + "parents": [ + "1832" + ] + }, + { + "id": "1834", + "parents": [ + "1833" + ] + }, + { + "id": "1835", + "parents": [ + "1834" + ] + }, + { + "id": "1836", + "parents": [ + "1835" + ] + }, + { + "id": "1837", + "parents": [ + "1836" + ] + }, + { + "id": "1838", + "parents": [ + "1837" + ] + }, + { + "id": "1839", + "parents": [ + "1838" + ] + }, + { + "id": "1840", + "parents": [ + "1839" + ] + }, + { + "id": "1841", + "parents": [ + "1840" + ] + }, + { + "id": "1842", + "parents": [ + "1841" + ] + }, + { + "id": "1843", + "parents": [ + "1842" + ] + }, + { + "id": "1844", + "parents": [ + "1843" + ] + }, + { + "id": "1845", + "parents": [ + "1844" + ] + }, + { + "id": "1846", + "parents": [ + "1845" + ] + }, + { + "id": "1847", + "parents": [ + "1846" + ] + }, + { + "id": "1848", + "parents": [ + "1847" + ] + }, + { + "id": "1849", + "parents": [ + "1848" + ] + }, + { + "id": "1850", + "parents": [ + "1849" + ] + }, + { + 
"id": "1851", + "parents": [ + "1850" + ] + }, + { + "id": "1852", + "parents": [ + "1851" + ] + }, + { + "id": "1853", + "parents": [ + "1852" + ] + }, + { + "id": "1854", + "parents": [ + "1853" + ] + }, + { + "id": "1855", + "parents": [ + "1854" + ] + }, + { + "id": "1856", + "parents": [ + "1855" + ] + }, + { + "id": "1857", + "parents": [ + "1856" + ] + }, + { + "id": "1858", + "parents": [ + "1857" + ] + }, + { + "id": "1859", + "parents": [ + "1858" + ] + }, + { + "id": "1860", + "parents": [ + "1859" + ] + }, + { + "id": "1861", + "parents": [ + "1860" + ] + }, + { + "id": "1862", + "parents": [ + "1861" + ] + }, + { + "id": "1863", + "parents": [ + "1862" + ] + }, + { + "id": "1864", + "parents": [ + "1863" + ] + }, + { + "id": "1865", + "parents": [ + "1864" + ] + }, + { + "id": "1866", + "parents": [ + "1865" + ] + }, + { + "id": "1867", + "parents": [ + "1866" + ] + }, + { + "id": "1868", + "parents": [ + "1867" + ] + }, + { + "id": "1869", + "parents": [ + "1868" + ] + }, + { + "id": "1870", + "parents": [ + "1869" + ] + }, + { + "id": "1871", + "parents": [ + "1870" + ] + }, + { + "id": "1872", + "parents": [ + "1871" + ] + }, + { + "id": "1873", + "parents": [ + "1872" + ] + }, + { + "id": "1874", + "parents": [ + "1873" + ] + }, + { + "id": "1875", + "parents": [ + "1874" + ] + }, + { + "id": "1876", + "parents": [ + "1875" + ] + }, + { + "id": "1877", + "parents": [ + "1876" + ] + }, + { + "id": "1878", + "parents": [ + "1877" + ] + }, + { + "id": "1879", + "parents": [ + "1878" + ] + }, + { + "id": "1880", + "parents": [ + "1879" + ] + }, + { + "id": "1881", + "parents": [ + "1880" + ] + }, + { + "id": "1882", + "parents": [ + "1881" + ] + }, + { + "id": "1883", + "parents": [ + "1882" + ] + }, + { + "id": "1884", + "parents": [ + "1883" + ] + }, + { + "id": "1885", + "parents": [ + "1884" + ] + }, + { + "id": "1886", + "parents": [ + "1885" + ] + }, + { + "id": "1887", + "parents": [ + "1886" + ] + }, + { + "id": "1888", + "parents": [ + "1887" 
+ ] + }, + { + "id": "1889", + "parents": [ + "1888" + ] + }, + { + "id": "1890", + "parents": [ + "1889" + ] + }, + { + "id": "1891", + "parents": [ + "1890" + ] + }, + { + "id": "1892", + "parents": [ + "1891" + ] + }, + { + "id": "1893", + "parents": [ + "1892" + ] + }, + { + "id": "1894", + "parents": [ + "1893" + ] + }, + { + "id": "1895", + "parents": [ + "1894" + ] + }, + { + "id": "1896", + "parents": [ + "1895" + ] + }, + { + "id": "1897", + "parents": [ + "1896" + ] + }, + { + "id": "1898", + "parents": [ + "1897" + ] + }, + { + "id": "1899", + "parents": [ + "1898" + ] + }, + { + "id": "1900", + "parents": [ + "1899" + ] + }, + { + "id": "1901", + "parents": [ + "1900" + ] + }, + { + "id": "1902", + "parents": [ + "1901" + ] + }, + { + "id": "1903", + "parents": [ + "1902" + ] + }, + { + "id": "1904", + "parents": [ + "1903" + ] + }, + { + "id": "1905", + "parents": [ + "1904" + ] + }, + { + "id": "1906", + "parents": [ + "1905" + ] + }, + { + "id": "1907", + "parents": [ + "1906" + ] + }, + { + "id": "1908", + "parents": [ + "1907" + ] + }, + { + "id": "1909", + "parents": [ + "1908" + ] + }, + { + "id": "1910", + "parents": [ + "1909" + ] + }, + { + "id": "1911", + "parents": [ + "1910" + ] + }, + { + "id": "1912", + "parents": [ + "1911" + ] + }, + { + "id": "1913", + "parents": [ + "1912" + ] + }, + { + "id": "1914", + "parents": [ + "1913" + ] + }, + { + "id": "1915", + "parents": [ + "1914" + ] + }, + { + "id": "1916", + "parents": [ + "1915" + ] + }, + { + "id": "1917", + "parents": [ + "1916" + ] + }, + { + "id": "1918", + "parents": [ + "1917" + ] + }, + { + "id": "1919", + "parents": [ + "1918" + ] + }, + { + "id": "1920", + "parents": [ + "1919" + ] + }, + { + "id": "1921", + "parents": [ + "1920" + ] + }, + { + "id": "1922", + "parents": [ + "1921" + ] + }, + { + "id": "1923", + "parents": [ + "1922" + ] + }, + { + "id": "1924", + "parents": [ + "1923" + ] + }, + { + "id": "1925", + "parents": [ + "1924" + ] + }, + { + "id": "1926", + 
"parents": [ + "1925" + ] + }, + { + "id": "1927", + "parents": [ + "1926" + ] + }, + { + "id": "1928", + "parents": [ + "1927" + ] + }, + { + "id": "1929", + "parents": [ + "1928" + ] + }, + { + "id": "1930", + "parents": [ + "1929" + ] + }, + { + "id": "1931", + "parents": [ + "1930" + ] + }, + { + "id": "1932", + "parents": [ + "1931" + ] + }, + { + "id": "1933", + "parents": [ + "1932" + ] + }, + { + "id": "1934", + "parents": [ + "1933" + ] + }, + { + "id": "1935", + "parents": [ + "1934" + ] + }, + { + "id": "1936", + "parents": [ + "1935" + ] + }, + { + "id": "1937", + "parents": [ + "1936" + ] + }, + { + "id": "1938", + "parents": [ + "1937" + ] + }, + { + "id": "1939", + "parents": [ + "1938" + ] + }, + { + "id": "1940", + "parents": [ + "1939" + ] + }, + { + "id": "1941", + "parents": [ + "1940" + ] + }, + { + "id": "1942", + "parents": [ + "1941" + ] + }, + { + "id": "1943", + "parents": [ + "1942" + ] + }, + { + "id": "1944", + "parents": [ + "1943" + ] + }, + { + "id": "1945", + "parents": [ + "1944" + ] + }, + { + "id": "1946", + "parents": [ + "1945" + ] + }, + { + "id": "1947", + "parents": [ + "1946" + ] + }, + { + "id": "1948", + "parents": [ + "1947" + ] + }, + { + "id": "1949", + "parents": [ + "1948" + ] + }, + { + "id": "1950", + "parents": [ + "1949" + ] + }, + { + "id": "1951", + "parents": [ + "1950" + ] + }, + { + "id": "1952", + "parents": [ + "1951" + ] + }, + { + "id": "1953", + "parents": [ + "1952" + ] + }, + { + "id": "1954", + "parents": [ + "1953" + ] + }, + { + "id": "1955", + "parents": [ + "1954" + ] + }, + { + "id": "1956", + "parents": [ + "1955" + ] + }, + { + "id": "1957", + "parents": [ + "1956" + ] + }, + { + "id": "1958", + "parents": [ + "1957" + ] + }, + { + "id": "1959", + "parents": [ + "1958" + ] + }, + { + "id": "1960", + "parents": [ + "1959" + ] + }, + { + "id": "1961", + "parents": [ + "1960" + ] + }, + { + "id": "1962", + "parents": [ + "1961" + ] + }, + { + "id": "1963", + "parents": [ + "1962" + ] + }, + { + 
"id": "1964", + "parents": [ + "1963" + ] + }, + { + "id": "1965", + "parents": [ + "1964" + ] + }, + { + "id": "1966", + "parents": [ + "1965" + ] + }, + { + "id": "1967", + "parents": [ + "1966" + ] + }, + { + "id": "1968", + "parents": [ + "1967" + ] + }, + { + "id": "1969", + "parents": [ + "1968" + ] + }, + { + "id": "1970", + "parents": [ + "1969" + ] + }, + { + "id": "1971", + "parents": [ + "1970" + ] + }, + { + "id": "1972", + "parents": [ + "1971" + ] + }, + { + "id": "1973", + "parents": [ + "1972" + ] + }, + { + "id": "1974", + "parents": [ + "1973" + ] + }, + { + "id": "1975", + "parents": [ + "1974" + ] + }, + { + "id": "1976", + "parents": [ + "1975" + ] + }, + { + "id": "1977", + "parents": [ + "1976" + ] + }, + { + "id": "1978", + "parents": [ + "1977" + ] + }, + { + "id": "1979", + "parents": [ + "1978" + ] + }, + { + "id": "1980", + "parents": [ + "1979" + ] + }, + { + "id": "1981", + "parents": [ + "1980" + ] + }, + { + "id": "1982", + "parents": [ + "1981" + ] + }, + { + "id": "1983", + "parents": [ + "1982" + ] + }, + { + "id": "1984", + "parents": [ + "1983" + ] + }, + { + "id": "1985", + "parents": [ + "1984" + ] + }, + { + "id": "1986", + "parents": [ + "1985" + ] + }, + { + "id": "1987", + "parents": [ + "1986" + ] + }, + { + "id": "1988", + "parents": [ + "1987" + ] + }, + { + "id": "1989", + "parents": [ + "1988" + ] + }, + { + "id": "1990", + "parents": [ + "1989" + ] + }, + { + "id": "1991", + "parents": [ + "1990" + ] + }, + { + "id": "1992", + "parents": [ + "1991" + ] + }, + { + "id": "1993", + "parents": [ + "1992" + ] + }, + { + "id": "1994", + "parents": [ + "1993" + ] + }, + { + "id": "1995", + "parents": [ + "1994" + ] + }, + { + "id": "1996", + "parents": [ + "1995" + ] + }, + { + "id": "1997", + "parents": [ + "1996" + ] + }, + { + "id": "1998", + "parents": [ + "1997" + ] + }, + { + "id": "1999", + "parents": [ + "1998" + ] + } + ] +} \ No newline at end of file diff --git 
a/domain/consensus/processes/pruningmanager/testdata/dag-for-test-pruning.json b/domain/consensus/processes/pruningmanager/testdata/dag-for-test-pruning.json new file mode 100644 index 0000000..8d0d5c8 --- /dev/null +++ b/domain/consensus/processes/pruningmanager/testdata/dag-for-test-pruning.json @@ -0,0 +1,14196 @@ +{ + "mergeSetSizeLimit": 20, + "finalityDepth": 20, + "blocks": [ + { + "id": "0", + "parents": [] + }, + { + "id": "1", + "parents": [ + "0" + ] + }, + { + "id": "2", + "parents": [ + "0" + ] + }, + { + "id": "3", + "parents": [ + "0" + ] + }, + { + "id": "4", + "parents": [ + "2", + "1", + "3" + ] + }, + { + "id": "5", + "parents": [ + "4" + ] + }, + { + "id": "6", + "parents": [ + "5" + ] + }, + { + "id": "7", + "parents": [ + "4" + ] + }, + { + "id": "8", + "parents": [ + "5", + "7" + ] + }, + { + "id": "9", + "parents": [ + "5" + ] + }, + { + "id": "10", + "parents": [ + "7" + ] + }, + { + "id": "11", + "parents": [ + "5", + "10" + ] + }, + { + "id": "12", + "parents": [ + "6", + "11", + "9", + "8" + ] + }, + { + "id": "13", + "parents": [ + "11", + "8", + "6", + "9" + ] + }, + { + "id": "14", + "parents": [ + "13" + ] + }, + { + "id": "15", + "parents": [ + "6", + "8", + "11", + "9" + ] + }, + { + "id": "16", + "parents": [ + "13", + "12" + ] + }, + { + "id": "17", + "parents": [ + "14", + "12" + ] + }, + { + "id": "18", + "parents": [ + "12", + "13" + ] + }, + { + "id": "19", + "parents": [ + "16", + "17", + "15" + ] + }, + { + "id": "20", + "parents": [ + "18", + "19" + ] + }, + { + "id": "21", + "parents": [ + "14", + "15", + "16" + ] + }, + { + "id": "22", + "parents": [ + "18", + "17", + "21" + ] + }, + { + "id": "23", + "parents": [ + "22", + "20" + ] + }, + { + "id": "24", + "parents": [ + "23" + ] + }, + { + "id": "25", + "parents": [ + "23" + ] + }, + { + "id": "26", + "parents": [ + "25" + ] + }, + { + "id": "27", + "parents": [ + "24", + "25" + ] + }, + { + "id": "28", + "parents": [ + "26" + ] + }, + { + "id": "29", + "parents": [ + 
"25", + "24" + ] + }, + { + "id": "30", + "parents": [ + "24", + "28" + ] + }, + { + "id": "31", + "parents": [ + "30" + ] + }, + { + "id": "32", + "parents": [ + "31", + "29", + "27" + ] + }, + { + "id": "33", + "parents": [ + "32" + ] + }, + { + "id": "34", + "parents": [ + "32" + ] + }, + { + "id": "35", + "parents": [ + "34", + "33" + ] + }, + { + "id": "36", + "parents": [ + "34", + "33" + ] + }, + { + "id": "37", + "parents": [ + "35" + ] + }, + { + "id": "38", + "parents": [ + "36" + ] + }, + { + "id": "39", + "parents": [ + "36", + "35" + ] + }, + { + "id": "40", + "parents": [ + "39" + ] + }, + { + "id": "41", + "parents": [ + "37", + "38" + ] + }, + { + "id": "42", + "parents": [ + "41" + ] + }, + { + "id": "43", + "parents": [ + "38" + ] + }, + { + "id": "44", + "parents": [ + "43", + "39", + "41" + ] + }, + { + "id": "45", + "parents": [ + "41", + "40", + "43" + ] + }, + { + "id": "46", + "parents": [ + "42", + "44", + "45" + ] + }, + { + "id": "47", + "parents": [ + "42", + "44", + "45" + ] + }, + { + "id": "48", + "parents": [ + "46", + "47" + ] + }, + { + "id": "49", + "parents": [ + "47" + ] + }, + { + "id": "50", + "parents": [ + "39", + "41", + "43" + ] + }, + { + "id": "51", + "parents": [ + "50", + "46" + ] + }, + { + "id": "52", + "parents": [ + "48", + "51", + "49" + ] + }, + { + "id": "53", + "parents": [ + "48", + "51", + "49" + ] + }, + { + "id": "54", + "parents": [ + "53", + "52" + ] + }, + { + "id": "55", + "parents": [ + "54" + ] + }, + { + "id": "56", + "parents": [ + "54" + ] + }, + { + "id": "57", + "parents": [ + "54" + ] + }, + { + "id": "58", + "parents": [ + "57", + "55", + "56" + ] + }, + { + "id": "59", + "parents": [ + "56", + "57", + "55" + ] + }, + { + "id": "60", + "parents": [ + "58", + "59" + ] + }, + { + "id": "61", + "parents": [ + "59", + "58" + ] + }, + { + "id": "62", + "parents": [ + "61", + "60" + ] + }, + { + "id": "63", + "parents": [ + "62" + ] + }, + { + "id": "64", + "parents": [ + "61", + "60" + ] + }, + { + 
"id": "65", + "parents": [ + "64", + "63" + ] + }, + { + "id": "66", + "parents": [ + "65" + ] + }, + { + "id": "67", + "parents": [ + "63", + "64" + ] + }, + { + "id": "68", + "parents": [ + "63", + "64" + ] + }, + { + "id": "69", + "parents": [ + "66", + "68", + "67" + ] + }, + { + "id": "70", + "parents": [ + "69" + ] + }, + { + "id": "71", + "parents": [ + "69" + ] + }, + { + "id": "72", + "parents": [ + "71", + "70" + ] + }, + { + "id": "73", + "parents": [ + "69" + ] + }, + { + "id": "74", + "parents": [ + "72", + "73" + ] + }, + { + "id": "75", + "parents": [ + "72", + "73" + ] + }, + { + "id": "76", + "parents": [ + "73", + "72" + ] + }, + { + "id": "77", + "parents": [ + "75" + ] + }, + { + "id": "78", + "parents": [ + "77", + "74", + "76" + ] + }, + { + "id": "79", + "parents": [ + "74", + "75", + "76" + ] + }, + { + "id": "80", + "parents": [ + "74", + "76", + "77" + ] + }, + { + "id": "81", + "parents": [ + "79", + "80" + ] + }, + { + "id": "82", + "parents": [ + "78", + "81" + ] + }, + { + "id": "83", + "parents": [ + "79", + "80", + "78" + ] + }, + { + "id": "84", + "parents": [ + "78", + "79", + "80" + ] + }, + { + "id": "85", + "parents": [ + "84" + ] + }, + { + "id": "86", + "parents": [ + "82", + "84" + ] + }, + { + "id": "87", + "parents": [ + "82" + ] + }, + { + "id": "88", + "parents": [ + "85" + ] + }, + { + "id": "89", + "parents": [ + "86", + "87", + "88", + "83" + ] + }, + { + "id": "90", + "parents": [ + "86", + "87", + "88", + "83" + ] + }, + { + "id": "91", + "parents": [ + "89" + ] + }, + { + "id": "92", + "parents": [ + "91", + "90" + ] + }, + { + "id": "93", + "parents": [ + "89" + ] + }, + { + "id": "94", + "parents": [ + "91", + "90" + ] + }, + { + "id": "95", + "parents": [ + "93" + ] + }, + { + "id": "96", + "parents": [ + "91", + "90", + "93" + ] + }, + { + "id": "97", + "parents": [ + "93", + "91", + "90" + ] + }, + { + "id": "98", + "parents": [ + "94" + ] + }, + { + "id": "99", + "parents": [ + "97", + "96", + "95", + "98" + ] 
+ }, + { + "id": "100", + "parents": [ + "99" + ] + }, + { + "id": "101", + "parents": [ + "99", + "92" + ] + }, + { + "id": "102", + "parents": [ + "96", + "95", + "98", + "92", + "97" + ] + }, + { + "id": "103", + "parents": [ + "100", + "92" + ] + }, + { + "id": "104", + "parents": [ + "103", + "102", + "101" + ] + }, + { + "id": "105", + "parents": [ + "101", + "103", + "102" + ] + }, + { + "id": "106", + "parents": [ + "105", + "104" + ] + }, + { + "id": "107", + "parents": [ + "104", + "105" + ] + }, + { + "id": "108", + "parents": [ + "104", + "105" + ] + }, + { + "id": "109", + "parents": [ + "104", + "105" + ] + }, + { + "id": "110", + "parents": [ + "109" + ] + }, + { + "id": "111", + "parents": [ + "105", + "104" + ] + }, + { + "id": "112", + "parents": [ + "108", + "111", + "107" + ] + }, + { + "id": "113", + "parents": [ + "112", + "110", + "106" + ] + }, + { + "id": "114", + "parents": [ + "113" + ] + }, + { + "id": "115", + "parents": [ + "113" + ] + }, + { + "id": "116", + "parents": [ + "110", + "106", + "112" + ] + }, + { + "id": "117", + "parents": [ + "116", + "114", + "115" + ] + }, + { + "id": "118", + "parents": [ + "115", + "114", + "116" + ] + }, + { + "id": "119", + "parents": [ + "117", + "118" + ] + }, + { + "id": "120", + "parents": [ + "119" + ] + }, + { + "id": "121", + "parents": [ + "120" + ] + }, + { + "id": "122", + "parents": [ + "114", + "116", + "115" + ] + }, + { + "id": "123", + "parents": [ + "122", + "121" + ] + }, + { + "id": "124", + "parents": [ + "122", + "119" + ] + }, + { + "id": "125", + "parents": [ + "122", + "119" + ] + }, + { + "id": "126", + "parents": [ + "125" + ] + }, + { + "id": "127", + "parents": [ + "123", + "124", + "125" + ] + }, + { + "id": "128", + "parents": [ + "126", + "127" + ] + }, + { + "id": "129", + "parents": [ + "128" + ] + }, + { + "id": "130", + "parents": [ + "126", + "127" + ] + }, + { + "id": "131", + "parents": [ + "130", + "129" + ] + }, + { + "id": "132", + "parents": [ + "129", + 
"130" + ] + }, + { + "id": "133", + "parents": [ + "130", + "129" + ] + }, + { + "id": "134", + "parents": [ + "132", + "133" + ] + }, + { + "id": "135", + "parents": [ + "132", + "131", + "133" + ] + }, + { + "id": "136", + "parents": [ + "131", + "134" + ] + }, + { + "id": "137", + "parents": [ + "134", + "135" + ] + }, + { + "id": "138", + "parents": [ + "136", + "137" + ] + }, + { + "id": "139", + "parents": [ + "136", + "137" + ] + }, + { + "id": "140", + "parents": [ + "136", + "135" + ] + }, + { + "id": "141", + "parents": [ + "138", + "139", + "140" + ] + }, + { + "id": "142", + "parents": [ + "141" + ] + }, + { + "id": "143", + "parents": [ + "141" + ] + }, + { + "id": "144", + "parents": [ + "141" + ] + }, + { + "id": "145", + "parents": [ + "142", + "144" + ] + }, + { + "id": "146", + "parents": [ + "145", + "143" + ] + }, + { + "id": "147", + "parents": [ + "146" + ] + }, + { + "id": "148", + "parents": [ + "146" + ] + }, + { + "id": "149", + "parents": [ + "147" + ] + }, + { + "id": "150", + "parents": [ + "148", + "149" + ] + }, + { + "id": "151", + "parents": [ + "149" + ] + }, + { + "id": "152", + "parents": [ + "147", + "148" + ] + }, + { + "id": "153", + "parents": [ + "148" + ] + }, + { + "id": "154", + "parents": [ + "148", + "149" + ] + }, + { + "id": "155", + "parents": [ + "147" + ] + }, + { + "id": "156", + "parents": [ + "155", + "153", + "150", + "151" + ] + }, + { + "id": "157", + "parents": [ + "150", + "153", + "151" + ] + }, + { + "id": "158", + "parents": [ + "156", + "157", + "154", + "152" + ] + }, + { + "id": "159", + "parents": [ + "158" + ] + }, + { + "id": "160", + "parents": [ + "158" + ] + }, + { + "id": "161", + "parents": [ + "160" + ] + }, + { + "id": "162", + "parents": [ + "160" + ] + }, + { + "id": "163", + "parents": [ + "161", + "159", + "162" + ] + }, + { + "id": "164", + "parents": [ + "163" + ] + }, + { + "id": "165", + "parents": [ + "161", + "162", + "159" + ] + }, + { + "id": "166", + "parents": [ + "162", + 
"159", + "161" + ] + }, + { + "id": "167", + "parents": [ + "166" + ] + }, + { + "id": "168", + "parents": [ + "164", + "167", + "165" + ] + }, + { + "id": "169", + "parents": [ + "168" + ] + }, + { + "id": "170", + "parents": [ + "168" + ] + }, + { + "id": "171", + "parents": [ + "169" + ] + }, + { + "id": "172", + "parents": [ + "171", + "170" + ] + }, + { + "id": "173", + "parents": [ + "172" + ] + }, + { + "id": "174", + "parents": [ + "172" + ] + }, + { + "id": "175", + "parents": [ + "172" + ] + }, + { + "id": "176", + "parents": [ + "173", + "174" + ] + }, + { + "id": "177", + "parents": [ + "175", + "176" + ] + }, + { + "id": "178", + "parents": [ + "174", + "173" + ] + }, + { + "id": "179", + "parents": [ + "178", + "177" + ] + }, + { + "id": "180", + "parents": [ + "179" + ] + }, + { + "id": "181", + "parents": [ + "180" + ] + }, + { + "id": "182", + "parents": [ + "179" + ] + }, + { + "id": "183", + "parents": [ + "181", + "182" + ] + }, + { + "id": "184", + "parents": [ + "182", + "181" + ] + }, + { + "id": "185", + "parents": [ + "184" + ] + }, + { + "id": "186", + "parents": [ + "183", + "185" + ] + }, + { + "id": "187", + "parents": [ + "181", + "182" + ] + }, + { + "id": "188", + "parents": [ + "187", + "186" + ] + }, + { + "id": "189", + "parents": [ + "187", + "186" + ] + }, + { + "id": "190", + "parents": [ + "189" + ] + }, + { + "id": "191", + "parents": [ + "190", + "188" + ] + }, + { + "id": "192", + "parents": [ + "191" + ] + }, + { + "id": "193", + "parents": [ + "192" + ] + }, + { + "id": "194", + "parents": [ + "192" + ] + }, + { + "id": "195", + "parents": [ + "193", + "194" + ] + }, + { + "id": "196", + "parents": [ + "195" + ] + }, + { + "id": "197", + "parents": [ + "196" + ] + }, + { + "id": "198", + "parents": [ + "195" + ] + }, + { + "id": "199", + "parents": [ + "198" + ] + }, + { + "id": "200", + "parents": [ + "197", + "198" + ] + }, + { + "id": "201", + "parents": [ + "199", + "197" + ] + }, + { + "id": "202", + "parents": [ + 
"201", + "200" + ] + }, + { + "id": "203", + "parents": [ + "200", + "201" + ] + }, + { + "id": "204", + "parents": [ + "203" + ] + }, + { + "id": "205", + "parents": [ + "204" + ] + }, + { + "id": "206", + "parents": [ + "201", + "200" + ] + }, + { + "id": "207", + "parents": [ + "201", + "200" + ] + }, + { + "id": "208", + "parents": [ + "202", + "205", + "207", + "206" + ] + }, + { + "id": "209", + "parents": [ + "205", + "202", + "206", + "207" + ] + }, + { + "id": "210", + "parents": [ + "208", + "209" + ] + }, + { + "id": "211", + "parents": [ + "209", + "208" + ] + }, + { + "id": "212", + "parents": [ + "208", + "209" + ] + }, + { + "id": "213", + "parents": [ + "211", + "210" + ] + }, + { + "id": "214", + "parents": [ + "212", + "210" + ] + }, + { + "id": "215", + "parents": [ + "214", + "211" + ] + }, + { + "id": "216", + "parents": [ + "213", + "212" + ] + }, + { + "id": "217", + "parents": [ + "212" + ] + }, + { + "id": "218", + "parents": [ + "214", + "211", + "217" + ] + }, + { + "id": "219", + "parents": [ + "215", + "213", + "218" + ] + }, + { + "id": "220", + "parents": [ + "216", + "219" + ] + }, + { + "id": "221", + "parents": [ + "220" + ] + }, + { + "id": "222", + "parents": [ + "216", + "219" + ] + }, + { + "id": "223", + "parents": [ + "220", + "222" + ] + }, + { + "id": "224", + "parents": [ + "223", + "221" + ] + }, + { + "id": "225", + "parents": [ + "222", + "220" + ] + }, + { + "id": "226", + "parents": [ + "225", + "221", + "223" + ] + }, + { + "id": "227", + "parents": [ + "221", + "225", + "223" + ] + }, + { + "id": "228", + "parents": [ + "227", + "226" + ] + }, + { + "id": "229", + "parents": [ + "228", + "224" + ] + }, + { + "id": "230", + "parents": [ + "224", + "227", + "226" + ] + }, + { + "id": "231", + "parents": [ + "224", + "227", + "226" + ] + }, + { + "id": "232", + "parents": [ + "230", + "231", + "229" + ] + }, + { + "id": "233", + "parents": [ + "228", + "224" + ] + }, + { + "id": "234", + "parents": [ + "226", + "224" + 
] + }, + { + "id": "235", + "parents": [ + "233", + "232" + ] + }, + { + "id": "236", + "parents": [ + "230", + "233", + "234", + "231" + ] + }, + { + "id": "237", + "parents": [ + "236", + "235" + ] + }, + { + "id": "238", + "parents": [ + "235", + "236" + ] + }, + { + "id": "239", + "parents": [ + "236", + "235" + ] + }, + { + "id": "240", + "parents": [ + "237" + ] + }, + { + "id": "241", + "parents": [ + "240" + ] + }, + { + "id": "242", + "parents": [ + "239", + "241" + ] + }, + { + "id": "243", + "parents": [ + "241", + "238" + ] + }, + { + "id": "244", + "parents": [ + "242", + "238" + ] + }, + { + "id": "245", + "parents": [ + "238", + "242" + ] + }, + { + "id": "246", + "parents": [ + "245", + "243", + "244" + ] + }, + { + "id": "247", + "parents": [ + "245", + "243", + "244" + ] + }, + { + "id": "248", + "parents": [ + "247", + "246" + ] + }, + { + "id": "249", + "parents": [ + "248" + ] + }, + { + "id": "250", + "parents": [ + "247" + ] + }, + { + "id": "251", + "parents": [ + "246", + "250" + ] + }, + { + "id": "252", + "parents": [ + "251" + ] + }, + { + "id": "253", + "parents": [ + "252" + ] + }, + { + "id": "254", + "parents": [ + "249", + "253" + ] + }, + { + "id": "255", + "parents": [ + "253", + "249" + ] + }, + { + "id": "256", + "parents": [ + "255", + "254" + ] + }, + { + "id": "257", + "parents": [ + "256" + ] + }, + { + "id": "258", + "parents": [ + "257" + ] + }, + { + "id": "259", + "parents": [ + "257" + ] + }, + { + "id": "260", + "parents": [ + "259" + ] + }, + { + "id": "261", + "parents": [ + "258", + "259" + ] + }, + { + "id": "262", + "parents": [ + "258", + "259" + ] + }, + { + "id": "263", + "parents": [ + "261", + "262", + "260" + ] + }, + { + "id": "264", + "parents": [ + "263" + ] + }, + { + "id": "265", + "parents": [ + "261", + "260", + "262" + ] + }, + { + "id": "266", + "parents": [ + "264" + ] + }, + { + "id": "267", + "parents": [ + "264" + ] + }, + { + "id": "268", + "parents": [ + "265", + "267", + "266" + ] + }, + { + 
"id": "269", + "parents": [ + "267", + "265", + "266" + ] + }, + { + "id": "270", + "parents": [ + "268" + ] + }, + { + "id": "271", + "parents": [ + "269", + "268" + ] + }, + { + "id": "272", + "parents": [ + "268", + "269" + ] + }, + { + "id": "273", + "parents": [ + "272" + ] + }, + { + "id": "274", + "parents": [ + "269", + "268" + ] + }, + { + "id": "275", + "parents": [ + "274", + "273", + "270", + "271" + ] + }, + { + "id": "276", + "parents": [ + "274", + "270", + "273", + "271" + ] + }, + { + "id": "277", + "parents": [ + "275", + "276" + ] + }, + { + "id": "278", + "parents": [ + "276" + ] + }, + { + "id": "279", + "parents": [ + "275", + "278" + ] + }, + { + "id": "280", + "parents": [ + "279" + ] + }, + { + "id": "281", + "parents": [ + "277", + "278" + ] + }, + { + "id": "282", + "parents": [ + "280", + "281" + ] + }, + { + "id": "283", + "parents": [ + "281", + "280" + ] + }, + { + "id": "284", + "parents": [ + "283" + ] + }, + { + "id": "285", + "parents": [ + "277", + "280" + ] + }, + { + "id": "286", + "parents": [ + "282", + "285", + "284" + ] + }, + { + "id": "287", + "parents": [ + "282", + "285", + "284" + ] + }, + { + "id": "288", + "parents": [ + "286" + ] + }, + { + "id": "289", + "parents": [ + "287", + "288" + ] + }, + { + "id": "290", + "parents": [ + "282", + "285", + "284" + ] + }, + { + "id": "291", + "parents": [ + "287", + "288" + ] + }, + { + "id": "292", + "parents": [ + "285", + "284", + "282" + ] + }, + { + "id": "293", + "parents": [ + "288", + "290" + ] + }, + { + "id": "294", + "parents": [ + "293", + "291", + "292" + ] + }, + { + "id": "295", + "parents": [ + "294", + "289" + ] + }, + { + "id": "296", + "parents": [ + "294", + "289" + ] + }, + { + "id": "297", + "parents": [ + "295", + "296" + ] + }, + { + "id": "298", + "parents": [ + "294", + "289" + ] + }, + { + "id": "299", + "parents": [ + "298", + "295", + "296" + ] + }, + { + "id": "300", + "parents": [ + "298", + "295", + "296" + ] + }, + { + "id": "301", + "parents": 
[ + "300", + "297", + "299" + ] + }, + { + "id": "302", + "parents": [ + "299" + ] + }, + { + "id": "303", + "parents": [ + "297", + "302", + "300" + ] + }, + { + "id": "304", + "parents": [ + "301", + "302" + ] + }, + { + "id": "305", + "parents": [ + "301", + "303" + ] + }, + { + "id": "306", + "parents": [ + "303", + "304" + ] + }, + { + "id": "307", + "parents": [ + "304", + "305" + ] + }, + { + "id": "308", + "parents": [ + "306", + "307" + ] + }, + { + "id": "309", + "parents": [ + "306", + "305" + ] + }, + { + "id": "310", + "parents": [ + "307", + "309" + ] + }, + { + "id": "311", + "parents": [ + "310" + ] + }, + { + "id": "312", + "parents": [ + "310" + ] + }, + { + "id": "313", + "parents": [ + "312", + "308" + ] + }, + { + "id": "314", + "parents": [ + "311", + "313" + ] + }, + { + "id": "315", + "parents": [ + "311", + "313" + ] + }, + { + "id": "316", + "parents": [ + "315", + "314" + ] + }, + { + "id": "317", + "parents": [ + "314", + "315" + ] + }, + { + "id": "318", + "parents": [ + "316" + ] + }, + { + "id": "319", + "parents": [ + "318", + "317" + ] + }, + { + "id": "320", + "parents": [ + "317", + "318" + ] + }, + { + "id": "321", + "parents": [ + "317", + "318" + ] + }, + { + "id": "322", + "parents": [ + "317", + "318" + ] + }, + { + "id": "323", + "parents": [ + "319" + ] + }, + { + "id": "324", + "parents": [ + "323", + "321", + "320" + ] + }, + { + "id": "325", + "parents": [ + "324", + "322" + ] + }, + { + "id": "326", + "parents": [ + "325" + ] + }, + { + "id": "327", + "parents": [ + "321", + "322", + "319", + "320" + ] + }, + { + "id": "328", + "parents": [ + "327" + ] + }, + { + "id": "329", + "parents": [ + "322", + "324" + ] + }, + { + "id": "330", + "parents": [ + "323", + "322", + "320", + "321" + ] + }, + { + "id": "331", + "parents": [ + "330", + "327", + "329" + ] + }, + { + "id": "332", + "parents": [ + "331", + "328", + "325" + ] + }, + { + "id": "333", + "parents": [ + "329", + "328", + "326", + "330" + ] + }, + { + "id": 
"334", + "parents": [ + "328" + ] + }, + { + "id": "335", + "parents": [ + "329", + "326", + "330" + ] + }, + { + "id": "336", + "parents": [ + "334", + "333", + "335", + "332" + ] + }, + { + "id": "337", + "parents": [ + "335", + "333", + "332", + "334" + ] + }, + { + "id": "338", + "parents": [ + "336", + "337" + ] + }, + { + "id": "339", + "parents": [ + "337", + "336" + ] + }, + { + "id": "340", + "parents": [ + "338", + "339" + ] + }, + { + "id": "341", + "parents": [ + "337", + "336" + ] + }, + { + "id": "342", + "parents": [ + "339" + ] + }, + { + "id": "343", + "parents": [ + "337", + "336" + ] + }, + { + "id": "344", + "parents": [ + "343", + "340", + "342", + "341" + ] + }, + { + "id": "345", + "parents": [ + "341", + "340", + "342" + ] + }, + { + "id": "346", + "parents": [ + "344", + "345" + ] + }, + { + "id": "347", + "parents": [ + "345", + "344" + ] + }, + { + "id": "348", + "parents": [ + "347" + ] + }, + { + "id": "349", + "parents": [ + "346", + "348" + ] + }, + { + "id": "350", + "parents": [ + "349" + ] + }, + { + "id": "351", + "parents": [ + "350" + ] + }, + { + "id": "352", + "parents": [ + "348", + "346" + ] + }, + { + "id": "353", + "parents": [ + "349" + ] + }, + { + "id": "354", + "parents": [ + "353", + "352", + "351" + ] + }, + { + "id": "355", + "parents": [ + "351", + "352", + "353" + ] + }, + { + "id": "356", + "parents": [ + "355" + ] + }, + { + "id": "357", + "parents": [ + "354" + ] + }, + { + "id": "358", + "parents": [ + "356" + ] + }, + { + "id": "359", + "parents": [ + "357", + "356" + ] + }, + { + "id": "360", + "parents": [ + "358", + "357" + ] + }, + { + "id": "361", + "parents": [ + "359", + "360" + ] + }, + { + "id": "362", + "parents": [ + "361" + ] + }, + { + "id": "363", + "parents": [ + "360" + ] + }, + { + "id": "364", + "parents": [ + "360", + "359" + ] + }, + { + "id": "365", + "parents": [ + "360", + "359" + ] + }, + { + "id": "366", + "parents": [ + "365" + ] + }, + { + "id": "367", + "parents": [ + "365" + ] + 
}, + { + "id": "368", + "parents": [ + "362", + "363", + "364", + "365" + ] + }, + { + "id": "369", + "parents": [ + "368", + "366", + "367" + ] + }, + { + "id": "370", + "parents": [ + "366", + "362", + "367", + "363" + ] + }, + { + "id": "371", + "parents": [ + "368", + "370" + ] + }, + { + "id": "372", + "parents": [ + "369", + "371" + ] + }, + { + "id": "373", + "parents": [ + "372" + ] + }, + { + "id": "374", + "parents": [ + "371", + "369" + ] + }, + { + "id": "375", + "parents": [ + "374", + "373" + ] + }, + { + "id": "376", + "parents": [ + "374", + "373" + ] + }, + { + "id": "377", + "parents": [ + "373" + ] + }, + { + "id": "378", + "parents": [ + "377", + "376" + ] + }, + { + "id": "379", + "parents": [ + "373", + "374" + ] + }, + { + "id": "380", + "parents": [ + "376" + ] + }, + { + "id": "381", + "parents": [ + "378", + "379", + "380", + "375" + ] + }, + { + "id": "382", + "parents": [ + "375", + "380", + "377" + ] + }, + { + "id": "383", + "parents": [ + "381", + "382" + ] + }, + { + "id": "384", + "parents": [ + "382", + "381" + ] + }, + { + "id": "385", + "parents": [ + "382", + "381" + ] + }, + { + "id": "386", + "parents": [ + "385", + "383", + "384" + ] + }, + { + "id": "387", + "parents": [ + "385", + "384" + ] + }, + { + "id": "388", + "parents": [ + "385", + "384" + ] + }, + { + "id": "389", + "parents": [ + "388" + ] + }, + { + "id": "390", + "parents": [ + "387", + "389", + "386" + ] + }, + { + "id": "391", + "parents": [ + "387", + "386", + "389" + ] + }, + { + "id": "392", + "parents": [ + "390", + "391" + ] + }, + { + "id": "393", + "parents": [ + "390" + ] + }, + { + "id": "394", + "parents": [ + "390" + ] + }, + { + "id": "395", + "parents": [ + "394" + ] + }, + { + "id": "396", + "parents": [ + "395", + "393", + "392" + ] + }, + { + "id": "397", + "parents": [ + "393" + ] + }, + { + "id": "398", + "parents": [ + "392", + "397", + "395" + ] + }, + { + "id": "399", + "parents": [ + "398", + "396" + ] + }, + { + "id": "400", + "parents": 
[ + "398", + "396" + ] + }, + { + "id": "401", + "parents": [ + "399", + "400" + ] + }, + { + "id": "402", + "parents": [ + "401" + ] + }, + { + "id": "403", + "parents": [ + "402" + ] + }, + { + "id": "404", + "parents": [ + "402" + ] + }, + { + "id": "405", + "parents": [ + "403" + ] + }, + { + "id": "406", + "parents": [ + "405" + ] + }, + { + "id": "407", + "parents": [ + "403", + "404" + ] + }, + { + "id": "408", + "parents": [ + "406" + ] + }, + { + "id": "409", + "parents": [ + "403", + "404" + ] + }, + { + "id": "410", + "parents": [ + "407", + "408" + ] + }, + { + "id": "411", + "parents": [ + "407", + "406" + ] + }, + { + "id": "412", + "parents": [ + "409", + "411", + "410" + ] + }, + { + "id": "413", + "parents": [ + "409", + "411", + "410" + ] + }, + { + "id": "414", + "parents": [ + "412" + ] + }, + { + "id": "415", + "parents": [ + "409", + "408", + "411" + ] + }, + { + "id": "416", + "parents": [ + "412", + "413", + "415" + ] + }, + { + "id": "417", + "parents": [ + "412", + "413", + "415" + ] + }, + { + "id": "418", + "parents": [ + "414", + "417", + "416" + ] + }, + { + "id": "419", + "parents": [ + "412", + "413", + "415" + ] + }, + { + "id": "420", + "parents": [ + "418", + "419" + ] + }, + { + "id": "421", + "parents": [ + "417", + "416", + "414", + "419" + ] + }, + { + "id": "422", + "parents": [ + "413", + "412", + "415" + ] + }, + { + "id": "423", + "parents": [ + "416", + "422", + "417", + "414", + "419" + ] + }, + { + "id": "424", + "parents": [ + "421" + ] + }, + { + "id": "425", + "parents": [ + "419", + "416", + "414", + "422", + "417" + ] + }, + { + "id": "426", + "parents": [ + "418", + "423" + ] + }, + { + "id": "427", + "parents": [ + "425", + "423", + "420", + "424" + ] + }, + { + "id": "428", + "parents": [ + "420", + "426", + "425", + "424" + ] + }, + { + "id": "429", + "parents": [ + "428" + ] + }, + { + "id": "430", + "parents": [ + "427", + "428" + ] + }, + { + "id": "431", + "parents": [ + "426", + "427" + ] + }, + { + "id": 
"432", + "parents": [ + "431", + "429", + "430" + ] + }, + { + "id": "433", + "parents": [ + "430", + "429" + ] + }, + { + "id": "434", + "parents": [ + "429", + "430" + ] + }, + { + "id": "435", + "parents": [ + "431", + "434" + ] + }, + { + "id": "436", + "parents": [ + "429", + "430" + ] + }, + { + "id": "437", + "parents": [ + "432", + "436", + "435", + "433" + ] + }, + { + "id": "438", + "parents": [ + "433", + "432", + "435", + "436" + ] + }, + { + "id": "439", + "parents": [ + "435", + "433", + "436", + "432" + ] + }, + { + "id": "440", + "parents": [ + "437", + "438", + "439" + ] + }, + { + "id": "441", + "parents": [ + "437", + "438" + ] + }, + { + "id": "442", + "parents": [ + "432", + "433", + "436", + "435" + ] + }, + { + "id": "443", + "parents": [ + "441", + "442", + "440" + ] + }, + { + "id": "444", + "parents": [ + "439", + "441", + "442" + ] + }, + { + "id": "445", + "parents": [ + "444", + "443" + ] + }, + { + "id": "446", + "parents": [ + "443", + "444" + ] + }, + { + "id": "447", + "parents": [ + "445" + ] + }, + { + "id": "448", + "parents": [ + "444", + "443" + ] + }, + { + "id": "449", + "parents": [ + "447", + "446" + ] + }, + { + "id": "450", + "parents": [ + "445" + ] + }, + { + "id": "451", + "parents": [ + "449" + ] + }, + { + "id": "452", + "parents": [ + "451", + "450", + "448" + ] + }, + { + "id": "453", + "parents": [ + "448", + "450", + "451" + ] + }, + { + "id": "454", + "parents": [ + "450", + "448", + "451" + ] + }, + { + "id": "455", + "parents": [ + "451", + "448", + "450" + ] + }, + { + "id": "456", + "parents": [ + "455", + "452", + "453", + "454" + ] + }, + { + "id": "457", + "parents": [ + "456" + ] + }, + { + "id": "458", + "parents": [ + "457" + ] + }, + { + "id": "459", + "parents": [ + "456" + ] + }, + { + "id": "460", + "parents": [ + "458", + "459" + ] + }, + { + "id": "461", + "parents": [ + "460" + ] + }, + { + "id": "462", + "parents": [ + "460" + ] + }, + { + "id": "463", + "parents": [ + "461", + "462" + ] + }, + 
{ + "id": "464", + "parents": [ + "462", + "461" + ] + }, + { + "id": "465", + "parents": [ + "463", + "464" + ] + }, + { + "id": "466", + "parents": [ + "463" + ] + }, + { + "id": "467", + "parents": [ + "466", + "465" + ] + }, + { + "id": "468", + "parents": [ + "461" + ] + }, + { + "id": "469", + "parents": [ + "468" + ] + }, + { + "id": "470", + "parents": [ + "464", + "463", + "469" + ] + }, + { + "id": "471", + "parents": [ + "466", + "464" + ] + }, + { + "id": "472", + "parents": [ + "467", + "469", + "471" + ] + }, + { + "id": "473", + "parents": [ + "465", + "470", + "471" + ] + }, + { + "id": "474", + "parents": [ + "471", + "467", + "470" + ] + }, + { + "id": "475", + "parents": [ + "465", + "470", + "471" + ] + }, + { + "id": "476", + "parents": [ + "475", + "473", + "474", + "472" + ] + }, + { + "id": "477", + "parents": [ + "472", + "475", + "473", + "474" + ] + }, + { + "id": "478", + "parents": [ + "476" + ] + }, + { + "id": "479", + "parents": [ + "478" + ] + }, + { + "id": "480", + "parents": [ + "479", + "477" + ] + }, + { + "id": "481", + "parents": [ + "477", + "478" + ] + }, + { + "id": "482", + "parents": [ + "481" + ] + }, + { + "id": "483", + "parents": [ + "482", + "479" + ] + }, + { + "id": "484", + "parents": [ + "481", + "479" + ] + }, + { + "id": "485", + "parents": [ + "484", + "483", + "480" + ] + }, + { + "id": "486", + "parents": [ + "482", + "480" + ] + }, + { + "id": "487", + "parents": [ + "480", + "483" + ] + }, + { + "id": "488", + "parents": [ + "486" + ] + }, + { + "id": "489", + "parents": [ + "487", + "484" + ] + }, + { + "id": "490", + "parents": [ + "483", + "480" + ] + }, + { + "id": "491", + "parents": [ + "488", + "487" + ] + }, + { + "id": "492", + "parents": [ + "488", + "485", + "490", + "489" + ] + }, + { + "id": "493", + "parents": [ + "491", + "490", + "485", + "489" + ] + }, + { + "id": "494", + "parents": [ + "492", + "493" + ] + }, + { + "id": "495", + "parents": [ + "493", + "492" + ] + }, + { + "id": "496", 
+ "parents": [ + "495", + "494" + ] + }, + { + "id": "497", + "parents": [ + "496" + ] + }, + { + "id": "498", + "parents": [ + "496" + ] + }, + { + "id": "499", + "parents": [ + "496" + ] + }, + { + "id": "500", + "parents": [ + "496" + ] + }, + { + "id": "501", + "parents": [ + "496" + ] + }, + { + "id": "502", + "parents": [ + "499", + "501", + "500", + "497", + "498" + ] + }, + { + "id": "503", + "parents": [ + "497", + "499", + "500", + "498", + "501" + ] + }, + { + "id": "504", + "parents": [ + "503", + "502" + ] + }, + { + "id": "505", + "parents": [ + "503" + ] + }, + { + "id": "506", + "parents": [ + "503", + "502" + ] + }, + { + "id": "507", + "parents": [ + "506", + "505" + ] + }, + { + "id": "508", + "parents": [ + "505", + "502" + ] + }, + { + "id": "509", + "parents": [ + "506" + ] + }, + { + "id": "510", + "parents": [ + "504", + "507", + "509" + ] + }, + { + "id": "511", + "parents": [ + "510", + "508" + ] + }, + { + "id": "512", + "parents": [ + "504", + "508", + "507" + ] + }, + { + "id": "513", + "parents": [ + "512", + "511" + ] + }, + { + "id": "514", + "parents": [ + "508", + "510" + ] + }, + { + "id": "515", + "parents": [ + "513", + "514" + ] + }, + { + "id": "516", + "parents": [ + "513" + ] + }, + { + "id": "517", + "parents": [ + "515" + ] + }, + { + "id": "518", + "parents": [ + "516", + "514" + ] + }, + { + "id": "519", + "parents": [ + "518", + "517" + ] + }, + { + "id": "520", + "parents": [ + "519" + ] + }, + { + "id": "521", + "parents": [ + "520" + ] + }, + { + "id": "522", + "parents": [ + "521" + ] + }, + { + "id": "523", + "parents": [ + "519" + ] + }, + { + "id": "524", + "parents": [ + "523" + ] + }, + { + "id": "525", + "parents": [ + "524" + ] + }, + { + "id": "526", + "parents": [ + "525", + "522" + ] + }, + { + "id": "527", + "parents": [ + "526" + ] + }, + { + "id": "528", + "parents": [ + "527" + ] + }, + { + "id": "529", + "parents": [ + "527" + ] + }, + { + "id": "530", + "parents": [ + "528" + ] + }, + { + "id": 
"531", + "parents": [ + "526" + ] + }, + { + "id": "532", + "parents": [ + "527", + "531" + ] + }, + { + "id": "533", + "parents": [ + "530", + "532", + "529" + ] + }, + { + "id": "534", + "parents": [ + "529", + "528", + "531" + ] + }, + { + "id": "535", + "parents": [ + "534", + "533" + ] + }, + { + "id": "536", + "parents": [ + "534", + "533" + ] + }, + { + "id": "537", + "parents": [ + "534", + "533" + ] + }, + { + "id": "538", + "parents": [ + "537", + "535", + "536" + ] + }, + { + "id": "539", + "parents": [ + "536", + "537", + "535" + ] + }, + { + "id": "540", + "parents": [ + "539", + "538" + ] + }, + { + "id": "541", + "parents": [ + "539", + "538" + ] + }, + { + "id": "542", + "parents": [ + "541" + ] + }, + { + "id": "543", + "parents": [ + "542", + "540" + ] + }, + { + "id": "544", + "parents": [ + "540" + ] + }, + { + "id": "545", + "parents": [ + "542", + "540" + ] + }, + { + "id": "546", + "parents": [ + "542" + ] + }, + { + "id": "547", + "parents": [ + "546", + "544", + "543" + ] + }, + { + "id": "548", + "parents": [ + "546", + "543", + "544", + "545" + ] + }, + { + "id": "549", + "parents": [ + "548" + ] + }, + { + "id": "550", + "parents": [ + "545", + "544", + "546", + "543" + ] + }, + { + "id": "551", + "parents": [ + "547", + "550", + "549" + ] + }, + { + "id": "552", + "parents": [ + "550", + "547", + "549" + ] + }, + { + "id": "553", + "parents": [ + "549", + "547", + "550" + ] + }, + { + "id": "554", + "parents": [ + "549", + "550", + "547" + ] + }, + { + "id": "555", + "parents": [ + "552", + "551", + "554", + "553" + ] + }, + { + "id": "556", + "parents": [ + "555" + ] + }, + { + "id": "557", + "parents": [ + "556" + ] + }, + { + "id": "558", + "parents": [ + "555" + ] + }, + { + "id": "559", + "parents": [ + "556", + "558" + ] + }, + { + "id": "560", + "parents": [ + "559" + ] + }, + { + "id": "561", + "parents": [ + "560", + "557" + ] + }, + { + "id": "562", + "parents": [ + "561" + ] + }, + { + "id": "563", + "parents": [ + "561" + ] 
+ }, + { + "id": "564", + "parents": [ + "561" + ] + }, + { + "id": "565", + "parents": [ + "561" + ] + }, + { + "id": "566", + "parents": [ + "563", + "562" + ] + }, + { + "id": "567", + "parents": [ + "565", + "566", + "564" + ] + }, + { + "id": "568", + "parents": [ + "565", + "563", + "562", + "564" + ] + }, + { + "id": "569", + "parents": [ + "565" + ] + }, + { + "id": "570", + "parents": [ + "568", + "566" + ] + }, + { + "id": "571", + "parents": [ + "564", + "565", + "566" + ] + }, + { + "id": "572", + "parents": [ + "570", + "569", + "571", + "567" + ] + }, + { + "id": "573", + "parents": [ + "567", + "569", + "568" + ] + }, + { + "id": "574", + "parents": [ + "568", + "569", + "566" + ] + }, + { + "id": "575", + "parents": [ + "574", + "573", + "572" + ] + }, + { + "id": "576", + "parents": [ + "572", + "574", + "573" + ] + }, + { + "id": "577", + "parents": [ + "576" + ] + }, + { + "id": "578", + "parents": [ + "575", + "577" + ] + }, + { + "id": "579", + "parents": [ + "578" + ] + }, + { + "id": "580", + "parents": [ + "578" + ] + }, + { + "id": "581", + "parents": [ + "580", + "579" + ] + }, + { + "id": "582", + "parents": [ + "580" + ] + }, + { + "id": "583", + "parents": [ + "579", + "582" + ] + }, + { + "id": "584", + "parents": [ + "581", + "582" + ] + }, + { + "id": "585", + "parents": [ + "582", + "581" + ] + }, + { + "id": "586", + "parents": [ + "579", + "582" + ] + }, + { + "id": "587", + "parents": [ + "586", + "585" + ] + }, + { + "id": "588", + "parents": [ + "585", + "586" + ] + }, + { + "id": "589", + "parents": [ + "583", + "584", + "588" + ] + }, + { + "id": "590", + "parents": [ + "589", + "587" + ] + }, + { + "id": "591", + "parents": [ + "589", + "587" + ] + }, + { + "id": "592", + "parents": [ + "590", + "591" + ] + }, + { + "id": "593", + "parents": [ + "592" + ] + }, + { + "id": "594", + "parents": [ + "592" + ] + }, + { + "id": "595", + "parents": [ + "594", + "593" + ] + }, + { + "id": "596", + "parents": [ + "595" + ] + }, + { + 
"id": "597", + "parents": [ + "595" + ] + }, + { + "id": "598", + "parents": [ + "595" + ] + }, + { + "id": "599", + "parents": [ + "597", + "598" + ] + }, + { + "id": "600", + "parents": [ + "599", + "596" + ] + }, + { + "id": "601", + "parents": [ + "596", + "599" + ] + }, + { + "id": "602", + "parents": [ + "600", + "601" + ] + }, + { + "id": "603", + "parents": [ + "602" + ] + }, + { + "id": "604", + "parents": [ + "602" + ] + }, + { + "id": "605", + "parents": [ + "604" + ] + }, + { + "id": "606", + "parents": [ + "603", + "605" + ] + }, + { + "id": "607", + "parents": [ + "603", + "604" + ] + }, + { + "id": "608", + "parents": [ + "607" + ] + }, + { + "id": "609", + "parents": [ + "604", + "603" + ] + }, + { + "id": "610", + "parents": [ + "607", + "605" + ] + }, + { + "id": "611", + "parents": [ + "606", + "608", + "609", + "610" + ] + }, + { + "id": "612", + "parents": [ + "610", + "609", + "606", + "608" + ] + }, + { + "id": "613", + "parents": [ + "611" + ] + }, + { + "id": "614", + "parents": [ + "612", + "613" + ] + }, + { + "id": "615", + "parents": [ + "614" + ] + }, + { + "id": "616", + "parents": [ + "612", + "613" + ] + }, + { + "id": "617", + "parents": [ + "614" + ] + }, + { + "id": "618", + "parents": [ + "614", + "616" + ] + }, + { + "id": "619", + "parents": [ + "614" + ] + }, + { + "id": "620", + "parents": [ + "612", + "613" + ] + }, + { + "id": "621", + "parents": [ + "616", + "619", + "620", + "617" + ] + }, + { + "id": "622", + "parents": [ + "615", + "618", + "621" + ] + }, + { + "id": "623", + "parents": [ + "621", + "615", + "618" + ] + }, + { + "id": "624", + "parents": [ + "615", + "621", + "618" + ] + }, + { + "id": "625", + "parents": [ + "623", + "624" + ] + }, + { + "id": "626", + "parents": [ + "622", + "625" + ] + }, + { + "id": "627", + "parents": [ + "626" + ] + }, + { + "id": "628", + "parents": [ + "626" + ] + }, + { + "id": "629", + "parents": [ + "628", + "627" + ] + }, + { + "id": "630", + "parents": [ + "623", + "622", 
+ "624" + ] + }, + { + "id": "631", + "parents": [ + "630" + ] + }, + { + "id": "632", + "parents": [ + "625", + "622" + ] + }, + { + "id": "633", + "parents": [ + "629", + "631", + "632" + ] + }, + { + "id": "634", + "parents": [ + "631", + "625" + ] + }, + { + "id": "635", + "parents": [ + "627", + "634", + "632" + ] + }, + { + "id": "636", + "parents": [ + "632", + "634", + "628", + "627" + ] + }, + { + "id": "637", + "parents": [ + "636", + "633", + "635" + ] + }, + { + "id": "638", + "parents": [ + "637" + ] + }, + { + "id": "639", + "parents": [ + "638" + ] + }, + { + "id": "640", + "parents": [ + "637" + ] + }, + { + "id": "641", + "parents": [ + "637" + ] + }, + { + "id": "642", + "parents": [ + "639", + "641", + "640" + ] + }, + { + "id": "643", + "parents": [ + "640", + "641", + "639" + ] + }, + { + "id": "644", + "parents": [ + "642", + "643" + ] + }, + { + "id": "645", + "parents": [ + "642", + "643" + ] + }, + { + "id": "646", + "parents": [ + "643", + "642" + ] + }, + { + "id": "647", + "parents": [ + "644" + ] + }, + { + "id": "648", + "parents": [ + "647", + "645", + "646" + ] + }, + { + "id": "649", + "parents": [ + "648" + ] + }, + { + "id": "650", + "parents": [ + "649" + ] + }, + { + "id": "651", + "parents": [ + "648" + ] + }, + { + "id": "652", + "parents": [ + "646", + "647", + "645" + ] + }, + { + "id": "653", + "parents": [ + "651", + "649", + "652" + ] + }, + { + "id": "654", + "parents": [ + "650", + "653" + ] + }, + { + "id": "655", + "parents": [ + "650", + "653" + ] + }, + { + "id": "656", + "parents": [ + "653", + "650" + ] + }, + { + "id": "657", + "parents": [ + "650", + "653" + ] + }, + { + "id": "658", + "parents": [ + "653", + "650" + ] + }, + { + "id": "659", + "parents": [ + "655", + "658" + ] + }, + { + "id": "660", + "parents": [ + "657", + "656", + "654", + "658", + "655" + ] + }, + { + "id": "661", + "parents": [ + "660" + ] + }, + { + "id": "662", + "parents": [ + "656", + "654", + "657", + "659" + ] + }, + { + "id": 
"663", + "parents": [ + "662" + ] + }, + { + "id": "664", + "parents": [ + "663", + "661" + ] + }, + { + "id": "665", + "parents": [ + "661", + "662" + ] + }, + { + "id": "666", + "parents": [ + "661", + "663" + ] + }, + { + "id": "667", + "parents": [ + "661", + "659" + ] + }, + { + "id": "668", + "parents": [ + "667", + "663" + ] + }, + { + "id": "669", + "parents": [ + "662", + "667" + ] + }, + { + "id": "670", + "parents": [ + "668", + "666", + "664", + "665" + ] + }, + { + "id": "671", + "parents": [ + "666", + "669", + "664", + "665", + "668" + ] + }, + { + "id": "672", + "parents": [ + "670", + "669" + ] + }, + { + "id": "673", + "parents": [ + "672", + "671" + ] + }, + { + "id": "674", + "parents": [ + "669", + "670" + ] + }, + { + "id": "675", + "parents": [ + "672", + "671" + ] + }, + { + "id": "676", + "parents": [ + "672" + ] + }, + { + "id": "677", + "parents": [ + "676", + "675", + "674", + "673" + ] + }, + { + "id": "678", + "parents": [ + "675", + "676", + "674" + ] + }, + { + "id": "679", + "parents": [ + "673", + "678" + ] + }, + { + "id": "680", + "parents": [ + "679" + ] + }, + { + "id": "681", + "parents": [ + "680", + "677" + ] + }, + { + "id": "682", + "parents": [ + "681" + ] + }, + { + "id": "683", + "parents": [ + "682" + ] + }, + { + "id": "684", + "parents": [ + "681" + ] + }, + { + "id": "685", + "parents": [ + "682" + ] + }, + { + "id": "686", + "parents": [ + "685" + ] + }, + { + "id": "687", + "parents": [ + "685", + "684", + "683" + ] + }, + { + "id": "688", + "parents": [ + "683", + "686" + ] + }, + { + "id": "689", + "parents": [ + "688", + "684" + ] + }, + { + "id": "690", + "parents": [ + "687", + "689" + ] + }, + { + "id": "691", + "parents": [ + "689" + ] + }, + { + "id": "692", + "parents": [ + "691", + "690" + ] + }, + { + "id": "693", + "parents": [ + "687", + "689" + ] + }, + { + "id": "694", + "parents": [ + "690", + "693", + "691" + ] + }, + { + "id": "695", + "parents": [ + "694" + ] + }, + { + "id": "696", + "parents": 
[ + "692", + "694" + ] + }, + { + "id": "697", + "parents": [ + "693", + "691", + "690" + ] + }, + { + "id": "698", + "parents": [ + "696", + "697", + "695" + ] + }, + { + "id": "699", + "parents": [ + "698" + ] + }, + { + "id": "700", + "parents": [ + "696", + "695", + "697" + ] + }, + { + "id": "701", + "parents": [ + "697", + "696", + "695" + ] + }, + { + "id": "702", + "parents": [ + "701", + "700", + "699" + ] + }, + { + "id": "703", + "parents": [ + "698", + "701" + ] + }, + { + "id": "704", + "parents": [ + "703", + "699", + "700" + ] + }, + { + "id": "705", + "parents": [ + "703", + "702" + ] + }, + { + "id": "706", + "parents": [ + "702", + "703" + ] + }, + { + "id": "707", + "parents": [ + "704", + "705", + "706" + ] + }, + { + "id": "708", + "parents": [ + "704" + ] + }, + { + "id": "709", + "parents": [ + "707", + "708" + ] + }, + { + "id": "710", + "parents": [ + "707", + "708" + ] + }, + { + "id": "711", + "parents": [ + "709" + ] + }, + { + "id": "712", + "parents": [ + "711", + "710" + ] + }, + { + "id": "713", + "parents": [ + "709" + ] + }, + { + "id": "714", + "parents": [ + "711", + "713", + "710" + ] + }, + { + "id": "715", + "parents": [ + "714", + "712" + ] + }, + { + "id": "716", + "parents": [ + "715" + ] + }, + { + "id": "717", + "parents": [ + "716" + ] + }, + { + "id": "718", + "parents": [ + "714", + "712" + ] + }, + { + "id": "719", + "parents": [ + "716" + ] + }, + { + "id": "720", + "parents": [ + "719" + ] + }, + { + "id": "721", + "parents": [ + "717", + "718", + "720" + ] + }, + { + "id": "722", + "parents": [ + "721" + ] + }, + { + "id": "723", + "parents": [ + "720", + "718", + "717" + ] + }, + { + "id": "724", + "parents": [ + "716", + "718" + ] + }, + { + "id": "725", + "parents": [ + "720", + "718" + ] + }, + { + "id": "726", + "parents": [ + "725", + "717", + "724" + ] + }, + { + "id": "727", + "parents": [ + "726" + ] + }, + { + "id": "728", + "parents": [ + "725", + "724", + "721", + "723" + ] + }, + { + "id": "729", + 
"parents": [ + "722", + "727", + "723" + ] + }, + { + "id": "730", + "parents": [ + "727", + "723", + "722" + ] + }, + { + "id": "731", + "parents": [ + "722", + "723", + "727" + ] + }, + { + "id": "732", + "parents": [ + "730" + ] + }, + { + "id": "733", + "parents": [ + "728", + "731", + "729", + "732" + ] + }, + { + "id": "734", + "parents": [ + "728", + "729", + "732", + "731" + ] + }, + { + "id": "735", + "parents": [ + "729", + "732", + "731", + "728" + ] + }, + { + "id": "736", + "parents": [ + "733", + "734", + "735" + ] + }, + { + "id": "737", + "parents": [ + "731", + "728", + "732", + "729" + ] + }, + { + "id": "738", + "parents": [ + "737", + "736" + ] + }, + { + "id": "739", + "parents": [ + "738" + ] + }, + { + "id": "740", + "parents": [ + "739" + ] + }, + { + "id": "741", + "parents": [ + "739" + ] + }, + { + "id": "742", + "parents": [ + "739" + ] + }, + { + "id": "743", + "parents": [ + "739" + ] + }, + { + "id": "744", + "parents": [ + "740", + "743", + "741" + ] + }, + { + "id": "745", + "parents": [ + "743", + "741", + "742", + "740" + ] + }, + { + "id": "746", + "parents": [ + "744", + "745" + ] + }, + { + "id": "747", + "parents": [ + "746" + ] + }, + { + "id": "748", + "parents": [ + "747" + ] + }, + { + "id": "749", + "parents": [ + "748" + ] + }, + { + "id": "750", + "parents": [ + "746" + ] + }, + { + "id": "751", + "parents": [ + "746" + ] + }, + { + "id": "752", + "parents": [ + "751", + "750", + "749" + ] + }, + { + "id": "753", + "parents": [ + "749", + "751", + "750" + ] + }, + { + "id": "754", + "parents": [ + "753" + ] + }, + { + "id": "755", + "parents": [ + "754", + "752" + ] + }, + { + "id": "756", + "parents": [ + "752", + "753" + ] + }, + { + "id": "757", + "parents": [ + "752" + ] + }, + { + "id": "758", + "parents": [ + "753", + "757" + ] + }, + { + "id": "759", + "parents": [ + "755", + "756" + ] + }, + { + "id": "760", + "parents": [ + "755", + "757", + "756" + ] + }, + { + "id": "761", + "parents": [ + "760", + "759" + ] 
+ }, + { + "id": "762", + "parents": [ + "761", + "758" + ] + }, + { + "id": "763", + "parents": [ + "762" + ] + }, + { + "id": "764", + "parents": [ + "762" + ] + }, + { + "id": "765", + "parents": [ + "762" + ] + }, + { + "id": "766", + "parents": [ + "764", + "765", + "763" + ] + }, + { + "id": "767", + "parents": [ + "766" + ] + }, + { + "id": "768", + "parents": [ + "763", + "765", + "764" + ] + }, + { + "id": "769", + "parents": [ + "763", + "765", + "764" + ] + }, + { + "id": "770", + "parents": [ + "766", + "769" + ] + }, + { + "id": "771", + "parents": [ + "769" + ] + }, + { + "id": "772", + "parents": [ + "768", + "771", + "770" + ] + }, + { + "id": "773", + "parents": [ + "772", + "767" + ] + }, + { + "id": "774", + "parents": [ + "772", + "767" + ] + }, + { + "id": "775", + "parents": [ + "772", + "767" + ] + }, + { + "id": "776", + "parents": [ + "773" + ] + }, + { + "id": "777", + "parents": [ + "773", + "774" + ] + }, + { + "id": "778", + "parents": [ + "775", + "777", + "776" + ] + }, + { + "id": "779", + "parents": [ + "774", + "776" + ] + }, + { + "id": "780", + "parents": [ + "779", + "778" + ] + }, + { + "id": "781", + "parents": [ + "780" + ] + }, + { + "id": "782", + "parents": [ + "775", + "777", + "779" + ] + }, + { + "id": "783", + "parents": [ + "781", + "782" + ] + }, + { + "id": "784", + "parents": [ + "782", + "780" + ] + }, + { + "id": "785", + "parents": [ + "783", + "784" + ] + }, + { + "id": "786", + "parents": [ + "783", + "784" + ] + }, + { + "id": "787", + "parents": [ + "786" + ] + }, + { + "id": "788", + "parents": [ + "786" + ] + }, + { + "id": "789", + "parents": [ + "785", + "786" + ] + }, + { + "id": "790", + "parents": [ + "787", + "788", + "789" + ] + }, + { + "id": "791", + "parents": [ + "788", + "785", + "787" + ] + }, + { + "id": "792", + "parents": [ + "790" + ] + }, + { + "id": "793", + "parents": [ + "792", + "791" + ] + }, + { + "id": "794", + "parents": [ + "791", + "792" + ] + }, + { + "id": "795", + "parents": 
[ + "793" + ] + }, + { + "id": "796", + "parents": [ + "794", + "795" + ] + }, + { + "id": "797", + "parents": [ + "795", + "794" + ] + }, + { + "id": "798", + "parents": [ + "794", + "795" + ] + }, + { + "id": "799", + "parents": [ + "798" + ] + }, + { + "id": "800", + "parents": [ + "797", + "799", + "796" + ] + }, + { + "id": "801", + "parents": [ + "796", + "799", + "797" + ] + }, + { + "id": "802", + "parents": [ + "801", + "800" + ] + }, + { + "id": "803", + "parents": [ + "801", + "800" + ] + }, + { + "id": "804", + "parents": [ + "803" + ] + }, + { + "id": "805", + "parents": [ + "802", + "804" + ] + }, + { + "id": "806", + "parents": [ + "802", + "804" + ] + }, + { + "id": "807", + "parents": [ + "805" + ] + }, + { + "id": "808", + "parents": [ + "807", + "806" + ] + }, + { + "id": "809", + "parents": [ + "806", + "807" + ] + }, + { + "id": "810", + "parents": [ + "808", + "809" + ] + }, + { + "id": "811", + "parents": [ + "808", + "809" + ] + }, + { + "id": "812", + "parents": [ + "810", + "811" + ] + }, + { + "id": "813", + "parents": [ + "811" + ] + }, + { + "id": "814", + "parents": [ + "812", + "813" + ] + }, + { + "id": "815", + "parents": [ + "810", + "813" + ] + }, + { + "id": "816", + "parents": [ + "815", + "814" + ] + }, + { + "id": "817", + "parents": [ + "812", + "813" + ] + }, + { + "id": "818", + "parents": [ + "817", + "814", + "815" + ] + }, + { + "id": "819", + "parents": [ + "816", + "818" + ] + }, + { + "id": "820", + "parents": [ + "819" + ] + }, + { + "id": "821", + "parents": [ + "820" + ] + }, + { + "id": "822", + "parents": [ + "816", + "818" + ] + }, + { + "id": "823", + "parents": [ + "822", + "820" + ] + }, + { + "id": "824", + "parents": [ + "823", + "821" + ] + }, + { + "id": "825", + "parents": [ + "824" + ] + }, + { + "id": "826", + "parents": [ + "821", + "823" + ] + }, + { + "id": "827", + "parents": [ + "826", + "824" + ] + }, + { + "id": "828", + "parents": [ + "824", + "826" + ] + }, + { + "id": "829", + "parents": [ + 
"826", + "825" + ] + }, + { + "id": "830", + "parents": [ + "824" + ] + }, + { + "id": "831", + "parents": [ + "829", + "828", + "830", + "827" + ] + }, + { + "id": "832", + "parents": [ + "831" + ] + }, + { + "id": "833", + "parents": [ + "831" + ] + }, + { + "id": "834", + "parents": [ + "831" + ] + }, + { + "id": "835", + "parents": [ + "832", + "834", + "833" + ] + }, + { + "id": "836", + "parents": [ + "834", + "832", + "833" + ] + }, + { + "id": "837", + "parents": [ + "832", + "833", + "834" + ] + }, + { + "id": "838", + "parents": [ + "832", + "833", + "834" + ] + }, + { + "id": "839", + "parents": [ + "837", + "835", + "838" + ] + }, + { + "id": "840", + "parents": [ + "837" + ] + }, + { + "id": "841", + "parents": [ + "836", + "835", + "840", + "838" + ] + }, + { + "id": "842", + "parents": [ + "838", + "837", + "835" + ] + }, + { + "id": "843", + "parents": [ + "841", + "839", + "842" + ] + }, + { + "id": "844", + "parents": [ + "842", + "839", + "841" + ] + }, + { + "id": "845", + "parents": [ + "844" + ] + }, + { + "id": "846", + "parents": [ + "844" + ] + }, + { + "id": "847", + "parents": [ + "845", + "846" + ] + }, + { + "id": "848", + "parents": [ + "847", + "843" + ] + }, + { + "id": "849", + "parents": [ + "848" + ] + }, + { + "id": "850", + "parents": [ + "849" + ] + }, + { + "id": "851", + "parents": [ + "848" + ] + }, + { + "id": "852", + "parents": [ + "851", + "850" + ] + }, + { + "id": "853", + "parents": [ + "852" + ] + }, + { + "id": "854", + "parents": [ + "850", + "851" + ] + }, + { + "id": "855", + "parents": [ + "853", + "854" + ] + }, + { + "id": "856", + "parents": [ + "855" + ] + }, + { + "id": "857", + "parents": [ + "850", + "851" + ] + }, + { + "id": "858", + "parents": [ + "857" + ] + }, + { + "id": "859", + "parents": [ + "858" + ] + }, + { + "id": "860", + "parents": [ + "858" + ] + }, + { + "id": "861", + "parents": [ + "860", + "859", + "856" + ] + }, + { + "id": "862", + "parents": [ + "861" + ] + }, + { + "id": "863", + 
"parents": [ + "861" + ] + }, + { + "id": "864", + "parents": [ + "861" + ] + }, + { + "id": "865", + "parents": [ + "864", + "863", + "862" + ] + }, + { + "id": "866", + "parents": [ + "863", + "862" + ] + }, + { + "id": "867", + "parents": [ + "863", + "864", + "862" + ] + }, + { + "id": "868", + "parents": [ + "865", + "866", + "867" + ] + }, + { + "id": "869", + "parents": [ + "867", + "866" + ] + }, + { + "id": "870", + "parents": [ + "867", + "865", + "866" + ] + }, + { + "id": "871", + "parents": [ + "868", + "869", + "870" + ] + }, + { + "id": "872", + "parents": [ + "868", + "870", + "869" + ] + }, + { + "id": "873", + "parents": [ + "871", + "872" + ] + }, + { + "id": "874", + "parents": [ + "873" + ] + }, + { + "id": "875", + "parents": [ + "874" + ] + }, + { + "id": "876", + "parents": [ + "874" + ] + }, + { + "id": "877", + "parents": [ + "874" + ] + }, + { + "id": "878", + "parents": [ + "874" + ] + }, + { + "id": "879", + "parents": [ + "875", + "877" + ] + }, + { + "id": "880", + "parents": [ + "878", + "879", + "876" + ] + }, + { + "id": "881", + "parents": [ + "879", + "876", + "878" + ] + }, + { + "id": "882", + "parents": [ + "880" + ] + }, + { + "id": "883", + "parents": [ + "880", + "881" + ] + }, + { + "id": "884", + "parents": [ + "882" + ] + }, + { + "id": "885", + "parents": [ + "883", + "884" + ] + }, + { + "id": "886", + "parents": [ + "882", + "883" + ] + }, + { + "id": "887", + "parents": [ + "886" + ] + }, + { + "id": "888", + "parents": [ + "887", + "885" + ] + }, + { + "id": "889", + "parents": [ + "888" + ] + }, + { + "id": "890", + "parents": [ + "888" + ] + }, + { + "id": "891", + "parents": [ + "888" + ] + }, + { + "id": "892", + "parents": [ + "889", + "891", + "890" + ] + }, + { + "id": "893", + "parents": [ + "891", + "889", + "890" + ] + }, + { + "id": "894", + "parents": [ + "893", + "892" + ] + }, + { + "id": "895", + "parents": [ + "892", + "893" + ] + }, + { + "id": "896", + "parents": [ + "894", + "895" + ] + }, + { + 
"id": "897", + "parents": [ + "895", + "894" + ] + }, + { + "id": "898", + "parents": [ + "896", + "897" + ] + }, + { + "id": "899", + "parents": [ + "896", + "897" + ] + }, + { + "id": "900", + "parents": [ + "899" + ] + }, + { + "id": "901", + "parents": [ + "898", + "900" + ] + }, + { + "id": "902", + "parents": [ + "898", + "900" + ] + }, + { + "id": "903", + "parents": [ + "901", + "902" + ] + }, + { + "id": "904", + "parents": [ + "902", + "901" + ] + }, + { + "id": "905", + "parents": [ + "902", + "901" + ] + }, + { + "id": "906", + "parents": [ + "903", + "905" + ] + }, + { + "id": "907", + "parents": [ + "904", + "906" + ] + }, + { + "id": "908", + "parents": [ + "904", + "906" + ] + }, + { + "id": "909", + "parents": [ + "906", + "904" + ] + }, + { + "id": "910", + "parents": [ + "907", + "908" + ] + }, + { + "id": "911", + "parents": [ + "909", + "910" + ] + }, + { + "id": "912", + "parents": [ + "911" + ] + }, + { + "id": "913", + "parents": [ + "909", + "910" + ] + }, + { + "id": "914", + "parents": [ + "913", + "912" + ] + }, + { + "id": "915", + "parents": [ + "914" + ] + }, + { + "id": "916", + "parents": [ + "914" + ] + }, + { + "id": "917", + "parents": [ + "916", + "915" + ] + }, + { + "id": "918", + "parents": [ + "916" + ] + }, + { + "id": "919", + "parents": [ + "918", + "917" + ] + }, + { + "id": "920", + "parents": [ + "916", + "915" + ] + }, + { + "id": "921", + "parents": [ + "919", + "920" + ] + }, + { + "id": "922", + "parents": [ + "921" + ] + }, + { + "id": "923", + "parents": [ + "922" + ] + }, + { + "id": "924", + "parents": [ + "922" + ] + }, + { + "id": "925", + "parents": [ + "924", + "923" + ] + }, + { + "id": "926", + "parents": [ + "925" + ] + }, + { + "id": "927", + "parents": [ + "926" + ] + }, + { + "id": "928", + "parents": [ + "927" + ] + }, + { + "id": "929", + "parents": [ + "928" + ] + }, + { + "id": "930", + "parents": [ + "927" + ] + }, + { + "id": "931", + "parents": [ + "930", + "928" + ] + }, + { + "id": "932", + 
"parents": [ + "929", + "931" + ] + }, + { + "id": "933", + "parents": [ + "930", + "928" + ] + }, + { + "id": "934", + "parents": [ + "933", + "932" + ] + }, + { + "id": "935", + "parents": [ + "930", + "928" + ] + }, + { + "id": "936", + "parents": [ + "929", + "931", + "933", + "935" + ] + }, + { + "id": "937", + "parents": [ + "936" + ] + }, + { + "id": "938", + "parents": [ + "934", + "936" + ] + }, + { + "id": "939", + "parents": [ + "931", + "929", + "933" + ] + }, + { + "id": "940", + "parents": [ + "939", + "937", + "932" + ] + }, + { + "id": "941", + "parents": [ + "937", + "939", + "938" + ] + }, + { + "id": "942", + "parents": [ + "938", + "940" + ] + }, + { + "id": "943", + "parents": [ + "941", + "942" + ] + }, + { + "id": "944", + "parents": [ + "940", + "938" + ] + }, + { + "id": "945", + "parents": [ + "944", + "943" + ] + }, + { + "id": "946", + "parents": [ + "941", + "942", + "944" + ] + }, + { + "id": "947", + "parents": [ + "945", + "946" + ] + }, + { + "id": "948", + "parents": [ + "946", + "943" + ] + }, + { + "id": "949", + "parents": [ + "943", + "944" + ] + }, + { + "id": "950", + "parents": [ + "948" + ] + }, + { + "id": "951", + "parents": [ + "948", + "945" + ] + }, + { + "id": "952", + "parents": [ + "943", + "944" + ] + }, + { + "id": "953", + "parents": [ + "948", + "952" + ] + }, + { + "id": "954", + "parents": [ + "948", + "952" + ] + }, + { + "id": "955", + "parents": [ + "954", + "953", + "949", + "950", + "951", + "947" + ] + }, + { + "id": "956", + "parents": [ + "955" + ] + }, + { + "id": "957", + "parents": [ + "956" + ] + }, + { + "id": "958", + "parents": [ + "957" + ] + }, + { + "id": "959", + "parents": [ + "957" + ] + }, + { + "id": "960", + "parents": [ + "956" + ] + }, + { + "id": "961", + "parents": [ + "960", + "959", + "958" + ] + }, + { + "id": "962", + "parents": [ + "958", + "960", + "959" + ] + }, + { + "id": "963", + "parents": [ + "959", + "958", + "960" + ] + }, + { + "id": "964", + "parents": [ + "961" + ] 
+ }, + { + "id": "965", + "parents": [ + "963", + "962", + "961" + ] + }, + { + "id": "966", + "parents": [ + "961", + "962" + ] + }, + { + "id": "967", + "parents": [ + "966", + "965", + "964" + ] + }, + { + "id": "968", + "parents": [ + "963", + "962", + "964" + ] + }, + { + "id": "969", + "parents": [ + "965", + "966" + ] + }, + { + "id": "970", + "parents": [ + "968", + "965", + "966" + ] + }, + { + "id": "971", + "parents": [ + "970", + "969" + ] + }, + { + "id": "972", + "parents": [ + "971", + "967" + ] + }, + { + "id": "973", + "parents": [ + "972" + ] + }, + { + "id": "974", + "parents": [ + "971" + ] + }, + { + "id": "975", + "parents": [ + "971", + "967" + ] + }, + { + "id": "976", + "parents": [ + "967", + "974" + ] + }, + { + "id": "977", + "parents": [ + "972", + "975", + "974" + ] + }, + { + "id": "978", + "parents": [ + "976", + "977", + "973" + ] + }, + { + "id": "979", + "parents": [ + "976", + "973", + "977" + ] + }, + { + "id": "980", + "parents": [ + "979", + "978" + ] + }, + { + "id": "981", + "parents": [ + "979", + "978" + ] + }, + { + "id": "982", + "parents": [ + "981" + ] + }, + { + "id": "983", + "parents": [ + "982", + "980" + ] + }, + { + "id": "984", + "parents": [ + "983" + ] + }, + { + "id": "985", + "parents": [ + "984" + ] + }, + { + "id": "986", + "parents": [ + "985" + ] + }, + { + "id": "987", + "parents": [ + "985" + ] + }, + { + "id": "988", + "parents": [ + "987" + ] + }, + { + "id": "989", + "parents": [ + "987" + ] + }, + { + "id": "990", + "parents": [ + "989" + ] + }, + { + "id": "991", + "parents": [ + "988", + "986" + ] + }, + { + "id": "992", + "parents": [ + "986", + "988" + ] + }, + { + "id": "993", + "parents": [ + "988", + "986", + "990" + ] + }, + { + "id": "994", + "parents": [ + "993", + "991", + "992" + ] + }, + { + "id": "995", + "parents": [ + "989", + "991", + "992" + ] + }, + { + "id": "996", + "parents": [ + "994", + "995" + ] + }, + { + "id": "997", + "parents": [ + "992", + "993", + "991" + ] + }, + { + 
"id": "998", + "parents": [ + "994", + "995", + "997" + ] + }, + { + "id": "999", + "parents": [ + "998", + "996" + ] + }, + { + "id": "1000", + "parents": [ + "996", + "998" + ] + }, + { + "id": "1001", + "parents": [ + "996", + "998" + ] + }, + { + "id": "1002", + "parents": [ + "998", + "996" + ] + }, + { + "id": "1003", + "parents": [ + "999", + "1000", + "1001" + ] + }, + { + "id": "1004", + "parents": [ + "1001", + "1000", + "1002", + "999" + ] + }, + { + "id": "1005", + "parents": [ + "1004", + "1003" + ] + }, + { + "id": "1006", + "parents": [ + "1004", + "1003" + ] + }, + { + "id": "1007", + "parents": [ + "1006", + "1005" + ] + }, + { + "id": "1008", + "parents": [ + "1005", + "1006" + ] + }, + { + "id": "1009", + "parents": [ + "1006" + ] + }, + { + "id": "1010", + "parents": [ + "1005", + "1006" + ] + }, + { + "id": "1011", + "parents": [ + "1007", + "1009", + "1008" + ] + }, + { + "id": "1012", + "parents": [ + "1011", + "1010" + ] + }, + { + "id": "1013", + "parents": [ + "1010", + "1011" + ] + }, + { + "id": "1014", + "parents": [ + "1013" + ] + }, + { + "id": "1015", + "parents": [ + "1014" + ] + }, + { + "id": "1016", + "parents": [ + "1012", + "1015" + ] + }, + { + "id": "1017", + "parents": [ + "1011", + "1010" + ] + }, + { + "id": "1018", + "parents": [ + "1017", + "1016" + ] + }, + { + "id": "1019", + "parents": [ + "1018" + ] + }, + { + "id": "1020", + "parents": [ + "1019" + ] + }, + { + "id": "1021", + "parents": [ + "1019" + ] + }, + { + "id": "1022", + "parents": [ + "1018" + ] + }, + { + "id": "1023", + "parents": [ + "1022" + ] + }, + { + "id": "1024", + "parents": [ + "1021", + "1020", + "1023" + ] + }, + { + "id": "1025", + "parents": [ + "1023", + "1020", + "1021" + ] + }, + { + "id": "1026", + "parents": [ + "1023", + "1021", + "1020" + ] + }, + { + "id": "1027", + "parents": [ + "1024" + ] + }, + { + "id": "1028", + "parents": [ + "1026", + "1025", + "1027" + ] + }, + { + "id": "1029", + "parents": [ + "1027", + "1025", + "1026" + ] 
+ }, + { + "id": "1030", + "parents": [ + "1029" + ] + }, + { + "id": "1031", + "parents": [ + "1030" + ] + }, + { + "id": "1032", + "parents": [ + "1031", + "1028" + ] + }, + { + "id": "1033", + "parents": [ + "1032" + ] + }, + { + "id": "1034", + "parents": [ + "1033" + ] + }, + { + "id": "1035", + "parents": [ + "1032" + ] + }, + { + "id": "1036", + "parents": [ + "1033", + "1035" + ] + }, + { + "id": "1037", + "parents": [ + "1033" + ] + }, + { + "id": "1038", + "parents": [ + "1033", + "1035" + ] + }, + { + "id": "1039", + "parents": [ + "1035" + ] + }, + { + "id": "1040", + "parents": [ + "1039", + "1038" + ] + }, + { + "id": "1041", + "parents": [ + "1039" + ] + }, + { + "id": "1042", + "parents": [ + "1034", + "1037", + "1040", + "1036", + "1041" + ] + }, + { + "id": "1043", + "parents": [ + "1034", + "1041", + "1037", + "1040", + "1036" + ] + }, + { + "id": "1044", + "parents": [ + "1041", + "1037", + "1040", + "1036", + "1034" + ] + }, + { + "id": "1045", + "parents": [ + "1044", + "1043", + "1042" + ] + }, + { + "id": "1046", + "parents": [ + "1044", + "1043" + ] + }, + { + "id": "1047", + "parents": [ + "1043", + "1044" + ] + }, + { + "id": "1048", + "parents": [ + "1042", + "1046", + "1047" + ] + }, + { + "id": "1049", + "parents": [ + "1048", + "1045" + ] + }, + { + "id": "1050", + "parents": [ + "1046", + "1047", + "1045" + ] + }, + { + "id": "1051", + "parents": [ + "1050", + "1049" + ] + }, + { + "id": "1052", + "parents": [ + "1050", + "1049" + ] + }, + { + "id": "1053", + "parents": [ + "1052", + "1051" + ] + }, + { + "id": "1054", + "parents": [ + "1053" + ] + }, + { + "id": "1055", + "parents": [ + "1054" + ] + }, + { + "id": "1056", + "parents": [ + "1053" + ] + }, + { + "id": "1057", + "parents": [ + "1053" + ] + }, + { + "id": "1058", + "parents": [ + "1055", + "1056", + "1057" + ] + }, + { + "id": "1059", + "parents": [ + "1058" + ] + }, + { + "id": "1060", + "parents": [ + "1056", + "1057", + "1055" + ] + }, + { + "id": "1061", + 
"parents": [ + "1060" + ] + }, + { + "id": "1062", + "parents": [ + "1058", + "1060" + ] + }, + { + "id": "1063", + "parents": [ + "1062", + "1061", + "1059" + ] + }, + { + "id": "1064", + "parents": [ + "1061", + "1059", + "1062" + ] + }, + { + "id": "1065", + "parents": [ + "1063", + "1064" + ] + }, + { + "id": "1066", + "parents": [ + "1063", + "1064" + ] + }, + { + "id": "1067", + "parents": [ + "1066", + "1065" + ] + }, + { + "id": "1068", + "parents": [ + "1066", + "1065" + ] + }, + { + "id": "1069", + "parents": [ + "1067", + "1068" + ] + }, + { + "id": "1070", + "parents": [ + "1069" + ] + }, + { + "id": "1071", + "parents": [ + "1070" + ] + }, + { + "id": "1072", + "parents": [ + "1069" + ] + }, + { + "id": "1073", + "parents": [ + "1072", + "1070" + ] + }, + { + "id": "1074", + "parents": [ + "1073", + "1071" + ] + }, + { + "id": "1075", + "parents": [ + "1074" + ] + }, + { + "id": "1076", + "parents": [ + "1074" + ] + }, + { + "id": "1077", + "parents": [ + "1075", + "1076" + ] + }, + { + "id": "1078", + "parents": [ + "1077" + ] + }, + { + "id": "1079", + "parents": [ + "1075" + ] + }, + { + "id": "1080", + "parents": [ + "1079", + "1077" + ] + }, + { + "id": "1081", + "parents": [ + "1080" + ] + }, + { + "id": "1082", + "parents": [ + "1076", + "1079" + ] + }, + { + "id": "1083", + "parents": [ + "1082" + ] + }, + { + "id": "1084", + "parents": [ + "1083", + "1080" + ] + }, + { + "id": "1085", + "parents": [ + "1083", + "1081", + "1078" + ] + }, + { + "id": "1086", + "parents": [ + "1085", + "1084" + ] + }, + { + "id": "1087", + "parents": [ + "1085" + ] + }, + { + "id": "1088", + "parents": [ + "1085", + "1084" + ] + }, + { + "id": "1089", + "parents": [ + "1086", + "1088", + "1087" + ] + }, + { + "id": "1090", + "parents": [ + "1086", + "1088", + "1087" + ] + }, + { + "id": "1091", + "parents": [ + "1090" + ] + }, + { + "id": "1092", + "parents": [ + "1091" + ] + }, + { + "id": "1093", + "parents": [ + "1090", + "1089" + ] + }, + { + "id": "1094", + 
"parents": [ + "1090", + "1089" + ] + }, + { + "id": "1095", + "parents": [ + "1091", + "1093" + ] + }, + { + "id": "1096", + "parents": [ + "1092", + "1095", + "1094" + ] + }, + { + "id": "1097", + "parents": [ + "1092", + "1095", + "1094" + ] + }, + { + "id": "1098", + "parents": [ + "1097" + ] + }, + { + "id": "1099", + "parents": [ + "1097" + ] + }, + { + "id": "1100", + "parents": [ + "1098" + ] + }, + { + "id": "1101", + "parents": [ + "1099", + "1100" + ] + }, + { + "id": "1102", + "parents": [ + "1098", + "1096" + ] + }, + { + "id": "1103", + "parents": [ + "1102", + "1101" + ] + }, + { + "id": "1104", + "parents": [ + "1101", + "1102" + ] + }, + { + "id": "1105", + "parents": [ + "1102", + "1101" + ] + }, + { + "id": "1106", + "parents": [ + "1105", + "1103" + ] + }, + { + "id": "1107", + "parents": [ + "1106" + ] + }, + { + "id": "1108", + "parents": [ + "1103" + ] + }, + { + "id": "1109", + "parents": [ + "1104", + "1107", + "1108" + ] + }, + { + "id": "1110", + "parents": [ + "1104", + "1107", + "1108" + ] + }, + { + "id": "1111", + "parents": [ + "1107", + "1104", + "1108" + ] + }, + { + "id": "1112", + "parents": [ + "1110", + "1109", + "1111" + ] + }, + { + "id": "1113", + "parents": [ + "1110" + ] + }, + { + "id": "1114", + "parents": [ + "1111", + "1113", + "1109" + ] + }, + { + "id": "1115", + "parents": [ + "1114" + ] + }, + { + "id": "1116", + "parents": [ + "1112", + "1115" + ] + }, + { + "id": "1117", + "parents": [ + "1113", + "1112" + ] + }, + { + "id": "1118", + "parents": [ + "1117", + "1115" + ] + }, + { + "id": "1119", + "parents": [ + "1116", + "1118" + ] + }, + { + "id": "1120", + "parents": [ + "1117", + "1116" + ] + }, + { + "id": "1121", + "parents": [ + "1118", + "1116" + ] + }, + { + "id": "1122", + "parents": [ + "1116", + "1118" + ] + }, + { + "id": "1123", + "parents": [ + "1122", + "1120", + "1119", + "1121" + ] + }, + { + "id": "1124", + "parents": [ + "1119", + "1120", + "1121", + "1122" + ] + }, + { + "id": "1125", + 
"parents": [ + "1122", + "1120", + "1121", + "1119" + ] + }, + { + "id": "1126", + "parents": [ + "1123", + "1124" + ] + }, + { + "id": "1127", + "parents": [ + "1123" + ] + }, + { + "id": "1128", + "parents": [ + "1125", + "1124", + "1127" + ] + }, + { + "id": "1129", + "parents": [ + "1126", + "1127" + ] + }, + { + "id": "1130", + "parents": [ + "1129", + "1125" + ] + }, + { + "id": "1131", + "parents": [ + "1130", + "1128" + ] + }, + { + "id": "1132", + "parents": [ + "1128", + "1130" + ] + }, + { + "id": "1133", + "parents": [ + "1128", + "1130" + ] + }, + { + "id": "1134", + "parents": [ + "1132", + "1131", + "1133" + ] + }, + { + "id": "1135", + "parents": [ + "1134" + ] + }, + { + "id": "1136", + "parents": [ + "1135" + ] + }, + { + "id": "1137", + "parents": [ + "1134" + ] + }, + { + "id": "1138", + "parents": [ + "1136", + "1137" + ] + }, + { + "id": "1139", + "parents": [ + "1138" + ] + }, + { + "id": "1140", + "parents": [ + "1138" + ] + }, + { + "id": "1141", + "parents": [ + "1140", + "1139" + ] + }, + { + "id": "1142", + "parents": [ + "1141" + ] + }, + { + "id": "1143", + "parents": [ + "1138" + ] + }, + { + "id": "1144", + "parents": [ + "1140", + "1139", + "1143" + ] + }, + { + "id": "1145", + "parents": [ + "1143", + "1140", + "1139" + ] + }, + { + "id": "1146", + "parents": [ + "1145", + "1144", + "1142" + ] + }, + { + "id": "1147", + "parents": [ + "1141", + "1144", + "1145" + ] + }, + { + "id": "1148", + "parents": [ + "1145", + "1144", + "1142" + ] + }, + { + "id": "1149", + "parents": [ + "1148", + "1147" + ] + }, + { + "id": "1150", + "parents": [ + "1149", + "1146" + ] + }, + { + "id": "1151", + "parents": [ + "1149", + "1146" + ] + }, + { + "id": "1152", + "parents": [ + "1149", + "1146" + ] + }, + { + "id": "1153", + "parents": [ + "1152" + ] + }, + { + "id": "1154", + "parents": [ + "1150", + "1151", + "1153" + ] + }, + { + "id": "1155", + "parents": [ + "1154" + ] + }, + { + "id": "1156", + "parents": [ + "1155" + ] + }, + { + "id": 
"1157", + "parents": [ + "1156" + ] + }, + { + "id": "1158", + "parents": [ + "1157" + ] + }, + { + "id": "1159", + "parents": [ + "1158" + ] + }, + { + "id": "1160", + "parents": [ + "1158" + ] + }, + { + "id": "1161", + "parents": [ + "1160", + "1159" + ] + }, + { + "id": "1162", + "parents": [ + "1159", + "1160" + ] + }, + { + "id": "1163", + "parents": [ + "1161", + "1162" + ] + }, + { + "id": "1164", + "parents": [ + "1163" + ] + }, + { + "id": "1165", + "parents": [ + "1162", + "1161" + ] + }, + { + "id": "1166", + "parents": [ + "1162", + "1161" + ] + }, + { + "id": "1167", + "parents": [ + "1164", + "1166", + "1165" + ] + }, + { + "id": "1168", + "parents": [ + "1166" + ] + }, + { + "id": "1169", + "parents": [ + "1167", + "1168" + ] + }, + { + "id": "1170", + "parents": [ + "1167", + "1168" + ] + }, + { + "id": "1171", + "parents": [ + "1169", + "1170" + ] + }, + { + "id": "1172", + "parents": [ + "1168", + "1167" + ] + }, + { + "id": "1173", + "parents": [ + "1172", + "1171" + ] + }, + { + "id": "1174", + "parents": [ + "1173" + ] + }, + { + "id": "1175", + "parents": [ + "1172", + "1171" + ] + }, + { + "id": "1176", + "parents": [ + "1173", + "1175" + ] + }, + { + "id": "1177", + "parents": [ + "1170", + "1169", + "1172" + ] + }, + { + "id": "1178", + "parents": [ + "1175", + "1173" + ] + }, + { + "id": "1179", + "parents": [ + "1175", + "1173" + ] + }, + { + "id": "1180", + "parents": [ + "1179" + ] + }, + { + "id": "1181", + "parents": [ + "1177", + "1176", + "1178", + "1180", + "1174" + ] + }, + { + "id": "1182", + "parents": [ + "1181" + ] + }, + { + "id": "1183", + "parents": [ + "1181" + ] + }, + { + "id": "1184", + "parents": [ + "1181" + ] + }, + { + "id": "1185", + "parents": [ + "1184" + ] + }, + { + "id": "1186", + "parents": [ + "1185" + ] + }, + { + "id": "1187", + "parents": [ + "1183", + "1186", + "1182" + ] + }, + { + "id": "1188", + "parents": [ + "1187" + ] + }, + { + "id": "1189", + "parents": [ + "1188" + ] + }, + { + "id": "1190", + 
"parents": [ + "1189" + ] + }, + { + "id": "1191", + "parents": [ + "1189" + ] + }, + { + "id": "1192", + "parents": [ + "1190" + ] + }, + { + "id": "1193", + "parents": [ + "1192", + "1191" + ] + }, + { + "id": "1194", + "parents": [ + "1193" + ] + }, + { + "id": "1195", + "parents": [ + "1194" + ] + }, + { + "id": "1196", + "parents": [ + "1194" + ] + }, + { + "id": "1197", + "parents": [ + "1195" + ] + }, + { + "id": "1198", + "parents": [ + "1196", + "1197" + ] + }, + { + "id": "1199", + "parents": [ + "1196", + "1195" + ] + }, + { + "id": "1200", + "parents": [ + "1196", + "1195" + ] + }, + { + "id": "1201", + "parents": [ + "1200" + ] + }, + { + "id": "1202", + "parents": [ + "1199", + "1198", + "1201" + ] + }, + { + "id": "1203", + "parents": [ + "1199", + "1198" + ] + }, + { + "id": "1204", + "parents": [ + "1201", + "1199", + "1197" + ] + }, + { + "id": "1205", + "parents": [ + "1201", + "1197", + "1199" + ] + }, + { + "id": "1206", + "parents": [ + "1203", + "1205", + "1204" + ] + }, + { + "id": "1207", + "parents": [ + "1206", + "1202" + ] + }, + { + "id": "1208", + "parents": [ + "1207" + ] + }, + { + "id": "1209", + "parents": [ + "1202", + "1206" + ] + }, + { + "id": "1210", + "parents": [ + "1207", + "1209" + ] + }, + { + "id": "1211", + "parents": [ + "1208", + "1209" + ] + }, + { + "id": "1212", + "parents": [ + "1207", + "1209" + ] + }, + { + "id": "1213", + "parents": [ + "1212", + "1208", + "1210" + ] + }, + { + "id": "1214", + "parents": [ + "1213", + "1211" + ] + }, + { + "id": "1215", + "parents": [ + "1214" + ] + }, + { + "id": "1216", + "parents": [ + "1214" + ] + }, + { + "id": "1217", + "parents": [ + "1216" + ] + }, + { + "id": "1218", + "parents": [ + "1214" + ] + }, + { + "id": "1219", + "parents": [ + "1217", + "1215" + ] + }, + { + "id": "1220", + "parents": [ + "1217", + "1215", + "1218" + ] + }, + { + "id": "1221", + "parents": [ + "1219", + "1220" + ] + }, + { + "id": "1222", + "parents": [ + "1216", + "1215" + ] + }, + { + "id": 
"1223", + "parents": [ + "1217", + "1215", + "1218" + ] + }, + { + "id": "1224", + "parents": [ + "1220", + "1222", + "1219" + ] + }, + { + "id": "1225", + "parents": [ + "1219", + "1222", + "1223", + "1220" + ] + }, + { + "id": "1226", + "parents": [ + "1224", + "1225", + "1221" + ] + }, + { + "id": "1227", + "parents": [ + "1226" + ] + }, + { + "id": "1228", + "parents": [ + "1226" + ] + }, + { + "id": "1229", + "parents": [ + "1226" + ] + }, + { + "id": "1230", + "parents": [ + "1229", + "1228" + ] + }, + { + "id": "1231", + "parents": [ + "1230", + "1227" + ] + }, + { + "id": "1232", + "parents": [ + "1230", + "1227" + ] + }, + { + "id": "1233", + "parents": [ + "1227", + "1230" + ] + }, + { + "id": "1234", + "parents": [ + "1233", + "1231", + "1232" + ] + }, + { + "id": "1235", + "parents": [ + "1233", + "1231", + "1232" + ] + }, + { + "id": "1236", + "parents": [ + "1235" + ] + }, + { + "id": "1237", + "parents": [ + "1235" + ] + }, + { + "id": "1238", + "parents": [ + "1237", + "1236", + "1234" + ] + }, + { + "id": "1239", + "parents": [ + "1237", + "1236" + ] + }, + { + "id": "1240", + "parents": [ + "1234", + "1236", + "1237" + ] + }, + { + "id": "1241", + "parents": [ + "1234", + "1236" + ] + }, + { + "id": "1242", + "parents": [ + "1238", + "1239", + "1241", + "1240" + ] + }, + { + "id": "1243", + "parents": [ + "1239", + "1238", + "1241", + "1240" + ] + }, + { + "id": "1244", + "parents": [ + "1243", + "1242" + ] + }, + { + "id": "1245", + "parents": [ + "1242", + "1243" + ] + }, + { + "id": "1246", + "parents": [ + "1244", + "1245" + ] + }, + { + "id": "1247", + "parents": [ + "1244" + ] + }, + { + "id": "1248", + "parents": [ + "1244", + "1245" + ] + }, + { + "id": "1249", + "parents": [ + "1247", + "1248" + ] + }, + { + "id": "1250", + "parents": [ + "1249" + ] + }, + { + "id": "1251", + "parents": [ + "1250", + "1246" + ] + }, + { + "id": "1252", + "parents": [ + "1251" + ] + }, + { + "id": "1253", + "parents": [ + "1251" + ] + }, + { + "id": 
"1254", + "parents": [ + "1252", + "1253" + ] + }, + { + "id": "1255", + "parents": [ + "1254" + ] + }, + { + "id": "1256", + "parents": [ + "1253", + "1252" + ] + }, + { + "id": "1257", + "parents": [ + "1256" + ] + }, + { + "id": "1258", + "parents": [ + "1257", + "1254" + ] + }, + { + "id": "1259", + "parents": [ + "1255", + "1258" + ] + }, + { + "id": "1260", + "parents": [ + "1258" + ] + }, + { + "id": "1261", + "parents": [ + "1255", + "1258" + ] + }, + { + "id": "1262", + "parents": [ + "1258" + ] + }, + { + "id": "1263", + "parents": [ + "1259", + "1260", + "1261", + "1262" + ] + }, + { + "id": "1264", + "parents": [ + "1261", + "1260", + "1259", + "1262" + ] + }, + { + "id": "1265", + "parents": [ + "1259", + "1260", + "1262", + "1261" + ] + }, + { + "id": "1266", + "parents": [ + "1263", + "1265", + "1264" + ] + }, + { + "id": "1267", + "parents": [ + "1266" + ] + }, + { + "id": "1268", + "parents": [ + "1267" + ] + }, + { + "id": "1269", + "parents": [ + "1266" + ] + }, + { + "id": "1270", + "parents": [ + "1268" + ] + }, + { + "id": "1271", + "parents": [ + "1270", + "1269" + ] + }, + { + "id": "1272", + "parents": [ + "1269", + "1270" + ] + }, + { + "id": "1273", + "parents": [ + "1269", + "1268" + ] + }, + { + "id": "1274", + "parents": [ + "1272", + "1273", + "1271" + ] + }, + { + "id": "1275", + "parents": [ + "1272", + "1271", + "1273" + ] + }, + { + "id": "1276", + "parents": [ + "1275" + ] + }, + { + "id": "1277", + "parents": [ + "1274" + ] + }, + { + "id": "1278", + "parents": [ + "1276", + "1274" + ] + }, + { + "id": "1279", + "parents": [ + "1276", + "1274" + ] + }, + { + "id": "1280", + "parents": [ + "1277", + "1276" + ] + }, + { + "id": "1281", + "parents": [ + "1278", + "1280", + "1279" + ] + }, + { + "id": "1282", + "parents": [ + "1281" + ] + }, + { + "id": "1283", + "parents": [ + "1282" + ] + }, + { + "id": "1284", + "parents": [ + "1283" + ] + }, + { + "id": "1285", + "parents": [ + "1281" + ] + }, + { + "id": "1286", + "parents": [ 
+ "1281" + ] + }, + { + "id": "1287", + "parents": [ + "1282" + ] + }, + { + "id": "1288", + "parents": [ + "1284", + "1286", + "1285" + ] + }, + { + "id": "1289", + "parents": [ + "1285", + "1283", + "1286", + "1287" + ] + }, + { + "id": "1290", + "parents": [ + "1284", + "1289" + ] + }, + { + "id": "1291", + "parents": [ + "1287", + "1285", + "1286", + "1284" + ] + }, + { + "id": "1292", + "parents": [ + "1290" + ] + }, + { + "id": "1293", + "parents": [ + "1292" + ] + }, + { + "id": "1294", + "parents": [ + "1288", + "1290", + "1291" + ] + }, + { + "id": "1295", + "parents": [ + "1293", + "1288", + "1291" + ] + }, + { + "id": "1296", + "parents": [ + "1288", + "1291" + ] + }, + { + "id": "1297", + "parents": [ + "1294" + ] + }, + { + "id": "1298", + "parents": [ + "1296", + "1297", + "1295" + ] + }, + { + "id": "1299", + "parents": [ + "1293", + "1288", + "1291" + ] + }, + { + "id": "1300", + "parents": [ + "1295", + "1299", + "1296" + ] + }, + { + "id": "1301", + "parents": [ + "1298", + "1300" + ] + }, + { + "id": "1302", + "parents": [ + "1301" + ] + }, + { + "id": "1303", + "parents": [ + "1302" + ] + }, + { + "id": "1304", + "parents": [ + "1303" + ] + }, + { + "id": "1305", + "parents": [ + "1303" + ] + }, + { + "id": "1306", + "parents": [ + "1305", + "1304" + ] + }, + { + "id": "1307", + "parents": [ + "1303" + ] + }, + { + "id": "1308", + "parents": [ + "1307", + "1304", + "1305" + ] + }, + { + "id": "1309", + "parents": [ + "1305", + "1304" + ] + }, + { + "id": "1310", + "parents": [ + "1309", + "1306", + "1308" + ] + }, + { + "id": "1311", + "parents": [ + "1305", + "1307", + "1304" + ] + }, + { + "id": "1312", + "parents": [ + "1310", + "1311" + ] + }, + { + "id": "1313", + "parents": [ + "1312" + ] + }, + { + "id": "1314", + "parents": [ + "1312" + ] + }, + { + "id": "1315", + "parents": [ + "1312" + ] + }, + { + "id": "1316", + "parents": [ + "1312" + ] + }, + { + "id": "1317", + "parents": [ + "1313", + "1315", + "1314" + ] + }, + { + "id": 
"1318", + "parents": [ + "1312" + ] + }, + { + "id": "1319", + "parents": [ + "1317", + "1316" + ] + }, + { + "id": "1320", + "parents": [ + "1319", + "1318" + ] + }, + { + "id": "1321", + "parents": [ + "1319", + "1318" + ] + }, + { + "id": "1322", + "parents": [ + "1321" + ] + }, + { + "id": "1323", + "parents": [ + "1322", + "1320" + ] + }, + { + "id": "1324", + "parents": [ + "1322", + "1320" + ] + }, + { + "id": "1325", + "parents": [ + "1323", + "1324" + ] + }, + { + "id": "1326", + "parents": [ + "1320", + "1322" + ] + }, + { + "id": "1327", + "parents": [ + "1320", + "1322" + ] + }, + { + "id": "1328", + "parents": [ + "1323", + "1324", + "1326" + ] + }, + { + "id": "1329", + "parents": [ + "1328" + ] + }, + { + "id": "1330", + "parents": [ + "1323", + "1327" + ] + }, + { + "id": "1331", + "parents": [ + "1327", + "1325", + "1328" + ] + }, + { + "id": "1332", + "parents": [ + "1330", + "1325", + "1329" + ] + }, + { + "id": "1333", + "parents": [ + "1331", + "1332" + ] + }, + { + "id": "1334", + "parents": [ + "1333" + ] + }, + { + "id": "1335", + "parents": [ + "1333" + ] + }, + { + "id": "1336", + "parents": [ + "1333" + ] + }, + { + "id": "1337", + "parents": [ + "1335" + ] + }, + { + "id": "1338", + "parents": [ + "1333" + ] + }, + { + "id": "1339", + "parents": [ + "1334", + "1336", + "1337" + ] + }, + { + "id": "1340", + "parents": [ + "1336", + "1338", + "1334", + "1337" + ] + }, + { + "id": "1341", + "parents": [ + "1339", + "1340" + ] + }, + { + "id": "1342", + "parents": [ + "1339" + ] + }, + { + "id": "1343", + "parents": [ + "1342", + "1338" + ] + }, + { + "id": "1344", + "parents": [ + "1334", + "1337", + "1338", + "1336" + ] + }, + { + "id": "1345", + "parents": [ + "1343", + "1344", + "1340" + ] + }, + { + "id": "1346", + "parents": [ + "1341", + "1345" + ] + }, + { + "id": "1347", + "parents": [ + "1341", + "1345" + ] + }, + { + "id": "1348", + "parents": [ + "1347", + "1346" + ] + }, + { + "id": "1349", + "parents": [ + "1345", + "1341" + ] 
+ }, + { + "id": "1350", + "parents": [ + "1349" + ] + }, + { + "id": "1351", + "parents": [ + "1350", + "1347", + "1346" + ] + }, + { + "id": "1352", + "parents": [ + "1341", + "1345" + ] + }, + { + "id": "1353", + "parents": [ + "1348", + "1352", + "1351" + ] + }, + { + "id": "1354", + "parents": [ + "1346", + "1347", + "1350", + "1352" + ] + }, + { + "id": "1355", + "parents": [ + "1347", + "1350", + "1352" + ] + }, + { + "id": "1356", + "parents": [ + "1355", + "1351", + "1354" + ] + }, + { + "id": "1357", + "parents": [ + "1355", + "1351", + "1354", + "1348" + ] + }, + { + "id": "1358", + "parents": [ + "1356", + "1357", + "1353" + ] + }, + { + "id": "1359", + "parents": [ + "1358" + ] + }, + { + "id": "1360", + "parents": [ + "1357", + "1356", + "1353" + ] + }, + { + "id": "1361", + "parents": [ + "1359" + ] + }, + { + "id": "1362", + "parents": [ + "1359", + "1360" + ] + }, + { + "id": "1363", + "parents": [ + "1361", + "1360" + ] + }, + { + "id": "1364", + "parents": [ + "1363", + "1362" + ] + }, + { + "id": "1365", + "parents": [ + "1363", + "1362" + ] + }, + { + "id": "1366", + "parents": [ + "1364", + "1365" + ] + }, + { + "id": "1367", + "parents": [ + "1365" + ] + }, + { + "id": "1368", + "parents": [ + "1366", + "1367" + ] + }, + { + "id": "1369", + "parents": [ + "1367" + ] + }, + { + "id": "1370", + "parents": [ + "1365" + ] + }, + { + "id": "1371", + "parents": [ + "1370", + "1368", + "1369" + ] + }, + { + "id": "1372", + "parents": [ + "1371" + ] + }, + { + "id": "1373", + "parents": [ + "1371" + ] + }, + { + "id": "1374", + "parents": [ + "1373" + ] + }, + { + "id": "1375", + "parents": [ + "1372", + "1374" + ] + }, + { + "id": "1376", + "parents": [ + "1372", + "1374" + ] + }, + { + "id": "1377", + "parents": [ + "1374", + "1372" + ] + }, + { + "id": "1378", + "parents": [ + "1377" + ] + }, + { + "id": "1379", + "parents": [ + "1374", + "1372" + ] + }, + { + "id": "1380", + "parents": [ + "1379", + "1378", + "1375", + "1376" + ] + }, + { + "id": 
"1381", + "parents": [ + "1380" + ] + }, + { + "id": "1382", + "parents": [ + "1380" + ] + }, + { + "id": "1383", + "parents": [ + "1382", + "1381" + ] + }, + { + "id": "1384", + "parents": [ + "1381", + "1382" + ] + }, + { + "id": "1385", + "parents": [ + "1381", + "1382" + ] + }, + { + "id": "1386", + "parents": [ + "1384", + "1385" + ] + }, + { + "id": "1387", + "parents": [ + "1384", + "1385" + ] + }, + { + "id": "1388", + "parents": [ + "1387", + "1383" + ] + }, + { + "id": "1389", + "parents": [ + "1384", + "1385", + "1383" + ] + }, + { + "id": "1390", + "parents": [ + "1383", + "1386" + ] + }, + { + "id": "1391", + "parents": [ + "1389", + "1388", + "1386" + ] + }, + { + "id": "1392", + "parents": [ + "1391", + "1390" + ] + }, + { + "id": "1393", + "parents": [ + "1389", + "1390", + "1387" + ] + }, + { + "id": "1394", + "parents": [ + "1393" + ] + }, + { + "id": "1395", + "parents": [ + "1388", + "1390", + "1389" + ] + }, + { + "id": "1396", + "parents": [ + "1391", + "1395", + "1394" + ] + }, + { + "id": "1397", + "parents": [ + "1394", + "1395", + "1392" + ] + }, + { + "id": "1398", + "parents": [ + "1394", + "1395" + ] + }, + { + "id": "1399", + "parents": [ + "1398", + "1391" + ] + }, + { + "id": "1400", + "parents": [ + "1391", + "1398" + ] + }, + { + "id": "1401", + "parents": [ + "1400", + "1392" + ] + }, + { + "id": "1402", + "parents": [ + "1397", + "1401" + ] + }, + { + "id": "1403", + "parents": [ + "1401", + "1396", + "1397", + "1399" + ] + }, + { + "id": "1404", + "parents": [ + "1396", + "1399", + "1392" + ] + }, + { + "id": "1405", + "parents": [ + "1404", + "1402" + ] + }, + { + "id": "1406", + "parents": [ + "1405", + "1403" + ] + }, + { + "id": "1407", + "parents": [ + "1404", + "1402" + ] + }, + { + "id": "1408", + "parents": [ + "1406", + "1407" + ] + }, + { + "id": "1409", + "parents": [ + "1408" + ] + }, + { + "id": "1410", + "parents": [ + "1408" + ] + }, + { + "id": "1411", + "parents": [ + "1409", + "1410" + ] + }, + { + "id": 
"1412", + "parents": [ + "1411" + ] + }, + { + "id": "1413", + "parents": [ + "1412" + ] + }, + { + "id": "1414", + "parents": [ + "1410", + "1409" + ] + }, + { + "id": "1415", + "parents": [ + "1414" + ] + }, + { + "id": "1416", + "parents": [ + "1414", + "1411" + ] + }, + { + "id": "1417", + "parents": [ + "1415", + "1413" + ] + }, + { + "id": "1418", + "parents": [ + "1417", + "1416" + ] + }, + { + "id": "1419", + "parents": [ + "1416", + "1415", + "1413" + ] + }, + { + "id": "1420", + "parents": [ + "1418", + "1419" + ] + }, + { + "id": "1421", + "parents": [ + "1419" + ] + }, + { + "id": "1422", + "parents": [ + "1421", + "1418" + ] + }, + { + "id": "1423", + "parents": [ + "1418", + "1421" + ] + }, + { + "id": "1424", + "parents": [ + "1419", + "1418" + ] + }, + { + "id": "1425", + "parents": [ + "1421", + "1418" + ] + }, + { + "id": "1426", + "parents": [ + "1425", + "1424", + "1423", + "1422", + "1420" + ] + }, + { + "id": "1427", + "parents": [ + "1424", + "1422", + "1420", + "1425", + "1423" + ] + }, + { + "id": "1428", + "parents": [ + "1427" + ] + }, + { + "id": "1429", + "parents": [ + "1426", + "1427" + ] + }, + { + "id": "1430", + "parents": [ + "1426" + ] + }, + { + "id": "1431", + "parents": [ + "1430" + ] + }, + { + "id": "1432", + "parents": [ + "1430", + "1429" + ] + }, + { + "id": "1433", + "parents": [ + "1430", + "1427" + ] + }, + { + "id": "1434", + "parents": [ + "1433", + "1431", + "1428", + "1429" + ] + }, + { + "id": "1435", + "parents": [ + "1433", + "1428", + "1431" + ] + }, + { + "id": "1436", + "parents": [ + "1432", + "1434" + ] + }, + { + "id": "1437", + "parents": [ + "1435", + "1432" + ] + }, + { + "id": "1438", + "parents": [ + "1432", + "1434", + "1435" + ] + }, + { + "id": "1439", + "parents": [ + "1438", + "1437", + "1436" + ] + }, + { + "id": "1440", + "parents": [ + "1439" + ] + }, + { + "id": "1441", + "parents": [ + "1439" + ] + }, + { + "id": "1442", + "parents": [ + "1439" + ] + }, + { + "id": "1443", + "parents": [ + 
"1440", + "1441", + "1442" + ] + }, + { + "id": "1444", + "parents": [ + "1440" + ] + }, + { + "id": "1445", + "parents": [ + "1444", + "1443" + ] + }, + { + "id": "1446", + "parents": [ + "1445" + ] + }, + { + "id": "1447", + "parents": [ + "1442", + "1441", + "1440" + ] + }, + { + "id": "1448", + "parents": [ + "1442", + "1440", + "1441" + ] + }, + { + "id": "1449", + "parents": [ + "1444", + "1448", + "1443" + ] + }, + { + "id": "1450", + "parents": [ + "1448", + "1443", + "1447", + "1444" + ] + }, + { + "id": "1451", + "parents": [ + "1449", + "1447", + "1446" + ] + }, + { + "id": "1452", + "parents": [ + "1450" + ] + }, + { + "id": "1453", + "parents": [ + "1452" + ] + }, + { + "id": "1454", + "parents": [ + "1449", + "1453", + "1446" + ] + }, + { + "id": "1455", + "parents": [ + "1446", + "1449", + "1453" + ] + }, + { + "id": "1456", + "parents": [ + "1453", + "1449", + "1446" + ] + }, + { + "id": "1457", + "parents": [ + "1447", + "1449" + ] + }, + { + "id": "1458", + "parents": [ + "1457", + "1455" + ] + }, + { + "id": "1459", + "parents": [ + "1457", + "1455", + "1456", + "1451" + ] + }, + { + "id": "1460", + "parents": [ + "1454", + "1459", + "1458" + ] + }, + { + "id": "1461", + "parents": [ + "1458", + "1459", + "1454" + ] + }, + { + "id": "1462", + "parents": [ + "1454", + "1458", + "1459" + ] + }, + { + "id": "1463", + "parents": [ + "1462" + ] + }, + { + "id": "1464", + "parents": [ + "1461", + "1463", + "1460" + ] + }, + { + "id": "1465", + "parents": [ + "1464" + ] + }, + { + "id": "1466", + "parents": [ + "1465" + ] + }, + { + "id": "1467", + "parents": [ + "1464" + ] + }, + { + "id": "1468", + "parents": [ + "1465" + ] + }, + { + "id": "1469", + "parents": [ + "1468" + ] + }, + { + "id": "1470", + "parents": [ + "1466", + "1469", + "1467" + ] + }, + { + "id": "1471", + "parents": [ + "1470" + ] + }, + { + "id": "1472", + "parents": [ + "1466", + "1469", + "1467" + ] + }, + { + "id": "1473", + "parents": [ + "1466", + "1467", + "1469" + ] + }, + { 
+ "id": "1474", + "parents": [ + "1473", + "1470", + "1472" + ] + }, + { + "id": "1475", + "parents": [ + "1474" + ] + }, + { + "id": "1476", + "parents": [ + "1475", + "1471" + ] + }, + { + "id": "1477", + "parents": [ + "1476" + ] + }, + { + "id": "1478", + "parents": [ + "1475", + "1471" + ] + }, + { + "id": "1479", + "parents": [ + "1471", + "1475" + ] + }, + { + "id": "1480", + "parents": [ + "1475", + "1471" + ] + }, + { + "id": "1481", + "parents": [ + "1478", + "1480", + "1476" + ] + }, + { + "id": "1482", + "parents": [ + "1476", + "1480" + ] + }, + { + "id": "1483", + "parents": [ + "1477", + "1482", + "1481", + "1479" + ] + }, + { + "id": "1484", + "parents": [ + "1481", + "1477", + "1482", + "1479" + ] + }, + { + "id": "1485", + "parents": [ + "1483", + "1484" + ] + }, + { + "id": "1486", + "parents": [ + "1481", + "1477", + "1482", + "1479" + ] + }, + { + "id": "1487", + "parents": [ + "1479", + "1482", + "1477", + "1481" + ] + }, + { + "id": "1488", + "parents": [ + "1479", + "1477", + "1481", + "1482" + ] + }, + { + "id": "1489", + "parents": [ + "1484", + "1483", + "1486" + ] + }, + { + "id": "1490", + "parents": [ + "1487", + "1483", + "1486", + "1488", + "1484" + ] + }, + { + "id": "1491", + "parents": [ + "1484", + "1483" + ] + }, + { + "id": "1492", + "parents": [ + "1489", + "1491", + "1490" + ] + }, + { + "id": "1493", + "parents": [ + "1487", + "1488", + "1486", + "1491" + ] + }, + { + "id": "1494", + "parents": [ + "1485", + "1491", + "1489", + "1490" + ] + }, + { + "id": "1495", + "parents": [ + "1492" + ] + }, + { + "id": "1496", + "parents": [ + "1491", + "1490", + "1489", + "1485" + ] + }, + { + "id": "1497", + "parents": [ + "1485", + "1493", + "1489", + "1490" + ] + }, + { + "id": "1498", + "parents": [ + "1496", + "1497", + "1495", + "1494" + ] + }, + { + "id": "1499", + "parents": [ + "1496", + "1495", + "1497", + "1494" + ] + }, + { + "id": "1500", + "parents": [ + "1495", + "1497", + "1494", + "1496" + ] + }, + { + "id": "1501", + 
"parents": [ + "1500", + "1499" + ] + }, + { + "id": "1502", + "parents": [ + "1499" + ] + }, + { + "id": "1503", + "parents": [ + "1502", + "1500", + "1498" + ] + }, + { + "id": "1504", + "parents": [ + "1501", + "1503" + ] + }, + { + "id": "1505", + "parents": [ + "1501", + "1503" + ] + }, + { + "id": "1506", + "parents": [ + "1503", + "1501" + ] + }, + { + "id": "1507", + "parents": [ + "1504", + "1505", + "1506" + ] + }, + { + "id": "1508", + "parents": [ + "1507" + ] + }, + { + "id": "1509", + "parents": [ + "1505" + ] + }, + { + "id": "1510", + "parents": [ + "1505" + ] + }, + { + "id": "1511", + "parents": [ + "1508", + "1509", + "1510" + ] + }, + { + "id": "1512", + "parents": [ + "1509", + "1508", + "1510" + ] + }, + { + "id": "1513", + "parents": [ + "1511" + ] + }, + { + "id": "1514", + "parents": [ + "1512", + "1513" + ] + }, + { + "id": "1515", + "parents": [ + "1511", + "1512" + ] + }, + { + "id": "1516", + "parents": [ + "1515", + "1513" + ] + }, + { + "id": "1517", + "parents": [ + "1515", + "1514" + ] + }, + { + "id": "1518", + "parents": [ + "1516", + "1517" + ] + }, + { + "id": "1519", + "parents": [ + "1512", + "1513" + ] + }, + { + "id": "1520", + "parents": [ + "1515", + "1514" + ] + }, + { + "id": "1521", + "parents": [ + "1518", + "1519" + ] + }, + { + "id": "1522", + "parents": [ + "1520", + "1521" + ] + }, + { + "id": "1523", + "parents": [ + "1522" + ] + }, + { + "id": "1524", + "parents": [ + "1521", + "1520" + ] + }, + { + "id": "1525", + "parents": [ + "1524", + "1523" + ] + }, + { + "id": "1526", + "parents": [ + "1523" + ] + }, + { + "id": "1527", + "parents": [ + "1526" + ] + }, + { + "id": "1528", + "parents": [ + "1526", + "1525" + ] + }, + { + "id": "1529", + "parents": [ + "1526", + "1525" + ] + }, + { + "id": "1530", + "parents": [ + "1527", + "1528" + ] + }, + { + "id": "1531", + "parents": [ + "1530", + "1529" + ] + }, + { + "id": "1532", + "parents": [ + "1530", + "1529" + ] + }, + { + "id": "1533", + "parents": [ + "1532" + 
] + }, + { + "id": "1534", + "parents": [ + "1531", + "1532" + ] + }, + { + "id": "1535", + "parents": [ + "1534" + ] + }, + { + "id": "1536", + "parents": [ + "1535" + ] + }, + { + "id": "1537", + "parents": [ + "1533", + "1535" + ] + }, + { + "id": "1538", + "parents": [ + "1537" + ] + }, + { + "id": "1539", + "parents": [ + "1533", + "1536" + ] + }, + { + "id": "1540", + "parents": [ + "1536", + "1533" + ] + }, + { + "id": "1541", + "parents": [ + "1536", + "1538" + ] + }, + { + "id": "1542", + "parents": [ + "1540", + "1541" + ] + }, + { + "id": "1543", + "parents": [ + "1538", + "1540", + "1539" + ] + }, + { + "id": "1544", + "parents": [ + "1543", + "1541" + ] + }, + { + "id": "1545", + "parents": [ + "1542" + ] + }, + { + "id": "1546", + "parents": [ + "1544" + ] + }, + { + "id": "1547", + "parents": [ + "1545", + "1546" + ] + }, + { + "id": "1548", + "parents": [ + "1547" + ] + }, + { + "id": "1549", + "parents": [ + "1546", + "1545" + ] + }, + { + "id": "1550", + "parents": [ + "1548" + ] + }, + { + "id": "1551", + "parents": [ + "1549", + "1550" + ] + }, + { + "id": "1552", + "parents": [ + "1549", + "1550" + ] + }, + { + "id": "1553", + "parents": [ + "1551" + ] + }, + { + "id": "1554", + "parents": [ + "1549", + "1550" + ] + }, + { + "id": "1555", + "parents": [ + "1553", + "1554", + "1552" + ] + }, + { + "id": "1556", + "parents": [ + "1551", + "1552" + ] + }, + { + "id": "1557", + "parents": [ + "1551" + ] + }, + { + "id": "1558", + "parents": [ + "1557", + "1552" + ] + }, + { + "id": "1559", + "parents": [ + "1558" + ] + }, + { + "id": "1560", + "parents": [ + "1559" + ] + }, + { + "id": "1561", + "parents": [ + "1551" + ] + }, + { + "id": "1562", + "parents": [ + "1560", + "1555", + "1556", + "1561" + ] + }, + { + "id": "1563", + "parents": [ + "1553", + "1561", + "1558" + ] + }, + { + "id": "1564", + "parents": [ + "1556", + "1560", + "1563", + "1555" + ] + }, + { + "id": "1565", + "parents": [ + "1562", + "1564" + ] + }, + { + "id": "1566", + 
"parents": [ + "1565" + ] + }, + { + "id": "1567", + "parents": [ + "1566" + ] + }, + { + "id": "1568", + "parents": [ + "1565" + ] + }, + { + "id": "1569", + "parents": [ + "1567" + ] + }, + { + "id": "1570", + "parents": [ + "1568", + "1569" + ] + }, + { + "id": "1571", + "parents": [ + "1570" + ] + }, + { + "id": "1572", + "parents": [ + "1570" + ] + }, + { + "id": "1573", + "parents": [ + "1572" + ] + }, + { + "id": "1574", + "parents": [ + "1573" + ] + }, + { + "id": "1575", + "parents": [ + "1572", + "1571" + ] + }, + { + "id": "1576", + "parents": [ + "1571", + "1574" + ] + }, + { + "id": "1577", + "parents": [ + "1571", + "1573" + ] + }, + { + "id": "1578", + "parents": [ + "1577", + "1574" + ] + }, + { + "id": "1579", + "parents": [ + "1576", + "1575", + "1577" + ] + }, + { + "id": "1580", + "parents": [ + "1578", + "1579" + ] + }, + { + "id": "1581", + "parents": [ + "1575", + "1576", + "1577" + ] + }, + { + "id": "1582", + "parents": [ + "1581", + "1580" + ] + }, + { + "id": "1583", + "parents": [ + "1579", + "1581", + "1578" + ] + }, + { + "id": "1584", + "parents": [ + "1582", + "1583" + ] + }, + { + "id": "1585", + "parents": [ + "1582", + "1583" + ] + }, + { + "id": "1586", + "parents": [ + "1583", + "1582" + ] + }, + { + "id": "1587", + "parents": [ + "1585", + "1584" + ] + }, + { + "id": "1588", + "parents": [ + "1586", + "1587" + ] + }, + { + "id": "1589", + "parents": [ + "1588" + ] + }, + { + "id": "1590", + "parents": [ + "1589" + ] + }, + { + "id": "1591", + "parents": [ + "1588" + ] + }, + { + "id": "1592", + "parents": [ + "1591" + ] + }, + { + "id": "1593", + "parents": [ + "1589", + "1591" + ] + }, + { + "id": "1594", + "parents": [ + "1590", + "1591" + ] + }, + { + "id": "1595", + "parents": [ + "1594" + ] + }, + { + "id": "1596", + "parents": [ + "1593", + "1592", + "1590" + ] + }, + { + "id": "1597", + "parents": [ + "1596", + "1595" + ] + }, + { + "id": "1598", + "parents": [ + "1596", + "1595" + ] + }, + { + "id": "1599", + "parents": 
[ + "1598", + "1597" + ] + }, + { + "id": "1600", + "parents": [ + "1598", + "1597" + ] + }, + { + "id": "1601", + "parents": [ + "1596", + "1595" + ] + }, + { + "id": "1602", + "parents": [ + "1601", + "1600" + ] + }, + { + "id": "1603", + "parents": [ + "1602", + "1599" + ] + }, + { + "id": "1604", + "parents": [ + "1602", + "1599" + ] + }, + { + "id": "1605", + "parents": [ + "1604", + "1603" + ] + }, + { + "id": "1606", + "parents": [ + "1604" + ] + }, + { + "id": "1607", + "parents": [ + "1605", + "1606" + ] + }, + { + "id": "1608", + "parents": [ + "1606", + "1605" + ] + }, + { + "id": "1609", + "parents": [ + "1607" + ] + }, + { + "id": "1610", + "parents": [ + "1609", + "1608" + ] + }, + { + "id": "1611", + "parents": [ + "1607" + ] + }, + { + "id": "1612", + "parents": [ + "1611", + "1610" + ] + }, + { + "id": "1613", + "parents": [ + "1607" + ] + }, + { + "id": "1614", + "parents": [ + "1610", + "1611", + "1613" + ] + }, + { + "id": "1615", + "parents": [ + "1614" + ] + }, + { + "id": "1616", + "parents": [ + "1615", + "1612" + ] + }, + { + "id": "1617", + "parents": [ + "1612", + "1613" + ] + }, + { + "id": "1618", + "parents": [ + "1615", + "1612" + ] + }, + { + "id": "1619", + "parents": [ + "1616", + "1618", + "1617" + ] + }, + { + "id": "1620", + "parents": [ + "1616", + "1617", + "1618" + ] + }, + { + "id": "1621", + "parents": [ + "1617", + "1618", + "1616" + ] + }, + { + "id": "1622", + "parents": [ + "1616", + "1618", + "1617" + ] + }, + { + "id": "1623", + "parents": [ + "1619", + "1622", + "1620", + "1621" + ] + }, + { + "id": "1624", + "parents": [ + "1620", + "1622", + "1619", + "1621" + ] + }, + { + "id": "1625", + "parents": [ + "1623" + ] + }, + { + "id": "1626", + "parents": [ + "1625", + "1624" + ] + }, + { + "id": "1627", + "parents": [ + "1625", + "1624" + ] + }, + { + "id": "1628", + "parents": [ + "1627", + "1626" + ] + }, + { + "id": "1629", + "parents": [ + "1624", + "1625" + ] + }, + { + "id": "1630", + "parents": [ + "1629" + ] + 
}, + { + "id": "1631", + "parents": [ + "1625", + "1624" + ] + }, + { + "id": "1632", + "parents": [ + "1631", + "1626", + "1627", + "1630" + ] + }, + { + "id": "1633", + "parents": [ + "1628", + "1632" + ] + }, + { + "id": "1634", + "parents": [ + "1633" + ] + }, + { + "id": "1635", + "parents": [ + "1632", + "1628" + ] + }, + { + "id": "1636", + "parents": [ + "1632", + "1628" + ] + }, + { + "id": "1637", + "parents": [ + "1633" + ] + }, + { + "id": "1638", + "parents": [ + "1633", + "1635", + "1636" + ] + }, + { + "id": "1639", + "parents": [ + "1636", + "1637", + "1634", + "1635" + ] + }, + { + "id": "1640", + "parents": [ + "1638", + "1634" + ] + }, + { + "id": "1641", + "parents": [ + "1638", + "1639" + ] + }, + { + "id": "1642", + "parents": [ + "1639" + ] + }, + { + "id": "1643", + "parents": [ + "1642" + ] + }, + { + "id": "1644", + "parents": [ + "1640", + "1641", + "1643" + ] + }, + { + "id": "1645", + "parents": [ + "1637", + "1635", + "1634", + "1636" + ] + }, + { + "id": "1646", + "parents": [ + "1640", + "1645", + "1643", + "1641" + ] + }, + { + "id": "1647", + "parents": [ + "1640", + "1645", + "1641" + ] + }, + { + "id": "1648", + "parents": [ + "1643", + "1647" + ] + }, + { + "id": "1649", + "parents": [ + "1646", + "1648", + "1644" + ] + }, + { + "id": "1650", + "parents": [ + "1648", + "1646", + "1644" + ] + }, + { + "id": "1651", + "parents": [ + "1650", + "1649" + ] + }, + { + "id": "1652", + "parents": [ + "1650", + "1649" + ] + }, + { + "id": "1653", + "parents": [ + "1652" + ] + }, + { + "id": "1654", + "parents": [ + "1651", + "1653" + ] + }, + { + "id": "1655", + "parents": [ + "1654" + ] + }, + { + "id": "1656", + "parents": [ + "1651" + ] + }, + { + "id": "1657", + "parents": [ + "1656", + "1653" + ] + }, + { + "id": "1658", + "parents": [ + "1657", + "1655" + ] + }, + { + "id": "1659", + "parents": [ + "1657" + ] + }, + { + "id": "1660", + "parents": [ + "1656", + "1655" + ] + }, + { + "id": "1661", + "parents": [ + "1659", + "1658" + 
] + }, + { + "id": "1662", + "parents": [ + "1657", + "1655" + ] + }, + { + "id": "1663", + "parents": [ + "1662" + ] + }, + { + "id": "1664", + "parents": [ + "1660", + "1659" + ] + }, + { + "id": "1665", + "parents": [ + "1659", + "1654" + ] + }, + { + "id": "1666", + "parents": [ + "1665", + "1661", + "1663" + ] + }, + { + "id": "1667", + "parents": [ + "1666", + "1664" + ] + }, + { + "id": "1668", + "parents": [ + "1664", + "1666" + ] + }, + { + "id": "1669", + "parents": [ + "1664", + "1666" + ] + }, + { + "id": "1670", + "parents": [ + "1667", + "1668", + "1669" + ] + }, + { + "id": "1671", + "parents": [ + "1670" + ] + }, + { + "id": "1672", + "parents": [ + "1667", + "1668", + "1669" + ] + }, + { + "id": "1673", + "parents": [ + "1672" + ] + }, + { + "id": "1674", + "parents": [ + "1671", + "1673" + ] + }, + { + "id": "1675", + "parents": [ + "1673", + "1670" + ] + }, + { + "id": "1676", + "parents": [ + "1675", + "1671" + ] + }, + { + "id": "1677", + "parents": [ + "1676", + "1674" + ] + }, + { + "id": "1678", + "parents": [ + "1675", + "1671" + ] + }, + { + "id": "1679", + "parents": [ + "1676" + ] + }, + { + "id": "1680", + "parents": [ + "1677", + "1678" + ] + }, + { + "id": "1681", + "parents": [ + "1680", + "1679" + ] + }, + { + "id": "1682", + "parents": [ + "1680", + "1679" + ] + }, + { + "id": "1683", + "parents": [ + "1682" + ] + }, + { + "id": "1684", + "parents": [ + "1679", + "1680" + ] + }, + { + "id": "1685", + "parents": [ + "1684", + "1681", + "1683" + ] + }, + { + "id": "1686", + "parents": [ + "1685" + ] + }, + { + "id": "1687", + "parents": [ + "1685" + ] + }, + { + "id": "1688", + "parents": [ + "1687" + ] + }, + { + "id": "1689", + "parents": [ + "1685" + ] + }, + { + "id": "1690", + "parents": [ + "1689", + "1688" + ] + }, + { + "id": "1691", + "parents": [ + "1685" + ] + }, + { + "id": "1692", + "parents": [ + "1686", + "1690" + ] + }, + { + "id": "1693", + "parents": [ + "1691", + "1688" + ] + }, + { + "id": "1694", + "parents": [ + 
"1690", + "1686" + ] + }, + { + "id": "1695", + "parents": [ + "1688", + "1689", + "1691" + ] + }, + { + "id": "1696", + "parents": [ + "1694", + "1693", + "1695" + ] + }, + { + "id": "1697", + "parents": [ + "1693", + "1695", + "1694", + "1692" + ] + }, + { + "id": "1698", + "parents": [ + "1692", + "1696" + ] + }, + { + "id": "1699", + "parents": [ + "1697", + "1698" + ] + }, + { + "id": "1700", + "parents": [ + "1699" + ] + }, + { + "id": "1701", + "parents": [ + "1698", + "1697" + ] + }, + { + "id": "1702", + "parents": [ + "1699" + ] + }, + { + "id": "1703", + "parents": [ + "1701", + "1702", + "1700" + ] + }, + { + "id": "1704", + "parents": [ + "1702", + "1701", + "1700" + ] + }, + { + "id": "1705", + "parents": [ + "1701", + "1702", + "1700" + ] + }, + { + "id": "1706", + "parents": [ + "1703", + "1705", + "1704" + ] + }, + { + "id": "1707", + "parents": [ + "1704", + "1705", + "1703" + ] + }, + { + "id": "1708", + "parents": [ + "1706", + "1707" + ] + }, + { + "id": "1709", + "parents": [ + "1707", + "1706" + ] + }, + { + "id": "1710", + "parents": [ + "1706", + "1707" + ] + }, + { + "id": "1711", + "parents": [ + "1706", + "1707" + ] + }, + { + "id": "1712", + "parents": [ + "1711", + "1708" + ] + }, + { + "id": "1713", + "parents": [ + "1712", + "1709", + "1710" + ] + }, + { + "id": "1714", + "parents": [ + "1713" + ] + }, + { + "id": "1715", + "parents": [ + "1709", + "1710", + "1708", + "1711" + ] + }, + { + "id": "1716", + "parents": [ + "1715" + ] + }, + { + "id": "1717", + "parents": [ + "1709", + "1712", + "1710" + ] + }, + { + "id": "1718", + "parents": [ + "1717", + "1714", + "1716" + ] + }, + { + "id": "1719", + "parents": [ + "1718" + ] + }, + { + "id": "1720", + "parents": [ + "1715" + ] + }, + { + "id": "1721", + "parents": [ + "1718" + ] + }, + { + "id": "1722", + "parents": [ + "1720", + "1719", + "1721" + ] + }, + { + "id": "1723", + "parents": [ + "1719", + "1720" + ] + }, + { + "id": "1724", + "parents": [ + "1720", + "1718" + ] + }, + { 
+ "id": "1725", + "parents": [ + "1724", + "1722", + "1723" + ] + }, + { + "id": "1726", + "parents": [ + "1723", + "1724", + "1722" + ] + }, + { + "id": "1727", + "parents": [ + "1726" + ] + }, + { + "id": "1728", + "parents": [ + "1726", + "1725" + ] + }, + { + "id": "1729", + "parents": [ + "1726", + "1725" + ] + }, + { + "id": "1730", + "parents": [ + "1728", + "1727" + ] + }, + { + "id": "1731", + "parents": [ + "1725", + "1727" + ] + }, + { + "id": "1732", + "parents": [ + "1727", + "1725" + ] + }, + { + "id": "1733", + "parents": [ + "1730", + "1729" + ] + }, + { + "id": "1734", + "parents": [ + "1731", + "1732", + "1733" + ] + }, + { + "id": "1735", + "parents": [ + "1728", + "1727" + ] + }, + { + "id": "1736", + "parents": [ + "1733", + "1735", + "1731", + "1732" + ] + }, + { + "id": "1737", + "parents": [ + "1735", + "1734" + ] + }, + { + "id": "1738", + "parents": [ + "1736", + "1737" + ] + }, + { + "id": "1739", + "parents": [ + "1736", + "1737" + ] + }, + { + "id": "1740", + "parents": [ + "1736" + ] + }, + { + "id": "1741", + "parents": [ + "1740", + "1738" + ] + }, + { + "id": "1742", + "parents": [ + "1739", + "1741" + ] + }, + { + "id": "1743", + "parents": [ + "1742" + ] + }, + { + "id": "1744", + "parents": [ + "1743" + ] + }, + { + "id": "1745", + "parents": [ + "1739", + "1738", + "1740" + ] + }, + { + "id": "1746", + "parents": [ + "1744" + ] + }, + { + "id": "1747", + "parents": [ + "1745", + "1746" + ] + }, + { + "id": "1748", + "parents": [ + "1745", + "1741" + ] + }, + { + "id": "1749", + "parents": [ + "1745", + "1744" + ] + }, + { + "id": "1750", + "parents": [ + "1746", + "1749", + "1748" + ] + }, + { + "id": "1751", + "parents": [ + "1750", + "1747" + ] + }, + { + "id": "1752", + "parents": [ + "1750" + ] + }, + { + "id": "1753", + "parents": [ + "1747", + "1752" + ] + }, + { + "id": "1754", + "parents": [ + "1753", + "1751" + ] + }, + { + "id": "1755", + "parents": [ + "1751", + "1753" + ] + }, + { + "id": "1756", + "parents": [ + 
"1751", + "1753" + ] + }, + { + "id": "1757", + "parents": [ + "1754", + "1755", + "1756" + ] + }, + { + "id": "1758", + "parents": [ + "1757" + ] + }, + { + "id": "1759", + "parents": [ + "1757" + ] + }, + { + "id": "1760", + "parents": [ + "1755", + "1756", + "1754" + ] + }, + { + "id": "1761", + "parents": [ + "1757", + "1760" + ] + }, + { + "id": "1762", + "parents": [ + "1761", + "1758" + ] + }, + { + "id": "1763", + "parents": [ + "1761", + "1759" + ] + }, + { + "id": "1764", + "parents": [ + "1759", + "1758", + "1760" + ] + }, + { + "id": "1765", + "parents": [ + "1763" + ] + }, + { + "id": "1766", + "parents": [ + "1761", + "1758" + ] + }, + { + "id": "1767", + "parents": [ + "1766", + "1765" + ] + }, + { + "id": "1768", + "parents": [ + "1762", + "1765", + "1766" + ] + }, + { + "id": "1769", + "parents": [ + "1767", + "1768", + "1764" + ] + }, + { + "id": "1770", + "parents": [ + "1767", + "1764", + "1768" + ] + }, + { + "id": "1771", + "parents": [ + "1770", + "1769" + ] + }, + { + "id": "1772", + "parents": [ + "1771" + ] + }, + { + "id": "1773", + "parents": [ + "1769", + "1770" + ] + }, + { + "id": "1774", + "parents": [ + "1772", + "1773" + ] + }, + { + "id": "1775", + "parents": [ + "1773", + "1772" + ] + }, + { + "id": "1776", + "parents": [ + "1775" + ] + }, + { + "id": "1777", + "parents": [ + "1774" + ] + }, + { + "id": "1778", + "parents": [ + "1774" + ] + }, + { + "id": "1779", + "parents": [ + "1778" + ] + }, + { + "id": "1780", + "parents": [ + "1778", + "1775" + ] + }, + { + "id": "1781", + "parents": [ + "1779", + "1776" + ] + }, + { + "id": "1782", + "parents": [ + "1780", + "1781", + "1777" + ] + }, + { + "id": "1783", + "parents": [ + "1782" + ] + }, + { + "id": "1784", + "parents": [ + "1782" + ] + }, + { + "id": "1785", + "parents": [ + "1783", + "1784" + ] + }, + { + "id": "1786", + "parents": [ + "1785" + ] + }, + { + "id": "1787", + "parents": [ + "1786" + ] + }, + { + "id": "1788", + "parents": [ + "1784", + "1783" + ] + }, + { + 
"id": "1789", + "parents": [ + "1784", + "1783" + ] + }, + { + "id": "1790", + "parents": [ + "1788", + "1786", + "1789" + ] + }, + { + "id": "1791", + "parents": [ + "1788", + "1789", + "1786" + ] + }, + { + "id": "1792", + "parents": [ + "1790", + "1787" + ] + }, + { + "id": "1793", + "parents": [ + "1790", + "1787", + "1791" + ] + }, + { + "id": "1794", + "parents": [ + "1792", + "1793" + ] + }, + { + "id": "1795", + "parents": [ + "1794" + ] + }, + { + "id": "1796", + "parents": [ + "1795" + ] + }, + { + "id": "1797", + "parents": [ + "1793" + ] + }, + { + "id": "1798", + "parents": [ + "1796", + "1797" + ] + }, + { + "id": "1799", + "parents": [ + "1792", + "1793" + ] + }, + { + "id": "1800", + "parents": [ + "1795", + "1797" + ] + }, + { + "id": "1801", + "parents": [ + "1792", + "1797" + ] + }, + { + "id": "1802", + "parents": [ + "1794", + "1797" + ] + }, + { + "id": "1803", + "parents": [ + "1801", + "1799", + "1798", + "1800" + ] + }, + { + "id": "1804", + "parents": [ + "1801", + "1799", + "1794" + ] + }, + { + "id": "1805", + "parents": [ + "1800", + "1804" + ] + }, + { + "id": "1806", + "parents": [ + "1802", + "1803", + "1805" + ] + }, + { + "id": "1807", + "parents": [ + "1805" + ] + }, + { + "id": "1808", + "parents": [ + "1802", + "1803", + "1807" + ] + }, + { + "id": "1809", + "parents": [ + "1807", + "1806" + ] + }, + { + "id": "1810", + "parents": [ + "1809", + "1808" + ] + }, + { + "id": "1811", + "parents": [ + "1809", + "1808" + ] + }, + { + "id": "1812", + "parents": [ + "1808", + "1809" + ] + }, + { + "id": "1813", + "parents": [ + "1812", + "1811" + ] + }, + { + "id": "1814", + "parents": [ + "1810", + "1813" + ] + }, + { + "id": "1815", + "parents": [ + "1811", + "1812" + ] + }, + { + "id": "1816", + "parents": [ + "1813", + "1810" + ] + }, + { + "id": "1817", + "parents": [ + "1814" + ] + }, + { + "id": "1818", + "parents": [ + "1810", + "1815", + "1813" + ] + }, + { + "id": "1819", + "parents": [ + "1817", + "1815" + ] + }, + { + "id": 
"1820", + "parents": [ + "1815", + "1816" + ] + }, + { + "id": "1821", + "parents": [ + "1818", + "1816" + ] + }, + { + "id": "1822", + "parents": [ + "1818", + "1819", + "1816" + ] + }, + { + "id": "1823", + "parents": [ + "1819", + "1820", + "1818" + ] + }, + { + "id": "1824", + "parents": [ + "1820", + "1819", + "1821" + ] + }, + { + "id": "1825", + "parents": [ + "1824", + "1823", + "1822" + ] + }, + { + "id": "1826", + "parents": [ + "1825" + ] + }, + { + "id": "1827", + "parents": [ + "1821", + "1823", + "1822" + ] + }, + { + "id": "1828", + "parents": [ + "1821", + "1822", + "1820" + ] + }, + { + "id": "1829", + "parents": [ + "1823", + "1828" + ] + }, + { + "id": "1830", + "parents": [ + "1825", + "1827", + "1829" + ] + }, + { + "id": "1831", + "parents": [ + "1830" + ] + }, + { + "id": "1832", + "parents": [ + "1830" + ] + }, + { + "id": "1833", + "parents": [ + "1831", + "1826", + "1832" + ] + }, + { + "id": "1834", + "parents": [ + "1833" + ] + }, + { + "id": "1835", + "parents": [ + "1833" + ] + }, + { + "id": "1836", + "parents": [ + "1832", + "1826", + "1831" + ] + }, + { + "id": "1837", + "parents": [ + "1836" + ] + }, + { + "id": "1838", + "parents": [ + "1836", + "1835" + ] + }, + { + "id": "1839", + "parents": [ + "1833", + "1837" + ] + }, + { + "id": "1840", + "parents": [ + "1833", + "1836" + ] + }, + { + "id": "1841", + "parents": [ + "1839", + "1834", + "1838" + ] + }, + { + "id": "1842", + "parents": [ + "1841", + "1840" + ] + }, + { + "id": "1843", + "parents": [ + "1841", + "1840" + ] + }, + { + "id": "1844", + "parents": [ + "1843" + ] + }, + { + "id": "1845", + "parents": [ + "1843" + ] + }, + { + "id": "1846", + "parents": [ + "1843" + ] + }, + { + "id": "1847", + "parents": [ + "1845", + "1844" + ] + }, + { + "id": "1848", + "parents": [ + "1844", + "1846" + ] + }, + { + "id": "1849", + "parents": [ + "1848", + "1847", + "1842" + ] + }, + { + "id": "1850", + "parents": [ + "1842", + "1848", + "1847" + ] + }, + { + "id": "1851", + 
"parents": [ + "1847", + "1842", + "1848" + ] + }, + { + "id": "1852", + "parents": [ + "1849", + "1850" + ] + }, + { + "id": "1853", + "parents": [ + "1851", + "1852" + ] + }, + { + "id": "1854", + "parents": [ + "1852" + ] + }, + { + "id": "1855", + "parents": [ + "1851", + "1854" + ] + }, + { + "id": "1856", + "parents": [ + "1853", + "1855" + ] + }, + { + "id": "1857", + "parents": [ + "1855", + "1853" + ] + }, + { + "id": "1858", + "parents": [ + "1857" + ] + }, + { + "id": "1859", + "parents": [ + "1855", + "1853" + ] + }, + { + "id": "1860", + "parents": [ + "1859", + "1856" + ] + }, + { + "id": "1861", + "parents": [ + "1860", + "1858" + ] + }, + { + "id": "1862", + "parents": [ + "1861" + ] + }, + { + "id": "1863", + "parents": [ + "1858", + "1859", + "1856" + ] + }, + { + "id": "1864", + "parents": [ + "1862", + "1863" + ] + }, + { + "id": "1865", + "parents": [ + "1863", + "1861" + ] + }, + { + "id": "1866", + "parents": [ + "1865", + "1864" + ] + }, + { + "id": "1867", + "parents": [ + "1865", + "1864" + ] + }, + { + "id": "1868", + "parents": [ + "1864", + "1865" + ] + }, + { + "id": "1869", + "parents": [ + "1866" + ] + }, + { + "id": "1870", + "parents": [ + "1868", + "1867", + "1869" + ] + }, + { + "id": "1871", + "parents": [ + "1868", + "1867", + "1869" + ] + }, + { + "id": "1872", + "parents": [ + "1871" + ] + }, + { + "id": "1873", + "parents": [ + "1872", + "1870" + ] + }, + { + "id": "1874", + "parents": [ + "1872", + "1870" + ] + }, + { + "id": "1875", + "parents": [ + "1874" + ] + }, + { + "id": "1876", + "parents": [ + "1873" + ] + }, + { + "id": "1877", + "parents": [ + "1873" + ] + }, + { + "id": "1878", + "parents": [ + "1877" + ] + }, + { + "id": "1879", + "parents": [ + "1878", + "1876", + "1875" + ] + }, + { + "id": "1880", + "parents": [ + "1879" + ] + }, + { + "id": "1881", + "parents": [ + "1879" + ] + }, + { + "id": "1882", + "parents": [ + "1880", + "1881" + ] + }, + { + "id": "1883", + "parents": [ + "1881", + "1880" + ] + }, + 
{ + "id": "1884", + "parents": [ + "1882", + "1883" + ] + }, + { + "id": "1885", + "parents": [ + "1880" + ] + }, + { + "id": "1886", + "parents": [ + "1881", + "1880" + ] + }, + { + "id": "1887", + "parents": [ + "1886", + "1884", + "1885" + ] + }, + { + "id": "1888", + "parents": [ + "1885", + "1883" + ] + }, + { + "id": "1889", + "parents": [ + "1888" + ] + }, + { + "id": "1890", + "parents": [ + "1887", + "1889" + ] + }, + { + "id": "1891", + "parents": [ + "1886", + "1888", + "1884" + ] + }, + { + "id": "1892", + "parents": [ + "1891", + "1890" + ] + }, + { + "id": "1893", + "parents": [ + "1892" + ] + }, + { + "id": "1894", + "parents": [ + "1893" + ] + }, + { + "id": "1895", + "parents": [ + "1890", + "1891" + ] + }, + { + "id": "1896", + "parents": [ + "1892", + "1895" + ] + }, + { + "id": "1897", + "parents": [ + "1896", + "1894" + ] + }, + { + "id": "1898", + "parents": [ + "1897" + ] + }, + { + "id": "1899", + "parents": [ + "1898" + ] + }, + { + "id": "1900", + "parents": [ + "1897" + ] + }, + { + "id": "1901", + "parents": [ + "1896", + "1894" + ] + }, + { + "id": "1902", + "parents": [ + "1901", + "1897" + ] + }, + { + "id": "1903", + "parents": [ + "1902", + "1900", + "1899" + ] + }, + { + "id": "1904", + "parents": [ + "1903" + ] + }, + { + "id": "1905", + "parents": [ + "1900", + "1899", + "1902" + ] + }, + { + "id": "1906", + "parents": [ + "1903", + "1905" + ] + }, + { + "id": "1907", + "parents": [ + "1905", + "1903" + ] + }, + { + "id": "1908", + "parents": [ + "1903", + "1905" + ] + }, + { + "id": "1909", + "parents": [ + "1904", + "1908", + "1907", + "1906" + ] + }, + { + "id": "1910", + "parents": [ + "1904", + "1906", + "1908", + "1907" + ] + }, + { + "id": "1911", + "parents": [ + "1909" + ] + }, + { + "id": "1912", + "parents": [ + "1909" + ] + }, + { + "id": "1913", + "parents": [ + "1911" + ] + }, + { + "id": "1914", + "parents": [ + "1910", + "1911" + ] + }, + { + "id": "1915", + "parents": [ + "1911", + "1912", + "1910" + ] + }, + { + 
"id": "1916", + "parents": [ + "1911", + "1910" + ] + }, + { + "id": "1917", + "parents": [ + "1913", + "1916" + ] + }, + { + "id": "1918", + "parents": [ + "1912", + "1917" + ] + }, + { + "id": "1919", + "parents": [ + "1910", + "1911", + "1912" + ] + }, + { + "id": "1920", + "parents": [ + "1918", + "1914" + ] + }, + { + "id": "1921", + "parents": [ + "1914", + "1915", + "1918" + ] + }, + { + "id": "1922", + "parents": [ + "1919", + "1920", + "1921" + ] + }, + { + "id": "1923", + "parents": [ + "1922" + ] + }, + { + "id": "1924", + "parents": [ + "1923" + ] + }, + { + "id": "1925", + "parents": [ + "1923" + ] + }, + { + "id": "1926", + "parents": [ + "1922" + ] + }, + { + "id": "1927", + "parents": [ + "1923" + ] + }, + { + "id": "1928", + "parents": [ + "1925" + ] + }, + { + "id": "1929", + "parents": [ + "1924", + "1928", + "1927", + "1926" + ] + }, + { + "id": "1930", + "parents": [ + "1929" + ] + }, + { + "id": "1931", + "parents": [ + "1926", + "1927", + "1924", + "1928" + ] + }, + { + "id": "1932", + "parents": [ + "1929", + "1931" + ] + }, + { + "id": "1933", + "parents": [ + "1930", + "1932" + ] + }, + { + "id": "1934", + "parents": [ + "1933" + ] + }, + { + "id": "1935", + "parents": [ + "1933" + ] + }, + { + "id": "1936", + "parents": [ + "1935", + "1934" + ] + }, + { + "id": "1937", + "parents": [ + "1934", + "1935" + ] + }, + { + "id": "1938", + "parents": [ + "1934", + "1935" + ] + }, + { + "id": "1939", + "parents": [ + "1937" + ] + }, + { + "id": "1940", + "parents": [ + "1936", + "1939", + "1938" + ] + }, + { + "id": "1941", + "parents": [ + "1940" + ] + }, + { + "id": "1942", + "parents": [ + "1940" + ] + }, + { + "id": "1943", + "parents": [ + "1940" + ] + }, + { + "id": "1944", + "parents": [ + "1942" + ] + }, + { + "id": "1945", + "parents": [ + "1942", + "1941" + ] + }, + { + "id": "1946", + "parents": [ + "1945", + "1943" + ] + }, + { + "id": "1947", + "parents": [ + "1944", + "1946" + ] + }, + { + "id": "1948", + "parents": [ + "1946", + 
"1944" + ] + }, + { + "id": "1949", + "parents": [ + "1948" + ] + }, + { + "id": "1950", + "parents": [ + "1948" + ] + }, + { + "id": "1951", + "parents": [ + "1945", + "1944", + "1943" + ] + }, + { + "id": "1952", + "parents": [ + "1944", + "1943", + "1945" + ] + }, + { + "id": "1953", + "parents": [ + "1952", + "1949", + "1951" + ] + }, + { + "id": "1954", + "parents": [ + "1952", + "1949", + "1947", + "1951", + "1950" + ] + }, + { + "id": "1955", + "parents": [ + "1954", + "1953" + ] + }, + { + "id": "1956", + "parents": [ + "1953", + "1954" + ] + }, + { + "id": "1957", + "parents": [ + "1953", + "1954" + ] + }, + { + "id": "1958", + "parents": [ + "1956" + ] + }, + { + "id": "1959", + "parents": [ + "1955", + "1957", + "1956" + ] + }, + { + "id": "1960", + "parents": [ + "1957", + "1956", + "1955" + ] + }, + { + "id": "1961", + "parents": [ + "1958", + "1960" + ] + }, + { + "id": "1962", + "parents": [ + "1959", + "1961" + ] + }, + { + "id": "1963", + "parents": [ + "1959", + "1961" + ] + }, + { + "id": "1964", + "parents": [ + "1963" + ] + }, + { + "id": "1965", + "parents": [ + "1959", + "1961" + ] + }, + { + "id": "1966", + "parents": [ + "1965", + "1962", + "1964" + ] + }, + { + "id": "1967", + "parents": [ + "1962", + "1963", + "1965" + ] + }, + { + "id": "1968", + "parents": [ + "1964", + "1965", + "1962" + ] + }, + { + "id": "1969", + "parents": [ + "1967", + "1966", + "1968" + ] + }, + { + "id": "1970", + "parents": [ + "1969" + ] + }, + { + "id": "1971", + "parents": [ + "1964", + "1967" + ] + }, + { + "id": "1972", + "parents": [ + "1969", + "1971" + ] + }, + { + "id": "1973", + "parents": [ + "1970", + "1971" + ] + }, + { + "id": "1974", + "parents": [ + "1972", + "1973" + ] + }, + { + "id": "1975", + "parents": [ + "1971", + "1970" + ] + }, + { + "id": "1976", + "parents": [ + "1973", + "1972" + ] + }, + { + "id": "1977", + "parents": [ + "1975", + "1976" + ] + }, + { + "id": "1978", + "parents": [ + "1977", + "1974" + ] + }, + { + "id": "1979", + 
"parents": [ + "1977", + "1974" + ] + }, + { + "id": "1980", + "parents": [ + "1978", + "1979" + ] + }, + { + "id": "1981", + "parents": [ + "1977", + "1974" + ] + }, + { + "id": "1982", + "parents": [ + "1979", + "1981" + ] + }, + { + "id": "1983", + "parents": [ + "1982", + "1980" + ] + }, + { + "id": "1984", + "parents": [ + "1982", + "1980" + ] + }, + { + "id": "1985", + "parents": [ + "1981", + "1978", + "1979" + ] + }, + { + "id": "1986", + "parents": [ + "1984", + "1983", + "1985" + ] + }, + { + "id": "1987", + "parents": [ + "1986" + ] + }, + { + "id": "1988", + "parents": [ + "1983", + "1985", + "1984" + ] + }, + { + "id": "1989", + "parents": [ + "1988", + "1986" + ] + }, + { + "id": "1990", + "parents": [ + "1987" + ] + }, + { + "id": "1991", + "parents": [ + "1989", + "1990" + ] + }, + { + "id": "1992", + "parents": [ + "1989", + "1990" + ] + }, + { + "id": "1993", + "parents": [ + "1991" + ] + }, + { + "id": "1994", + "parents": [ + "1989", + "1990" + ] + }, + { + "id": "1995", + "parents": [ + "1992", + "1991" + ] + }, + { + "id": "1996", + "parents": [ + "1993" + ] + }, + { + "id": "1997", + "parents": [ + "1995", + "1996", + "1994" + ] + }, + { + "id": "1998", + "parents": [ + "1997" + ] + }, + { + "id": "1999", + "parents": [ + "1997" + ] + }, + { + "id": "2000", + "parents": [ + "1999" + ] + }, + { + "id": "2001", + "parents": [ + "2000" + ] + }, + { + "id": "2002", + "parents": [ + "1998", + "2000" + ] + }, + { + "id": "2003", + "parents": [ + "2002", + "2001" + ] + }, + { + "id": "2004", + "parents": [ + "2003" + ] + }, + { + "id": "2005", + "parents": [ + "2001", + "2002" + ] + }, + { + "id": "2006", + "parents": [ + "2005" + ] + }, + { + "id": "2007", + "parents": [ + "2006", + "2004" + ] + }, + { + "id": "2008", + "parents": [ + "2003", + "2006" + ] + }, + { + "id": "2009", + "parents": [ + "2006", + "2003" + ] + }, + { + "id": "2010", + "parents": [ + "2007", + "2008", + "2009" + ] + }, + { + "id": "2011", + "parents": [ + "2008", + "2009", 
+ "2007" + ] + }, + { + "id": "2012", + "parents": [ + "2010", + "2011" + ] + }, + { + "id": "2013", + "parents": [ + "2012" + ] + }, + { + "id": "2014", + "parents": [ + "2010", + "2011" + ] + }, + { + "id": "2015", + "parents": [ + "2011", + "2010" + ] + }, + { + "id": "2016", + "parents": [ + "2011", + "2010" + ] + }, + { + "id": "2017", + "parents": [ + "2015", + "2016", + "2014", + "2013" + ] + }, + { + "id": "2018", + "parents": [ + "2017" + ] + }, + { + "id": "2019", + "parents": [ + "2018" + ] + }, + { + "id": "2020", + "parents": [ + "2018" + ] + }, + { + "id": "2021", + "parents": [ + "2017" + ] + }, + { + "id": "2022", + "parents": [ + "2013", + "2014", + "2015", + "2016" + ] + }, + { + "id": "2023", + "parents": [ + "2022", + "2019", + "2020" + ] + }, + { + "id": "2024", + "parents": [ + "2023", + "2021" + ] + }, + { + "id": "2025", + "parents": [ + "2024" + ] + } + ] +} \ No newline at end of file diff --git a/domain/consensus/processes/pruningproofmanager/log.go b/domain/consensus/processes/pruningproofmanager/log.go new file mode 100644 index 0000000..1ccd348 --- /dev/null +++ b/domain/consensus/processes/pruningproofmanager/log.go @@ -0,0 +1,5 @@ +package pruningproofmanager + +import "github.com/spectre-project/spectred/infrastructure/logger" + +var log = logger.RegisterSubSystem("PPMN") diff --git a/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go b/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go new file mode 100644 index 0000000..5fd237a --- /dev/null +++ b/domain/consensus/processes/pruningproofmanager/pruningproofmanager.go @@ -0,0 +1,939 @@ +package pruningproofmanager + +import ( + "math/big" + + "github.com/pkg/errors" + consensusDB "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/datastructures/blockheaderstore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/blockrelationstore" + 
"github.com/spectre-project/spectred/domain/consensus/datastructures/ghostdagdatastore" + "github.com/spectre-project/spectred/domain/consensus/datastructures/reachabilitydatastore" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/processes/dagtopologymanager" + "github.com/spectre-project/spectred/domain/consensus/processes/dagtraversalmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/ghostdagmanager" + "github.com/spectre-project/spectred/domain/consensus/processes/reachabilitymanager" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/staging" +) + +type pruningProofManager struct { + databaseContext model.DBManager + + dagTopologyManagers []model.DAGTopologyManager + ghostdagManagers []model.GHOSTDAGManager + reachabilityManager model.ReachabilityManager + dagTraversalManagers []model.DAGTraversalManager + parentsManager model.ParentsManager + pruningManager model.PruningManager + + ghostdagDataStores []model.GHOSTDAGDataStore + pruningStore model.PruningStore + blockHeaderStore model.BlockHeaderStore + blockStatusStore model.BlockStatusStore + finalityStore model.FinalityStore + consensusStateStore model.ConsensusStateStore + blockRelationStore model.BlockRelationStore + reachabilityDataStore model.ReachabilityDataStore + + genesisHash *externalapi.DomainHash + k externalapi.KType + pruningProofM uint64 + maxBlockLevel int + + cachedPruningPoint *externalapi.DomainHash + cachedProof *externalapi.PruningPointProof +} + +// New instantiates a new PruningManager 
+func New( + databaseContext model.DBManager, + + dagTopologyManagers []model.DAGTopologyManager, + ghostdagManagers []model.GHOSTDAGManager, + reachabilityManager model.ReachabilityManager, + dagTraversalManagers []model.DAGTraversalManager, + parentsManager model.ParentsManager, + pruningManager model.PruningManager, + + ghostdagDataStores []model.GHOSTDAGDataStore, + pruningStore model.PruningStore, + blockHeaderStore model.BlockHeaderStore, + blockStatusStore model.BlockStatusStore, + finalityStore model.FinalityStore, + consensusStateStore model.ConsensusStateStore, + blockRelationStore model.BlockRelationStore, + reachabilityDataStore model.ReachabilityDataStore, + + genesisHash *externalapi.DomainHash, + k externalapi.KType, + pruningProofM uint64, + maxBlockLevel int, +) model.PruningProofManager { + + return &pruningProofManager{ + databaseContext: databaseContext, + dagTopologyManagers: dagTopologyManagers, + ghostdagManagers: ghostdagManagers, + reachabilityManager: reachabilityManager, + dagTraversalManagers: dagTraversalManagers, + parentsManager: parentsManager, + pruningManager: pruningManager, + + ghostdagDataStores: ghostdagDataStores, + pruningStore: pruningStore, + blockHeaderStore: blockHeaderStore, + blockStatusStore: blockStatusStore, + finalityStore: finalityStore, + consensusStateStore: consensusStateStore, + blockRelationStore: blockRelationStore, + reachabilityDataStore: reachabilityDataStore, + + genesisHash: genesisHash, + k: k, + pruningProofM: pruningProofM, + maxBlockLevel: maxBlockLevel, + } +} + +func (ppm *pruningProofManager) BuildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "BuildPruningPointProof") + defer onEnd() + + pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if ppm.cachedPruningPoint != nil && ppm.cachedPruningPoint.Equal(pruningPoint) { + return 
ppm.cachedProof, nil + } + + proof, err := ppm.buildPruningPointProof(stagingArea) + if err != nil { + return nil, err + } + + ppm.cachedProof = proof + ppm.cachedPruningPoint = pruningPoint + + return proof, nil +} + +func (ppm *pruningProofManager) buildPruningPointProof(stagingArea *model.StagingArea) (*externalapi.PruningPointProof, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "buildPruningPointProof") + defer onEnd() + + pruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + if pruningPoint.Equal(ppm.genesisHash) { + return &externalapi.PruningPointProof{}, nil + } + + pruningPointHeader, err := ppm.blockHeaderStore.BlockHeader(ppm.databaseContext, stagingArea, pruningPoint) + if err != nil { + return nil, err + } + + maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1 + headersByLevel := make(map[int][]externalapi.BlockHeader) + selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1) + pruningPointLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel) + for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- { + var selectedTip *externalapi.DomainHash + if blockLevel <= pruningPointLevel { + selectedTip = pruningPoint + } else { + blockLevelParents := ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel) + selectedTipCandidates := make([]*externalapi.DomainHash, 0, len(blockLevelParents)) + + // In a pruned node, some pruning point parents might be missing, but we're guaranteed that its + // selected parent is not missing. 
+ for _, parent := range blockLevelParents { + _, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false) + if database.IsNotFoundError(err) { + continue + } + if err != nil { + return nil, err + } + + selectedTipCandidates = append(selectedTipCandidates, parent) + } + + selectedTip, err = ppm.ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTipCandidates...) + if err != nil { + return nil, err + } + } + selectedTipByLevel[blockLevel] = selectedTip + + blockAtDepth2M, err := ppm.blockAtDepth(stagingArea, ppm.ghostdagDataStores[blockLevel], selectedTip, 2*ppm.pruningProofM) + if err != nil { + return nil, err + } + + root := blockAtDepth2M + if blockLevel != maxLevel { + blockAtDepthMAtNextLevel, err := ppm.blockAtDepth(stagingArea, ppm.ghostdagDataStores[blockLevel+1], selectedTipByLevel[blockLevel+1], ppm.pruningProofM) + if err != nil { + return nil, err + } + + isBlockAtDepthMAtNextLevelAncestorOfBlockAtDepth2M, err := ppm.dagTopologyManagers[blockLevel].IsAncestorOf(stagingArea, blockAtDepthMAtNextLevel, blockAtDepth2M) + if err != nil { + return nil, err + } + + if isBlockAtDepthMAtNextLevelAncestorOfBlockAtDepth2M { + root = blockAtDepthMAtNextLevel + } else { + isBlockAtDepth2MAncestorOfBlockAtDepthMAtNextLevel, err := ppm.dagTopologyManagers[blockLevel].IsAncestorOf(stagingArea, blockAtDepth2M, blockAtDepthMAtNextLevel) + if err != nil { + return nil, err + } + + if !isBlockAtDepth2MAncestorOfBlockAtDepthMAtNextLevel { + // find common ancestor + current := blockAtDepthMAtNextLevel + for { + ghostdagData, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, current, false) + if err != nil { + return nil, err + } + + current = ghostdagData.SelectedParent() + if current.Equal(model.VirtualGenesisBlockHash) { + return nil, errors.Errorf("No common ancestor between %s and %s at level %d", blockAtDepth2M, blockAtDepthMAtNextLevel, blockLevel) + } + + 
isCurrentAncestorOfBlockAtDepth2M, err := ppm.dagTopologyManagers[blockLevel].IsAncestorOf(stagingArea, current, blockAtDepth2M) + if err != nil { + return nil, err + } + + if isCurrentAncestorOfBlockAtDepth2M { + root = current + break + } + } + } + } + } + + headers := make([]externalapi.BlockHeader, 0, 2*ppm.pruningProofM) + visited := hashset.New() + queue := ppm.dagTraversalManagers[blockLevel].NewUpHeap(stagingArea) + err = queue.Push(root) + if err != nil { + return nil, err + } + for queue.Len() > 0 { + current := queue.Pop() + + if visited.Contains(current) { + continue + } + + visited.Add(current) + isAncestorOfSelectedTip, err := ppm.dagTopologyManagers[blockLevel].IsAncestorOf(stagingArea, current, selectedTip) + if err != nil { + return nil, err + } + + if !isAncestorOfSelectedTip { + continue + } + + currentHeader, err := ppm.blockHeaderStore.BlockHeader(ppm.databaseContext, stagingArea, current) + if err != nil { + return nil, err + } + + headers = append(headers, currentHeader) + children, err := ppm.dagTopologyManagers[blockLevel].Children(stagingArea, current) + if err != nil { + return nil, err + } + + for _, child := range children { + if child.Equal(model.VirtualBlockHash) { + continue + } + + err = queue.Push(child) + if err != nil { + return nil, err + } + } + } + + headersByLevel[blockLevel] = headers + } + + proof := &externalapi.PruningPointProof{Headers: make([][]externalapi.BlockHeader, len(headersByLevel))} + for i := 0; i < len(headersByLevel); i++ { + proof.Headers[i] = headersByLevel[i] + } + + return proof, nil +} + +func (ppm *pruningProofManager) blockAtDepth(stagingArea *model.StagingArea, ghostdagDataStore model.GHOSTDAGDataStore, highHash *externalapi.DomainHash, depth uint64) (*externalapi.DomainHash, error) { + currentBlockHash := highHash + highBlockGHOSTDAGData, err := ghostdagDataStore.Get(ppm.databaseContext, stagingArea, highHash, false) + if err != nil { + return nil, err + } + + requiredBlueScore := uint64(0) + if 
highBlockGHOSTDAGData.BlueScore() > depth { + requiredBlueScore = highBlockGHOSTDAGData.BlueScore() - depth + } + + currentBlockGHOSTDAGData := highBlockGHOSTDAGData + // If we used `BlockIterator` we'd need to do more calls to `ghostdagDataStore` so we can get the blueScore + for currentBlockGHOSTDAGData.BlueScore() >= requiredBlueScore { + if currentBlockGHOSTDAGData.SelectedParent().Equal(model.VirtualGenesisBlockHash) { + break + } + + currentBlockHash = currentBlockGHOSTDAGData.SelectedParent() + currentBlockGHOSTDAGData, err = ghostdagDataStore.Get(ppm.databaseContext, stagingArea, currentBlockHash, false) + if err != nil { + return nil, err + } + } + return currentBlockHash, nil +} + +func (ppm *pruningProofManager) ValidatePruningPointProof(pruningPointProof *externalapi.PruningPointProof) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ValidatePruningPointProof") + defer onEnd() + + stagingArea := model.NewStagingArea() + + if len(pruningPointProof.Headers) == 0 { + return errors.Wrap(ruleerrors.ErrPruningProofEmpty, "pruning proof is empty") + } + + level0Headers := pruningPointProof.Headers[0] + pruningPointHeader := level0Headers[len(level0Headers)-1] + pruningPoint := consensushashing.HeaderHash(pruningPointHeader) + pruningPointBlockLevel := pruningPointHeader.BlockLevel(ppm.maxBlockLevel) + maxLevel := len(ppm.parentsManager.Parents(pruningPointHeader)) - 1 + if maxLevel >= len(pruningPointProof.Headers) { + return errors.Wrapf(ruleerrors.ErrPruningProofEmpty, "proof has only %d levels while pruning point "+ + "has parents from %d levels", len(pruningPointProof.Headers), maxLevel+1) + } + + blockHeaderStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores, err := ppm.dagStores(maxLevel) + if err != nil { + return err + } + + reachabilityManagers, dagTopologyManagers, ghostdagManagers := ppm.dagProcesses(maxLevel, blockHeaderStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores) + + for blockLevel := 0; 
blockLevel <= maxLevel; blockLevel++ { + err := reachabilityManagers[blockLevel].Init(stagingArea) + if err != nil { + return err + } + + err = dagTopologyManagers[blockLevel].SetParents(stagingArea, model.VirtualGenesisBlockHash, nil) + if err != nil { + return err + } + + ghostdagDataStores[blockLevel].Stage(stagingArea, model.VirtualGenesisBlockHash, externalapi.NewBlockGHOSTDAGData( + 0, + big.NewInt(0), + nil, + nil, + nil, + nil, + ), false) + } + + selectedTipByLevel := make([]*externalapi.DomainHash, maxLevel+1) + for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- { + log.Infof("Validating level %d from the pruning point proof", blockLevel) + headers := make([]externalapi.BlockHeader, len(pruningPointProof.Headers[blockLevel])) + copy(headers, pruningPointProof.Headers[blockLevel]) + + var selectedTip *externalapi.DomainHash + for i, header := range headers { + blockHash := consensushashing.HeaderHash(header) + if header.BlockLevel(ppm.maxBlockLevel) < blockLevel { + return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+ + "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel) + } + + blockHeaderStore.Stage(stagingArea, blockHash, header) + + var parents []*externalapi.DomainHash + for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) { + _, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false) + if database.IsNotFoundError(err) { + continue + } + if err != nil { + return err + } + + parents = append(parents, parent) + } + + if len(parents) == 0 { + if i != 0 { + return errors.Wrapf(ruleerrors.ErrPruningProofHeaderWithNoKnownParents, "the proof header "+ + "%s is missing known parents", blockHash) + } + parents = append(parents, model.VirtualGenesisBlockHash) + } + + err := dagTopologyManagers[blockLevel].SetParents(stagingArea, blockHash, parents) + if err != nil { + return err + } + + err = 
ghostdagManagers[blockLevel].GHOSTDAG(stagingArea, blockHash) + if err != nil { + return err + } + + if selectedTip == nil { + selectedTip = blockHash + } else { + selectedTip, err = ghostdagManagers[blockLevel].ChooseSelectedParent(stagingArea, selectedTip, blockHash) + if err != nil { + return err + } + } + + err = reachabilityManagers[blockLevel].AddBlock(stagingArea, blockHash) + if err != nil { + return err + } + + if selectedTip.Equal(blockHash) { + err := reachabilityManagers[blockLevel].UpdateReindexRoot(stagingArea, selectedTip) + if err != nil { + return err + } + } + } + + if blockLevel < maxLevel { + blockAtDepthMAtNextLevel, err := ppm.blockAtDepth(stagingArea, ghostdagDataStores[blockLevel+1], selectedTipByLevel[blockLevel+1], ppm.pruningProofM) + if err != nil { + return err + } + + hasBlockAtDepthMAtNextLevel, err := blockRelationStores[blockLevel].Has(ppm.databaseContext, stagingArea, blockAtDepthMAtNextLevel) + if err != nil { + return err + } + + if !hasBlockAtDepthMAtNextLevel { + return errors.Wrapf(ruleerrors.ErrPruningProofMissingBlockAtDepthMFromNextLevel, "proof level %d "+ + "is missing the block at depth m in level %d", blockLevel, blockLevel+1) + } + } + + if !selectedTip.Equal(pruningPoint) && !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) { + return errors.Wrapf(ruleerrors.ErrPruningProofMissesBlocksBelowPruningPoint, "the selected tip %s at "+ + "level %d is not a parent of the pruning point", selectedTip, blockLevel) + } + selectedTipByLevel[blockLevel] = selectedTip + } + + currentDAGPruningPoint, err := ppm.pruningStore.PruningPoint(ppm.databaseContext, model.NewStagingArea()) + if err != nil { + return err + } + + currentDAGPruningPointHeader, err := ppm.blockHeaderStore.BlockHeader(ppm.databaseContext, model.NewStagingArea(), currentDAGPruningPoint) + if err != nil { + return err + } + + for blockLevel, selectedTip := range selectedTipByLevel { + if blockLevel <= pruningPointBlockLevel { 
+ if !selectedTip.Equal(consensushashing.HeaderHash(pruningPointHeader)) { + return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipIsNotThePruningPoint, "the pruning "+ + "proof selected tip %s at level %d is not the pruning point", selectedTip, blockLevel) + } + } else if !ppm.parentsManager.ParentsAtLevel(pruningPointHeader, blockLevel).Contains(selectedTip) { + return errors.Wrapf(ruleerrors.ErrPruningProofSelectedTipNotParentOfPruningPoint, "the pruning "+ + "proof selected tip %s at level %d is not a parent of the of the pruning point on the same "+ + "level", selectedTip, blockLevel) + } + + selectedTipGHOSTDAGData, err := ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, selectedTip, false) + if err != nil { + return err + } + + if selectedTipGHOSTDAGData.BlueScore() < 2*ppm.pruningProofM { + continue + } + + current := selectedTip + currentGHOSTDAGData := selectedTipGHOSTDAGData + var commonAncestor *externalapi.DomainHash + var commonAncestorGHOSTDAGData *externalapi.BlockGHOSTDAGData + var currentDAGCommonAncestorGHOSTDAGData *externalapi.BlockGHOSTDAGData + for { + currentDAGHOSTDAGData, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, model.NewStagingArea(), current, false) + if err == nil { + commonAncestor = current + commonAncestorGHOSTDAGData = currentGHOSTDAGData + currentDAGCommonAncestorGHOSTDAGData = currentDAGHOSTDAGData + break + } + + if !database.IsNotFoundError(err) { + return err + } + + current = currentGHOSTDAGData.SelectedParent() + if current.Equal(model.VirtualGenesisBlockHash) { + break + } + + currentGHOSTDAGData, err = ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, current, false) + if err != nil { + return err + } + } + + if commonAncestor != nil { + selectedTipBlueWorkDiff := big.NewInt(0).Sub(selectedTipGHOSTDAGData.BlueWork(), commonAncestorGHOSTDAGData.BlueWork()) + currentDAGPruningPointParents := ppm.parentsManager.ParentsAtLevel(currentDAGPruningPointHeader, 
blockLevel) + + foundBetterParent := false + for _, parent := range currentDAGPruningPointParents { + parentGHOSTDAGData, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, model.NewStagingArea(), parent, false) + if err != nil { + return err + } + + parentBlueWorkDiff := big.NewInt(0).Sub(parentGHOSTDAGData.BlueWork(), currentDAGCommonAncestorGHOSTDAGData.BlueWork()) + if parentBlueWorkDiff.Cmp(selectedTipBlueWorkDiff) >= 0 { + foundBetterParent = true + break + } + } + + if foundBetterParent { + return errors.Wrapf(ruleerrors.ErrPruningProofInsufficientBlueWork, "the proof doesn't "+ + "have sufficient blue work in order to replace the current DAG") + } + return nil + } + } + + for blockLevel := maxLevel; blockLevel >= 0; blockLevel-- { + currentDAGPruningPointParents, err := ppm.dagTopologyManagers[blockLevel].Parents(model.NewStagingArea(), currentDAGPruningPoint) + // If the current pruning point doesn't have a parent at this level, we consider the proof state to be better. 
+ if database.IsNotFoundError(err) { + return nil + } + if err != nil { + return err + } + + for _, parent := range currentDAGPruningPointParents { + parentGHOSTDAGData, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, model.NewStagingArea(), parent, false) + if err != nil { + return err + } + + if parentGHOSTDAGData.BlueScore() < 2*ppm.pruningProofM { + return nil + } + } + } + + return errors.Wrapf(ruleerrors.ErrPruningProofInsufficientBlueWork, "the pruning proof doesn't have any "+ + "shared blocks with the known DAGs, but doesn't have enough headers from levels higher than the existing block levels.") +} + +func (ppm *pruningProofManager) dagStores(maxLevel int) (model.BlockHeaderStore, []model.BlockRelationStore, []model.ReachabilityDataStore, []model.GHOSTDAGDataStore, error) { + blockRelationStores := make([]model.BlockRelationStore, maxLevel+1) + reachabilityDataStores := make([]model.ReachabilityDataStore, maxLevel+1) + ghostdagDataStores := make([]model.GHOSTDAGDataStore, maxLevel+1) + + prefix := consensusDB.MakeBucket([]byte("pruningProofManager")) + blockHeaderStore, err := blockheaderstore.New(ppm.databaseContext, prefix, 0, false) + if err != nil { + return nil, nil, nil, nil, err + } + + for i := 0; i <= maxLevel; i++ { + blockRelationStores[i] = blockrelationstore.New(prefix, 0, false) + reachabilityDataStores[i] = reachabilitydatastore.New(prefix, 0, false) + ghostdagDataStores[i] = ghostdagdatastore.New(prefix, 0, false) + } + + return blockHeaderStore, blockRelationStores, reachabilityDataStores, ghostdagDataStores, nil +} + +func (ppm *pruningProofManager) dagProcesses( + maxLevel int, + blockHeaderStore model.BlockHeaderStore, + blockRelationStores []model.BlockRelationStore, + reachabilityDataStores []model.ReachabilityDataStore, + ghostdagDataStores []model.GHOSTDAGDataStore) ( + []model.ReachabilityManager, + []model.DAGTopologyManager, + []model.GHOSTDAGManager, +) { + + reachabilityManagers := 
make([]model.ReachabilityManager, ppm.maxBlockLevel+1) + dagTopologyManagers := make([]model.DAGTopologyManager, ppm.maxBlockLevel+1) + ghostdagManagers := make([]model.GHOSTDAGManager, ppm.maxBlockLevel+1) + + for i := 0; i <= maxLevel; i++ { + reachabilityManagers[i] = reachabilitymanager.New( + ppm.databaseContext, + ghostdagDataStores[i], + reachabilityDataStores[i]) + + dagTopologyManagers[i] = dagtopologymanager.New( + ppm.databaseContext, + reachabilityManagers[i], + blockRelationStores[i], + ghostdagDataStores[i]) + + ghostdagManagers[i] = ghostdagmanager.New( + ppm.databaseContext, + dagTopologyManagers[i], + ghostdagDataStores[i], + blockHeaderStore, + ppm.k, + ppm.genesisHash) + } + + return reachabilityManagers, dagTopologyManagers, ghostdagManagers +} + +func (ppm *pruningProofManager) ghostdagDataWithoutPrunedBlocks(stagingArea *model.StagingArea, targetReachabilityDataStore model.ReachabilityDataStore, + data *externalapi.BlockGHOSTDAGData) (*externalapi.BlockGHOSTDAGData, bool, error) { + + changed := false + mergeSetBlues := make([]*externalapi.DomainHash, 0, len(data.MergeSetBlues())) + for _, blockHash := range data.MergeSetBlues() { + hasReachabilityData, err := targetReachabilityDataStore.HasReachabilityData(ppm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, false, err + } + if !hasReachabilityData { + changed = true + if data.SelectedParent().Equal(blockHash) { + mergeSetBlues = append(mergeSetBlues, model.VirtualGenesisBlockHash) + } + continue + } + + mergeSetBlues = append(mergeSetBlues, blockHash) + } + + mergeSetReds := make([]*externalapi.DomainHash, 0, len(data.MergeSetReds())) + for _, blockHash := range data.MergeSetReds() { + hasReachabilityData, err := targetReachabilityDataStore.HasReachabilityData(ppm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, false, err + } + if !hasReachabilityData { + changed = true + continue + } + + mergeSetReds = append(mergeSetReds, blockHash) + } + + 
selectedParent := data.SelectedParent() + hasReachabilityData, err := targetReachabilityDataStore.HasReachabilityData(ppm.databaseContext, stagingArea, data.SelectedParent()) + if err != nil { + return nil, false, err + } + + if !hasReachabilityData { + changed = true + selectedParent = model.VirtualGenesisBlockHash + } + + return externalapi.NewBlockGHOSTDAGData( + data.BlueScore(), + data.BlueWork(), + selectedParent, + mergeSetBlues, + mergeSetReds, + data.BluesAnticoneSizes(), + ), changed, nil +} + +func (ppm *pruningProofManager) populateProofReachabilityAndHeaders(pruningPointProof *externalapi.PruningPointProof, + targetReachabilityDataStore model.ReachabilityDataStore) error { + // We build a DAG of all multi-level relations between blocks in the proof. We make a upHeap of all blocks, so we can iterate + // over them in a topological way, and then build a DAG where we use all multi-level parents of a block to create edges, except + // parents that are already in the past of another parent (This can happen between two levels). We run GHOSTDAG on each block of + // that DAG, because GHOSTDAG is a requirement to calculate reachability. We then dismiss the GHOSTDAG data because it's not related + // to the GHOSTDAG data of the real DAG, and was used only for reachability. + + // We need two staging areas: stagingArea which is used to commit the reachability data, and tmpStagingArea for the GHOSTDAG data + // of allProofBlocksUpHeap. The reason we need two areas is that we use the real GHOSTDAG data in order to order the heap in a topological + // way, and fake GHOSTDAG data for calculating reachability. 
+ stagingArea := model.NewStagingArea() + tmpStagingArea := model.NewStagingArea() + + bucket := consensusDB.MakeBucket([]byte("TMP")) + ghostdagDataStoreForTargetReachabilityManager := ghostdagdatastore.New(bucket, 0, false) + ghostdagDataStoreForTargetReachabilityManager.Stage(stagingArea, model.VirtualGenesisBlockHash, externalapi.NewBlockGHOSTDAGData( + 0, + big.NewInt(0), + nil, + nil, + nil, + nil, + ), false) + targetReachabilityManager := reachabilitymanager.New(ppm.databaseContext, ghostdagDataStoreForTargetReachabilityManager, targetReachabilityDataStore) + blockRelationStoreForTargetReachabilityManager := blockrelationstore.New(bucket, 0, false) + dagTopologyManagerForTargetReachabilityManager := dagtopologymanager.New(ppm.databaseContext, targetReachabilityManager, blockRelationStoreForTargetReachabilityManager, nil) + ghostdagManagerForTargetReachabilityManager := ghostdagmanager.New(ppm.databaseContext, dagTopologyManagerForTargetReachabilityManager, ghostdagDataStoreForTargetReachabilityManager, ppm.blockHeaderStore, 0, nil) + err := dagTopologyManagerForTargetReachabilityManager.SetParents(stagingArea, model.VirtualGenesisBlockHash, nil) + if err != nil { + return err + } + + dagTopologyManager := dagtopologymanager.New(ppm.databaseContext, targetReachabilityManager, nil, nil) + ghostdagDataStore := ghostdagdatastore.New(bucket, 0, false) + tmpGHOSTDAGManager := ghostdagmanager.New(ppm.databaseContext, nil, ghostdagDataStore, nil, 0, nil) + dagTraversalManager := dagtraversalmanager.New(ppm.databaseContext, nil, ghostdagDataStore, nil, tmpGHOSTDAGManager, nil, nil, nil, 0) + allProofBlocksUpHeap := dagTraversalManager.NewUpHeap(tmpStagingArea) + dag := make(map[externalapi.DomainHash]struct { + parents hashset.HashSet + header externalapi.BlockHeader + }) + for _, headers := range pruningPointProof.Headers { + for _, header := range headers { + blockHash := consensushashing.HeaderHash(header) + if _, ok := dag[*blockHash]; ok { + continue + } + + 
dag[*blockHash] = struct { + parents hashset.HashSet + header externalapi.BlockHeader + }{parents: hashset.New(), header: header} + + for level := 0; level <= ppm.maxBlockLevel; level++ { + for _, parent := range ppm.parentsManager.ParentsAtLevel(header, level) { + parent := parent + dag[*blockHash].parents.Add(parent) + } + } + + // We stage temporary GHOSTDAG data that is needed in order to sort allProofBlocksUpHeap. + ghostdagDataStore.Stage(tmpStagingArea, blockHash, externalapi.NewBlockGHOSTDAGData(header.BlueScore(), header.BlueWork(), nil, nil, nil, nil), false) + err := allProofBlocksUpHeap.Push(blockHash) + if err != nil { + return err + } + } + } + + var selectedTip *externalapi.DomainHash + for allProofBlocksUpHeap.Len() > 0 { + blockHash := allProofBlocksUpHeap.Pop() + block := dag[*blockHash] + parentsHeap := dagTraversalManager.NewDownHeap(tmpStagingArea) + for parent := range block.parents { + parent := parent + if _, ok := dag[parent]; !ok { + continue + } + + err := parentsHeap.Push(&parent) + if err != nil { + return err + } + } + + fakeParents := []*externalapi.DomainHash{} + for parentsHeap.Len() > 0 { + parent := parentsHeap.Pop() + isAncestorOfAny, err := dagTopologyManager.IsAncestorOfAny(stagingArea, parent, fakeParents) + if err != nil { + return err + } + + if isAncestorOfAny { + continue + } + + fakeParents = append(fakeParents, parent) + } + + if len(fakeParents) == 0 { + fakeParents = append(fakeParents, model.VirtualGenesisBlockHash) + } + + err := dagTopologyManagerForTargetReachabilityManager.SetParents(stagingArea, blockHash, fakeParents) + if err != nil { + return err + } + + err = ghostdagManagerForTargetReachabilityManager.GHOSTDAG(stagingArea, blockHash) + if err != nil { + return err + } + + err = targetReachabilityManager.AddBlock(stagingArea, blockHash) + if err != nil { + return err + } + + if selectedTip == nil { + selectedTip = blockHash + } else { + selectedTip, err = 
ghostdagManagerForTargetReachabilityManager.ChooseSelectedParent(stagingArea, selectedTip, blockHash) + if err != nil { + return err + } + } + + if selectedTip.Equal(blockHash) { + err := targetReachabilityManager.UpdateReindexRoot(stagingArea, selectedTip) + if err != nil { + return err + } + } + } + + ghostdagDataStoreForTargetReachabilityManager.UnstageAll(stagingArea) + blockRelationStoreForTargetReachabilityManager.UnstageAll(stagingArea) + err = staging.CommitAllChanges(ppm.databaseContext, stagingArea) + if err != nil { + return err + } + return nil +} + +// ApplyPruningPointProof applies the given pruning proof to the current consensus. Specifically, +// it's meant to be used against the StagingConsensus during headers-proof IBD. Note that for +// performance reasons this operation is NOT atomic. If the process fails for whatever reason +// (e.g. the process was killed) then the database for this consensus MUST be discarded. +func (ppm *pruningProofManager) ApplyPruningPointProof(pruningPointProof *externalapi.PruningPointProof) error { + onEnd := logger.LogAndMeasureExecutionTime(log, "ApplyPruningPointProof") + defer onEnd() + + stagingArea := model.NewStagingArea() + for _, headers := range pruningPointProof.Headers { + for _, header := range headers { + blockHash := consensushashing.HeaderHash(header) + ppm.blockHeaderStore.Stage(stagingArea, blockHash, header) + } + } + err := staging.CommitAllChanges(ppm.databaseContext, stagingArea) + if err != nil { + return err + } + + err = ppm.populateProofReachabilityAndHeaders(pruningPointProof, ppm.reachabilityDataStore) + if err != nil { + return err + } + + for blockLevel, headers := range pruningPointProof.Headers { + log.Infof("Applying level %d from the pruning point proof", blockLevel) + for i, header := range headers { + if i%1000 == 0 { + log.Infof("Applying level %d from the pruning point proof - applied %d headers out of %d", blockLevel, i, len(headers)) + } + stagingArea := model.NewStagingArea() + 
+ blockHash := consensushashing.HeaderHash(header) + if header.BlockLevel(ppm.maxBlockLevel) < blockLevel { + return errors.Wrapf(ruleerrors.ErrPruningProofWrongBlockLevel, "block %s level is %d when it's "+ + "expected to be at least %d", blockHash, header.BlockLevel(ppm.maxBlockLevel), blockLevel) + } + + ppm.blockHeaderStore.Stage(stagingArea, blockHash, header) + + var parents []*externalapi.DomainHash + for _, parent := range ppm.parentsManager.ParentsAtLevel(header, blockLevel) { + _, err := ppm.ghostdagDataStores[blockLevel].Get(ppm.databaseContext, stagingArea, parent, false) + if database.IsNotFoundError(err) { + continue + } + if err != nil { + return err + } + + parents = append(parents, parent) + } + + if len(parents) == 0 { + if i != 0 { + return errors.Wrapf(ruleerrors.ErrPruningProofHeaderWithNoKnownParents, "the proof header "+ + "%s is missing known parents", blockHash) + } + parents = append(parents, model.VirtualGenesisBlockHash) + } + + err := ppm.dagTopologyManagers[blockLevel].SetParents(stagingArea, blockHash, parents) + if err != nil { + return err + } + + err = ppm.ghostdagManagers[blockLevel].GHOSTDAG(stagingArea, blockHash) + if err != nil { + return err + } + + if blockLevel == 0 { + // Override the ghostdag data with the real blue score and blue work + ghostdagData, err := ppm.ghostdagDataStores[0].Get(ppm.databaseContext, stagingArea, blockHash, false) + if err != nil { + return err + } + + ppm.ghostdagDataStores[0].Stage(stagingArea, blockHash, externalapi.NewBlockGHOSTDAGData( + header.BlueScore(), + header.BlueWork(), + ghostdagData.SelectedParent(), + ghostdagData.MergeSetBlues(), + ghostdagData.MergeSetReds(), + ghostdagData.BluesAnticoneSizes(), + ), false) + + ppm.finalityStore.StageFinalityPoint(stagingArea, blockHash, model.VirtualGenesisBlockHash) + ppm.blockStatusStore.Stage(stagingArea, blockHash, externalapi.StatusHeaderOnly) + } + + err = staging.CommitAllChanges(ppm.databaseContext, stagingArea) + if err != nil { + 
return err + } + } + } + + pruningPointHeader := pruningPointProof.Headers[0][len(pruningPointProof.Headers[0])-1] + pruningPoint := consensushashing.HeaderHash(pruningPointHeader) + + stagingArea = model.NewStagingArea() + ppm.consensusStateStore.StageTips(stagingArea, []*externalapi.DomainHash{pruningPoint}) + return staging.CommitAllChanges(ppm.databaseContext, stagingArea) +} diff --git a/domain/consensus/processes/reachabilitymanager/fetch.go b/domain/consensus/processes/reachabilitymanager/fetch.go new file mode 100644 index 0000000..055bfbf --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/fetch.go @@ -0,0 +1,66 @@ +package reachabilitymanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/reachabilitydata" +) + +func (rt *reachabilityManager) reachabilityDataForInsertion(stagingArea *model.StagingArea, + blockHash *externalapi.DomainHash) (model.MutableReachabilityData, error) { + data, err := rt.reachabilityDataStore.ReachabilityData(rt.databaseContext, stagingArea, blockHash) + if err == nil { + return data.CloneMutable(), nil + } + + if errors.Is(err, database.ErrNotFound) { + return reachabilitydata.EmptyReachabilityData(), nil + } + return nil, err +} + +func (rt *reachabilityManager) futureCoveringSet(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.FutureCoveringTreeNodeSet, error) { + data, err := rt.reachabilityDataStore.ReachabilityData(rt.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + + return data.FutureCoveringSet(), nil +} + +func (rt *reachabilityManager) interval(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (*model.ReachabilityInterval, error) { + data, err := 
rt.reachabilityDataStore.ReachabilityData(rt.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + + return data.Interval(), nil +} + +func (rt *reachabilityManager) children(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ( + []*externalapi.DomainHash, error) { + + data, err := rt.reachabilityDataStore.ReachabilityData(rt.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + + return data.Children(), nil +} + +func (rt *reachabilityManager) parent(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) ( + *externalapi.DomainHash, error) { + + data, err := rt.reachabilityDataStore.ReachabilityData(rt.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + + return data.Parent(), nil +} + +func (rt *reachabilityManager) reindexRoot(stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + return rt.reachabilityDataStore.ReachabilityReindexRoot(rt.databaseContext, stagingArea) +} diff --git a/domain/consensus/processes/reachabilitymanager/future_covering_set.go b/domain/consensus/processes/reachabilitymanager/future_covering_set.go new file mode 100644 index 0000000..04e8e5b --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/future_covering_set.go @@ -0,0 +1,105 @@ +package reachabilitymanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// insertToFutureCoveringSet inserts the given block into this node's FutureCoveringSet +// while keeping it ordered by interval. +// If a block B ∈ node.FutureCoveringSet exists such that its interval +// contains block's interval, block need not be added. If block's +// interval contains B's interval, it replaces it. +// +// Notes: +// - Intervals never intersect unless one contains the other +// (this follows from the tree structure and the indexing rule). 
+// - Since node.FutureCoveringSet is kept ordered, a binary search can be +// used for insertion/queries. +// - Although reindexing may change a block's interval, the +// is-superset relation will by definition +// be always preserved. +func (rt *reachabilityManager) insertToFutureCoveringSet(stagingArea *model.StagingArea, node, futureNode *externalapi.DomainHash) error { + reachabilityData, err := rt.reachabilityDataForInsertion(stagingArea, node) + if err != nil { + return err + } + futureCoveringSet := reachabilityData.FutureCoveringSet() + + ancestorIndex, ok, err := rt.findAncestorIndexOfNode(stagingArea, orderedTreeNodeSet(futureCoveringSet), futureNode) + if err != nil { + return err + } + + var newSet []*externalapi.DomainHash + if !ok { + newSet = append([]*externalapi.DomainHash{futureNode}, futureCoveringSet...) + } else { + candidate := futureCoveringSet[ancestorIndex] + candidateIsAncestorOfFutureNode, err := rt.IsReachabilityTreeAncestorOf(stagingArea, candidate, futureNode) + if err != nil { + return err + } + + if candidateIsAncestorOfFutureNode { + // candidate is an ancestor of futureNode, no need to insert + return nil + } + + futureNodeIsAncestorOfCandidate, err := rt.IsReachabilityTreeAncestorOf(stagingArea, futureNode, candidate) + if err != nil { + return err + } + + if futureNodeIsAncestorOfCandidate { + // futureNode is an ancestor of candidate, and can thus replace it + newSet := make([]*externalapi.DomainHash, len(futureCoveringSet)) + copy(newSet, futureCoveringSet) + newSet[ancestorIndex] = futureNode + + return rt.stageFutureCoveringSet(stagingArea, node, newSet) + } + + // Insert futureNode in the correct index to maintain futureCoveringTreeNodeSet as + // a sorted-by-interval list. + // Note that ancestorIndex might be equal to len(futureCoveringTreeNodeSet) + left := futureCoveringSet[:ancestorIndex+1] + right := append([]*externalapi.DomainHash{futureNode}, futureCoveringSet[ancestorIndex+1:]...) + newSet = append(left, right...) 
+ } + reachabilityData.SetFutureCoveringSet(newSet) + rt.stageData(stagingArea, node, reachabilityData) + + return nil +} + +// futureCoveringSetHasAncestorOf resolves whether the given node `other` is in the subtree of +// any node in this.FutureCoveringSet. +// See insertNode method for the complementary insertion behavior. +// +// Like the insert method, this method also relies on the fact that +// this.FutureCoveringSet is kept ordered by interval to efficiently perform a +// binary search over this.FutureCoveringSet and answer the query in +// O(log(|futureCoveringTreeNodeSet|)). +func (rt *reachabilityManager) futureCoveringSetHasAncestorOf(stagingArea *model.StagingArea, + this, other *externalapi.DomainHash) (bool, error) { + + futureCoveringSet, err := rt.futureCoveringSet(stagingArea, this) + if err != nil { + return false, err + } + + ancestorIndex, ok, err := rt.findAncestorIndexOfNode(stagingArea, orderedTreeNodeSet(futureCoveringSet), other) + if err != nil { + return false, err + } + + if !ok { + // No candidate to contain other + return false, nil + } + + candidate := futureCoveringSet[ancestorIndex] + + return rt.IsReachabilityTreeAncestorOf(stagingArea, candidate, other) +} diff --git a/domain/consensus/processes/reachabilitymanager/interval.go b/domain/consensus/processes/reachabilitymanager/interval.go new file mode 100644 index 0000000..6045585 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/interval.go @@ -0,0 +1,197 @@ +package reachabilitymanager + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" +) + +func newReachabilityInterval(start uint64, end uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{Start: start, End: end} +} + +// intervalSize returns the size of this interval. Note that intervals are +// inclusive from both sides. 
+func intervalSize(ri *model.ReachabilityInterval) uint64 { + return ri.End - ri.Start + 1 +} + +// intervalIncrease returns a ReachabilityInterval with offset added to start and end +func intervalIncrease(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: ri.Start + offset, + End: ri.End + offset, + } +} + +// intervalDecrease returns a ReachabilityInterval with offset subtracted from start and end +func intervalDecrease(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: ri.Start - offset, + End: ri.End - offset, + } +} + +// intervalIncreaseStart returns a ReachabilityInterval with offset added to start +func intervalIncreaseStart(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: ri.Start + offset, + End: ri.End, + } +} + +// intervalDecreaseStart returns a ReachabilityInterval with offset reduced from start +func intervalDecreaseStart(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: ri.Start - offset, + End: ri.End, + } +} + +// intervalIncreaseEnd returns a ReachabilityInterval with offset added to end +func intervalIncreaseEnd(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: ri.Start, + End: ri.End + offset, + } +} + +// intervalDecreaseEnd returns a ReachabilityInterval with offset subtracted from end +func intervalDecreaseEnd(ri *model.ReachabilityInterval, offset uint64) *model.ReachabilityInterval { + return &model.ReachabilityInterval{ + Start: ri.Start, + End: ri.End - offset, + } +} + +// intervalSplitInHalf splits this interval by a fraction of 0.5. +// See splitFraction for further details. 
+func intervalSplitInHalf(ri *model.ReachabilityInterval) ( + left *model.ReachabilityInterval, right *model.ReachabilityInterval, err error) { + + return intervalSplitFraction(ri, 0.5) +} + +// intervalSplitFraction splits this interval to two parts such that their +// union is equal to the original interval and the first (left) part +// contains the given fraction of the original interval's size. +// Note: if the split results in fractional parts, this method rounds +// the first part up and the last part down. +func intervalSplitFraction(ri *model.ReachabilityInterval, fraction float64) ( + left *model.ReachabilityInterval, right *model.ReachabilityInterval, err error) { + + if fraction < 0 || fraction > 1 { + return nil, nil, errors.Errorf("fraction must be between 0 and 1") + } + if intervalSize(ri) == 0 { + return nil, nil, errors.Errorf("cannot split an empty interval") + } + + allocationSize := uint64(math.Ceil(float64(intervalSize(ri)) * fraction)) + left = newReachabilityInterval(ri.Start, ri.Start+allocationSize-1) + right = newReachabilityInterval(ri.Start+allocationSize, ri.End) + return left, right, nil +} + +// intervalSplitExact splits this interval to exactly |sizes| parts where +// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly +// equal to the interval's size. +func intervalSplitExact(ri *model.ReachabilityInterval, sizes []uint64) ([]*model.ReachabilityInterval, error) { + sizesSum := uint64(0) + for _, size := range sizes { + sizesSum += size + } + if sizesSum != intervalSize(ri) { + return nil, errors.Errorf("sum of sizes must be equal to the interval's size") + } + + intervals := make([]*model.ReachabilityInterval, len(sizes)) + start := ri.Start + for i, size := range sizes { + intervals[i] = newReachabilityInterval(start, start+size-1) + start += size + } + return intervals, nil +} + +// intervalSplitWithExponentialBias splits this interval to |sizes| parts +// by the allocation rule described below. 
This method expects sum(sizes) +// to be smaller or equal to the interval's size. Every part_i is +// allocated at least sizes[i] capacity. The remaining budget is +// split by an exponentially biased rule described below. +// +// This rule follows the GHOSTDAG protocol behavior where the child +// with the largest subtree is expected to dominate the competition +// for new blocks and thus grow the most. However, we may need to +// add slack for non-largest subtrees in order to make CPU reindexing +// attacks unworthy. +func intervalSplitWithExponentialBias(ri *model.ReachabilityInterval, sizes []uint64) ([]*model.ReachabilityInterval, error) { + intervalSize := intervalSize(ri) + sizesSum := uint64(0) + for _, size := range sizes { + sizesSum += size + } + if sizesSum > intervalSize { + return nil, errors.Errorf("sum of sizes must be less than or equal to the interval's size") + } + if sizesSum == intervalSize { + return intervalSplitExact(ri, sizes) + } + + // Add a fractional bias to every size in the given sizes + totalBias := intervalSize - sizesSum + remainingBias := totalBias + biasedSizes := make([]uint64, len(sizes)) + fractions := exponentialFractions(sizes) + for i, fraction := range fractions { + var bias uint64 + if i == len(fractions)-1 { + bias = remainingBias + } else { + bias = uint64(math.Round(float64(totalBias) * fraction)) + if bias > remainingBias { + bias = remainingBias + } + } + biasedSizes[i] = sizes[i] + bias + remainingBias -= bias + } + return intervalSplitExact(ri, biasedSizes) +} + +// exponentialFractions returns a fraction of each size in sizes +// as follows: +// +// fraction[i] = 2^size[i] / sum_j(2^size[j]) +// +// In the code below the above equation is divided by 2^max(size) +// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i]) +// we divide 1 by potentially a very large number, which will +// result in loss of float precision. This is not a problem - all +// numbers close to 0 bear effectively the same weight. 
+func exponentialFractions(sizes []uint64) []float64 { + maxSize := uint64(0) + for _, size := range sizes { + if size > maxSize { + maxSize = size + } + } + fractions := make([]float64, len(sizes)) + for i, size := range sizes { + fractions[i] = 1 / math.Pow(2, float64(maxSize-size)) + } + fractionsSum := float64(0) + for _, fraction := range fractions { + fractionsSum += fraction + } + for i, fraction := range fractions { + fractions[i] = fraction / fractionsSum + } + return fractions +} + +// intervalContains returns true if ri contains other. +func intervalContains(ri *model.ReachabilityInterval, other *model.ReachabilityInterval) bool { + return ri.Start <= other.Start && other.End <= ri.End +} diff --git a/domain/consensus/processes/reachabilitymanager/log.go b/domain/consensus/processes/reachabilitymanager/log.go new file mode 100644 index 0000000..ed48c82 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/log.go @@ -0,0 +1,7 @@ +package reachabilitymanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("REAC") diff --git a/domain/consensus/processes/reachabilitymanager/main_test.go b/domain/consensus/processes/reachabilitymanager/main_test.go new file mode 100644 index 0000000..0d0731a --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/main_test.go @@ -0,0 +1,16 @@ +package reachabilitymanager_test + +import ( + "os" + "testing" + + "github.com/spectre-project/spectred/infrastructure/logger" +) + +const logLevel = logger.LevelWarn + +func TestMain(m *testing.M) { + logger.SetLogLevels(logLevel) + logger.InitLogStdout(logLevel) + os.Exit(m.Run()) +} diff --git a/domain/consensus/processes/reachabilitymanager/ordered_tree_node_set.go b/domain/consensus/processes/reachabilitymanager/ordered_tree_node_set.go new file mode 100644 index 0000000..d0c7096 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/ordered_tree_node_set.go @@ -0,0 +1,62 @@ 
+package reachabilitymanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// orderedTreeNodeSet is an ordered set of model.DomainHash ordered by the respectful intervals. +// Note that this type does not validate order validity. It's the +// responsibility of the caller to construct instances of this +// type properly. +type orderedTreeNodeSet []*externalapi.DomainHash + +// findAncestorOfNode finds the reachability tree ancestor of `node` +// among the nodes in `tns`. +func (rt *reachabilityManager) findAncestorOfNode(stagingArea *model.StagingArea, tns orderedTreeNodeSet, node *externalapi.DomainHash) (*externalapi.DomainHash, bool) { + ancestorIndex, ok, err := rt.findAncestorIndexOfNode(stagingArea, tns, node) + if err != nil { + return nil, false + } + + if !ok { + return nil, false + } + + return tns[ancestorIndex], true +} + +// findAncestorIndexOfNode finds the index of the reachability tree +// ancestor of `node` among the nodes in `tns`. It does so by finding +// the index of the block with the maximum start that is below the +// given block. 
+func (rt *reachabilityManager) findAncestorIndexOfNode(stagingArea *model.StagingArea, tns orderedTreeNodeSet, + node *externalapi.DomainHash) (int, bool, error) { + + blockInterval, err := rt.interval(stagingArea, node) + if err != nil { + return 0, false, err + } + end := blockInterval.End + + low := 0 + high := len(tns) + for low < high { + middle := (low + high) / 2 + middleInterval, err := rt.interval(stagingArea, tns[middle]) + if err != nil { + return 0, false, err + } + + if end < middleInterval.Start { + high = middle + } else { + low = middle + 1 + } + } + + if low == 0 { + return 0, false, nil + } + return low - 1, true, nil +} diff --git a/domain/consensus/processes/reachabilitymanager/reachability.go b/domain/consensus/processes/reachabilitymanager/reachability.go new file mode 100644 index 0000000..4bf16f1 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reachability.go @@ -0,0 +1,31 @@ +package reachabilitymanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// IsDAGAncestorOf returns true if blockHashA is an ancestor of +// blockHashB in the DAG. 
+// +// Note: this method will return true if blockHashA == blockHashB +// The complexity of this method is O(log(|this.futureCoveringTreeNodeSet|)) +func (rt *reachabilityManager) IsDAGAncestorOf(stagingArea *model.StagingArea, blockHashA *externalapi.DomainHash, blockHashB *externalapi.DomainHash) (bool, error) { + // Check if this node is a reachability tree ancestor of the + // other node + isReachabilityTreeAncestor, err := rt.IsReachabilityTreeAncestorOf(stagingArea, blockHashA, blockHashB) + if err != nil { + return false, err + } + if isReachabilityTreeAncestor { + return true, nil + } + + // Otherwise, use previously registered future blocks to complete the + // reachability test + return rt.futureCoveringSetHasAncestorOf(stagingArea, blockHashA, blockHashB) +} + +func (rt *reachabilityManager) UpdateReindexRoot(stagingArea *model.StagingArea, selectedTip *externalapi.DomainHash) error { + return rt.updateReindexRoot(stagingArea, selectedTip) +} diff --git a/domain/consensus/processes/reachabilitymanager/reachabilityManager_test.go b/domain/consensus/processes/reachabilitymanager/reachabilityManager_test.go new file mode 100644 index 0000000..5442b6e --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reachabilityManager_test.go @@ -0,0 +1,112 @@ +package reachabilitymanager_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestReachabilityIsDAGAncestorOf(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestReachabilityIsDAGAncestorOf") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer 
teardown(false) + + // A <- B - - - - + // / \ \ + // genesis \ \ sharedBlock + // \ \ / + // C <- D - - - - / + genesisHash := consensusConfig.GenesisHash + blockHashA, _, err := tc.AddBlock([]*externalapi.DomainHash{genesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + blockHashB, _, err := tc.AddBlock([]*externalapi.DomainHash{blockHashA}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + blockHashC, _, err := tc.AddBlock([]*externalapi.DomainHash{genesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + blockHashD, _, err := tc.AddBlock([]*externalapi.DomainHash{blockHashA, blockHashC}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + sharedBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{blockHashB, blockHashD}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + tests := []struct { + firstBlockHash *externalapi.DomainHash + secondBlockHash *externalapi.DomainHash + expectedResult bool + }{ + { + firstBlockHash: blockHashA, + secondBlockHash: blockHashA, + expectedResult: true, + }, + { + firstBlockHash: genesisHash, + secondBlockHash: blockHashA, + expectedResult: true, + }, + { + firstBlockHash: genesisHash, + secondBlockHash: sharedBlockHash, + expectedResult: true, + }, + { + firstBlockHash: blockHashC, + secondBlockHash: blockHashD, + expectedResult: true, + }, + { + firstBlockHash: blockHashA, + secondBlockHash: blockHashD, + expectedResult: true, + }, + { + firstBlockHash: blockHashC, + secondBlockHash: blockHashB, + expectedResult: false, + }, + { + firstBlockHash: blockHashB, + secondBlockHash: blockHashD, + expectedResult: false, + }, + { + firstBlockHash: blockHashB, + secondBlockHash: blockHashA, + expectedResult: false, + }, + } + + stagingArea := model.NewStagingArea() + + for _, test := range tests { + isDAGAncestorOf, err := tc.ReachabilityManager().IsDAGAncestorOf(stagingArea, test.firstBlockHash, test.secondBlockHash) + if err != 
nil { + t.Fatalf("IsDAGAncestorOf: %v", err) + } + if isDAGAncestorOf != test.expectedResult { + t.Fatalf("IsDAGAncestorOf: should returns %v but got %v", test.expectedResult, isDAGAncestorOf) + } + } + }) +} diff --git a/domain/consensus/processes/reachabilitymanager/reachability_external_test.go b/domain/consensus/processes/reachabilitymanager/reachability_external_test.go new file mode 100644 index 0000000..5b045d8 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reachability_external_test.go @@ -0,0 +1,350 @@ +package reachabilitymanager_test + +import ( + "math" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot(t *testing.T) { + reachabilityReindexWindow := uint64(10) + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, + "TestAddChildThatPointsDirectlyToTheSelectedParentChainBelowReindexRoot") + if err != nil { + t.Fatalf("NewTestConsensus: %+v", err) + } + defer tearDown(false) + + tc.ReachabilityManager().SetReachabilityReindexWindow(reachabilityReindexWindow) + + stagingArea := model.NewStagingArea() + + reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext(), stagingArea) + if err != nil { + t.Fatalf("ReachabilityReindexRoot: %s", err) + } + + if !reindexRoot.Equal(model.VirtualGenesisBlockHash) { + t.Fatalf("reindex root is expected to initially be virtual genesis") + } + + // Add a block on top of the genesis block + chainRootBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + 
t.Fatalf("AddBlock: %+v", err) + } + + // Add chain of reachabilityReindexWindow blocks above chainRootBlock. + // This should move the reindex root + chainRootBlockTipHash := chainRootBlock + for i := uint64(0); i < reachabilityReindexWindow; i++ { + chainBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{chainRootBlockTipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + chainRootBlockTipHash = chainBlock + } + + newReindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext(), stagingArea) + if err != nil { + t.Fatalf("ReachabilityReindexRoot: %s", err) + } + + if newReindexRoot.Equal(reindexRoot) { + t.Fatalf("reindex root is expected to change") + } + + // Add enough blocks over genesis to test also the case where the first + // level (genesis in this case) runs out of slack + slackSize := tc.ReachabilityManager().ReachabilityReindexSlack() + blocksToAdd := uint64(math.Log2(float64(slackSize))) + 2 + for i := uint64(0); i < blocksToAdd; i++ { + _, _, err = tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + }) +} + +func TestUpdateReindexRoot(t *testing.T) { + reachabilityReindexWindow := uint64(10) + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestUpdateReindexRoot") + if err != nil { + t.Fatalf("NewTestConsensus: %s", err) + } + defer tearDown(false) + + tc.ReachabilityManager().SetReachabilityReindexWindow(reachabilityReindexWindow) + + stagingArea := model.NewStagingArea() + + intervalSize := func(hash *externalapi.DomainHash) uint64 { + data, err := tc.ReachabilityDataStore().ReachabilityData(tc.DatabaseContext(), stagingArea, hash) + if err != nil { + 
t.Fatalf("ReachabilityData: %s", err) + } + return data.Interval().End - data.Interval().Start + 1 + } + + // Add two blocks on top of the genesis block + chain1RootBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain2RootBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // Make two chains of size reachabilityReindexWindow + chain1Tip, chain2Tip := chain1RootBlock, chain2RootBlock + for i := uint64(0); i < reachabilityReindexWindow-1; i++ { + var err error + chain1Tip, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1Tip}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain2Tip, _, err = tc.AddBlock([]*externalapi.DomainHash{chain2Tip}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext(), stagingArea) + if err != nil { + t.Fatalf("ReachabilityReindexRoot: %s", err) + } + + // The reindex root shouldn't move until the last two blocks + // that should move it to genesis + if i == reachabilityReindexWindow-2 { + if !reindexRoot.Equal(consensusConfig.GenesisHash) { + t.Fatalf("reindex root is expected to be genesis") + } + continue + } + + if !reindexRoot.Equal(model.VirtualGenesisBlockHash) { + t.Fatalf("reindex root unexpectedly moved") + } + + } + + // Add another block over chain1. 
This will move the reindex root to chain1RootBlock + _, _, err = tc.AddBlock([]*externalapi.DomainHash{chain1Tip}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // Make sure that chain1RootBlock is now the reindex root + reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext(), stagingArea) + if err != nil { + t.Fatalf("ReachabilityReindexRoot: %s", err) + } + + if !reindexRoot.Equal(chain1RootBlock) { + t.Fatalf("chain1RootBlock is not the reindex root after reindex") + } + + // Make sure that tight intervals have been applied to chain2. Since + // we added reachabilityReindexWindow-1 blocks to chain2, the size + // of the interval at its root should be equal to reachabilityReindexWindow + if intervalSize(chain2RootBlock) != reachabilityReindexWindow { + t.Fatalf("got unexpected chain2RootBlock interval. Want: %d, got: %d", + intervalSize(chain2RootBlock), reachabilityReindexWindow) + } + + // Make sure that the rest of the interval has been allocated to + // chain1RootNode, minus slack from both sides + expectedChain1RootIntervalSize := intervalSize(consensusConfig.GenesisHash) - 1 - + intervalSize(chain2RootBlock) - 2*tc.ReachabilityManager().ReachabilityReindexSlack() + if intervalSize(chain1RootBlock) != expectedChain1RootIntervalSize { + t.Fatalf("got unexpected chain1RootBlock interval. 
Want: %d, got: %d", + intervalSize(chain1RootBlock), expectedChain1RootIntervalSize) + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + }) +} + +func TestReindexIntervalsEarlierThanReindexRoot(t *testing.T) { + reachabilityReindexWindow := uint64(10) + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestUpdateReindexRoot") + if err != nil { + t.Fatalf("NewTestConsensus: %+v", err) + } + defer tearDown(false) + + tc.ReachabilityManager().SetReachabilityReindexWindow(reachabilityReindexWindow) + + stagingArea := model.NewStagingArea() + + intervalSize := func(hash *externalapi.DomainHash) uint64 { + data, err := tc.ReachabilityDataStore().ReachabilityData(tc.DatabaseContext(), stagingArea, hash) + if err != nil { + t.Fatalf("ReachabilityData: %s", err) + } + return data.Interval().End - data.Interval().Start + 1 + } + + // Add three children to the genesis: leftBlock, centerBlock, rightBlock + leftBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + centerBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + rightBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // Add a chain of reachabilityReindexWindow blocks above centerBlock. 
+ // This will move the reindex root to centerBlock + centerTipHash := centerBlock + for i := uint64(0); i < reachabilityReindexWindow; i++ { + var err error + centerTipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{centerTipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + // Make sure that centerBlock is now the reindex root + reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext(), stagingArea) + if err != nil { + t.Fatalf("ReachabilityReindexRoot: %s", err) + } + + if !reindexRoot.Equal(centerBlock) { + t.Fatalf("centerBlock is not the reindex root after reindex") + } + + // Get the current interval for leftBlock. The reindex should have + // resulted in a tight interval there + if intervalSize(leftBlock) != 1 { + t.Fatalf("leftBlock interval not tight after reindex") + } + + // Get the current interval for rightBlock. The reindex should have + // resulted in a tight interval there + if intervalSize(rightBlock) != 1 { + t.Fatalf("rightBlock interval not tight after reindex") + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + + // Add a chain of reachabilityReindexWindow - 1 blocks above leftBlock. + // Each addition will trigger a low-than-reindex-root reindex. We + // expect the centerInterval to shrink by 1 each time, but its child + // to remain unaffected + + leftTipHash := leftBlock + for i := uint64(0); i < reachabilityReindexWindow-1; i++ { + var err error + leftTipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{leftTipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + } + + // Add a chain of reachabilityReindexWindow - 1 blocks above rightBlock. + // Each addition will trigger a low-than-reindex-root reindex. 
We + // expect the centerInterval to shrink by 1 each time, but its child + // to remain unaffected + rightTipHash := rightBlock + for i := uint64(0); i < reachabilityReindexWindow-1; i++ { + var err error + rightTipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{rightTipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + }) +} + +func TestTipsAfterReindexIntervalsEarlierThanReindexRoot(t *testing.T) { + reachabilityReindexWindow := uint64(10) + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, "TestUpdateReindexRoot") + if err != nil { + t.Fatalf("NewTestConsensus: %s", err) + } + defer tearDown(false) + + tc.ReachabilityManager().SetReachabilityReindexWindow(reachabilityReindexWindow) + + // Add a chain of reachabilityReindexWindow + 1 blocks above the genesis. + // This will set the reindex root to the child of genesis + chainTipHash := consensusConfig.GenesisHash + for i := uint64(0); i < reachabilityReindexWindow+1; i++ { + chainTipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{chainTipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + } + + // Add another block above the genesis block. This will trigger an + // earlier-than-reindex-root reindex + sideBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // Add a block whose parents are the chain tip and the side block. 
+ // We expect this not to fail + _, _, err = tc.AddBlock([]*externalapi.DomainHash{sideBlock}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + err = tc.ReachabilityManager().ValidateIntervals(consensusConfig.GenesisHash) + if err != nil { + t.Fatal(err) + } + }) +} diff --git a/domain/consensus/processes/reachabilitymanager/reachability_stretch_test.go b/domain/consensus/processes/reachabilitymanager/reachability_stretch_test.go new file mode 100644 index 0000000..b50912a --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reachability_stretch_test.go @@ -0,0 +1,258 @@ +package reachabilitymanager_test + +import ( + "compress/gzip" + "fmt" + "math" + "math/rand" + "os" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +// Test configuration +const numBlocksExponent = 12 + +func initializeTest(t *testing.T, testName string) (tc testapi.TestConsensus, teardown func(keepDataDir bool)) { + t.Parallel() + consensusConfig := consensus.Config{Params: dagconfig.SimnetParams} + consensusConfig.SkipProofOfWork = true + tc, teardown, err := consensus.NewFactory().NewTestConsensus(&consensusConfig, testName) + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + return tc, teardown +} + +func buildJsonDAG(t *testing.T, tc testapi.TestConsensus, attackJson bool) (tips []*externalapi.DomainHash) { + filePrefix := "noattack" + if attackJson { + filePrefix = "attack" + } + fileName := fmt.Sprintf( + "../../testdata/reachability/%s-dag-blocks--2^%d-delay-factor--1-k--18.json.gz", + filePrefix, numBlocksExponent) + + f, err := os.Open(fileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + 
gzipReader, err := gzip.NewReader(f) + if err != nil { + t.Fatal(err) + } + defer gzipReader.Close() + + tips, err = tc.MineJSON(gzipReader, testapi.MineJSONBlockTypeUTXOInvalidHeader) + if err != nil { + t.Fatal(err) + } + + err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash) + if err != nil { + t.Fatal(err) + } + + return tips +} + +func addArbitraryBlocks(t *testing.T, tc testapi.TestConsensus) { + // After loading json, add arbitrary blocks all over the DAG to stretch + // reindex logic, and validate intervals post each addition + + blocks, err := tc.ReachabilityManager().GetAllNodes(tc.DAGParams().GenesisHash) + if err != nil { + t.Fatal(err) + } + + numChainsToAdd := len(blocks) / 2 // Multiply the size of the DAG with arbitrary blocks + maxBlocksInChain := 20 + validationFreq := int(math.Max(1, float64(numChainsToAdd/100))) + + randSource := rand.New(rand.NewSource(33233)) + + for i := 0; i < numChainsToAdd; i++ { + randomIndex := randSource.Intn(len(blocks)) + randomParent := blocks[randomIndex] + newBlock, _, err := tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{randomParent}) + if err != nil { + t.Fatal(err) + } + blocks = append(blocks, newBlock) + // Add a random-length chain every few blocks + if randSource.Intn(8) == 0 { + numBlocksInChain := randSource.Intn(maxBlocksInChain) + chainBlock := newBlock + for j := 0; j < numBlocksInChain; j++ { + chainBlock, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainBlock}) + if err != nil { + t.Fatal(err) + } + blocks = append(blocks, chainBlock) + } + } + // Normally, validate intervals for new chain only + validationRoot := newBlock + // However every 'validation frequency' blocks validate intervals for entire DAG + if i%validationFreq == 0 || i == numChainsToAdd-1 { + validationRoot = tc.DAGParams().GenesisHash + } + err = tc.ReachabilityManager().ValidateIntervals(validationRoot) + if err != nil { + t.Fatal(err) + } + } +} + +func addAlternatingReorgBlocks(t 
*testing.T, tc testapi.TestConsensus, tips []*externalapi.DomainHash) { + stagingArea := model.NewStagingArea() + + // Create alternating reorgs to test the cases where + // reindex root is out of current header selected tip chain + + reindexRoot, err := tc.ReachabilityDataStore().ReachabilityReindexRoot(tc.DatabaseContext(), stagingArea) + if err != nil { + t.Fatal(err) + } + + // Try finding two tips; one which has reindex root on it's chain (chainTip), and one which + // does not (reorgTip). The latter is expected to exist in json attack files. + var chainTip, reorgTip *externalapi.DomainHash + for _, block := range tips { + isRootAncestorOfTip, err := tc.ReachabilityManager().IsReachabilityTreeAncestorOf(stagingArea, reindexRoot, block) + if err != nil { + t.Fatal(err) + } + if isRootAncestorOfTip { + chainTip = block + } else { + reorgTip = block + } + } + + if reorgTip == nil { + t.Fatal(errors.Errorf("DAG from jsom file is expected to contain a tip " + + "disagreeing with reindex root chain")) + } + + if chainTip == nil { + t.Fatal(errors.Errorf("reindex root is not on any header tip chain, this is unexpected behavior")) + } + + chainTipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, chainTip, false) + if err != nil { + t.Fatal(err) + } + + reorgTipGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, reorgTip, false) + if err != nil { + t.Fatal(err) + } + + // Get both chains close to each other (we care about blue score and not + // blue work because we have SkipProofOfWork=true) + if chainTipGHOSTDAGData.BlueScore() > reorgTipGHOSTDAGData.BlueScore() { + blueScoreDiff := int(chainTipGHOSTDAGData.BlueScore() - reorgTipGHOSTDAGData.BlueScore()) + for i := 0; i < blueScoreDiff+5; i++ { + reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip}) + if err != nil { + t.Fatal(err) + } + } + } else { + blueScoreDiff := int(reorgTipGHOSTDAGData.BlueScore() - 
chainTipGHOSTDAGData.BlueScore()) + for i := 0; i < blueScoreDiff+5; i++ { + chainTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainTip}) + if err != nil { + t.Fatal(err) + } + } + } + + err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash) + if err != nil { + t.Fatal(err) + } + + // Alternate between the chains 200 times + for i := 0; i < 200; i++ { + if i%2 == 0 { + for j := 0; j < 10; j++ { + chainTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{chainTip}) + if err != nil { + t.Fatal(err) + } + } + } else { + for j := 0; j < 10; j++ { + reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip}) + if err != nil { + t.Fatal(err) + } + } + } + + err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash) + if err != nil { + t.Fatal(err) + } + } + + // Since current logic switches reindex root chain with reindex slack threshold - at last make the switch happen + for i := 0; i < int(tc.ReachabilityManager().ReachabilityReindexSlack())+10; i++ { + reorgTip, _, err = tc.AddUTXOInvalidHeader([]*externalapi.DomainHash{reorgTip}) + if err != nil { + t.Fatal(err) + } + } + + err = tc.ReachabilityManager().ValidateIntervals(tc.DAGParams().GenesisHash) + if err != nil { + t.Fatal(err) + } +} + +func TestNoAttack(t *testing.T) { + tc, teardown := initializeTest(t, "TestNoAttack") + defer teardown(false) + buildJsonDAG(t, tc, false) +} + +func TestAttack(t *testing.T) { + tc, teardown := initializeTest(t, "TestAttack") + defer teardown(false) + buildJsonDAG(t, tc, true) +} + +func TestNoAttackFuzzy(t *testing.T) { + tc, teardown := initializeTest(t, "TestNoAttackFuzzy") + defer teardown(false) + tc.ReachabilityManager().SetReachabilityReindexSlack(10) + buildJsonDAG(t, tc, false) + addArbitraryBlocks(t, tc) +} + +func TestAttackFuzzy(t *testing.T) { + tc, teardown := initializeTest(t, "TestAttackFuzzy") + defer teardown(false) + 
tc.ReachabilityManager().SetReachabilityReindexSlack(10) + buildJsonDAG(t, tc, true) + addArbitraryBlocks(t, tc) +} + +func TestAttackAlternateReorg(t *testing.T) { + tc, teardown := initializeTest(t, "TestAttackAlternateReorg") + defer teardown(false) + tc.ReachabilityManager().SetReachabilityReindexSlack(256) + tips := buildJsonDAG(t, tc, true) + addAlternatingReorgBlocks(t, tc, tips) +} diff --git a/domain/consensus/processes/reachabilitymanager/reachability_test.go b/domain/consensus/processes/reachabilitymanager/reachability_test.go new file mode 100644 index 0000000..df5ed48 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reachability_test.go @@ -0,0 +1,1038 @@ +package reachabilitymanager + +import ( + "encoding/binary" + "reflect" + "strings" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type reachabilityDataStoreMock struct { + reachabilityDataStaging map[externalapi.DomainHash]model.ReachabilityData + recorder map[externalapi.DomainHash]struct{} + reachabilityReindexRootStaging *externalapi.DomainHash +} + +func (r *reachabilityDataStoreMock) Delete(_ model.DBWriter) error { + panic("implement me") +} + +func (r *reachabilityDataStoreMock) Commit(_ model.DBTransaction) error { + panic("implement me") +} + +func (r *reachabilityDataStoreMock) StageReachabilityData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, reachabilityData model.ReachabilityData) { + + r.reachabilityDataStaging[*blockHash] = reachabilityData + r.recorder[*blockHash] = struct{}{} +} + +func (r *reachabilityDataStoreMock) StageReachabilityReindexRoot(stagingArea *model.StagingArea, reachabilityReindexRoot *externalapi.DomainHash) { + r.reachabilityReindexRootStaging = reachabilityReindexRoot +} + +func (r *reachabilityDataStoreMock) IsStaged(*model.StagingArea) bool { + panic("implement me") +} + +func (r *reachabilityDataStoreMock) 
ReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (model.ReachabilityData, error) { + + return r.reachabilityDataStaging[*blockHash], nil +} + +func (r *reachabilityDataStoreMock) HasReachabilityData(dbContext model.DBReader, stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + _, ok := r.reachabilityDataStaging[*blockHash] + return ok, nil +} + +func (r *reachabilityDataStoreMock) ReachabilityReindexRoot(dbContext model.DBReader, stagingArea *model.StagingArea) (*externalapi.DomainHash, error) { + return r.reachabilityReindexRootStaging, nil +} + +func (r *reachabilityDataStoreMock) isRecorderContainsOnly(nodes ...*externalapi.DomainHash) bool { + if len(r.recorder) != len(nodes) { + return false + } + + for _, node := range nodes { + if _, ok := r.recorder[*node]; !ok { + return false + } + } + + return true +} + +func (r *reachabilityDataStoreMock) resetRecorder() { + r.recorder = make(map[externalapi.DomainHash]struct{}) +} + +func newReachabilityDataStoreMock() *reachabilityDataStoreMock { + return &reachabilityDataStoreMock{ + reachabilityDataStaging: make(map[externalapi.DomainHash]model.ReachabilityData), + recorder: make(map[externalapi.DomainHash]struct{}), + reachabilityReindexRootStaging: nil, + } +} + +type fatalfer interface { + Fatalf(format string, args ...interface{}) +} + +type testHelper struct { + *reachabilityManager + t fatalfer + dataStore *reachabilityDataStoreMock + hashCounter uint64 +} + +func (th *testHelper) generateHash() *externalapi.DomainHash { + var hashArray [externalapi.DomainHashSize]byte + binary.LittleEndian.PutUint64(hashArray[:], th.hashCounter) + th.hashCounter++ + return externalapi.NewDomainHashFromByteArray(&hashArray) +} + +func (th *testHelper) newNode(stagingArea *model.StagingArea) *externalapi.DomainHash { + node := th.generateHash() + th.stageData(stagingArea, node, newReachabilityTreeData()) + return node +} + +func (th 
*testHelper) newNodeWithInterval(stagingArea *model.StagingArea, interval *model.ReachabilityInterval) *externalapi.DomainHash { + node := th.newNode(stagingArea) + err := th.stageInterval(stagingArea, node, interval) + if err != nil { + th.t.Fatalf("stageInteval: %s", err) + } + + return node +} + +func (th *testHelper) getInterval(stagingArea *model.StagingArea, node *externalapi.DomainHash) *model.ReachabilityInterval { + interval, err := th.interval(stagingArea, node) + if err != nil { + th.t.Fatalf("interval: %s", err) + } + + return interval +} + +func (th *testHelper) getIntervalSize(stagingArea *model.StagingArea, node *externalapi.DomainHash) uint64 { + return intervalSize(th.getInterval(stagingArea, node)) +} + +func (th *testHelper) remainingIntervalBefore( + stagingArea *model.StagingArea, node *externalapi.DomainHash) *model.ReachabilityInterval { + + interval, err := th.reachabilityManager.remainingIntervalBefore(stagingArea, node) + if err != nil { + th.t.Fatalf("remainingIntervalBefore: %s", err) + } + + return interval +} + +func (th *testHelper) remainingIntervalAfter(stagingArea *model.StagingArea, + node *externalapi.DomainHash) *model.ReachabilityInterval { + + interval, err := th.reachabilityManager.remainingIntervalAfter(stagingArea, node) + if err != nil { + th.t.Fatalf("remainingIntervalAfter: %s", err) + } + + return interval +} + +func (th *testHelper) addChild(stagingArea *model.StagingArea, node, child, reindexRoot *externalapi.DomainHash) { + err := th.reachabilityManager.addChild(stagingArea, node, child, reindexRoot) + if err != nil { + th.t.Fatalf("addChild: %s", err) + } +} + +func (th *testHelper) isReachabilityTreeAncestorOf( + stagingArea *model.StagingArea, node, other *externalapi.DomainHash) bool { + + isReachabilityTreeAncestorOf, err := th.reachabilityManager.IsReachabilityTreeAncestorOf(stagingArea, node, other) + if err != nil { + th.t.Fatalf("IsReachabilityTreeAncestorOf: %s", err) + } + + return 
isReachabilityTreeAncestorOf +} + +func (th *testHelper) checkIsRecorderContainsOnly(nodes ...*externalapi.DomainHash) { + if !th.dataStore.isRecorderContainsOnly(nodes...) { + th.t.Fatalf("unexpected nodes on recorder. Want: %v, got: %v", nodes, th.dataStore.recorder) + } +} + +func (th *testHelper) resetRecorder() { + th.dataStore.resetRecorder() +} + +func newTestHelper(manager *reachabilityManager, t fatalfer, dataStore *reachabilityDataStoreMock) *testHelper { + return &testHelper{reachabilityManager: manager, t: t, dataStore: dataStore} +} + +func TestAddChild(t *testing.T) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, t, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + // Scenario 1: test addChild in a chain + // root -> a -> b -> c... + // Create the root node of a new reachability tree + root := helper.newNode(stagingArea) + err := helper.stageInterval(stagingArea, root, newReachabilityInterval(1, 100)) + if err != nil { + t.Fatalf("stageInterval: %s", err) + } + + // Add a chain of child nodes just before a reindex occurs (2^6=64 < 100) + currentTip := root + for i := 0; i < 6; i++ { + node := helper.newNode(stagingArea) + helper.resetRecorder() + helper.addChild(stagingArea, currentTip, node, root) + + // Expect only the node and its parent to be affected + helper.checkIsRecorderContainsOnly(currentTip, node) + currentTip = node + } + + // Add another node to the tip of the chain to trigger a reindex (100 < 2^7=128) + lastChild := helper.newNode(stagingArea) + helper.resetRecorder() + helper.addChild(stagingArea, currentTip, lastChild, root) + + // Expect more than just the node and its parent to be modified but not + // all the nodes + if len(helper.dataStore.recorder) <= 2 && len(helper.dataStore.recorder) >= 7 { + t.Fatalf("TestAddChild: unexpected amount of staged nodes") + } + + // Expect the tip to have an 
interval of 1 and remaining interval of 0 both before and after + tipIntervalSize := helper.getIntervalSize(stagingArea, lastChild) + if tipIntervalSize != 1 { + t.Fatalf("TestAddChild: unexpected tip interval size: want: 1, got: %d", tipIntervalSize) + } + + tipRemainingIntervalBefore := helper.remainingIntervalBefore(stagingArea, lastChild) + + if intervalSize(tipRemainingIntervalBefore) != 0 { + t.Fatalf("TestAddChild: unexpected tip interval before size: want: 0, got: %d", intervalSize(tipRemainingIntervalBefore)) + } + + tipRemainingIntervalAfter := helper.remainingIntervalAfter(stagingArea, lastChild) + if intervalSize(tipRemainingIntervalAfter) != 0 { + t.Fatalf("TestAddChild: unexpected tip interval after size: want: 0, got: %d", intervalSize(tipRemainingIntervalAfter)) + } + + // Expect all nodes to be descendant nodes of root + currentNode := currentTip + for currentNode != root { + isReachabilityTreeAncestorOf, err := helper.IsReachabilityTreeAncestorOf(stagingArea, root, currentNode) + if err != nil { + t.Fatalf("IsReachabilityTreeAncestorOf: %s", err) + } + if !isReachabilityTreeAncestorOf { + t.Fatalf("TestAddChild: currentNode is not a descendant of root") + } + + currentNode, err = helper.parent(stagingArea, currentNode) + if err != nil { + t.Fatalf("parent: %s", err) + } + } + + err = manager.validateIntervals(stagingArea, root) + if err != nil { + t.Fatal(err) + } + + // Scenario 2: test addChild where all nodes are direct descendants of root + // root -> a, b, c... 
+ // Create the root node of a new reachability tree + root = helper.newNode(stagingArea) + err = helper.stageInterval(stagingArea, root, newReachabilityInterval(1, 100)) + if err != nil { + t.Fatalf("stageInterval: %s", err) + } + + // Add child nodes to root just before a reindex occurs (2^6=64 < 100) + childNodes := make([]*externalapi.DomainHash, 6) + for i := 0; i < len(childNodes); i++ { + childNodes[i] = helper.newNode(stagingArea) + helper.resetRecorder() + helper.addChild(stagingArea, root, childNodes[i], root) + + // Expect only the node and the root to be affected + helper.checkIsRecorderContainsOnly(root, childNodes[i]) + } + + // Add another node to the root to trigger a reindex (100 < 2^7=128) + lastChild = helper.newNode(stagingArea) + helper.resetRecorder() + helper.addChild(stagingArea, root, lastChild, root) + + // Expect more than just the node and the root to be modified but not + // all the nodes + if len(helper.dataStore.recorder) <= 2 && len(helper.dataStore.recorder) >= 7 { + t.Fatalf("TestAddChild: unexpected amount of modifiedNodes.") + } + + // Expect the last-added child to have an interval of 1 and remaining interval of 0 both before and after + lastChildInterval, err := helper.interval(stagingArea, lastChild) + if err != nil { + t.Fatalf("interval: %s", err) + } + + if intervalSize(lastChildInterval) != 1 { + t.Fatalf("TestAddChild: unexpected lastChild interval size: want: 1, got: %d", intervalSize(lastChildInterval)) + } + lastChildRemainingIntervalBeforeSize := intervalSize(helper.remainingIntervalBefore(stagingArea, lastChild)) + if lastChildRemainingIntervalBeforeSize != 0 { + t.Fatalf("TestAddChild: unexpected lastChild interval before size: want: 0, got: %d", lastChildRemainingIntervalBeforeSize) + } + lastChildRemainingIntervalAfterSize := intervalSize(helper.remainingIntervalAfter(stagingArea, lastChild)) + if lastChildRemainingIntervalAfterSize != 0 { + t.Fatalf("TestAddChild: unexpected lastChild interval after size: want: 
0, got: %d", lastChildRemainingIntervalAfterSize) + } + + // Expect all nodes to be descendant nodes of root + for _, childNode := range childNodes { + isReachabilityTreeAncestorOf, err := helper.IsReachabilityTreeAncestorOf(stagingArea, root, childNode) + if err != nil { + t.Fatalf("IsReachabilityTreeAncestorOf: %s", err) + } + + if !isReachabilityTreeAncestorOf { + t.Fatalf("TestAddChild: childNode is not a descendant of root") + } + } + + err = manager.validateIntervals(stagingArea, root) + if err != nil { + t.Fatal(err) + } +} + +func TestReachabilityTreeNodeIsAncestorOf(t *testing.T) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, t, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + root := helper.newNode(stagingArea) + currentTip := root + const numberOfDescendants = 6 + descendants := make([]*externalapi.DomainHash, numberOfDescendants) + for i := 0; i < numberOfDescendants; i++ { + node := helper.newNode(stagingArea) + helper.addChild(stagingArea, currentTip, node, root) + descendants[i] = node + currentTip = node + } + + // Expect all descendants to be in the future of root + for _, node := range descendants { + if !helper.isReachabilityTreeAncestorOf(stagingArea, root, node) { + t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: node is not a descendant of root") + } + } + + if !helper.isReachabilityTreeAncestorOf(stagingArea, root, root) { + t.Fatalf("TestReachabilityTreeNodeIsAncestorOf: root is expected to be an ancestor of root") + } + + err := manager.validateIntervals(stagingArea, root) + if err != nil { + t.Fatal(err) + } +} + +func TestIntervalContains(t *testing.T) { + tests := []struct { + name string + this, other *model.ReachabilityInterval + thisContainsOther bool + }{ + { + name: "this == other", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(10, 100), + thisContainsOther: true, + 
}, + { + name: "this.start == other.start && this.end < other.end", + this: newReachabilityInterval(10, 90), + other: newReachabilityInterval(10, 100), + thisContainsOther: false, + }, + { + name: "this.start == other.start && this.end > other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(10, 90), + thisContainsOther: true, + }, + { + name: "this.start > other.start && this.end == other.end", + this: newReachabilityInterval(20, 100), + other: newReachabilityInterval(10, 100), + thisContainsOther: false, + }, + { + name: "this.start < other.start && this.end == other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(20, 100), + thisContainsOther: true, + }, + { + name: "this.start > other.start && this.end < other.end", + this: newReachabilityInterval(20, 90), + other: newReachabilityInterval(10, 100), + thisContainsOther: false, + }, + { + name: "this.start < other.start && this.end > other.end", + this: newReachabilityInterval(10, 100), + other: newReachabilityInterval(20, 90), + thisContainsOther: true, + }, + } + + for _, test := range tests { + if thisContainsOther := intervalContains(test.this, test.other); thisContainsOther != test.thisContainsOther { + t.Errorf("test.this.contains(test.other) is expected to be %t but got %t", + test.thisContainsOther, thisContainsOther) + } + } +} + +func TestSplitFraction(t *testing.T) { + tests := []struct { + interval *model.ReachabilityInterval + fraction float64 + expectedLeft *model.ReachabilityInterval + expectedRight *model.ReachabilityInterval + }{ + { + interval: newReachabilityInterval(1, 100), + fraction: 0.5, + expectedLeft: newReachabilityInterval(1, 50), + expectedRight: newReachabilityInterval(51, 100), + }, + { + interval: newReachabilityInterval(2, 100), + fraction: 0.5, + expectedLeft: newReachabilityInterval(2, 51), + expectedRight: newReachabilityInterval(52, 100), + }, + { + interval: newReachabilityInterval(1, 99), + fraction: 0.5, + 
expectedLeft: newReachabilityInterval(1, 50), + expectedRight: newReachabilityInterval(51, 99), + }, + { + interval: newReachabilityInterval(1, 100), + fraction: 0.2, + expectedLeft: newReachabilityInterval(1, 20), + expectedRight: newReachabilityInterval(21, 100), + }, + { + interval: newReachabilityInterval(1, 100), + fraction: 0, + expectedLeft: newReachabilityInterval(1, 0), + expectedRight: newReachabilityInterval(1, 100), + }, + { + interval: newReachabilityInterval(1, 100), + fraction: 1, + expectedLeft: newReachabilityInterval(1, 100), + expectedRight: newReachabilityInterval(101, 100), + }, + } + + for i, test := range tests { + left, right, err := intervalSplitFraction(test.interval, test.fraction) + if err != nil { + t.Fatalf("TestSplitFraction: splitFraction unexpectedly failed in test #%d: %s", i, err) + } + if !reflect.DeepEqual(left, test.expectedLeft) { + t.Errorf("TestSplitFraction: unexpected left in test #%d. "+ + "want: %s, got: %s", i, test.expectedLeft, left) + } + if !reflect.DeepEqual(right, test.expectedRight) { + t.Errorf("TestSplitFraction: unexpected right in test #%d. 
"+ + "want: %s, got: %s", i, test.expectedRight, right) + } + } +} + +func TestSplitExact(t *testing.T) { + tests := []struct { + interval *model.ReachabilityInterval + sizes []uint64 + expectedIntervals []*model.ReachabilityInterval + }{ + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{100}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{50, 50}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 50), + newReachabilityInterval(51, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{10, 20, 30, 40}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 10), + newReachabilityInterval(11, 30), + newReachabilityInterval(31, 60), + newReachabilityInterval(61, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{0, 100}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 0), + newReachabilityInterval(1, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{100, 0}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 100), + newReachabilityInterval(101, 100), + }, + }, + } + + for i, test := range tests { + intervals, err := intervalSplitExact(test.interval, test.sizes) + if err != nil { + t.Fatalf("TestSplitExact: splitExact unexpectedly failed in test #%d: %s", i, err) + } + if !reflect.DeepEqual(intervals, test.expectedIntervals) { + t.Errorf("TestSplitExact: unexpected intervals in test #%d. 
"+ + "want: %s, got: %s", i, test.expectedIntervals, intervals) + } + } +} + +func TestSplitWithExponentialBias(t *testing.T) { + tests := []struct { + interval *model.ReachabilityInterval + sizes []uint64 + expectedIntervals []*model.ReachabilityInterval + }{ + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{100}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{50, 50}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 50), + newReachabilityInterval(51, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{10, 20, 30, 40}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 10), + newReachabilityInterval(11, 30), + newReachabilityInterval(31, 60), + newReachabilityInterval(61, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{25, 25}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 50), + newReachabilityInterval(51, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{1, 1}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 50), + newReachabilityInterval(51, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{33, 33, 33}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 33), + newReachabilityInterval(34, 66), + newReachabilityInterval(67, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{10, 15, 25}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 10), + newReachabilityInterval(11, 25), + newReachabilityInterval(26, 100), + }, + }, + { + interval: newReachabilityInterval(1, 100), + sizes: []uint64{25, 15, 10}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 75), 
+ newReachabilityInterval(76, 90), + newReachabilityInterval(91, 100), + }, + }, + { + interval: newReachabilityInterval(1, 10_000), + sizes: []uint64{10, 10, 20}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 20), + newReachabilityInterval(21, 40), + newReachabilityInterval(41, 10_000), + }, + }, + { + interval: newReachabilityInterval(1, 100_000), + sizes: []uint64{31_000, 31_000, 30_001}, + expectedIntervals: []*model.ReachabilityInterval{ + newReachabilityInterval(1, 35_000), + newReachabilityInterval(35_001, 69_999), + newReachabilityInterval(70_000, 100_000), + }, + }, + } + + for i, test := range tests { + intervals, err := intervalSplitWithExponentialBias(test.interval, test.sizes) + if err != nil { + t.Fatalf("TestSplitWithExponentialBias: splitWithExponentialBias unexpectedly failed in test #%d: %s", i, err) + } + if !reflect.DeepEqual(intervals, test.expectedIntervals) { + t.Errorf("TestSplitWithExponentialBias: unexpected intervals in test #%d. 
"+ + "want: %s, got: %s", i, test.expectedIntervals, intervals) + } + } +} + +func TestHasAncestorOf(t *testing.T) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, t, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + futureCoveringTreeNodeSet := model.FutureCoveringTreeNodeSet{ + helper.newNodeWithInterval(stagingArea, newReachabilityInterval(2, 3)), + helper.newNodeWithInterval(stagingArea, newReachabilityInterval(4, 67)), + helper.newNodeWithInterval(stagingArea, newReachabilityInterval(67, 77)), + helper.newNodeWithInterval(stagingArea, newReachabilityInterval(657, 789)), + helper.newNodeWithInterval(stagingArea, newReachabilityInterval(1000, 1000)), + helper.newNodeWithInterval(stagingArea, newReachabilityInterval(1920, 1921)), + } + + nodeWithFutureCoveringTreeNodeSet := helper.newNode(stagingArea) + err := helper.stageFutureCoveringSet(stagingArea, nodeWithFutureCoveringTreeNodeSet, futureCoveringTreeNodeSet) + if err != nil { + t.Fatalf("stageFutureCoveringSet: %s", err) + } + + tests := []struct { + treeNode *externalapi.DomainHash + expectedResult bool + }{ + { + treeNode: helper.newNodeWithInterval(stagingArea, newReachabilityInterval(1, 1)), + expectedResult: false, + }, + { + treeNode: helper.newNodeWithInterval(stagingArea, newReachabilityInterval(5, 7)), + expectedResult: true, + }, + { + treeNode: helper.newNodeWithInterval(stagingArea, newReachabilityInterval(67, 76)), + expectedResult: true, + }, + { + treeNode: helper.newNodeWithInterval(stagingArea, newReachabilityInterval(78, 100)), + expectedResult: false, + }, + { + treeNode: helper.newNodeWithInterval(stagingArea, newReachabilityInterval(1980, 2000)), + expectedResult: false, + }, + { + treeNode: helper.newNodeWithInterval(stagingArea, newReachabilityInterval(1920, 1920)), + expectedResult: true, + }, + } + + for i, test := range tests { + result, err := 
helper.futureCoveringSetHasAncestorOf(stagingArea, nodeWithFutureCoveringTreeNodeSet, test.treeNode) + if err != nil { + t.Fatalf("futureCoveringSetHasAncestorOf: %s", err) + } + + if result != test.expectedResult { + t.Errorf("TestHasAncestorOf: unexpected result in test #%d. Want: %t, got: %t", + i, test.expectedResult, result) + } + } +} + +func TestInsertToFutureCoveringSet(t *testing.T) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, t, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + nodeByIntervalMap := make(map[model.ReachabilityInterval]*externalapi.DomainHash) + nodeByInterval := func(interval *model.ReachabilityInterval) *externalapi.DomainHash { + if node, ok := nodeByIntervalMap[*interval]; ok { + return node + } + + nodeByIntervalMap[*interval] = helper.newNodeWithInterval(stagingArea, interval) + return nodeByIntervalMap[*interval] + } + + futureCoveringTreeNodeSet := model.FutureCoveringTreeNodeSet{ + nodeByInterval(newReachabilityInterval(1, 3)), + nodeByInterval(newReachabilityInterval(4, 67)), + nodeByInterval(newReachabilityInterval(67, 77)), + nodeByInterval(newReachabilityInterval(657, 789)), + nodeByInterval(newReachabilityInterval(1000, 1000)), + nodeByInterval(newReachabilityInterval(1920, 1921)), + } + + tests := []struct { + toInsert []*externalapi.DomainHash + expectedResult model.FutureCoveringTreeNodeSet + }{ + { + toInsert: []*externalapi.DomainHash{ + nodeByInterval(newReachabilityInterval(5, 7)), + }, + expectedResult: model.FutureCoveringTreeNodeSet{ + nodeByInterval(newReachabilityInterval(1, 3)), + nodeByInterval(newReachabilityInterval(4, 67)), + nodeByInterval(newReachabilityInterval(67, 77)), + nodeByInterval(newReachabilityInterval(657, 789)), + nodeByInterval(newReachabilityInterval(1000, 1000)), + nodeByInterval(newReachabilityInterval(1920, 1921)), + }, + }, + { + toInsert: 
[]*externalapi.DomainHash{ + nodeByInterval(newReachabilityInterval(65, 78)), + }, + expectedResult: model.FutureCoveringTreeNodeSet{ + nodeByInterval(newReachabilityInterval(1, 3)), + nodeByInterval(newReachabilityInterval(4, 67)), + nodeByInterval(newReachabilityInterval(65, 78)), + nodeByInterval(newReachabilityInterval(657, 789)), + nodeByInterval(newReachabilityInterval(1000, 1000)), + nodeByInterval(newReachabilityInterval(1920, 1921)), + }, + }, + { + toInsert: []*externalapi.DomainHash{ + nodeByInterval(newReachabilityInterval(88, 97)), + }, + expectedResult: model.FutureCoveringTreeNodeSet{ + nodeByInterval(newReachabilityInterval(1, 3)), + nodeByInterval(newReachabilityInterval(4, 67)), + nodeByInterval(newReachabilityInterval(67, 77)), + nodeByInterval(newReachabilityInterval(88, 97)), + nodeByInterval(newReachabilityInterval(657, 789)), + nodeByInterval(newReachabilityInterval(1000, 1000)), + nodeByInterval(newReachabilityInterval(1920, 1921)), + }, + }, + { + toInsert: []*externalapi.DomainHash{ + nodeByInterval(newReachabilityInterval(88, 97)), + nodeByInterval(newReachabilityInterval(3000, 3010)), + }, + expectedResult: model.FutureCoveringTreeNodeSet{ + nodeByInterval(newReachabilityInterval(1, 3)), + nodeByInterval(newReachabilityInterval(4, 67)), + nodeByInterval(newReachabilityInterval(67, 77)), + nodeByInterval(newReachabilityInterval(88, 97)), + nodeByInterval(newReachabilityInterval(657, 789)), + nodeByInterval(newReachabilityInterval(1000, 1000)), + nodeByInterval(newReachabilityInterval(1920, 1921)), + nodeByInterval(newReachabilityInterval(3000, 3010)), + }, + }, + } + + for i, test := range tests { + // Create a clone of treeNodes so that we have a clean start for every test + futureCoveringTreeNodeSetClone := make(model.FutureCoveringTreeNodeSet, len(futureCoveringTreeNodeSet)) + copy(futureCoveringTreeNodeSetClone, futureCoveringTreeNodeSet) + + node := helper.newNode(stagingArea) + err := helper.stageFutureCoveringSet(stagingArea, node, 
futureCoveringTreeNodeSetClone) + if err != nil { + t.Fatalf("stageFutureCoveringSet: %s", err) + } + + for _, treeNode := range test.toInsert { + err := helper.insertToFutureCoveringSet(stagingArea, node, treeNode) + if err != nil { + t.Fatalf("insertToFutureCoveringSet: %s", err) + } + } + + resultFutureCoveringTreeNodeSet, err := helper.futureCoveringSet(stagingArea, node) + if err != nil { + t.Fatalf("futureCoveringSet: %s", err) + } + if !reflect.DeepEqual(resultFutureCoveringTreeNodeSet, test.expectedResult) { + t.Errorf("TestInsertToFutureCoveringSet: unexpected result in test #%d. Want: %s, got: %s", + i, test.expectedResult, resultFutureCoveringTreeNodeSet) + } + } +} + +func TestSplitFractionErrors(t *testing.T) { + interval := newReachabilityInterval(100, 200) + + // Negative fraction + _, _, err := intervalSplitFraction(interval, -0.5) + if err == nil { + t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " + + "didn't return an error for a negative fraction") + } + expectedErrSubstring := "fraction must be between 0 and 1" + if !strings.Contains(err.Error(), expectedErrSubstring) { + t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+ + "for a negative fraction. "+ + "Want: %s, got: %s", expectedErrSubstring, err) + } + + // Fraction > 1 + _, _, err = intervalSplitFraction(interval, 1.5) + if err == nil { + t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " + + "didn't return an error for a fraction greater than 1") + } + expectedErrSubstring = "fraction must be between 0 and 1" + if !strings.Contains(err.Error(), expectedErrSubstring) { + t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+ + "for a fraction greater than 1. 
"+ + "Want: %s, got: %s", expectedErrSubstring, err) + } + + // Splitting an empty interval + emptyInterval := newReachabilityInterval(1, 0) + _, _, err = intervalSplitFraction(emptyInterval, 0.5) + if err == nil { + t.Fatalf("TestSplitFractionErrors: splitFraction unexpectedly " + + "didn't return an error for an empty interval") + } + expectedErrSubstring = "cannot split an empty interval" + if !strings.Contains(err.Error(), expectedErrSubstring) { + t.Fatalf("TestSplitFractionErrors: splitFraction returned wrong error "+ + "for an empty interval. "+ + "Want: %s, got: %s", expectedErrSubstring, err) + } +} + +func TestSplitExactErrors(t *testing.T) { + interval := newReachabilityInterval(100, 199) + + // Sum of sizes greater than the size of the interval + sizes := []uint64{50, 51} + _, err := intervalSplitExact(interval, sizes) + if err == nil { + t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " + + "didn't return an error for (sum of sizes) > (size of interval)") + } + expectedErrSubstring := "sum of sizes must be equal to the interval's size" + if !strings.Contains(err.Error(), expectedErrSubstring) { + t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+ + "for (sum of sizes) > (size of interval). "+ + "Want: %s, got: %s", expectedErrSubstring, err) + } + + // Sum of sizes smaller than the size of the interval + sizes = []uint64{50, 49} + _, err = intervalSplitExact(interval, sizes) + if err == nil { + t.Fatalf("TestSplitExactErrors: splitExact unexpectedly " + + "didn't return an error for (sum of sizes) < (size of interval)") + } + expectedErrSubstring = "sum of sizes must be equal to the interval's size" + if !strings.Contains(err.Error(), expectedErrSubstring) { + t.Fatalf("TestSplitExactErrors: splitExact returned wrong error "+ + "for (sum of sizes) < (size of interval). 
"+ + "Want: %s, got: %s", expectedErrSubstring, err) + } +} + +func TestSplitWithExponentialBiasErrors(t *testing.T) { + interval := newReachabilityInterval(100, 199) + + // Sum of sizes greater than the size of the interval + sizes := []uint64{50, 51} + _, err := intervalSplitWithExponentialBias(interval, sizes) + if err == nil { + t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias " + + "unexpectedly didn't return an error") + } + expectedErrSubstring := "sum of sizes must be less than or equal to the interval's size" + if !strings.Contains(err.Error(), expectedErrSubstring) { + t.Fatalf("TestSplitWithExponentialBiasErrors: splitWithExponentialBias "+ + "returned wrong error. Want: %s, got: %s", expectedErrSubstring, err) + } +} + +func TestReindexIntervalErrors(t *testing.T) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, t, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + // Create a treeNode and give it size = 100 + treeNode := helper.newNodeWithInterval(stagingArea, newReachabilityInterval(0, 99)) + + // Add a chain of 100 child treeNodes to treeNode + var err error + currentTreeNode := treeNode + for i := 0; i < 100; i++ { + childTreeNode := helper.newNode(stagingArea) + err = helper.reachabilityManager.addChild(stagingArea, currentTreeNode, childTreeNode, treeNode) + if err != nil { + break + } + currentTreeNode = childTreeNode + } + + // At the 100th addChild we expect a reindex. This reindex should + // fail because our initial treeNode only has size = 100, and the + // reindex requires size > 100. + // This simulates the case when (somehow) there's more than 2^64 + // blocks in the DAG, since the genesis block has size = 2^64. 
+ if err == nil { + t.Fatalf("TestReindexIntervalErrors: reindexIntervals " + + "unexpectedly didn't return an error") + } + if !strings.Contains(err.Error(), "missing tree parent during reindexing") { + t.Fatalf("TestReindexIntervalErrors: reindexIntervals "+ + "returned an expected error: %s", err) + } +} + +func BenchmarkReindexInterval(b *testing.B) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, b, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + for i := 0; i < b.N; i++ { + b.StopTimer() + + const subTreeSize = 70000 + // We set the interval of the root to subTreeSize*2 because + // its first child gets half of the interval, so a reindex + // from the root should happen after adding subTreeSize + // nodes. + root := helper.newNodeWithInterval(stagingArea, newReachabilityInterval(0, subTreeSize*2)) + + currentTreeNode := root + for i := 0; i < subTreeSize; i++ { + childTreeNode := helper.newNode(stagingArea) + helper.addChild(stagingArea, currentTreeNode, childTreeNode, root) + + currentTreeNode = childTreeNode + } + + originalRemainingInterval := helper.remainingIntervalAfter(stagingArea, root).Clone() + // After we added subTreeSize nodes, adding the next + // node should lead to a reindex from root. 
+ fullReindexTriggeringNode := helper.newNode(stagingArea) + b.StartTimer() + err := helper.reachabilityManager.addChild(stagingArea, currentTreeNode, fullReindexTriggeringNode, root) + b.StopTimer() + if err != nil { + b.Fatalf("addChild: %s", err) + } + + if helper.remainingIntervalAfter(stagingArea, root).Equal(originalRemainingInterval) { + b.Fatal("Expected a reindex from root, but it didn't happen") + } + } +} + +func TestReachabilityTreeNodeString(t *testing.T) { + reachabilityDataStore := newReachabilityDataStoreMock() + manager := New(nil, nil, reachabilityDataStore).(*reachabilityManager) + helper := newTestHelper(manager, t, reachabilityDataStore) + + stagingArea := model.NewStagingArea() + + treeNodeA := helper.newNodeWithInterval(stagingArea, newReachabilityInterval(100, 199)) + treeNodeB1 := helper.newNodeWithInterval(stagingArea, newReachabilityInterval(100, 150)) + treeNodeB2 := helper.newNodeWithInterval(stagingArea, newReachabilityInterval(150, 199)) + treeNodeC := helper.newNodeWithInterval(stagingArea, newReachabilityInterval(100, 149)) + + err := helper.stageAddChild(stagingArea, treeNodeA, treeNodeB1) + if err != nil { + t.Fatalf("stageAddChild: %s", err) + } + + err = helper.stageAddChild(stagingArea, treeNodeA, treeNodeB2) + if err != nil { + t.Fatalf("stageAddChild: %s", err) + } + + err = helper.stageAddChild(stagingArea, treeNodeB2, treeNodeC) + if err != nil { + t.Fatalf("stageAddChild: %s", err) + } + + str, err := manager.String(stagingArea, treeNodeA) + if err != nil { + t.Fatalf("String: %s", err) + } + expectedStr := "[100,149]\n[100,150][150,199]\n[100,199]" + if str != expectedStr { + t.Fatalf("TestReachabilityTreeNodeString: unexpected "+ + "string. 
Want: %s, got: %s", expectedStr, str) + } +} diff --git a/domain/consensus/processes/reachabilitymanager/reachabilitymanager.go b/domain/consensus/processes/reachabilitymanager/reachabilitymanager.go new file mode 100644 index 0000000..e30b90d --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reachabilitymanager.go @@ -0,0 +1,86 @@ +package reachabilitymanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// reachabilityManager maintains a structure that allows to answer +// reachability queries in sub-linear time +type reachabilityManager struct { + databaseContext model.DBReader + reachabilityDataStore model.ReachabilityDataStore + ghostdagDataStore model.GHOSTDAGDataStore + reindexSlack uint64 + reindexWindow uint64 +} + +// New instantiates a new reachabilityManager +func New( + databaseContext model.DBReader, + ghostdagDataStore model.GHOSTDAGDataStore, + reachabilityDataStore model.ReachabilityDataStore, +) model.ReachabilityManager { + return &reachabilityManager{ + databaseContext: databaseContext, + ghostdagDataStore: ghostdagDataStore, + reachabilityDataStore: reachabilityDataStore, + reindexSlack: defaultReindexSlack, + reindexWindow: defaultReindexWindow, + } +} + +// AddBlock adds the block with the given blockHash into the reachability tree. 
+func (rt *reachabilityManager) AddBlock(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) error { + // Allocate a new reachability data + newReachabilityData := newReachabilityTreeData() + rt.stageData(stagingArea, blockHash, newReachabilityData) + + ghostdagData, err := rt.ghostdagDataStore.Get(rt.databaseContext, stagingArea, blockHash, false) + if err != nil { + return err + } + + reindexRoot, err := rt.reindexRoot(stagingArea) + if err != nil { + return err + } + + // Insert the node into the selected parent's reachability tree + err = rt.addChild(stagingArea, ghostdagData.SelectedParent(), blockHash, reindexRoot) + if err != nil { + return err + } + + // Add the block to the futureCoveringSets of all the blocks + // in the merge set + mergeSet := make([]*externalapi.DomainHash, len(ghostdagData.MergeSetBlues())+len(ghostdagData.MergeSetReds())) + copy(mergeSet, ghostdagData.MergeSetBlues()) + copy(mergeSet[len(ghostdagData.MergeSetBlues()):], ghostdagData.MergeSetReds()) + + for _, current := range mergeSet { + err = rt.insertToFutureCoveringSet(stagingArea, current, blockHash) + if err != nil { + return err + } + } + + return nil +} + +func (rt *reachabilityManager) Init(stagingArea *model.StagingArea) error { + hasReachabilityData, err := rt.reachabilityDataStore.HasReachabilityData(rt.databaseContext, stagingArea, model.VirtualGenesisBlockHash) + if err != nil { + return err + } + + if hasReachabilityData { + return nil + } + + newReachabilityData := newReachabilityTreeData() + rt.stageData(stagingArea, model.VirtualGenesisBlockHash, newReachabilityData) + rt.stageReindexRoot(stagingArea, model.VirtualGenesisBlockHash) + + return nil +} diff --git a/domain/consensus/processes/reachabilitymanager/reindex_context.go b/domain/consensus/processes/reachabilitymanager/reindex_context.go new file mode 100644 index 0000000..0ced784 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/reindex_context.go @@ -0,0 +1,808 @@ +package 
reachabilitymanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +var ( + // defaultReindexWindow is the default target window size for reachability + // reindexes. Note that this is not a constant for testing purposes. + defaultReindexWindow uint64 = 200 + + // defaultReindexSlack is default the slack interval given to reachability + // tree nodes not in the selected parent chain. Note that this is not + // a constant for testing purposes. + defaultReindexSlack uint64 = 1 << 12 +) + +// reindexContext is a struct used during reindex operations. It represents a temporary context +// for caching subtree information during the *current* reindex operation only +type reindexContext struct { + manager *reachabilityManager + subTreeSizesCache map[externalapi.DomainHash]uint64 +} + +// newReindexContext creates a new empty reindex context +func newReindexContext(rt *reachabilityManager) reindexContext { + return reindexContext{ + manager: rt, + subTreeSizesCache: make(map[externalapi.DomainHash]uint64), + } +} + +/* + +Core (BFS) algorithms used during reindexing + +*/ + +// countSubtrees counts the size of each subtree under this node, +// and populates the provided subTreeSizeMap with the results. +// It is equivalent to the following recursive implementation: +// +// func (rt *reachabilityManager) countSubtrees(node *model.ReachabilityTreeNode) uint64 { +// subtreeSize := uint64(0) +// for _, child := range node.children { +// subtreeSize += child.countSubtrees() +// } +// return subtreeSize + 1 +// } +// +// However, we are expecting (linearly) deep trees, and so a +// recursive stack-based approach is inefficient and will hit +// recursion limits. Instead, the same logic was implemented +// using a (queue-based) BFS method. 
At a high level, the +// algorithm uses BFS for reaching all leaves and pushes +// intermediate updates from leaves via parent chains until all +// size information is gathered at the root of the operation +// (i.e. at node). +func (rc *reindexContext) countSubtrees(stagingArea *model.StagingArea, node *externalapi.DomainHash) error { + + if _, ok := rc.subTreeSizesCache[*node]; ok { + return nil + } + + queue := []*externalapi.DomainHash{node} + calculatedChildrenCount := make(map[externalapi.DomainHash]uint64) + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + children, err := rc.manager.children(stagingArea, current) + if err != nil { + return err + } + + if len(children) == 0 { + // We reached a leaf + rc.subTreeSizesCache[*current] = 1 + } else if _, ok := rc.subTreeSizesCache[*current]; !ok { + // We haven't yet calculated the subtree size of + // the current node. Add all its children to the + // queue + queue = append(queue, children...) + continue + } + + // We reached a leaf or a pre-calculated subtree. + // Push information up + for !current.Equal(node) { + current, err = rc.manager.parent(stagingArea, current) + if err != nil { + return err + } + + // If the current is now nil, it means that the previous + // `current` was the genesis block -- the only block that + // does not have parents + if current == nil { + break + } + + calculatedChildrenCount[*current]++ + + children, err := rc.manager.children(stagingArea, current) + if err != nil { + return err + } + + if calculatedChildrenCount[*current] != uint64(len(children)) { + // Not all subtrees of the current node are ready + break + } + // All children of `current` have calculated their subtree size. + // Sum them all together and add 1 to get the sub tree size of + // `current`. 
+ childSubtreeSizeSum := uint64(0) + for _, child := range children { + childSubtreeSizeSum += rc.subTreeSizesCache[*child] + } + rc.subTreeSizesCache[*current] = childSubtreeSizeSum + 1 + } + } + + return nil +} + +// propagateInterval propagates the new interval using a BFS traversal. +// Subtree intervals are recursively allocated according to subtree sizes and +// the allocation rule in splitWithExponentialBias. +func (rc *reindexContext) propagateInterval(stagingArea *model.StagingArea, node *externalapi.DomainHash) error { + + // Make sure subtrees are counted before propagating + err := rc.countSubtrees(stagingArea, node) + if err != nil { + return err + } + + queue := []*externalapi.DomainHash{node} + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + + children, err := rc.manager.children(stagingArea, current) + if err != nil { + return err + } + + if len(children) > 0 { + sizes := make([]uint64, len(children)) + for i, child := range children { + sizes[i] = rc.subTreeSizesCache[*child] + } + + interval, err := rc.manager.intervalRangeForChildAllocation(stagingArea, current) + if err != nil { + return err + } + + intervals, err := intervalSplitWithExponentialBias(interval, sizes) + if err != nil { + return err + } + + for i, child := range children { + childInterval := intervals[i] + err = rc.manager.stageInterval(stagingArea, child, childInterval) + if err != nil { + return err + } + queue = append(queue, child) + } + } + } + return nil +} + +/* + +Functions for handling reindex triggered by adding child block + +*/ + +// reindexIntervals traverses the reachability subtree that's +// defined by the new child node and reallocates reachability interval space +// such that another reindexing is unlikely to occur shortly +// thereafter. It does this by traversing down the reachability +// tree until it finds a node with a subtree size that's greater than +// its interval size. 
See propagateInterval for further details. +func (rc *reindexContext) reindexIntervals(stagingArea *model.StagingArea, newChild, reindexRoot *externalapi.DomainHash) error { + current := newChild + // Search for the first ancestor with sufficient interval space + for { + currentInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + currentIntervalSize := intervalSize(currentInterval) + + err = rc.countSubtrees(stagingArea, current) + if err != nil { + return err + } + + currentSubtreeSize := rc.subTreeSizesCache[*current] + + // Current has sufficient space, break and propagate + if currentIntervalSize >= currentSubtreeSize { + break + } + + parent, err := rc.manager.parent(stagingArea, current) + if err != nil { + return err + } + + if parent == nil { + // If we ended up here it means that there are more + // than 2^64 blocks, which shouldn't ever happen. + return errors.Errorf("missing tree " + + "parent during reindexing. Theoretically, this " + + "should only ever happen if there are more " + + "than 2^64 blocks in the DAG.") + } + + if current.Equal(reindexRoot) { + // Reindex root is expected to hold enough capacity as long as there are less + // than ~2^52 blocks in the DAG, which should never happen in our lifetimes + // even if block rate per second is above 100. The calculation follows from the allocation of + // 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root. + return errors.Errorf("unexpected behavior: reindex root %s is out of capacity"+ + "during reindexing. 
Theoretically, this "+ + "should only ever happen if there are more "+ + "than ~2^52 blocks in the DAG.", reindexRoot.String()) + } + + isParentStrictAncestorOfRoot, err := rc.manager.isStrictAncestorOf(stagingArea, parent, reindexRoot) + if err != nil { + return err + } + + if isParentStrictAncestorOfRoot { + // In this case parent is guaranteed to have sufficient interval space, + // however we avoid reindexing the entire subtree above parent + // (which includes root and thus majority of blocks mined since) + // and use slacks along the chain up from parent to reindex root. + // Notes: + // 1. we set requiredAllocation=currentSubtreeSize in order to double the + // current interval capacity + // 2. it might be the case that current is the `newChild` itself + return rc.reindexIntervalsEarlierThanRoot(stagingArea, current, reindexRoot, parent, currentSubtreeSize) + } + + current = parent + } + + // Propagate the interval to the subtree + return rc.propagateInterval(stagingArea, current) +} + +// reindexIntervalsEarlierThanRoot implements the reindex algorithm for the case where the +// new child node is not in reindex root's subtree. The function is expected to allocate +// `requiredAllocation` to be added to interval of `allocationNode`. `commonAncestor` is +// expected to be a direct parent of `allocationNode` and an ancestor of `reindexRoot`. +func (rc *reindexContext) reindexIntervalsEarlierThanRoot(stagingArea *model.StagingArea, + allocationNode, reindexRoot, commonAncestor *externalapi.DomainHash, requiredAllocation uint64) error { + + // The chosen child is: + // a. A reachability tree child of `commonAncestor` + // b. 
A reachability tree ancestor of `reindexRoot` or `reindexRoot` itself + chosenChild, err := rc.manager.FindNextAncestor(stagingArea, reindexRoot, commonAncestor) + if err != nil { + return err + } + + nodeInterval, err := rc.manager.interval(stagingArea, allocationNode) + if err != nil { + return err + } + + chosenInterval, err := rc.manager.interval(stagingArea, chosenChild) + if err != nil { + return err + } + + if nodeInterval.Start < chosenInterval.Start { + // allocationNode is in the subtree before the chosen child + return rc.reclaimIntervalBefore(stagingArea, allocationNode, commonAncestor, chosenChild, reindexRoot, requiredAllocation) + } + + // allocationNode is in the subtree after the chosen child + return rc.reclaimIntervalAfter(stagingArea, allocationNode, commonAncestor, chosenChild, reindexRoot, requiredAllocation) +} + +func (rc *reindexContext) reclaimIntervalBefore(stagingArea *model.StagingArea, + allocationNode, commonAncestor, chosenChild, reindexRoot *externalapi.DomainHash, requiredAllocation uint64) error { + + var slackSum uint64 = 0 + var pathLen uint64 = 0 + var pathSlackAlloc uint64 = 0 + + var err error + current := chosenChild + + // Walk up the chain from common ancestor's chosen child towards reindex root + for { + if current.Equal(reindexRoot) { + // Reached reindex root. 
In this case, since we reached (the unlimited) root, + // we also re-allocate new slack for the chain we just traversed + + previousInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + offset := requiredAllocation + rc.manager.reindexSlack*pathLen - slackSum + err = rc.manager.stageInterval(stagingArea, current, intervalIncreaseStart(previousInterval, offset)) + if err != nil { + return err + } + + err = rc.propagateInterval(stagingArea, current) + if err != nil { + return err + } + + err = rc.offsetSiblingsBefore(stagingArea, allocationNode, current, offset) + if err != nil { + return err + } + + // Set the slack for each chain block to be reserved below during the chain walk-down + pathSlackAlloc = rc.manager.reindexSlack + break + } + + slackBeforeCurrent, err := rc.manager.remainingSlackBefore(stagingArea, current) + if err != nil { + return err + } + slackSum += slackBeforeCurrent + + if slackSum >= requiredAllocation { + previousInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + // Set offset to be just enough to satisfy required allocation + offset := slackBeforeCurrent - (slackSum - requiredAllocation) + + err = rc.manager.stageInterval(stagingArea, current, intervalIncreaseStart(previousInterval, offset)) + if err != nil { + return err + } + + err = rc.offsetSiblingsBefore(stagingArea, allocationNode, current, offset) + if err != nil { + return err + } + + break + } + + current, err = rc.manager.FindNextAncestor(stagingArea, reindexRoot, current) + if err != nil { + return err + } + + pathLen++ + } + + // Go back down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current node with an interval that is smaller. + // This is to make room for the required allocation. 
+ for { + current, err = rc.manager.parent(stagingArea, current) + if err != nil { + return err + } + + if current.Equal(commonAncestor) { + break + } + + originalInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + slackBeforeCurrent, err := rc.manager.remainingSlackBefore(stagingArea, current) + if err != nil { + return err + } + + offset := slackBeforeCurrent - pathSlackAlloc + err = rc.manager.stageInterval(stagingArea, current, intervalIncreaseStart(originalInterval, offset)) + if err != nil { + return err + } + + err = rc.offsetSiblingsBefore(stagingArea, allocationNode, current, offset) + if err != nil { + return err + } + } + + return nil +} + +func (rc *reindexContext) offsetSiblingsBefore(stagingArea *model.StagingArea, + allocationNode, current *externalapi.DomainHash, offset uint64) error { + + parent, err := rc.manager.parent(stagingArea, current) + if err != nil { + return err + } + + siblingsBefore, _, err := rc.manager.splitChildren(stagingArea, parent, current) + if err != nil { + return err + } + + // Iterate over the slice in reverse order in order to break if reaching `allocationNode` + for i := len(siblingsBefore) - 1; i >= 0; i-- { + sibling := siblingsBefore[i] + if sibling.Equal(allocationNode) { + // We reached our final destination, allocate `offset` to `allocationNode` by increasing end and break + previousInterval, err := rc.manager.interval(stagingArea, allocationNode) + if err != nil { + return err + } + + err = rc.manager.stageInterval(stagingArea, allocationNode, intervalIncreaseEnd(previousInterval, offset)) + if err != nil { + return err + } + + err = rc.propagateInterval(stagingArea, allocationNode) + if err != nil { + return err + } + + break + } + + previousInterval, err := rc.manager.interval(stagingArea, sibling) + if err != nil { + return err + } + + err = rc.manager.stageInterval(stagingArea, sibling, intervalIncrease(previousInterval, offset)) + if err != nil { + return err + } + 
+ err = rc.propagateInterval(stagingArea, sibling) + if err != nil { + return err + } + } + + return nil +} + +func (rc *reindexContext) reclaimIntervalAfter(stagingArea *model.StagingArea, + allocationNode, commonAncestor, chosenChild, reindexRoot *externalapi.DomainHash, requiredAllocation uint64) error { + + var slackSum uint64 = 0 + var pathLen uint64 = 0 + var pathSlackAlloc uint64 = 0 + + var err error + current := chosenChild + + // Walk up the chain from common ancestor's chosen child towards reindex root + for { + if current.Equal(reindexRoot) { + // Reached reindex root. In this case, since we reached (the unlimited) root, + // we also re-allocate new slack for the chain we just traversed + + previousInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + offset := requiredAllocation + rc.manager.reindexSlack*pathLen - slackSum + err = rc.manager.stageInterval(stagingArea, current, intervalDecreaseEnd(previousInterval, offset)) + if err != nil { + return err + } + + err = rc.propagateInterval(stagingArea, current) + if err != nil { + return err + } + + err = rc.offsetSiblingsAfter(stagingArea, allocationNode, current, offset) + if err != nil { + return err + } + + // Set the slack for each chain block to be reserved below during the chain walk-down + pathSlackAlloc = rc.manager.reindexSlack + break + } + + slackAfterCurrent, err := rc.manager.remainingSlackAfter(stagingArea, current) + if err != nil { + return err + } + slackSum += slackAfterCurrent + + if slackSum >= requiredAllocation { + previousInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + // Set offset to be just enough to satisfy required allocation + offset := slackAfterCurrent - (slackSum - requiredAllocation) + + err = rc.manager.stageInterval(stagingArea, current, intervalDecreaseEnd(previousInterval, offset)) + if err != nil { + return err + } + + err = rc.offsetSiblingsAfter(stagingArea, 
allocationNode, current, offset) + if err != nil { + return err + } + + break + } + + current, err = rc.manager.FindNextAncestor(stagingArea, reindexRoot, current) + if err != nil { + return err + } + + pathLen++ + } + + // Go back down the reachability tree towards the common ancestor. + // On every hop we reindex the reachability subtree before the + // current node with an interval that is smaller. + // This is to make room for the required allocation. + for { + current, err = rc.manager.parent(stagingArea, current) + if err != nil { + return err + } + + if current.Equal(commonAncestor) { + break + } + + originalInterval, err := rc.manager.interval(stagingArea, current) + if err != nil { + return err + } + + slackAfterCurrent, err := rc.manager.remainingSlackAfter(stagingArea, current) + if err != nil { + return err + } + + offset := slackAfterCurrent - pathSlackAlloc + err = rc.manager.stageInterval(stagingArea, current, intervalDecreaseEnd(originalInterval, offset)) + if err != nil { + return err + } + + err = rc.offsetSiblingsAfter(stagingArea, allocationNode, current, offset) + if err != nil { + return err + } + } + + return nil +} + +func (rc *reindexContext) offsetSiblingsAfter(stagingArea *model.StagingArea, + allocationNode, current *externalapi.DomainHash, offset uint64) error { + + parent, err := rc.manager.parent(stagingArea, current) + if err != nil { + return err + } + + _, siblingsAfter, err := rc.manager.splitChildren(stagingArea, parent, current) + if err != nil { + return err + } + + for _, sibling := range siblingsAfter { + if sibling.Equal(allocationNode) { + // We reached our final destination, allocate `offset` to `allocationNode` by decreasing start and break + previousInterval, err := rc.manager.interval(stagingArea, allocationNode) + if err != nil { + return err + } + + err = rc.manager.stageInterval(stagingArea, allocationNode, intervalDecreaseStart(previousInterval, offset)) + if err != nil { + return err + } + + err = 
rc.propagateInterval(stagingArea, allocationNode) + if err != nil { + return err + } + + break + } + + previousInterval, err := rc.manager.interval(stagingArea, sibling) + if err != nil { + return err + } + + err = rc.manager.stageInterval(stagingArea, sibling, intervalDecrease(previousInterval, offset)) + if err != nil { + return err + } + + err = rc.propagateInterval(stagingArea, sibling) + if err != nil { + return err + } + } + + return nil +} + +/* + +Functions for handling reindex triggered by moving reindex root + +*/ + +func (rc *reindexContext) concentrateInterval(stagingArea *model.StagingArea, + reindexRoot, chosenChild *externalapi.DomainHash, isFinalReindexRoot bool) error { + + siblingsBeforeChosen, siblingsAfterChosen, err := rc.manager.splitChildren(stagingArea, reindexRoot, chosenChild) + if err != nil { + return err + } + + siblingsBeforeSizesSum, err := rc.tightenIntervalsBefore(stagingArea, reindexRoot, siblingsBeforeChosen) + if err != nil { + return err + } + + siblingsAfterSizesSum, err := rc.tightenIntervalsAfter(stagingArea, reindexRoot, siblingsAfterChosen) + if err != nil { + return err + } + + err = rc.expandIntervalToChosen(stagingArea, + reindexRoot, chosenChild, siblingsBeforeSizesSum, siblingsAfterSizesSum, isFinalReindexRoot) + if err != nil { + return err + } + + return nil +} + +func (rc *reindexContext) tightenIntervalsBefore(stagingArea *model.StagingArea, + reindexRoot *externalapi.DomainHash, siblingsBeforeChosen []*externalapi.DomainHash) (sizesSum uint64, err error) { + + siblingSubtreeSizes, sizesSum := rc.countChildrenSubtrees(stagingArea, siblingsBeforeChosen) + + rootInterval, err := rc.manager.interval(stagingArea, reindexRoot) + if err != nil { + return 0, err + } + + intervalBeforeChosen := newReachabilityInterval( + rootInterval.Start+rc.manager.reindexSlack, + rootInterval.Start+rc.manager.reindexSlack+sizesSum-1, + ) + + err = rc.propagateChildrenIntervals(stagingArea, intervalBeforeChosen, siblingsBeforeChosen, 
siblingSubtreeSizes) + if err != nil { + return 0, err + } + + return sizesSum, nil +} + +func (rc *reindexContext) tightenIntervalsAfter(stagingArea *model.StagingArea, + reindexRoot *externalapi.DomainHash, siblingsAfterChosen []*externalapi.DomainHash) (sizesSum uint64, err error) { + + siblingSubtreeSizes, sizesSum := rc.countChildrenSubtrees(stagingArea, siblingsAfterChosen) + + rootInterval, err := rc.manager.interval(stagingArea, reindexRoot) + if err != nil { + return 0, err + } + + intervalAfterChosen := newReachabilityInterval( + rootInterval.End-rc.manager.reindexSlack-sizesSum, + rootInterval.End-rc.manager.reindexSlack-1, + ) + + err = rc.propagateChildrenIntervals(stagingArea, intervalAfterChosen, siblingsAfterChosen, siblingSubtreeSizes) + if err != nil { + return 0, err + } + + return sizesSum, nil +} + +func (rc *reindexContext) expandIntervalToChosen(stagingArea *model.StagingArea, + reindexRoot, chosenChild *externalapi.DomainHash, sizesSumBefore, sizesSumAfter uint64, isFinalReindexRoot bool) error { + + rootInterval, err := rc.manager.interval(stagingArea, reindexRoot) + if err != nil { + return err + } + + newChosenInterval := newReachabilityInterval( + rootInterval.Start+sizesSumBefore+rc.manager.reindexSlack, + rootInterval.End-sizesSumAfter-rc.manager.reindexSlack-1, + ) + + currentChosenInterval, err := rc.manager.interval(stagingArea, chosenChild) + if err != nil { + return err + } + + // Propagate interval only if chosenChild is the final reindex root + if isFinalReindexRoot && !intervalContains(newChosenInterval, currentChosenInterval) { + // New interval doesn't contain the previous one, propagation is required + + // We assign slack on both sides as an optimization. Were we to + // assign a tight interval, the next time the reindex root moves we + // would need to propagate intervals again. 
That is to say, when we + // do allocate slack, next time + // expandIntervalToChosen is called (next time the + // reindex root moves), newChosenInterval is likely to + // contain currentChosenInterval. + err := rc.manager.stageInterval(stagingArea, chosenChild, newReachabilityInterval( + newChosenInterval.Start+rc.manager.reindexSlack, + newChosenInterval.End-rc.manager.reindexSlack, + )) + if err != nil { + return err + } + + err = rc.propagateInterval(stagingArea, chosenChild) + if err != nil { + return err + } + } + + err = rc.manager.stageInterval(stagingArea, chosenChild, newChosenInterval) + if err != nil { + return err + } + + return nil +} + +func (rc *reindexContext) countChildrenSubtrees(stagingArea *model.StagingArea, children []*externalapi.DomainHash) ( + sizes []uint64, sum uint64) { + + sizes = make([]uint64, len(children)) + sum = 0 + for i, node := range children { + err := rc.countSubtrees(stagingArea, node) + if err != nil { + return nil, 0 + } + + subtreeSize := rc.subTreeSizesCache[*node] + sizes[i] = subtreeSize + sum += subtreeSize + } + return sizes, sum +} + +func (rc *reindexContext) propagateChildrenIntervals(stagingArea *model.StagingArea, + interval *model.ReachabilityInterval, children []*externalapi.DomainHash, sizes []uint64) error { + + childIntervals, err := intervalSplitExact(interval, sizes) + if err != nil { + return err + } + + for i, child := range children { + childInterval := childIntervals[i] + err := rc.manager.stageInterval(stagingArea, child, childInterval) + if err != nil { + return err + } + + err = rc.propagateInterval(stagingArea, child) + if err != nil { + return err + } + } + + return nil +} diff --git a/domain/consensus/processes/reachabilitymanager/stage.go b/domain/consensus/processes/reachabilitymanager/stage.go new file mode 100644 index 0000000..d55d487 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/stage.go @@ -0,0 +1,60 @@ +package reachabilitymanager + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (rt *reachabilityManager) stageData(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, data model.ReachabilityData) { + rt.reachabilityDataStore.StageReachabilityData(stagingArea, blockHash, data) +} + +func (rt *reachabilityManager) stageFutureCoveringSet(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, set model.FutureCoveringTreeNodeSet) error { + data, err := rt.reachabilityDataForInsertion(stagingArea, blockHash) + if err != nil { + return err + } + + data.SetFutureCoveringSet(set) + + rt.reachabilityDataStore.StageReachabilityData(stagingArea, blockHash, data) + return nil +} + +func (rt *reachabilityManager) stageReindexRoot(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) { + rt.reachabilityDataStore.StageReachabilityReindexRoot(stagingArea, blockHash) +} + +func (rt *reachabilityManager) stageAddChild(stagingArea *model.StagingArea, node, child *externalapi.DomainHash) error { + nodeData, err := rt.reachabilityDataForInsertion(stagingArea, node) + if err != nil { + return err + } + + nodeData.AddChild(child) + rt.stageData(stagingArea, node, nodeData) + + return nil +} + +func (rt *reachabilityManager) stageParent(stagingArea *model.StagingArea, node, parent *externalapi.DomainHash) error { + nodeData, err := rt.reachabilityDataForInsertion(stagingArea, node) + if err != nil { + return err + } + nodeData.SetParent(parent) + rt.stageData(stagingArea, node, nodeData) + + return nil +} + +func (rt *reachabilityManager) stageInterval(stagingArea *model.StagingArea, node *externalapi.DomainHash, interval *model.ReachabilityInterval) error { + nodeData, err := rt.reachabilityDataForInsertion(stagingArea, node) + if err != nil { + return err + } + nodeData.SetInterval(interval) + rt.stageData(stagingArea, node, nodeData) + + return nil +} diff --git 
a/domain/consensus/processes/reachabilitymanager/test_reachabilitymanager.go b/domain/consensus/processes/reachabilitymanager/test_reachabilitymanager.go new file mode 100644 index 0000000..89ca913 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/test_reachabilitymanager.go @@ -0,0 +1,40 @@ +package reachabilitymanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" +) + +type testReachabilityManager struct { + *reachabilityManager +} + +func (t *testReachabilityManager) ReachabilityReindexSlack() uint64 { + return t.reachabilityManager.reindexSlack +} + +func (t *testReachabilityManager) SetReachabilityReindexSlack(reindexSlack uint64) { + t.reachabilityManager.reindexSlack = reindexSlack +} + +func (t *testReachabilityManager) SetReachabilityReindexWindow(reindexWindow uint64) { + t.reachabilityManager.reindexWindow = reindexWindow +} + +func (t *testReachabilityManager) ValidateIntervals(root *externalapi.DomainHash) error { + stagingArea := model.NewStagingArea() + + return t.reachabilityManager.validateIntervals(stagingArea, root) +} + +func (t *testReachabilityManager) GetAllNodes(root *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + stagingArea := model.NewStagingArea() + + return t.reachabilityManager.getAllNodes(stagingArea, root) +} + +// NewTestReachabilityManager creates an instance of a TestReachabilityManager +func NewTestReachabilityManager(manager model.ReachabilityManager) testapi.TestReachabilityManager { + return &testReachabilityManager{reachabilityManager: manager.(*reachabilityManager)} +} diff --git a/domain/consensus/processes/reachabilitymanager/tree.go b/domain/consensus/processes/reachabilitymanager/tree.go new file mode 100644 index 0000000..acd4836 --- /dev/null +++ b/domain/consensus/processes/reachabilitymanager/tree.go @@ -0,0 
+1,550 @@ +package reachabilitymanager + +import ( + "math" + "strings" + "time" + + "github.com/spectre-project/spectred/domain/consensus/utils/reachabilitydata" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/pkg/errors" +) + +func newReachabilityTreeData() model.ReachabilityData { + // Please see the comment above model.ReachabilityTreeNode to understand why + // we use these initial values. + interval := newReachabilityInterval(1, math.MaxUint64-1) + data := reachabilitydata.EmptyReachabilityData() + data.SetInterval(interval) + + return data +} + +/* + +Interval helper functions + +*/ + +func (rt *reachabilityManager) intervalRangeForChildAllocation(stagingArea *model.StagingArea, + node *externalapi.DomainHash) (*model.ReachabilityInterval, error) { + + interval, err := rt.interval(stagingArea, node) + if err != nil { + return nil, err + } + + // We subtract 1 from the end of the range to prevent the node from allocating + // the entire interval to its child, so its interval would *strictly* contain the interval of its child. 
+ return newReachabilityInterval(interval.Start, interval.End-1), nil +} + +func (rt *reachabilityManager) remainingIntervalBefore(stagingArea *model.StagingArea, node *externalapi.DomainHash) (*model.ReachabilityInterval, error) { + childrenRange, err := rt.intervalRangeForChildAllocation(stagingArea, node) + if err != nil { + return nil, err + } + + children, err := rt.children(stagingArea, node) + if err != nil { + return nil, err + } + + if len(children) == 0 { + return childrenRange, nil + } + + firstChildInterval, err := rt.interval(stagingArea, children[0]) + if err != nil { + return nil, err + } + + return newReachabilityInterval(childrenRange.Start, firstChildInterval.Start-1), nil +} + +func (rt *reachabilityManager) remainingIntervalAfter(stagingArea *model.StagingArea, node *externalapi.DomainHash) (*model.ReachabilityInterval, error) { + childrenRange, err := rt.intervalRangeForChildAllocation(stagingArea, node) + if err != nil { + return nil, err + } + + children, err := rt.children(stagingArea, node) + if err != nil { + return nil, err + } + + if len(children) == 0 { + return childrenRange, nil + } + + lastChildInterval, err := rt.interval(stagingArea, children[len(children)-1]) + if err != nil { + return nil, err + } + + return newReachabilityInterval(lastChildInterval.End+1, childrenRange.End), nil +} + +func (rt *reachabilityManager) remainingSlackBefore(stagingArea *model.StagingArea, node *externalapi.DomainHash) (uint64, error) { + interval, err := rt.remainingIntervalBefore(stagingArea, node) + if err != nil { + return 0, err + } + + return intervalSize(interval), nil +} + +func (rt *reachabilityManager) remainingSlackAfter(stagingArea *model.StagingArea, node *externalapi.DomainHash) (uint64, error) { + interval, err := rt.remainingIntervalAfter(stagingArea, node) + if err != nil { + return 0, err + } + + return intervalSize(interval), nil +} + +func (rt *reachabilityManager) hasSlackIntervalBefore(stagingArea *model.StagingArea, node 
*externalapi.DomainHash) (bool, error) { + interval, err := rt.remainingIntervalBefore(stagingArea, node) + if err != nil { + return false, err + } + + return intervalSize(interval) > 0, nil +} + +func (rt *reachabilityManager) hasSlackIntervalAfter(stagingArea *model.StagingArea, node *externalapi.DomainHash) (bool, error) { + interval, err := rt.remainingIntervalAfter(stagingArea, node) + if err != nil { + return false, err + } + + return intervalSize(interval) > 0, nil +} + +/* + +ReachabilityManager API functions + +*/ + +// IsReachabilityTreeAncestorOf checks if this node is a reachability tree ancestor +// of the other node. Note that we use the graph theory convention +// here which defines that node is also an ancestor of itself. +func (rt *reachabilityManager) IsReachabilityTreeAncestorOf(stagingArea *model.StagingArea, node, other *externalapi.DomainHash) (bool, error) { + nodeInterval, err := rt.interval(stagingArea, node) + if err != nil { + return false, err + } + + otherInterval, err := rt.interval(stagingArea, other) + if err != nil { + return false, err + } + + return intervalContains(nodeInterval, otherInterval), nil +} + +// FindNextAncestor finds the reachability tree child +// of 'ancestor' which is also an ancestor of 'descendant'. +func (rt *reachabilityManager) FindNextAncestor(stagingArea *model.StagingArea, + descendant, ancestor *externalapi.DomainHash) (*externalapi.DomainHash, error) { + + if ancestor.Equal(descendant) { + return nil, errors.Errorf("ancestor is equal to descendant") + } + + childrenOfAncestor, err := rt.children(stagingArea, ancestor) + if err != nil { + return nil, err + } + + nextAncestor, ok := rt.findAncestorOfNode(stagingArea, childrenOfAncestor, descendant) + if !ok { + return nil, errors.Errorf("ancestor is not an ancestor of descendant") + } + + return nextAncestor, nil +} + +// String returns a string representation of a reachability tree node +// and its children. 
+func (rt *reachabilityManager) String(stagingArea *model.StagingArea, node *externalapi.DomainHash) (string, error) { + queue := []*externalapi.DomainHash{node} + nodeInterval, err := rt.interval(stagingArea, node) + if err != nil { + return "", err + } + + lines := []string{nodeInterval.String()} + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + children, err := rt.children(stagingArea, current) + if err != nil { + return "", err + } + + if len(children) == 0 { + continue + } + + line := "" + for _, child := range children { + childInterval, err := rt.interval(stagingArea, child) + if err != nil { + return "", err + } + + line += childInterval.String() + queue = append(queue, child) + } + lines = append([]string{line}, lines...) + } + return strings.Join(lines, "\n"), nil +} + +/* + +Tree helper functions + +*/ + +func (rt *reachabilityManager) isStrictAncestorOf(stagingArea *model.StagingArea, node, other *externalapi.DomainHash) ( + bool, error) { + + if node.Equal(other) { + return false, nil + } + + return rt.IsReachabilityTreeAncestorOf(stagingArea, node, other) +} + +// findCommonAncestor finds the most recent reachability +// tree ancestor common to both node and the given reindex root. Note +// that we assume that almost always the chain between the reindex root +// and the common ancestor is longer than the chain between node and the +// common ancestor. 
+func (rt *reachabilityManager) findCommonAncestor(stagingArea *model.StagingArea, + node, root *externalapi.DomainHash) (*externalapi.DomainHash, error) { + + current := node + for { + isAncestorOf, err := rt.IsReachabilityTreeAncestorOf(stagingArea, current, root) + if err != nil { + return nil, err + } + + if isAncestorOf { + return current, nil + } + + current, err = rt.parent(stagingArea, current) + if err != nil { + return nil, err + } + } +} + +// splitChildren splits `node` children into two slices: the nodes that are before +// `pivot` and the nodes that are after. +func (rt *reachabilityManager) splitChildren(stagingArea *model.StagingArea, node, pivot *externalapi.DomainHash) ( + nodesBeforePivot, nodesAfterPivot []*externalapi.DomainHash, err error) { + + children, err := rt.children(stagingArea, node) + if err != nil { + return nil, nil, err + } + + for i, child := range children { + if child.Equal(pivot) { + return children[:i], children[i+1:], nil + } + } + return nil, nil, errors.Errorf("pivot not a pivot of node") +} + +/* + +Internal reachabilityManager API + +*/ + +// addChild adds child to this tree node. If this node has no +// remaining interval to allocate, a reindexing is triggered. When a reindexing +// is triggered, the reindex root point is used within the +// reindex algorithm's logic +func (rt *reachabilityManager) addChild(stagingArea *model.StagingArea, node, child, reindexRoot *externalapi.DomainHash) error { + remaining, err := rt.remainingIntervalAfter(stagingArea, node) + if err != nil { + return err + } + + // Set the parent-child relationship + err = rt.stageAddChild(stagingArea, node, child) + if err != nil { + return err + } + + err = rt.stageParent(stagingArea, child, node) + if err != nil { + return err + } + + // No allocation space left at parent -- reindex + if intervalSize(remaining) == 0 { + + // Initially set the child's interval to the empty remaining interval. 
+ // This is done since in some cases, the underlying algorithm will + // allocate space around this point and call intervalIncreaseEnd or + // intervalDecreaseStart making for intervalSize > 0 + err = rt.stageInterval(stagingArea, child, remaining) + if err != nil { + return err + } + + rc := newReindexContext(rt) + + reindexStartTime := time.Now() + err := rc.reindexIntervals(stagingArea, child, reindexRoot) + if err != nil { + return err + } + + reindexTimeElapsed := time.Since(reindexStartTime) + log.Tracef("Reachability reindex triggered for "+ + "block %s. Took %dms.", + node, reindexTimeElapsed.Milliseconds()) + + return nil + } + + // Allocate from the remaining space + allocated, _, err := intervalSplitInHalf(remaining) + if err != nil { + return err + } + + return rt.stageInterval(stagingArea, child, allocated) +} + +func (rt *reachabilityManager) updateReindexRoot(stagingArea *model.StagingArea, + selectedTip *externalapi.DomainHash) error { + + currentReindexRoot, err := rt.reindexRoot(stagingArea) + if err != nil { + return err + } + + // First, find the new root + reindexRootAncestor, newReindexRoot, err := rt.findNextReindexRoot(stagingArea, currentReindexRoot, selectedTip) + if err != nil { + return err + } + + // No update to root, return + if currentReindexRoot.Equal(newReindexRoot) { + return nil + } + + rc := newReindexContext(rt) + + if !newReindexRoot.Equal(reindexRootAncestor) { + log.Tracef("Concentrating the intervals towards the new reindex root") + // Iterate from reindexRootAncestor towards newReindexRoot + for { + chosenChild, err := rt.FindNextAncestor(stagingArea, selectedTip, reindexRootAncestor) + if err != nil { + return err + } + + isFinalReindexRoot := chosenChild.Equal(newReindexRoot) + + // Concentrate interval from current ancestor to its chosen child + err = rc.concentrateInterval(stagingArea, reindexRootAncestor, chosenChild, isFinalReindexRoot) + if err != nil { + return err + } + + if isFinalReindexRoot { + break + } + + 
reindexRootAncestor = chosenChild + } + } else { + log.Tracef("newReindexRoot is the same as reindexRootAncestor. Skipping concentration...") + } + + // Update reindex root data store + rt.stageReindexRoot(stagingArea, newReindexRoot) + log.Tracef("Updated the reindex root to %s", newReindexRoot) + return nil +} + +// findNextReindexRoot finds the new reindex root based on the current one and the new selected tip. +// The function also returns the common ancestor between the current and new reindex roots (possibly current root itself). +// This ancestor should be used as a starting point for concentrating the interval towards the new root. +func (rt *reachabilityManager) findNextReindexRoot(stagingArea *model.StagingArea, currentReindexRoot, + selectedTip *externalapi.DomainHash) (reindexRootAncestor, newReindexRoot *externalapi.DomainHash, err error) { + + reindexRootAncestor = currentReindexRoot + newReindexRoot = currentReindexRoot + + selectedTipGHOSTDAGData, err := rt.ghostdagDataStore.Get(rt.databaseContext, stagingArea, selectedTip, false) + if err != nil { + return nil, nil, err + } + + isCurrentAncestorOfTip, err := rt.IsReachabilityTreeAncestorOf(stagingArea, currentReindexRoot, selectedTip) + if err != nil { + return nil, nil, err + } + + // Test if current root is ancestor of selected tip - if not, this is a reorg case + if !isCurrentAncestorOfTip { + currentRootGHOSTDAGData, err := rt.ghostdagDataStore.Get(rt.databaseContext, stagingArea, currentReindexRoot, false) + if err != nil { + return nil, nil, err + } + + // We have reindex root out of selected tip chain, however we switch chains only after a sufficient + // threshold of reindexSlack score in order to address possible alternating reorg attacks. + // The reindexSlack constant is used as an heuristic for a large enough constant on the one hand, but + // one which will not harm performance on the other hand - given the available slack at the chain split point. 
+ // + // Note: In some cases the blue score selected tip can be lower than the current reindex root blue score. + // If that's the case we keep the reindex root unchanged. + if selectedTipGHOSTDAGData.BlueScore() < currentRootGHOSTDAGData.BlueScore() || + selectedTipGHOSTDAGData.BlueScore()-currentRootGHOSTDAGData.BlueScore() < rt.reindexSlack { + // Return current - this indicates no change + return currentReindexRoot, currentReindexRoot, nil + } + + // The common ancestor is where we should start concentrating the interval from + commonAncestor, err := rt.findCommonAncestor(stagingArea, selectedTip, currentReindexRoot) + if err != nil { + return nil, nil, err + } + + reindexRootAncestor = commonAncestor + newReindexRoot = commonAncestor + } + + // Iterate from ancestor towards selected tip until passing the reindexWindow threshold, + // for finding the new reindex root + for { + chosenChild, err := rt.FindNextAncestor(stagingArea, selectedTip, newReindexRoot) + if err != nil { + return nil, nil, err + } + + chosenChildGHOSTDAGData, err := rt.ghostdagDataStore.Get(rt.databaseContext, stagingArea, chosenChild, false) + if err != nil { + return nil, nil, err + } + + if selectedTipGHOSTDAGData.BlueScore() < chosenChildGHOSTDAGData.BlueScore() { + return nil, nil, errors.Errorf("chosen child %s has blue score greater "+ + "than %s although it's in its selected parent chain", chosenChild, selectedTip) + } + + if selectedTipGHOSTDAGData.BlueScore()-chosenChildGHOSTDAGData.BlueScore() < rt.reindexWindow { + break + } + + newReindexRoot = chosenChild + } + + return reindexRootAncestor, newReindexRoot, nil +} + +/* + +Test helper functions + +*/ + +// Helper function (for testing purposes) to validate that all tree intervals +// under a specified subtree root are allocated correctly and as expected +func (rt *reachabilityManager) validateIntervals(stagingArea *model.StagingArea, root *externalapi.DomainHash) error { + queue := []*externalapi.DomainHash{root} + for 
len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + + children, err := rt.children(stagingArea, current) + if err != nil { + return err + } + + if len(children) > 0 { + queue = append(queue, children...) + } + + currentInterval, err := rt.interval(stagingArea, current) + if err != nil { + return err + } + + if currentInterval.Start > currentInterval.End { + err := errors.Errorf("Interval allocation is empty") + return err + } + + for i, child := range children { + childInterval, err := rt.interval(stagingArea, child) + if err != nil { + return err + } + + if i > 0 { + siblingInterval, err := rt.interval(stagingArea, children[i-1]) + if err != nil { + return err + } + + if siblingInterval.End+1 != childInterval.Start { + err := errors.Errorf("Child intervals are expected be right after each other") + return err + } + } + + if childInterval.Start < currentInterval.Start { + err := errors.Errorf("Child interval to the left of parent") + return err + } + + if childInterval.End >= currentInterval.End { + err := errors.Errorf("Child interval to the right of parent") + return err + } + } + } + + return nil +} + +// Helper function (for testing purposes) to get all nodes under a specified subtree root +func (rt *reachabilityManager) getAllNodes(stagingArea *model.StagingArea, root *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + queue := []*externalapi.DomainHash{root} + nodes := []*externalapi.DomainHash{root} + for len(queue) > 0 { + var current *externalapi.DomainHash + current, queue = queue[0], queue[1:] + + children, err := rt.children(stagingArea, current) + if err != nil { + return nil, err + } + + if len(children) > 0 { + queue = append(queue, children...) + nodes = append(nodes, children...) 
+ } + } + + return nodes, nil +} diff --git a/domain/consensus/processes/syncmanager/antipast.go b/domain/consensus/processes/syncmanager/antipast.go new file mode 100644 index 0000000..de85dad --- /dev/null +++ b/domain/consensus/processes/syncmanager/antipast.go @@ -0,0 +1,195 @@ +package syncmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// antiPastHashesBetween returns the hashes of the blocks between the +// lowHash's antiPast and highHash's antiPast, or up to `maxBlocks`, if non-zero. +// The result excludes lowHash and includes highHash. If lowHash == highHash, returns nothing. +// If maxBlocks != 0 then maxBlocks MUST be >= MergeSetSizeLimit + 1 +// because it returns blocks with MergeSet granularity, +// so if MergeSet > maxBlocks, function will return nothing +func (sm *syncManager) antiPastHashesBetween(stagingArea *model.StagingArea, lowHash, highHash *externalapi.DomainHash, + maxBlocks uint64) (hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) { + + // Sanity check, for debugging only + if maxBlocks != 0 && maxBlocks < sm.mergeSetSizeLimit+1 { + return nil, nil, + errors.Errorf("maxBlocks (%d) MUST be >= MergeSetSizeLimit + 1 (%d)", maxBlocks, sm.mergeSetSizeLimit+1) + } + + // If lowHash is not in the selectedParentChain of highHash - SelectedChildIterator will fail. + // Therefore, we traverse down lowHash's selectedParentChain until we reach a block that is in + // highHash's selectedParentChain. 
+ // We keep originalLowHash to filter out blocks in it's past later down the road + originalLowHash := lowHash + lowHash, err = sm.findLowHashInHighHashSelectedParentChain(stagingArea, lowHash, highHash) + if err != nil { + return nil, nil, err + } + + lowBlockGHOSTDAGData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, lowHash, false) + if err != nil { + return nil, nil, err + } + highBlockGHOSTDAGData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, highHash, false) + if err != nil { + return nil, nil, err + } + if lowBlockGHOSTDAGData.BlueScore() > highBlockGHOSTDAGData.BlueScore() { + return nil, nil, errors.Errorf("low hash blueScore > high hash blueScore (%d > %d)", + lowBlockGHOSTDAGData.BlueScore(), highBlockGHOSTDAGData.BlueScore()) + } + + // Collect all hashes by concatenating the merge-sets of all blocks between highHash and lowHash + blockHashes := []*externalapi.DomainHash{} + iterator, err := sm.dagTraversalManager.SelectedChildIterator(stagingArea, highHash, lowHash, false) + if err != nil { + return nil, nil, err + } + defer iterator.Close() + for ok := iterator.First(); ok; ok = iterator.Next() { + current, err := iterator.Get() + if err != nil { + return nil, nil, err + } + // Both blue and red merge sets are topologically sorted, but not the concatenation of the two. + // We require the blocks to be topologically sorted. In addition, for optimal performance, + // we want the selectedParent to be first. + // Since the rest of the merge set is in the anticone of selectedParent, it's position in the list does not + // matter, even though it's blue score is the highest, we can arbitrarily decide it comes first. + // Therefore we first append the selectedParent, then the rest of blocks in ghostdag order. 
+ sortedMergeSet, err := sm.ghostdagManager.GetSortedMergeSet(stagingArea, current) + if err != nil { + return nil, nil, err + } + + if maxBlocks != 0 && uint64(len(blockHashes)+len(sortedMergeSet)) > maxBlocks { + break + } + + highHash = current + + // append to blockHashes all blocks in sortedMergeSet which are not in the past of originalLowHash + for _, blockHash := range sortedMergeSet { + isInPastOfOriginalLowHash, err := sm.dagTopologyManager.IsAncestorOf(stagingArea, blockHash, originalLowHash) + if err != nil { + return nil, nil, err + } + if isInPastOfOriginalLowHash { + continue + } + blockHashes = append(blockHashes, blockHash) + } + } + + // The process above doesn't return highHash, so include it explicitly, unless highHash == lowHash + if !lowHash.Equal(highHash) { + blockHashes = append(blockHashes, highHash) + } + + return blockHashes, highHash, nil +} + +func (sm *syncManager) findLowHashInHighHashSelectedParentChain(stagingArea *model.StagingArea, + lowHash *externalapi.DomainHash, highHash *externalapi.DomainHash) (*externalapi.DomainHash, error) { + for { + isInSelectedParentChain, err := sm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, lowHash, highHash) + if err != nil { + return nil, err + } + if isInSelectedParentChain { + break + } + lowBlockGHOSTDAGData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, lowHash, false) + if err != nil { + return nil, err + } + lowHash = lowBlockGHOSTDAGData.SelectedParent() + } + return lowHash, nil +} + +func (sm *syncManager) missingBlockBodyHashes(stagingArea *model.StagingArea, highHash *externalapi.DomainHash) ( + []*externalapi.DomainHash, error) { + + pruningPoint, err := sm.pruningStore.PruningPoint(sm.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + selectedChildIterator, err := sm.dagTraversalManager.SelectedChildIterator(stagingArea, highHash, pruningPoint, false) + if err != nil { + return nil, err + } + defer selectedChildIterator.Close() + 
+ lowHash := pruningPoint + foundHeaderOnlyBlock := false + for ok := selectedChildIterator.First(); ok; ok = selectedChildIterator.Next() { + selectedChild, err := selectedChildIterator.Get() + if err != nil { + return nil, err + } + blockStatus, err := sm.blockStatusStore.Get(sm.databaseContext, stagingArea, selectedChild) + if err != nil { + return nil, err + } + if blockStatus == externalapi.StatusHeaderOnly { + foundHeaderOnlyBlock = true + break + } + lowHash = selectedChild + } + if !foundHeaderOnlyBlock { + if lowHash.Equal(highHash) { + // Blocks can be inserted inside the DAG during IBD if those were requested before IBD started. + // In rare cases, all the IBD blocks might be already inserted by the time we reach this point. + // In these cases - return an empty list of blocks to sync + return []*externalapi.DomainHash{}, nil + } + // TODO: Once block children are fixed (https://github.com/spectre-project/spectred/issues/1499), + // this error should be returned rather the logged + log.Errorf("No header-only blocks between %s and %s", + lowHash, highHash) + } + + hashesBetween, _, err := sm.antiPastHashesBetween(stagingArea, lowHash, highHash, 0) + if err != nil { + return nil, err + } + + missingBlocks := make([]*externalapi.DomainHash, 0, len(hashesBetween)) + for _, blockHash := range hashesBetween { + blockStatus, err := sm.blockStatusStore.Get(sm.databaseContext, stagingArea, blockHash) + if err != nil { + return nil, err + } + if blockStatus == externalapi.StatusHeaderOnly { + missingBlocks = append(missingBlocks, blockHash) + } + } + + return missingBlocks, nil +} + +func (sm *syncManager) isHeaderOnlyBlock(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash) (bool, error) { + exists, err := sm.blockStatusStore.Exists(sm.databaseContext, stagingArea, blockHash) + if err != nil { + return false, err + } + + if !exists { + return false, nil + } + + status, err := sm.blockStatusStore.Get(sm.databaseContext, stagingArea, blockHash) + if 
err != nil { + return false, err + } + + return status == externalapi.StatusHeaderOnly, nil +} diff --git a/domain/consensus/processes/syncmanager/blocklocator.go b/domain/consensus/processes/syncmanager/blocklocator.go new file mode 100644 index 0000000..a2bcdd6 --- /dev/null +++ b/domain/consensus/processes/syncmanager/blocklocator.go @@ -0,0 +1,122 @@ +package syncmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// createBlockLocator creates a block locator for the passed high and low hashes. +// See the BlockLocator type comments for more details. +func (sm *syncManager) createBlockLocator(stagingArea *model.StagingArea, lowHash, highHash *externalapi.DomainHash, + limit uint32) (externalapi.BlockLocator, error) { + + lowBlockGHOSTDAGData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, lowHash, false) + if err != nil { + return nil, err + } + lowBlockBlueScore := lowBlockGHOSTDAGData.BlueScore() + + currentHash := highHash + step := uint64(1) + locator := make(externalapi.BlockLocator, 0) + // The loop will break if we reached the limit or if we got to lowHash. + for { + locator = append(locator, currentHash) + + // Stop if we've reached the limit (if it's set) + if limit > 0 && uint32(len(locator)) == limit { + break + } + + currentBlockGHOSTDAGData, err := sm.ghostdagDataStore.Get(sm.databaseContext, stagingArea, currentHash, false) + if err != nil { + return nil, err + } + currentBlockBlueScore := currentBlockGHOSTDAGData.BlueScore() + + // Nothing more to add once the low node has been added. 
+ if currentBlockBlueScore <= lowBlockBlueScore { + isCurrentHashInSelectedParentChainOfLowHash, err := + sm.dagTopologyManager.IsInSelectedParentChainOf(stagingArea, currentHash, lowHash) + if err != nil { + return nil, err + } + if !isCurrentHashInSelectedParentChainOfLowHash { + return nil, errors.Errorf("highHash and lowHash are " + + "not in the same selected parent chain.") + } + break + } + + // Calculate blueScore of previous node to include ensuring the + // final node is lowNode. + nextBlueScore := currentBlockBlueScore - step + if currentBlockBlueScore < step || nextBlueScore < lowBlockGHOSTDAGData.BlueScore() { + nextBlueScore = lowBlockGHOSTDAGData.BlueScore() + } + + // Walk down currentHash's selected parent chain to the appropriate ancestor + currentHash, err = sm.dagTraversalManager.LowestChainBlockAboveOrEqualToBlueScore(stagingArea, currentHash, nextBlueScore) + if err != nil { + return nil, err + } + + // Double the distance between included hashes + step *= 2 + } + + return locator, nil +} + +func (sm *syncManager) createHeadersSelectedChainBlockLocator(stagingArea *model.StagingArea, + lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) { + + if highHash.Equal(sm.genesisBlockHash) && lowHash.Equal(sm.genesisBlockHash) { + return externalapi.BlockLocator{sm.genesisBlockHash}, nil + } + + lowHashIndex, err := sm.headersSelectedChainStore.GetIndexByHash(sm.databaseContext, stagingArea, lowHash) + if err != nil { + if database.IsNotFoundError(err) { + return nil, errors.Wrapf(model.ErrBlockNotInSelectedParentChain, + "LowHash %s is not in selected parent chain", lowHash) + } + return nil, err + } + + highHashIndex, err := sm.headersSelectedChainStore.GetIndexByHash(sm.databaseContext, stagingArea, highHash) + if err != nil { + if database.IsNotFoundError(err) { + return nil, errors.Wrapf(model.ErrBlockNotInSelectedParentChain, + "LowHash %s is not in selected parent chain", lowHash) + } + return nil, err + } + + if 
highHashIndex < lowHashIndex { + return nil, errors.Errorf("cannot build block locator while highHash is lower than lowHash") + } + + locator := externalapi.BlockLocator{} + currentIndex := highHashIndex + step := uint64(1) + for currentIndex > lowHashIndex { + blockHash, err := sm.headersSelectedChainStore.GetHashByIndex(sm.databaseContext, stagingArea, currentIndex) + if err != nil { + return nil, err + } + + locator = append(locator, blockHash) + if currentIndex < step { + break + } + + currentIndex -= step + step *= 2 + } + + locator = append(locator, lowHash) + return locator, nil +} diff --git a/domain/consensus/processes/syncmanager/blocklocator_test.go b/domain/consensus/processes/syncmanager/blocklocator_test.go new file mode 100644 index 0000000..7dafc57 --- /dev/null +++ b/domain/consensus/processes/syncmanager/blocklocator_test.go @@ -0,0 +1,228 @@ +package syncmanager_test + +import ( + "strings" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func TestCreateBlockLocator(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, + "TestCreateBlockLocator") + if err != nil { + t.Fatalf("NewTestConsensus: %+v", err) + } + defer tearDown(false) + + stagingArea := model.NewStagingArea() + + chain := []*externalapi.DomainHash{consensusConfig.GenesisHash} + tipHash := consensusConfig.GenesisHash + for i := 0; i < 20; i++ { + var err error + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain = 
append(chain, tipHash) + } + + sideChainTipHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // Check a situation where low hash is not on the exact step blue score + locator, err := tc.SyncManager().CreateBlockLocator(stagingArea, consensusConfig.GenesisHash, tipHash, 0) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[20], + chain[19], + chain[17], + chain[13], + chain[5], + chain[0], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check a situation where low hash is on the exact step blue score + locator, err = tc.SyncManager().CreateBlockLocator(stagingArea, chain[5], tipHash, 0) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[20], + chain[19], + chain[17], + chain[13], + chain[5], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check block locator with limit + locator, err = tc.SyncManager().CreateBlockLocator(stagingArea, consensusConfig.GenesisHash, tipHash, 3) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[20], + chain[19], + chain[17], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check a block locator from genesis to genesis + locator, err = tc.SyncManager().CreateBlockLocator(stagingArea, consensusConfig.GenesisHash, consensusConfig.GenesisHash, 0) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + consensusConfig.GenesisHash, + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check a block locator from one block to the same block + locator, err = 
tc.SyncManager().CreateBlockLocator(stagingArea, chain[7], chain[7], 0) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[7], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check block locator with incompatible blocks + _, err = tc.SyncManager().CreateBlockLocator(stagingArea, sideChainTipHash, tipHash, 0) + expectedErr := "highHash and lowHash are not in the same selected parent chain" + if err == nil || !strings.Contains(err.Error(), expectedErr) { + t.Fatalf("expected error '%s' but got '%s'", expectedErr, err) + } + + // Check block locator with non exist blocks + _, err = tc.CreateBlockLocatorFromPruningPoint(&externalapi.DomainHash{}, 0) + expectedErr = "does not exist" + if err == nil || !strings.Contains(err.Error(), expectedErr) { + t.Fatalf("expected error '%s' but got '%s'", expectedErr, err) + } + }) +} + +func TestCreateHeadersSelectedChainBlockLocator(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, + "TestCreateHeadersSelectedChainBlockLocator") + if err != nil { + t.Fatalf("NewTestConsensus: %+v", err) + } + defer tearDown(false) + + chain := []*externalapi.DomainHash{consensusConfig.GenesisHash} + tipHash := consensusConfig.GenesisHash + for i := 0; i < 20; i++ { + var err error + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + chain = append(chain, tipHash) + } + + sideChainTipHash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + // Check a situation where low hash is not on the exact step + locator, err := tc.CreateHeadersSelectedChainBlockLocator(consensusConfig.GenesisHash, tipHash) 
+ if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[20], + chain[19], + chain[17], + chain[13], + chain[5], + chain[0], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check a situation where low hash is on the exact step + locator, err = tc.CreateHeadersSelectedChainBlockLocator(chain[5], tipHash) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[20], + chain[19], + chain[17], + chain[13], + chain[5], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check a block locator from genesis to genesis + locator, err = tc.CreateHeadersSelectedChainBlockLocator(consensusConfig.GenesisHash, consensusConfig.GenesisHash) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + consensusConfig.GenesisHash, + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check a block locator from one block to the same block + locator, err = tc.CreateHeadersSelectedChainBlockLocator(chain[7], chain[7]) + if err != nil { + t.Fatalf("CreateBlockLocatorFromPruningPoint: %+v", err) + } + + if !externalapi.HashesEqual(locator, []*externalapi.DomainHash{ + chain[7], + }) { + t.Fatalf("unexpected block locator %s", locator) + } + + // Check block locator with low hash higher than high hash + _, err = tc.CreateHeadersSelectedChainBlockLocator(chain[20], chain[19]) + expectedErr := "cannot build block locator while highHash is lower than lowHash" + if err == nil || !strings.Contains(err.Error(), expectedErr) { + t.Fatalf("expected error '%s' but got '%s'", expectedErr, err) + } + + // Check block locator with non chain blocks + _, err = tc.CreateHeadersSelectedChainBlockLocator(consensusConfig.GenesisHash, sideChainTipHash) + if 
!errors.Is(err, model.ErrBlockNotInSelectedParentChain) { + t.Fatalf("expected error '%s' but got '%s'", database.ErrNotFound, err) + } + }) +} diff --git a/domain/consensus/processes/syncmanager/log.go b/domain/consensus/processes/syncmanager/log.go new file mode 100644 index 0000000..20874e7 --- /dev/null +++ b/domain/consensus/processes/syncmanager/log.go @@ -0,0 +1,7 @@ +package syncmanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("SYNC") diff --git a/domain/consensus/processes/syncmanager/syncinfo.go b/domain/consensus/processes/syncmanager/syncinfo.go new file mode 100644 index 0000000..a658eb0 --- /dev/null +++ b/domain/consensus/processes/syncmanager/syncinfo.go @@ -0,0 +1,24 @@ +package syncmanager + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (sm *syncManager) syncInfo(stagingArea *model.StagingArea) (*externalapi.SyncInfo, error) { + headerCount := sm.getHeaderCount(stagingArea) + blockCount := sm.getBlockCount(stagingArea) + + return &externalapi.SyncInfo{ + HeaderCount: headerCount, + BlockCount: blockCount, + }, nil +} + +func (sm *syncManager) getHeaderCount(stagingArea *model.StagingArea) uint64 { + return sm.blockHeaderStore.Count(stagingArea) +} + +func (sm *syncManager) getBlockCount(stagingArea *model.StagingArea) uint64 { + return sm.blockStore.Count(stagingArea) +} diff --git a/domain/consensus/processes/syncmanager/syncmanager.go b/domain/consensus/processes/syncmanager/syncmanager.go new file mode 100644 index 0000000..5b1b442 --- /dev/null +++ b/domain/consensus/processes/syncmanager/syncmanager.go @@ -0,0 +1,118 @@ +package syncmanager + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + 
"github.com/spectre-project/spectred/infrastructure/logger" +) + +type syncManager struct { + databaseContext model.DBReader + genesisBlockHash *externalapi.DomainHash + + dagTraversalManager model.DAGTraversalManager + dagTopologyManager model.DAGTopologyManager + ghostdagManager model.GHOSTDAGManager + pruningManager model.PruningManager + + ghostdagDataStore model.GHOSTDAGDataStore + blockStatusStore model.BlockStatusStore + blockHeaderStore model.BlockHeaderStore + blockStore model.BlockStore + pruningStore model.PruningStore + headersSelectedChainStore model.HeadersSelectedChainStore + + mergeSetSizeLimit uint64 +} + +// New instantiates a new SyncManager +func New( + databaseContext model.DBReader, + genesisBlockHash *externalapi.DomainHash, + mergeSetSizeLimit uint64, + dagTraversalManager model.DAGTraversalManager, + dagTopologyManager model.DAGTopologyManager, + ghostdagManager model.GHOSTDAGManager, + pruningManager model.PruningManager, + + ghostdagDataStore model.GHOSTDAGDataStore, + blockStatusStore model.BlockStatusStore, + blockHeaderStore model.BlockHeaderStore, + blockStore model.BlockStore, + pruningStore model.PruningStore, + headersSelectedChainStore model.HeadersSelectedChainStore) model.SyncManager { + + return &syncManager{ + databaseContext: databaseContext, + genesisBlockHash: genesisBlockHash, + + dagTraversalManager: dagTraversalManager, + dagTopologyManager: dagTopologyManager, + ghostdagManager: ghostdagManager, + pruningManager: pruningManager, + headersSelectedChainStore: headersSelectedChainStore, + + ghostdagDataStore: ghostdagDataStore, + blockStatusStore: blockStatusStore, + blockHeaderStore: blockHeaderStore, + blockStore: blockStore, + pruningStore: pruningStore, + } +} + +func (sm *syncManager) GetHashesBetween(stagingArea *model.StagingArea, lowHash, highHash *externalapi.DomainHash, + maxBlocks uint64) (hashes []*externalapi.DomainHash, actualHighHash *externalapi.DomainHash, err error) { + + onEnd := 
logger.LogAndMeasureExecutionTime(log, "GetHashesBetween") + defer onEnd() + + return sm.antiPastHashesBetween(stagingArea, lowHash, highHash, maxBlocks) +} + +func (sm *syncManager) GetAnticone(stagingArea *model.StagingArea, blockHash, contextHash *externalapi.DomainHash, maxBlocks uint64) (hashes []*externalapi.DomainHash, err error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "GetAnticone") + defer onEnd() + isContextAncestorOfBlock, err := sm.dagTopologyManager.IsAncestorOf(stagingArea, contextHash, blockHash) + if err != nil { + return nil, err + } + if isContextAncestorOfBlock { + return nil, errors.Errorf("expected block %s to not be in future of %s", + blockHash, + contextHash) + } + return sm.dagTraversalManager.AnticoneFromBlocks(stagingArea, []*externalapi.DomainHash{contextHash}, blockHash, maxBlocks) +} + +func (sm *syncManager) GetMissingBlockBodyHashes(stagingArea *model.StagingArea, highHash *externalapi.DomainHash) ([]*externalapi.DomainHash, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "GetMissingBlockBodyHashes") + defer onEnd() + + return sm.missingBlockBodyHashes(stagingArea, highHash) +} + +func (sm *syncManager) CreateBlockLocator(stagingArea *model.StagingArea, + lowHash, highHash *externalapi.DomainHash, limit uint32) (externalapi.BlockLocator, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "CreateBlockLocatorFromPruningPoint") + defer onEnd() + + return sm.createBlockLocator(stagingArea, lowHash, highHash, limit) +} + +func (sm *syncManager) CreateHeadersSelectedChainBlockLocator(stagingArea *model.StagingArea, + lowHash, highHash *externalapi.DomainHash) (externalapi.BlockLocator, error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, "CreateHeadersSelectedChainBlockLocator") + defer onEnd() + + return sm.createHeadersSelectedChainBlockLocator(stagingArea, lowHash, highHash) +} + +func (sm *syncManager) GetSyncInfo(stagingArea *model.StagingArea) (*externalapi.SyncInfo, error) { + onEnd := 
logger.LogAndMeasureExecutionTime(log, "GetSyncInfo") + defer onEnd() + + return sm.syncInfo(stagingArea) +} diff --git a/domain/consensus/processes/syncmanager/syncmanager_test.go b/domain/consensus/processes/syncmanager/syncmanager_test.go new file mode 100644 index 0000000..a0b3f32 --- /dev/null +++ b/domain/consensus/processes/syncmanager/syncmanager_test.go @@ -0,0 +1,81 @@ +package syncmanager_test + +import ( + "math" + "reflect" + "sort" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" +) + +func TestSyncManager_GetHashesBetween(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + stagingArea := model.NewStagingArea() + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestSyncManager_GetHashesBetween") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + // Create a DAG with the following structure: + // merging block + // / | \ + // split1 split2 split3 + // \ | / + // merging block + // / | \ + // split1 split2 split3 + // \ | / + // etc. 
+ expectedOrder := make([]*externalapi.DomainHash, 0, 40) + mergingBlock := consensusConfig.GenesisHash + for i := 0; i < 10; i++ { + splitBlocks := make([]*externalapi.DomainHash, 0, 3) + for j := 0; j < 3; j++ { + splitBlock, _, err := tc.AddBlock([]*externalapi.DomainHash{mergingBlock}, nil, nil) + if err != nil { + t.Fatalf("Failed adding block: %v", err) + } + splitBlocks = append(splitBlocks, splitBlock) + } + + sort.Sort(sort.Reverse(testutils.NewTestGhostDAGSorter(stagingArea, splitBlocks, tc, t))) + restOfSplitBlocks, selectedParent := splitBlocks[:len(splitBlocks)-1], splitBlocks[len(splitBlocks)-1] + expectedOrder = append(expectedOrder, selectedParent) + expectedOrder = append(expectedOrder, restOfSplitBlocks...) + + mergingBlock, _, err = tc.AddBlock(splitBlocks, nil, nil) + if err != nil { + t.Fatalf("Failed adding block: %v", err) + } + expectedOrder = append(expectedOrder, mergingBlock) + } + + for i, blockHash := range expectedOrder { + empty, _, err := tc.SyncManager().GetHashesBetween(stagingArea, blockHash, blockHash, math.MaxUint64) + if err != nil { + t.Fatalf("TestSyncManager_GetHashesBetween failed returning 0 hashes on the %d'th block: %v", i, err) + } + if len(empty) != 0 { + t.Fatalf("Expected lowHash=highHash to return empty on the %d'th block, instead found: %v", i, empty) + } + } + + actualOrder, _, err := tc.SyncManager().GetHashesBetween( + stagingArea, consensusConfig.GenesisHash, expectedOrder[len(expectedOrder)-1], math.MaxUint64) + if err != nil { + t.Fatalf("TestSyncManager_GetHashesBetween failed returning actualOrder: %v", err) + } + + if !reflect.DeepEqual(actualOrder, expectedOrder) { + t.Fatalf("TestSyncManager_GetHashesBetween expected: \n%s\nactual:\n%s\n", expectedOrder, actualOrder) + } + }) +} diff --git a/domain/consensus/processes/transactionvalidator/mass.go b/domain/consensus/processes/transactionvalidator/mass.go new file mode 100644 index 0000000..3271c6c --- /dev/null +++ 
b/domain/consensus/processes/transactionvalidator/mass.go @@ -0,0 +1,13 @@ +package transactionvalidator + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// PopulateMass calculates and populates the mass of the given transaction +func (v *transactionValidator) PopulateMass(transaction *externalapi.DomainTransaction) { + if transaction.Mass != 0 { + return + } + transaction.Mass = v.txMassCalculator.CalculateTransactionMass(transaction) +} diff --git a/domain/consensus/processes/transactionvalidator/test_transaction_validator.go b/domain/consensus/processes/transactionvalidator/test_transaction_validator.go new file mode 100644 index 0000000..4051267 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/test_transaction_validator.go @@ -0,0 +1,24 @@ +package transactionvalidator + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +type testTransactionValidator struct { + *transactionValidator +} + +// NewTestTransactionValidator creates an instance of a TestTransactionValidator +func NewTestTransactionValidator(baseTransactionValidator model.TransactionValidator) testapi.TestTransactionValidator { + return &testTransactionValidator{transactionValidator: baseTransactionValidator.(*transactionValidator)} +} + +func (tbv *testTransactionValidator) SigCache() *txscript.SigCache { + return tbv.sigCache +} + +func (tbv *testTransactionValidator) SetSigCache(sigCache *txscript.SigCache) { + tbv.sigCache = sigCache +} diff --git a/domain/consensus/processes/transactionvalidator/transaction_in_context.go b/domain/consensus/processes/transactionvalidator/transaction_in_context.go new file mode 100644 index 0000000..484c265 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/transaction_in_context.go @@ -0,0 +1,361 @@ +package 
transactionvalidator + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +// IsFinalizedTransaction determines whether or not a transaction is finalized. +func (v *transactionValidator) IsFinalizedTransaction(tx *externalapi.DomainTransaction, blockDAAScore uint64, blockTime int64) bool { + // Lock time of zero means the transaction is finalized. + lockTime := tx.LockTime + if lockTime == 0 { + return true + } + + // The lock time field of a transaction is either a block DAA score at + // which the transaction is finalized or a timestamp depending on if the + // value is before the constants.LockTimeThreshold. When it is under the + // threshold it is a DAA score. + blockTimeOrBlueScore := uint64(0) + if lockTime < constants.LockTimeThreshold { + blockTimeOrBlueScore = blockDAAScore + } else { + blockTimeOrBlueScore = uint64(blockTime) + } + if lockTime < blockTimeOrBlueScore { + return true + } + + // At this point, the transaction's lock time hasn't occurred yet, but + // the transaction might still be finalized if the sequence number + // for all transaction inputs is maxed out. 
+ for _, input := range tx.Inputs { + if input.Sequence != math.MaxUint64 { + return false + } + } + return true +} + +// ValidateTransactionInContextIgnoringUTXO validates the transaction with consensus context but ignoring UTXO +func (v *transactionValidator) ValidateTransactionInContextIgnoringUTXO(stagingArea *model.StagingArea, tx *externalapi.DomainTransaction, + povBlockHash *externalapi.DomainHash, povBlockPastMedianTime int64) error { + + povBlockDAAScore, err := v.daaBlocksStore.DAAScore(v.databaseContext, stagingArea, povBlockHash) + if err != nil { + return err + } + if isFinalized := v.IsFinalizedTransaction(tx, povBlockDAAScore, povBlockPastMedianTime); !isFinalized { + return errors.Wrapf(ruleerrors.ErrUnfinalizedTx, "unfinalized transaction %v", tx) + } + + return nil +} + +// ValidateTransactionInContextAndPopulateFee validates the transaction against its referenced UTXO, and +// populates its fee field. +// +// Note: if the function fails, there's no guarantee that the transaction fee field will remain unaffected. 
+func (v *transactionValidator) ValidateTransactionInContextAndPopulateFee(stagingArea *model.StagingArea, + tx *externalapi.DomainTransaction, povBlockHash *externalapi.DomainHash) error { + + err := v.checkTransactionCoinbaseMaturity(stagingArea, povBlockHash, tx) + if err != nil { + return err + } + + totalSompiIn, err := v.checkTransactionInputAmounts(tx) + if err != nil { + return err + } + + totalSompiOut, err := v.checkTransactionOutputAmounts(tx, totalSompiIn) + if err != nil { + return err + } + + tx.Fee = totalSompiIn - totalSompiOut + + err = v.checkTransactionSequenceLock(stagingArea, povBlockHash, tx) + if err != nil { + return err + } + + err = v.validateTransactionSigOpCounts(tx) + if err != nil { + return err + } + + err = v.validateTransactionScripts(tx) + if err != nil { + return err + } + + return nil +} + +func (v *transactionValidator) checkTransactionCoinbaseMaturity(stagingArea *model.StagingArea, + povBlockHash *externalapi.DomainHash, tx *externalapi.DomainTransaction) error { + + povDAAScore, err := v.daaBlocksStore.DAAScore(v.databaseContext, stagingArea, povBlockHash) + if err != nil { + return err + } + + var missingOutpoints []*externalapi.DomainOutpoint + for _, input := range tx.Inputs { + utxoEntry := input.UTXOEntry + if utxoEntry == nil { + missingOutpoints = append(missingOutpoints, &input.PreviousOutpoint) + } else if utxoEntry.IsCoinbase() { + originDAAScore := utxoEntry.BlockDAAScore() + if originDAAScore+v.blockCoinbaseMaturity > povDAAScore { + return errors.Wrapf(ruleerrors.ErrImmatureSpend, "tried to spend coinbase "+ + "transaction output %s from DAA score %d "+ + "to DAA score %d before required maturity "+ + "of %d", input.PreviousOutpoint, + originDAAScore, povDAAScore, + v.blockCoinbaseMaturity) + } + } + } + if len(missingOutpoints) > 0 { + return ruleerrors.NewErrMissingTxOut(missingOutpoints) + } + + return nil +} + +func (v *transactionValidator) checkTransactionInputAmounts(tx *externalapi.DomainTransaction) 
(totalSompiIn uint64, err error) { + totalSompiIn = 0 + + var missingOutpoints []*externalapi.DomainOutpoint + for _, input := range tx.Inputs { + utxoEntry := input.UTXOEntry + if utxoEntry == nil { + missingOutpoints = append(missingOutpoints, &input.PreviousOutpoint) + continue + } + + // Ensure the transaction amounts are in range. Each of the + // output values of the input transactions must not be negative + // or more than the max allowed per transaction. All amounts in + // a transaction are in a unit value known as a sompi. One + // spectre is a quantity of sompi as defined by the + // SompiPerSpectre constant. + totalSompiIn, err = v.checkEntryAmounts(utxoEntry, totalSompiIn) + if err != nil { + return 0, err + } + } + + if len(missingOutpoints) > 0 { + return 0, ruleerrors.NewErrMissingTxOut(missingOutpoints) + } + + return totalSompiIn, nil +} + +func (v *transactionValidator) checkEntryAmounts(entry externalapi.UTXOEntry, totalSompiInBefore uint64) (totalSompiInAfter uint64, err error) { + // The total of all outputs must not be more than the max + // allowed per transaction. Also, we could potentially overflow + // the accumulator so check for overflow. + + originTxSompi := entry.Amount() + totalSompiInAfter = totalSompiInBefore + originTxSompi + if totalSompiInAfter < totalSompiInBefore || + totalSompiInAfter > constants.MaxSompi { + return 0, errors.Wrapf(ruleerrors.ErrBadTxOutValue, "total value of all transaction "+ + "inputs is %d which is higher than max "+ + "allowed value of %d", totalSompiInBefore, + constants.MaxSompi) + } + return totalSompiInAfter, nil +} + +func (v *transactionValidator) checkTransactionOutputAmounts(tx *externalapi.DomainTransaction, totalSompiIn uint64) (uint64, error) { + totalSompiOut := uint64(0) + // Calculate the total output amount for this transaction. It is safe + // to ignore overflow and out of range errors here because those error + // conditions would have already been caught by checkTransactionAmountRanges. 
+ for _, output := range tx.Outputs { + totalSompiOut += output.Value + } + + // Ensure the transaction does not spend more than its inputs. + if totalSompiIn < totalSompiOut { + return 0, errors.Wrapf(ruleerrors.ErrSpendTooHigh, "total value of all transaction inputs for "+ + "the transaction is %d which is less than the amount "+ + "spent of %d", totalSompiIn, totalSompiOut) + } + return totalSompiOut, nil +} + +func (v *transactionValidator) checkTransactionSequenceLock(stagingArea *model.StagingArea, + povBlockHash *externalapi.DomainHash, tx *externalapi.DomainTransaction) error { + + // A transaction can only be included within a block + // once the sequence locks of *all* its inputs are + // active. + sequenceLock, err := v.calcTxSequenceLockFromReferencedUTXOEntries(stagingArea, povBlockHash, tx) + if err != nil { + return err + } + + daaScore, err := v.daaBlocksStore.DAAScore(v.databaseContext, stagingArea, povBlockHash) + if err != nil { + return err + } + + if !v.sequenceLockActive(sequenceLock, daaScore) { + return errors.Wrapf(ruleerrors.ErrUnfinalizedTx, "block contains "+ + "transaction whose input sequence "+ + "locks are not met") + } + + return nil +} + +func (v *transactionValidator) validateTransactionScripts(tx *externalapi.DomainTransaction) error { + var missingOutpoints []*externalapi.DomainOutpoint + sighashReusedValues := &consensushashing.SighashReusedValues{} + + for i, input := range tx.Inputs { + // Create a new script engine for the script pair. 
+ sigScript := input.SignatureScript + utxoEntry := input.UTXOEntry + if utxoEntry == nil { + missingOutpoints = append(missingOutpoints, &input.PreviousOutpoint) + continue + } + + scriptPubKey := utxoEntry.ScriptPublicKey() + vm, err := txscript.NewEngine(scriptPubKey, tx, i, txscript.ScriptNoFlags, v.sigCache, v.sigCacheECDSA, sighashReusedValues) + if err != nil { + return errors.Wrapf(ruleerrors.ErrScriptMalformed, "failed to parse input "+ + "%d which references output %s - "+ + "%s (input script bytes %x, prev "+ + "output script bytes %x)", + i, + input.PreviousOutpoint, err, sigScript, scriptPubKey) + } + + // Execute the script pair. + if err := vm.Execute(); err != nil { + return errors.Wrapf(ruleerrors.ErrScriptValidation, "failed to validate input "+ + "%d which references output %s - "+ + "%s (input script bytes %x, prev output "+ + "script bytes %x)", + i, + input.PreviousOutpoint, err, sigScript, scriptPubKey) + } + } + if len(missingOutpoints) > 0 { + return ruleerrors.NewErrMissingTxOut(missingOutpoints) + } + return nil +} + +func (v *transactionValidator) calcTxSequenceLockFromReferencedUTXOEntries(stagingArea *model.StagingArea, + povBlockHash *externalapi.DomainHash, tx *externalapi.DomainTransaction) (*sequenceLock, error) { + + // A value of -1 represents a relative timelock value that will allow a transaction to be + //included in a block at any given DAA score. + sequenceLock := &sequenceLock{BlockDAAScore: -1} + + // Sequence locks don't apply to coinbase transactions Therefore, we + // return sequence lock values of -1 indicating that this transaction + // can be included within a block at any given DAA score. 
+ if transactionhelper.IsCoinBase(tx) { + return sequenceLock, nil + } + + var missingOutpoints []*externalapi.DomainOutpoint + for _, input := range tx.Inputs { + utxoEntry := input.UTXOEntry + if utxoEntry == nil { + missingOutpoints = append(missingOutpoints, &input.PreviousOutpoint) + continue + } + + inputDAAScore := utxoEntry.BlockDAAScore() + + // Given a sequence number, we apply the relative time lock + // mask in order to obtain the time lock delta required before + // this input can be spent. + sequenceNum := input.Sequence + relativeLock := int64(sequenceNum & constants.SequenceLockTimeMask) + + // Relative time locks are disabled for this input, so we can + // skip any further calculation. + if sequenceNum&constants.SequenceLockTimeDisabled == constants.SequenceLockTimeDisabled { + continue + } + // The relative lock-time for this input is expressed + // in blocks so we calculate the relative offset from + // the input's DAA score as its converted absolute + // lock-time. We subtract one from the relative lock in + // order to maintain the original lockTime semantics. + blockDAAScore := int64(inputDAAScore) + relativeLock - 1 + if blockDAAScore > sequenceLock.BlockDAAScore { + sequenceLock.BlockDAAScore = blockDAAScore + } + } + if len(missingOutpoints) > 0 { + return nil, ruleerrors.NewErrMissingTxOut(missingOutpoints) + } + + return sequenceLock, nil +} + +// sequenceLock represents the converted relative lock-time in +// absolute block-daa-score for a transaction input's relative lock-times. +// According to sequenceLock, after the referenced input has been confirmed +// within a block, a transaction spending that input can be included into a +// block either after the 'BlockDAAScore' has been reached. 
+type sequenceLock struct { + BlockDAAScore int64 +} + +// sequenceLockActive determines if a transaction's sequence locks have been +// met, meaning that all the inputs of a given transaction have reached a +// DAA score sufficient for their relative lock-time maturity. +func (v *transactionValidator) sequenceLockActive(sequenceLock *sequenceLock, blockDAAScore uint64) bool { + + // If (DAA score) relative-lock time has not yet + // reached, then the transaction is not yet mature according to its + // sequence locks. + if sequenceLock.BlockDAAScore >= int64(blockDAAScore) { + return false + } + + return true +} + +func (v *transactionValidator) validateTransactionSigOpCounts(tx *externalapi.DomainTransaction) error { + for i, input := range tx.Inputs { + utxoEntry := input.UTXOEntry + + // Count the precise number of signature operations in the + // referenced public key script. + sigScript := input.SignatureScript + isP2SH := txscript.IsPayToScriptHash(utxoEntry.ScriptPublicKey()) + sigOpCount := txscript.GetPreciseSigOpCount(sigScript, utxoEntry.ScriptPublicKey(), isP2SH) + + if sigOpCount != int(input.SigOpCount) { + return errors.Wrapf(ruleerrors.ErrWrongSigOpCount, + "input %d specifies SigOpCount %d while actual SigOpCount is %d", + i, input.SigOpCount, sigOpCount) + } + } + return nil +} diff --git a/domain/consensus/processes/transactionvalidator/transaction_in_context_test.go b/domain/consensus/processes/transactionvalidator/transaction_in_context_test.go new file mode 100644 index 0000000..d22e966 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/transaction_in_context_test.go @@ -0,0 +1,33 @@ +package transactionvalidator + +import ( + "testing" +) + +// TestSequenceLocksActive tests the SequenceLockActive function to ensure it +// works as expected in all possible combinations/scenarios. 
+func TestSequenceLocksActive(t *testing.T) { + tests := []struct { + seqLock sequenceLock + blockDAAScore uint64 + + want bool + }{ + // Block based sequence lock with equal block DAA score. + {seqLock: sequenceLock{1000}, blockDAAScore: 1001, want: true}, + + // Block based sequence lock with current DAA score below seq lock block DAA score. + {seqLock: sequenceLock{1000}, blockDAAScore: 90, want: false}, + + // Block based sequence lock at the same DAA score, so shouldn't yet be active. + {seqLock: sequenceLock{1000}, blockDAAScore: 1000, want: false}, + } + + validator := transactionValidator{} + for i, test := range tests { + got := validator.sequenceLockActive(&test.seqLock, test.blockDAAScore) + if got != test.want { + t.Fatalf("SequenceLockActive #%d got %v want %v", i, got, test.want) + } + } +} diff --git a/domain/consensus/processes/transactionvalidator/transaction_in_isolation.go b/domain/consensus/processes/transactionvalidator/transaction_in_isolation.go new file mode 100644 index 0000000..f05fb01 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/transaction_in_isolation.go @@ -0,0 +1,196 @@ +package transactionvalidator + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +// ValidateTransactionInIsolation validates the parts of the transaction that can be validated context-free +func (v *transactionValidator) ValidateTransactionInIsolation(tx *externalapi.DomainTransaction, povDAAScore uint64) error { + err := v.checkTransactionInputCount(tx) + if err != nil { + return err + } + err = v.checkTransactionAmountRanges(tx) + if err != nil { + return err + } + err = 
v.checkDuplicateTransactionInputs(tx) + if err != nil { + return err + } + err = v.checkCoinbaseInIsolation(tx) + if err != nil { + return err + } + err = v.checkGasInBuiltInOrNativeTransactions(tx) + if err != nil { + return err + } + err = v.checkSubnetworkRegistryTransaction(tx) + if err != nil { + return err + } + + err = v.checkNativeTransactionPayload(tx) + if err != nil { + return err + } + + // TODO: fill it with the node's subnetwork id. + err = v.checkTransactionSubnetwork(tx, nil) + if err != nil { + return err + } + + if tx.Version > constants.MaxTransactionVersion { + return errors.Wrapf(ruleerrors.ErrTransactionVersionIsUnknown, "validation failed: unknown transaction version. ") + } + + return nil +} + +func (v *transactionValidator) checkTransactionInputCount(tx *externalapi.DomainTransaction) error { + // A non-coinbase transaction must have at least one input. + if !transactionhelper.IsCoinBase(tx) && len(tx.Inputs) == 0 { + return errors.Wrapf(ruleerrors.ErrNoTxInputs, "transaction has no inputs") + } + return nil +} + +func (v *transactionValidator) checkTransactionAmountRanges(tx *externalapi.DomainTransaction) error { + // Ensure the transaction amounts are in range. Each transaction + // output must not be negative or more than the max allowed per + // transaction. Also, the total of all outputs must abide by the same + // restrictions. All amounts in a transaction are in a unit value known + // as a sompi. One spectre is a quantity of sompi as defined by the + // sompiPerSpectre constant. 
+ var totalSompi uint64 + for _, txOut := range tx.Outputs { + sompi := txOut.Value + if sompi == 0 { + return errors.Wrap(ruleerrors.ErrTxOutValueZero, "zero value outputs are forbidden") + } + + if sompi > constants.MaxSompi { + return errors.Wrapf(ruleerrors.ErrBadTxOutValue, "transaction output value of %d is "+ + "higher than max allowed value of %d", sompi, constants.MaxSompi) + } + + // Binary arithmetic guarantees that any overflow is detected and reported. + // This is impossible for Spectre, but perhaps possible if an alt increases + // the total money supply. + newTotalSompi := totalSompi + sompi + if newTotalSompi < totalSompi { + return errors.Wrapf(ruleerrors.ErrBadTxOutValue, "total value of all transaction "+ + "outputs exceeds max allowed value of %d", + constants.MaxSompi) + } + totalSompi = newTotalSompi + if totalSompi > constants.MaxSompi { + return errors.Wrapf(ruleerrors.ErrBadTxOutValue, "total value of all transaction "+ + "outputs is %d which is higher than max "+ + "allowed value of %d", totalSompi, + constants.MaxSompi) + } + } + + return nil +} + +func (v *transactionValidator) checkDuplicateTransactionInputs(tx *externalapi.DomainTransaction) error { + existingTxOut := make(map[externalapi.DomainOutpoint]struct{}) + for _, txIn := range tx.Inputs { + if _, exists := existingTxOut[txIn.PreviousOutpoint]; exists { + return errors.Wrapf(ruleerrors.ErrDuplicateTxInputs, "transaction "+ + "contains duplicate inputs") + } + existingTxOut[txIn.PreviousOutpoint] = struct{}{} + } + return nil +} + +func (v *transactionValidator) checkCoinbaseInIsolation(tx *externalapi.DomainTransaction) error { + if !transactionhelper.IsCoinBase(tx) { + return nil + } + + // Coinbase payload length must not exceed the max length. 
+ payloadLen := len(tx.Payload) + if uint64(payloadLen) > v.maxCoinbasePayloadLength { + return errors.Wrapf(ruleerrors.ErrBadCoinbasePayloadLen, "coinbase transaction payload length "+ + "of %d is out of range (max: %d)", + payloadLen, v.maxCoinbasePayloadLength) + } + + if len(tx.Inputs) != 0 { + return errors.Wrap(ruleerrors.ErrCoinbaseWithInputs, "coinbase has inputs") + } + + outputsLimit := uint64(v.ghostdagK) + 2 + if uint64(len(tx.Outputs)) > outputsLimit { + return errors.Wrapf(ruleerrors.ErrCoinbaseTooManyOutputs, "coinbase has too many outputs: got %d where the limit is %d", len(tx.Outputs), outputsLimit) + } + + for i, output := range tx.Outputs { + if len(output.ScriptPublicKey.Script) > int(v.coinbasePayloadScriptPublicKeyMaxLength) { + return errors.Wrapf(ruleerrors.ErrCoinbaseTooLongScriptPublicKey, "coinbase output %d has a too long script public key", i) + + } + } + + return nil +} + +func (v *transactionValidator) checkGasInBuiltInOrNativeTransactions(tx *externalapi.DomainTransaction) error { + // Transactions in native, registry and coinbase subnetworks must have Gas = 0 + if subnetworks.IsBuiltInOrNative(tx.SubnetworkID) && tx.Gas > 0 { + return errors.Wrapf(ruleerrors.ErrInvalidGas, "transaction in the native or "+ + "registry subnetworks has gas > 0 ") + } + return nil +} + +func (v *transactionValidator) checkSubnetworkRegistryTransaction(tx *externalapi.DomainTransaction) error { + if tx.SubnetworkID != subnetworks.SubnetworkIDRegistry { + return nil + } + + if len(tx.Payload) != 8 { + return errors.Wrapf(ruleerrors.ErrSubnetworkRegistry, "validation failed: subnetwork registry "+ + "tx has an invalid payload") + } + return nil +} + +func (v *transactionValidator) checkNativeTransactionPayload(tx *externalapi.DomainTransaction) error { + if tx.SubnetworkID == subnetworks.SubnetworkIDNative && len(tx.Payload) > 0 { + return errors.Wrapf(ruleerrors.ErrInvalidPayload, "transaction in the native subnetwork "+ + "includes a payload") + } + 
return nil +} + +func (v *transactionValidator) checkTransactionSubnetwork(tx *externalapi.DomainTransaction, + localNodeSubnetworkID *externalapi.DomainSubnetworkID) error { + if !v.enableNonNativeSubnetworks && tx.SubnetworkID != subnetworks.SubnetworkIDNative && + tx.SubnetworkID != subnetworks.SubnetworkIDCoinbase { + return errors.Wrapf(ruleerrors.ErrSubnetworksDisabled, "transaction has non native or coinbase "+ + "subnetwork ID") + } + + // If we are a partial node, only transactions on built in subnetworks + // or our own subnetwork may have a payload + isLocalNodeFull := localNodeSubnetworkID == nil + shouldTxBeFull := subnetworks.IsBuiltIn(tx.SubnetworkID) || tx.SubnetworkID.Equal(localNodeSubnetworkID) + if !isLocalNodeFull && !shouldTxBeFull && len(tx.Payload) > 0 { + return errors.Wrapf(ruleerrors.ErrInvalidPayload, + "transaction that was expected to be partial has a payload "+ + "with length > 0") + } + return nil +} diff --git a/domain/consensus/processes/transactionvalidator/transaction_in_isolation_test.go b/domain/consensus/processes/transactionvalidator/transaction_in_isolation_test.go new file mode 100644 index 0000000..546f501 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/transaction_in_isolation_test.go @@ -0,0 +1,160 @@ +package transactionvalidator_test + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +type txSubnetworkData struct { + subnetworkID externalapi.DomainSubnetworkID + gas uint64 + payload []byte +} + +func 
TestValidateTransactionInIsolationAndPopulateMass(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + cfg := *consensusConfig + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(&cfg, "TestValidateTransactionInIsolationAndPopulateMass") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + tests := []struct { + name string + numInputs uint32 + numOutputs uint32 + outputValue uint64 + nodeSubnetworkID externalapi.DomainSubnetworkID + txSubnetworkData *txSubnetworkData + extraModificationsFunc func(*externalapi.DomainTransaction) + expectedErr error + daaScore uint64 + }{ + {"good one", 1, 1, 1, subnetworks.SubnetworkIDNative, nil, nil, nil, 0}, + {"no inputs", 0, 1, 1, subnetworks.SubnetworkIDNative, nil, nil, ruleerrors.ErrNoTxInputs, 0}, + {"no outputs", 1, 0, 1, subnetworks.SubnetworkIDNative, nil, nil, nil, 0}, + {"too much sompi in one output", 1, 1, constants.MaxSompi + 1, + subnetworks.SubnetworkIDNative, + nil, + nil, + ruleerrors.ErrBadTxOutValue, 0}, + {"too much sompi before- valid now", 1, 1, 21e14 + 1, + subnetworks.SubnetworkIDNative, + nil, + nil, + nil, 0}, + {"too much sompi in one output - after hf", 1, 1, constants.MaxSompi + 1, + subnetworks.SubnetworkIDNative, + nil, + nil, + ruleerrors.ErrBadTxOutValue, 0}, + {"too much sompi in one output", 1, 1, constants.MaxSompi + 1, + subnetworks.SubnetworkIDNative, + nil, + nil, + ruleerrors.ErrBadTxOutValue, 0}, + {"duplicate inputs", 2, 1, 1, + subnetworks.SubnetworkIDNative, + nil, + func(tx *externalapi.DomainTransaction) { tx.Inputs[1].PreviousOutpoint.Index = 0 }, + ruleerrors.ErrDuplicateTxInputs, 0}, + {"1 input coinbase", + 1, + 1, + 1, + subnetworks.SubnetworkIDNative, + &txSubnetworkData{subnetworks.SubnetworkIDCoinbase, 0, nil}, + nil, + ruleerrors.ErrCoinbaseWithInputs, 0}, + {"no inputs coinbase", + 0, + 1, + 1, + subnetworks.SubnetworkIDNative, + 
&txSubnetworkData{subnetworks.SubnetworkIDCoinbase, 0, nil}, + nil, + nil, 0}, + {"too long payload coinbase", + 1, + 1, + 1, + subnetworks.SubnetworkIDNative, + &txSubnetworkData{subnetworks.SubnetworkIDCoinbase, 0, make([]byte, consensusConfig.MaxCoinbasePayloadLength+1)}, + nil, + ruleerrors.ErrBadCoinbasePayloadLen, 0}, + {"non-zero gas in Spectre", 1, 1, 1, + subnetworks.SubnetworkIDNative, + nil, + func(tx *externalapi.DomainTransaction) { + tx.Gas = 1 + }, + ruleerrors.ErrInvalidGas, 0}, + {"non-zero gas in subnetwork registry", 1, 1, 1, + subnetworks.SubnetworkIDRegistry, + &txSubnetworkData{subnetworks.SubnetworkIDRegistry, 1, []byte{}}, + nil, + ruleerrors.ErrInvalidGas, 0}, + {"non-zero payload in Spectre", 1, 1, 1, + subnetworks.SubnetworkIDNative, + nil, + func(tx *externalapi.DomainTransaction) { + tx.Payload = []byte{1} + }, + ruleerrors.ErrInvalidPayload, 0}, + } + + for _, test := range tests { + tx := createTxForTest(test.numInputs, test.numOutputs, test.outputValue, test.txSubnetworkData) + + if test.extraModificationsFunc != nil { + test.extraModificationsFunc(tx) + } + + err := tc.TransactionValidator().ValidateTransactionInIsolation(tx, test.daaScore) + if !errors.Is(err, test.expectedErr) { + t.Errorf("TestValidateTransactionInIsolationAndPopulateMass: '%s': unexpected error %+v", test.name, err) + } + } + }) +} + +func createTxForTest(numInputs uint32, numOutputs uint32, outputValue uint64, subnetworkData *txSubnetworkData) *externalapi.DomainTransaction { + txIns := []*externalapi.DomainTransactionInput{} + txOuts := []*externalapi.DomainTransactionOutput{} + + for i := uint32(0); i < numInputs; i++ { + txIns = append(txIns, &externalapi.DomainTransactionInput{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: i, + }, + SignatureScript: []byte{}, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + }) + } + + for i := uint32(0); i < numOutputs; i++ { + txOuts = 
append(txOuts, &externalapi.DomainTransactionOutput{ + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: []byte{}, Version: 0}, + Value: outputValue, + }) + } + + if subnetworkData != nil { + return transactionhelper.NewSubnetworkTransaction(constants.MaxTransactionVersion, txIns, txOuts, &subnetworkData.subnetworkID, subnetworkData.gas, subnetworkData.payload) + } + + return transactionhelper.NewNativeTransaction(constants.MaxTransactionVersion, txIns, txOuts) +} diff --git a/domain/consensus/processes/transactionvalidator/transactionvalidator.go b/domain/consensus/processes/transactionvalidator/transactionvalidator.go new file mode 100644 index 0000000..322a559 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/transactionvalidator.go @@ -0,0 +1,55 @@ +package transactionvalidator + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/util/txmass" +) + +const sigCacheSize = 10_000 + +// transactionValidator exposes a set of validation classes, after which +// it's possible to determine whether either a transaction is valid +type transactionValidator struct { + blockCoinbaseMaturity uint64 + databaseContext model.DBReader + pastMedianTimeManager model.PastMedianTimeManager + ghostdagDataStore model.GHOSTDAGDataStore + daaBlocksStore model.DAABlocksStore + enableNonNativeSubnetworks bool + maxCoinbasePayloadLength uint64 + ghostdagK externalapi.KType + coinbasePayloadScriptPublicKeyMaxLength uint8 + sigCache *txscript.SigCache + sigCacheECDSA *txscript.SigCacheECDSA + txMassCalculator *txmass.Calculator +} + +// New instantiates a new TransactionValidator +func New(blockCoinbaseMaturity uint64, + enableNonNativeSubnetworks bool, + maxCoinbasePayloadLength uint64, + ghostdagK externalapi.KType, + coinbasePayloadScriptPublicKeyMaxLength uint8, 
+ databaseContext model.DBReader, + pastMedianTimeManager model.PastMedianTimeManager, + ghostdagDataStore model.GHOSTDAGDataStore, + daaBlocksStore model.DAABlocksStore, + txMassCalculator *txmass.Calculator) model.TransactionValidator { + + return &transactionValidator{ + blockCoinbaseMaturity: blockCoinbaseMaturity, + enableNonNativeSubnetworks: enableNonNativeSubnetworks, + maxCoinbasePayloadLength: maxCoinbasePayloadLength, + ghostdagK: ghostdagK, + coinbasePayloadScriptPublicKeyMaxLength: coinbasePayloadScriptPublicKeyMaxLength, + databaseContext: databaseContext, + pastMedianTimeManager: pastMedianTimeManager, + ghostdagDataStore: ghostdagDataStore, + daaBlocksStore: daaBlocksStore, + sigCache: txscript.NewSigCache(sigCacheSize), + sigCacheECDSA: txscript.NewSigCacheECDSA(sigCacheSize), + txMassCalculator: txMassCalculator, + } +} diff --git a/domain/consensus/processes/transactionvalidator/transactionvalidator_test.go b/domain/consensus/processes/transactionvalidator/transactionvalidator_test.go new file mode 100644 index 0000000..7957e48 --- /dev/null +++ b/domain/consensus/processes/transactionvalidator/transactionvalidator_test.go @@ -0,0 +1,520 @@ +package transactionvalidator_test + +import ( + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/util" + + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + 
"github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" +) + +func TestValidateTransactionInContextAndPopulateFee(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, tearDown, err := factory.NewTestConsensus(consensusConfig, + "TestValidateTransactionInContextAndPopulateFee") + if err != nil { + t.Fatalf("Failed create a NewTestConsensus: %s", err) + } + defer tearDown(false) + + privateKey, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + t.Fatalf("Failed to generate a private key: %v", err) + } + publicKey, err := privateKey.SchnorrPublicKey() + if err != nil { + t.Fatalf("Failed to generate a public key: %v", err) + } + publicKeySerialized, err := publicKey.Serialize() + if err != nil { + t.Fatalf("Failed to serialize public key: %v", err) + } + addr, err := util.NewAddressPublicKey(publicKeySerialized[:], consensusConfig.Prefix) + if err != nil { + t.Fatalf("Failed to generate p2pk address: %v", err) + } + scriptPublicKey, err := txscript.PayToAddrScript(addr) + if err != nil { + t.Fatalf("PayToAddrScript: unexpected error: %v", err) + } + prevOutTxID := &externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: []byte{}, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, // 1 SPR + scriptPublicKey, + true, + uint64(5)), + } + txInputWrongSignature := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: []byte{}, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, // 1 SPR + scriptPublicKey, + true, + uint64(5)), + } + immatureCoinbaseInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: []byte{}, + Sequence: 
constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry( + 100_000_000, // 1 SPR + scriptPublicKey, + true, + uint64(6)), + } + + txInputWithLargeAmount := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: []byte{}, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry( + 21e14+1, + scriptPublicKey, + false, + 0), + } + + txInputWithLargeAmountAfterHF := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: []byte{}, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry( + constants.MaxSompi+1, + scriptPublicKey, + false, + 0), + } + + txOutput := externalapi.DomainTransactionOutput{ + Value: 100000000, // 1 SPR + ScriptPublicKey: scriptPublicKey, + } + txOutputBigValue := externalapi.DomainTransactionOutput{ + Value: 200_000_000, // 2 SPR + ScriptPublicKey: scriptPublicKey, + } + + validTx := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{&txOutput}, + SubnetworkID: subnetworks.SubnetworkIDRegistry, + Gas: 0, + LockTime: 0} + + for i, input := range validTx.Inputs { + signatureScript, err := txscript.SignatureScript(&validTx, i, consensushashing.SigHashAll, privateKey, + &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("Failed to create a sigScript: %v", err) + } + input.SignatureScript = signatureScript + } + + txWithImmatureCoinbase := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&immatureCoinbaseInput}, + Outputs: []*externalapi.DomainTransactionOutput{&txOutput}, + SubnetworkID: subnetworks.SubnetworkIDRegistry, + Gas: 0, + LockTime: 0} + txWithLargeAmountBeforeHF := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: 
[]*externalapi.DomainTransactionInput{&txInputWithLargeAmount}, + Outputs: []*externalapi.DomainTransactionOutput{&txOutput}, + SubnetworkID: subnetworks.SubnetworkIDRegistry, + Gas: 0, + LockTime: 0} + + txWithInvalidAmount := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&txInputWithLargeAmountAfterHF}, + Outputs: []*externalapi.DomainTransactionOutput{&txOutput}, + SubnetworkID: subnetworks.SubnetworkIDRegistry, + Gas: 0, + LockTime: 0} + + for i, input := range txWithLargeAmountBeforeHF.Inputs { + signatureScript, err := txscript.SignatureScript(&txWithLargeAmountBeforeHF, i, consensushashing.SigHashAll, privateKey, + &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("Failed to create a sigScript: %v", err) + } + input.SignatureScript = signatureScript + } + + txWithBigValue := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{&txOutputBigValue}, + SubnetworkID: subnetworks.SubnetworkIDRegistry, + Gas: 0, + LockTime: 0} + txWithInvalidSignature := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&txInputWrongSignature}, + Outputs: []*externalapi.DomainTransactionOutput{&txOutput}, + SubnetworkID: subnetworks.SubnetworkIDRegistry, + Gas: 0, + LockTime: 0} + + stagingArea := model.NewStagingArea() + + povBlockHash := externalapi.NewDomainHashFromByteArray(&[32]byte{0x01}) + tc.DAABlocksStore().StageDAAScore(stagingArea, povBlockHash, consensusConfig.BlockCoinbaseMaturity+txInput.UTXOEntry.BlockDAAScore()) + + // Just use some stub ghostdag data + tc.GHOSTDAGDataStore().Stage(stagingArea, povBlockHash, externalapi.NewBlockGHOSTDAGData( + 0, + nil, + consensusConfig.GenesisHash, + nil, + nil, + nil), false) + + tests := []struct { + name string + tx 
*externalapi.DomainTransaction + povBlockHash *externalapi.DomainHash + isValid bool + expectedError error + }{ + { + name: "Valid transaction", + tx: &validTx, + povBlockHash: povBlockHash, + isValid: true, + expectedError: nil, + }, + { // The calculated block coinbase maturity is smaller than the minimum expected blockCoinbaseMaturity. + // The povBlockHash DAA score is 10 and the UTXO DAA score is 5, hence the The subtraction between + // them will yield a smaller result than the required CoinbaseMaturity (currently set to 100). + name: "checkTransactionCoinbaseMaturity", + tx: &txWithImmatureCoinbase, + povBlockHash: povBlockHash, + isValid: false, + expectedError: ruleerrors.ErrImmatureSpend, + }, + { // The total inputs amount is bigger than the allowed maximum before the HF (21e14) + name: "checkTransactionInputAmounts - valid", + tx: &txWithLargeAmountBeforeHF, + povBlockHash: povBlockHash, + isValid: true, + expectedError: nil, + }, + { // The total inputs amount is bigger than the allowed maximum (constants.MaxSompi) + name: "checkTransactionInputAmounts - invalid - after HF", + tx: &txWithInvalidAmount, + povBlockHash: povBlockHash, + isValid: false, + expectedError: ruleerrors.ErrBadTxOutValue, + }, + { // The total SompiIn (sum of inputs amount) is smaller than the total SompiOut (sum of outputs value) and hence invalid. 
+ name: "checkTransactionOutputAmounts", + tx: &txWithBigValue, + povBlockHash: povBlockHash, + isValid: false, + expectedError: ruleerrors.ErrSpendTooHigh, + }, + { // The SignatureScript is wrong + name: "checkTransactionScripts", + tx: &txWithInvalidSignature, + povBlockHash: povBlockHash, + isValid: false, + expectedError: ruleerrors.ErrScriptValidation, + }, + } + + for _, test := range tests { + err := tc.TransactionValidator().ValidateTransactionInContextAndPopulateFee(stagingArea, test.tx, test.povBlockHash) + + if test.isValid { + if err != nil { + t.Fatalf("Unexpected error on TestValidateTransactionInContextAndPopulateFee"+ + " on test '%v': %+v", test.name, err) + } + } else { + if err == nil || !errors.Is(err, test.expectedError) { + t.Fatalf("TestValidateTransactionInContextAndPopulateFee: test %v:"+ + " Unexpected error: Expected to: %v, but got : %+v", test.name, test.expectedError, err) + } + } + } + }) +} + +func TestSigningTwoInputs(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestSigningTwoInputs") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + privateKey, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + t.Fatalf("Failed to generate a private key: %v", err) + } + publicKey, err := privateKey.SchnorrPublicKey() + if err != nil { + t.Fatalf("Failed to generate a public key: %v", err) + } + publicKeySerialized, err := publicKey.Serialize() + if err != nil { + t.Fatalf("Failed to serialize public key: %v", err) + } + addr, err := util.NewAddressPublicKey(publicKeySerialized[:], consensusConfig.Prefix) + if err != nil { + t.Fatalf("Failed to generate p2pk address: %v", err) + } + + scriptPublicKey, err := txscript.PayToAddrScript(addr) + if err != nil { + t.Fatalf("PayToAddrScript: 
unexpected error: %v", err) + } + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + } + + block1Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block2Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block3Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{block2Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block2, _, err := tc.GetBlock(block2Hash) + if err != nil { + t.Fatalf("Error getting block2: %+v", err) + } + + block3, _, err := tc.GetBlock(block3Hash) + if err != nil { + t.Fatalf("Error getting block3: %+v", err) + } + + block2Tx := block2.Transactions[0] + block2TxOut := block2Tx.Outputs[0] + + block3Tx := block3.Transactions[0] + block3TxOut := block3Tx.Outputs[0] + + tx := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block2.Transactions[0]), + Index: 0, + }, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry(block2TxOut.Value, block2TxOut.ScriptPublicKey, true, 0), + }, + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block3.Transactions[0]), + Index: 0, + }, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry(block3TxOut.Value, block3TxOut.ScriptPublicKey, true, 0), + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{{ + Value: 1, + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + }}, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + LockTime: 0, + } + + sighashReusedValues := 
&consensushashing.SighashReusedValues{} + for i, input := range tx.Inputs { + signatureScript, err := txscript.SignatureScript(tx, i, consensushashing.SigHashAll, privateKey, + sighashReusedValues) + if err != nil { + t.Fatalf("Failed to create a sigScript: %v", err) + } + input.SignatureScript = signatureScript + } + + _, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{block3Hash}, nil, []*externalapi.DomainTransaction{tx}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + txOutpoint := &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(tx), + Index: 0, + } + if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(txOutpoint) { + t.Fatalf("tx was not accepted by the DAG") + } + }) +} + +func TestSigningTwoInputsECDSA(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestSigningTwoInputsECDSA") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + privateKey, err := secp256k1.GenerateECDSAPrivateKey() + if err != nil { + t.Fatalf("Failed to generate a private key: %v", err) + } + publicKey, err := privateKey.ECDSAPublicKey() + if err != nil { + t.Fatalf("Failed to generate a public key: %v", err) + } + publicKeySerialized, err := publicKey.Serialize() + if err != nil { + t.Fatalf("Failed to serialize public key: %v", err) + } + addr, err := util.NewAddressPublicKeyECDSA(publicKeySerialized[:], consensusConfig.Prefix) + if err != nil { + t.Fatalf("Failed to generate p2pk address: %v", err) + } + + scriptPublicKey, err := txscript.PayToAddrScript(addr) + if err != nil { + t.Fatalf("PayToAddrScript: unexpected error: %v", err) + } + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, + } + + block1Hash, _, err := 
tc.AddBlock([]*externalapi.DomainHash{consensusConfig.GenesisHash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block2Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{block1Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block3Hash, _, err := tc.AddBlock([]*externalapi.DomainHash{block2Hash}, coinbaseData, nil) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + block2, _, err := tc.GetBlock(block2Hash) + if err != nil { + t.Fatalf("Error getting block2: %+v", err) + } + + block3, _, err := tc.GetBlock(block3Hash) + if err != nil { + t.Fatalf("Error getting block3: %+v", err) + } + + block2Tx := block2.Transactions[0] + block2TxOut := block2Tx.Outputs[0] + + block3Tx := block3.Transactions[0] + block3TxOut := block3Tx.Outputs[0] + + tx := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block2.Transactions[0]), + Index: 0, + }, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry(block2TxOut.Value, block2TxOut.ScriptPublicKey, true, 0), + }, + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(block3.Transactions[0]), + Index: 0, + }, + Sequence: constants.MaxTxInSequenceNum, + SigOpCount: 1, + UTXOEntry: utxo.NewUTXOEntry(block3TxOut.Value, block3TxOut.ScriptPublicKey, true, 0), + }, + }, + Outputs: []*externalapi.DomainTransactionOutput{{ + Value: 1, + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + }}, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + LockTime: 0, + } + + sighashReusedValues := &consensushashing.SighashReusedValues{} + for i, input := range tx.Inputs { + signatureScript, err := txscript.SignatureScriptECDSA(tx, i, consensushashing.SigHashAll, privateKey, + 
sighashReusedValues) + if err != nil { + t.Fatalf("Failed to create a sigScript: %v", err) + } + input.SignatureScript = signatureScript + } + + _, virtualChangeSet, err := tc.AddBlock([]*externalapi.DomainHash{block3Hash}, nil, []*externalapi.DomainTransaction{tx}) + if err != nil { + t.Fatalf("AddBlock: %+v", err) + } + + txOutpoint := &externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(tx), + Index: 0, + } + if !virtualChangeSet.VirtualUTXODiff.ToAdd().Contains(txOutpoint) { + t.Fatalf("tx was not accepted by the DAG") + } + }) +} diff --git a/domain/consensus/pruning_test.go b/domain/consensus/pruning_test.go new file mode 100644 index 0000000..258a5f8 --- /dev/null +++ b/domain/consensus/pruning_test.go @@ -0,0 +1,27 @@ +package consensus_test + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +func TestPruningDepth(t *testing.T) { + expectedResult := map[string]uint64{ + dagconfig.MainnetParams.Name: 185798, + dagconfig.TestnetParams.Name: 185798, + dagconfig.DevnetParams.Name: 185798, + dagconfig.SimnetParams.Name: 132998, + } + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + expected, found := expectedResult[consensusConfig.Name] + if !found { + t.Fatalf("TestPruningDepth: expectedResult doesn't contain '%s'", consensusConfig.Name) + } + if consensusConfig.PruningDepth() != expected { + t.Errorf("pruningDepth in %s is expected to be %d but got %d", consensusConfig.Name, expected, consensusConfig.PruningDepth()) + } + }) +} diff --git a/domain/consensus/ruleerrors/rule_error.go b/domain/consensus/ruleerrors/rule_error.go new file mode 100644 index 0000000..9fcca41 --- /dev/null +++ b/domain/consensus/ruleerrors/rule_error.go @@ -0,0 +1,340 @@ +package ruleerrors + +import ( + "fmt" + + "github.com/pkg/errors" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// These constants are used to identify a specific RuleError. +var ( + // ErrDuplicateBlock indicates a block with the same hash already + // exists. + ErrDuplicateBlock = newRuleError("ErrDuplicateBlock") + + // ErrBlockVersionTooOld indicates the block version is too old and is + // no longer accepted since the majority of the network has upgraded + // to a newer version. + ErrBlockVersionTooOld = newRuleError("ErrBlockVersionTooOld") + + // ErrTimeTooOld indicates the time is either before the median time of + // the last several blocks per the DAG consensus rules. + ErrTimeTooOld = newRuleError("ErrTimeTooOld") + + //ErrTimeTooMuchInTheFuture indicates that the block timestamp is too much in the future. + ErrTimeTooMuchInTheFuture = newRuleError("ErrTimeTooMuchInTheFuture") + + // ErrNoParents indicates that the block is missing parents + ErrNoParents = newRuleError("ErrNoParents") + + // ErrUnexpectedDifficulty indicates specified bits do not align with + // the expected value either because it doesn't match the calculated + // valued based on difficulty regarted rules. + ErrUnexpectedDifficulty = newRuleError("ErrUnexpectedDifficulty") + + // ErrUnexpectedDAAScore indicates specified DAA score does not align with + // the expected value. + ErrUnexpectedDAAScore = newRuleError("ErrUnexpectedDAAScore") + + // ErrUnexpectedBlueWork indicates specified blue work does not align with + // the expected value. + ErrUnexpectedBlueWork = newRuleError("ErrUnexpectedBlueWork") + + // ErrUnexpectedFinalityPoint indicates specified finality point does not align with + // the expected value. + ErrUnexpectedFinalityPoint = newRuleError("ErrUnexpectedFinalityPoint") + + // ErrUnexpectedCoinbaseBlueScore indicates specified blue score in the coinbase does not align with + // the expected value. 
+ ErrUnexpectedCoinbaseBlueScore = newRuleError("ErrUnexpectedCoinbaseBlueScore") + + // ErrUnexpectedHeaderBlueScore indicates specified blue score in the header does not align with + // the expected value. + ErrUnexpectedHeaderBlueScore = newRuleError("ErrUnexpectedHeaderBlueScore") + + // ErrTargetTooHigh indicates specified bits do not align with + // the expected value either because it is above the valid + // range. + ErrTargetTooHigh = newRuleError("ErrTargetTooHigh") + + // ErrUnexpectedDifficulty indicates specified bits do not align with + // the expected value either because it is negative. + ErrNegativeTarget = newRuleError("ErrNegativeTarget") + + // ErrInvalidPoW indicates that the block proof-of-work is invalid. + ErrInvalidPoW = newRuleError("ErrInvalidPoW") + + // ErrBadMerkleRoot indicates the calculated merkle root does not match + // the expected value. + ErrBadMerkleRoot = newRuleError("ErrBadMerkleRoot") + + // ErrBadUTXOCommitment indicates the calculated UTXO commitment does not match + // the expected value. + ErrBadUTXOCommitment = newRuleError("ErrBadUTXOCommitment") + + // ErrFinalityPointTimeTooOld indicates a block has a timestamp before the + // last finality point. + ErrFinalityPointTimeTooOld = newRuleError("ErrFinalityPointTimeTooOld") + + // ErrNoTransactions indicates the block does not have a least one + // transaction. A valid block must have at least the coinbase + // transaction. + ErrNoTransactions = newRuleError("ErrNoTransactions") + + // ErrNoTxInputs indicates a transaction does not have any inputs. A + // valid transaction must have at least one input. + ErrNoTxInputs = newRuleError("ErrNoTxInputs") + + // ErrBadTxOutValue indicates an output value for a transaction is + // invalid in some way such as being out of range. 
+ ErrBadTxOutValue = newRuleError("ErrBadTxOutValue") + ErrTxOutValueZero = newRuleError("ErrTxOutValueZero") + + // ErrDuplicateTxInputs indicates a transaction references the same + // input more than once. + ErrDuplicateTxInputs = newRuleError("ErrDuplicateTxInputs") + + // ErrDoubleSpendInSameBlock indicates a transaction + // that spends an output that was already spent by another + // transaction in the same block. + ErrDoubleSpendInSameBlock = newRuleError("ErrDoubleSpendInSameBlock") + + // ErrUnfinalizedTx indicates a transaction has not been finalized. + // A valid block may only contain finalized transactions. + ErrUnfinalizedTx = newRuleError("ErrUnfinalizedTx") + + // ErrDuplicateTx indicates a block contains an identical transaction + // (or at least two transactions which hash to the same value). A + // valid block may only contain unique transactions. + ErrDuplicateTx = newRuleError("ErrDuplicateTx") + + // ErrImmatureSpend indicates a transaction is attempting to spend a + // coinbase that has not yet reached the required maturity. + ErrImmatureSpend = newRuleError("ErrImmatureSpend") + + // ErrSpendTooHigh indicates a transaction is attempting to spend more + // value than the sum of all of its inputs. + ErrSpendTooHigh = newRuleError("ErrSpendTooHigh") + + // ErrFirstTxNotCoinbase indicates the first transaction in a block + // is not a coinbase transaction. + ErrFirstTxNotCoinbase = newRuleError("ErrFirstTxNotCoinbase") + + // ErrMultipleCoinbases indicates a block contains more than one + // coinbase transaction. + ErrMultipleCoinbases = newRuleError("ErrMultipleCoinbases") + + // ErrBadCoinbasePayloadLen indicates the length of the payload + // for a coinbase transaction is too high. 
+ ErrBadCoinbasePayloadLen = newRuleError("ErrBadCoinbasePayloadLen") + + // ErrBadCoinbaseTransaction indicates that the block's coinbase transaction is not built as expected + ErrBadCoinbaseTransaction = newRuleError("ErrBadCoinbaseTransaction") + + // ErrScriptMalformed indicates a transaction script is malformed in + // some way. For example, it might be longer than the maximum allowed + // length or fail to parse. + ErrScriptMalformed = newRuleError("ErrScriptMalformed") + + // ErrScriptValidation indicates the result of executing transaction + // script failed. The error covers any failure when executing scripts + // such as signature verification failures and execution past the end of + // the stack. + ErrScriptValidation = newRuleError("ErrScriptValidation") + + // ErrInvalidAncestorBlock indicates that an ancestor of this block has + // already failed validation. + ErrInvalidAncestorBlock = newRuleError("ErrInvalidAncestorBlock") + + // ErrTransactionsNotSorted indicates that transactions in block are not + // sorted by subnetwork + ErrTransactionsNotSorted = newRuleError("ErrTransactionsNotSorted") + + // ErrInvalidGas transaction wants to use more GAS than allowed + // by subnetwork + ErrInvalidGas = newRuleError("ErrInvalidGas") + + // ErrInvalidPayload transaction includes a payload in a subnetwork that doesn't allow + // a Payload + ErrInvalidPayload = newRuleError("ErrInvalidPayload") + + // ErrWrongSigOpCount transaction input specifies an incorrect SigOpCount + ErrWrongSigOpCount = newRuleError("ErrWrongSigOpCount") + + // ErrSubnetworkRegistry indicates that a block doesn't adhere to the subnetwork + // registry rules + ErrSubnetworkRegistry = newRuleError("ErrSubnetworkRegistry") + + // ErrInvalidParentsRelation indicates that one of the parents of a block + // is also an ancestor of another parent + ErrInvalidParentsRelation = newRuleError("ErrInvalidParentsRelation") + + // ErrTooManyParents indicates that a block points to more than `MaxNumParentBlocks` 
parents + ErrTooManyParents = newRuleError("ErrTooManyParents") + + // ErrViolatingBoundedMergeDepth indicates that a block is violating finality from + // its own point of view + ErrViolatingBoundedMergeDepth = newRuleError("ErrViolatingBoundedMergeDepth") + + // ErrViolatingMergeLimit indicates that a block merges more than mergeLimit blocks + ErrViolatingMergeLimit = newRuleError("ErrViolatingMergeLimit") + + // ErrChainedTransactions indicates that a block contains a transaction that spends an output of a transaction + // In the same block + ErrChainedTransactions = newRuleError("ErrChainedTransactions") + + // ErrBlockMassTooHigh indicates the mass of a block exceeds the maximum + // allowed limits. + ErrBlockMassTooHigh = newRuleError("ErrBlockMassTooHigh") + + ErrKnownInvalid = newRuleError("ErrKnownInvalid") + + ErrSubnetworksDisabled = newRuleError("ErrSubnetworksDisabled") + ErrBadPruningPointUTXOSet = newRuleError("ErrBadPruningPointUTXOSet") + + ErrMalformedUTXO = newRuleError("ErrMalformedUTXO") + + ErrWrongPruningPointHash = newRuleError("ErrWrongPruningPointHash") + + //ErrPruningPointViolation indicates that the pruning point isn't in the block past. + ErrPruningPointViolation = newRuleError("ErrPruningPointViolation") + + ErrUnexpectedParents = newRuleError("ErrUnexpectedParents") + + ErrUnexpectedPruningPoint = newRuleError("ErrUnexpectedPruningPoint") + + ErrInvalidPruningPointsChain = newRuleError("ErrInvalidPruningPointsChain") + + ErrSuggestedPruningViolatesFinality = newRuleError("ErrSuggestedPruningViolatesFinality") + + //ErrBlockVersionIsUnknown indicates that the block version is unknown. + ErrBlockVersionIsUnknown = newRuleError("ErrBlockVersionIsUnknown") + + //ErrTransactionVersionIsUnknown indicates that the transaction version is unknown. + ErrTransactionVersionIsUnknown = newRuleError("ErrTransactionVersionIsUnknown") + + // ErrPrunedBlock indicates that the block currently being validated had already been pruned. 
+ ErrPrunedBlock = newRuleError("ErrPrunedBlock") + + ErrGetVirtualUTXOsWrongVirtualParents = newRuleError("ErrGetVirtualUTXOsWrongVirtualParents") + + ErrVirtualGenesisParent = newRuleError("ErrVirtualGenesisParent") + + ErrGenesisOnInitializedConsensus = newRuleError("ErrGenesisOnInitializedConsensus") + + ErrPruningPointSelectedChildDisqualifiedFromChain = newRuleError("ErrPruningPointSelectedChildDisqualifiedFromChain") + + // ErrUnexpectedHeaderPruningPoint indicates a block header pruning point does not align with + // the expected value. + ErrUnexpectedHeaderPruningPoint = newRuleError("ErrUnexpectedHeaderPruningPoint") + + ErrPruningProofHeaderWithNoKnownParents = newRuleError("ErrPruningProofHeaderWithNoKnownParents") + ErrPruningProofMissingBlockLevels = newRuleError("ErrPruningProofMissingBlockLevels") + ErrPruningProofWrongBlockLevel = newRuleError("ErrPruningProofWrongBlockLevel") + ErrPruningProofSelectedTipNotParentOfPruningPoint = newRuleError("ErrPruningProofSelectedTipNotParentOfPruningPoint") + ErrPruningProofSelectedTipIsNotThePruningPoint = newRuleError("ErrPruningProofSelectedTipIsNotThePruningPoint") + ErrPruningProofInsufficientBlueWork = newRuleError("ErrPruningProofInsufficientBlueWork") + ErrPruningProofMissingBlockAtDepthMFromNextLevel = newRuleError("ErrPruningProofMissingBlockAtDepthMFromNextLevel") + ErrPruningProofMissesBlocksBelowPruningPoint = newRuleError("ErrPruningProofMissesBlocksBelowPruningPoint") + ErrPruningProofEmpty = newRuleError("ErrPruningProofEmpty") + ErrWrongCoinbaseSubsidy = newRuleError("ErrWrongCoinbaseSubsidy") + ErrWrongBlockVersion = newRuleError("ErrWrongBlockVersion") + ErrCoinbaseWithInputs = newRuleError("ErrCoinbaseWithInputs") + ErrCoinbaseTooManyOutputs = newRuleError("ErrCoinbaseTooManyOutputs") + ErrCoinbaseTooLongScriptPublicKey = newRuleError("ErrCoinbaseTooLongScriptPublicKey") +) + +// RuleError identifies a rule violation. 
It is used to indicate that +// processing of a block or transaction failed due to one of the many validation +// rules. The caller can use type assertions to determine if a failure was +// specifically due to a rule violation. +type RuleError struct { + message string + inner error +} + +// Error satisfies the error interface and prints human-readable errors. +func (e RuleError) Error() string { + if e.inner != nil { + return e.message + ": " + e.inner.Error() + } + return e.message +} + +// Unwrap satisfies the errors.Unwrap interface +func (e RuleError) Unwrap() error { + return e.inner +} + +// Cause satisfies the github.com/pkg/errors.Cause interface +func (e RuleError) Cause() error { + return e.inner +} + +func newRuleError(message string) RuleError { + return RuleError{message: message, inner: nil} +} + +// ErrMissingTxOut indicates a transaction output referenced by an input +// either does not exist or has already been spent. +type ErrMissingTxOut struct { + MissingOutpoints []*externalapi.DomainOutpoint +} + +func (e ErrMissingTxOut) Error() string { + return fmt.Sprintf("missing the following outpoint: %v", e.MissingOutpoints) +} + +// NewErrMissingTxOut Creates a new ErrMissingTxOut error wrapped in a RuleError +func NewErrMissingTxOut(missingOutpoints []*externalapi.DomainOutpoint) error { + return errors.WithStack(RuleError{ + message: "ErrMissingTxOut", + inner: ErrMissingTxOut{missingOutpoints}, + }) +} + +// ErrMissingParents indicates a block points to unknown parent(s). 
+type ErrMissingParents struct { + MissingParentHashes []*externalapi.DomainHash +} + +func (e ErrMissingParents) Error() string { + return fmt.Sprintf("missing the following parent hashes: %v", e.MissingParentHashes) +} + +// NewErrMissingParents creates a new ErrMissingParents error wrapped in a RuleError +func NewErrMissingParents(missingParentHashes []*externalapi.DomainHash) error { + return errors.WithStack(RuleError{ + message: "ErrMissingParents", + inner: ErrMissingParents{missingParentHashes}, + }) +} + +// InvalidTransaction is a struct containing an invalid transaction, and the error explaining why it's invalid. +type InvalidTransaction struct { + Transaction *externalapi.DomainTransaction + Error *RuleError +} + +func (invalid InvalidTransaction) String() string { + return fmt.Sprintf("(%v: %s)", consensushashing.TransactionID(invalid.Transaction), invalid.Error) +} + +// ErrInvalidTransactionsInNewBlock indicates that some transactions in a new block are invalid +type ErrInvalidTransactionsInNewBlock struct { + InvalidTransactions []InvalidTransaction +} + +func (e ErrInvalidTransactionsInNewBlock) Error() string { + return fmt.Sprint(e.InvalidTransactions) +} + +// NewErrInvalidTransactionsInNewBlock Creates a new ErrInvalidTransactionsInNewBlock error wrapped in a RuleError +func NewErrInvalidTransactionsInNewBlock(invalidTransactions []InvalidTransaction) error { + return errors.WithStack(RuleError{ + message: "ErrInvalidTransactionsInNewBlock", + inner: ErrInvalidTransactionsInNewBlock{invalidTransactions}, + }) +} diff --git a/domain/consensus/ruleerrors/rule_error_test.go b/domain/consensus/ruleerrors/rule_error_test.go new file mode 100644 index 0000000..a74c866 --- /dev/null +++ b/domain/consensus/ruleerrors/rule_error_test.go @@ -0,0 +1,85 @@ +package ruleerrors + +import ( + "errors" + "fmt" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func TestNewErrMissingTxOut(t *testing.T) { + outer := NewErrMissingTxOut( + []*externalapi.DomainOutpoint{ + { + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{255, 255, 255}), + Index: 5, + }, + }) + expectedOuterErr := "ErrMissingTxOut: missing the following outpoint: [(ffffff0000000000000000000000000000000000000000000000000000000000: 5)]" + inner := &ErrMissingTxOut{} + if !errors.As(outer, inner) { + t.Fatal("TestWrapInRuleError: Outer should contain ErrMissingTxOut in it") + } + + if len(inner.MissingOutpoints) != 1 { + t.Fatalf("TestWrapInRuleError: Expected len(inner.MissingOutpoints) 1, found: %d", len(inner.MissingOutpoints)) + } + if inner.MissingOutpoints[0].Index != 5 { + t.Fatalf("TestWrapInRuleError: Expected 5. found: %d", inner.MissingOutpoints[0].Index) + } + + rule := &RuleError{} + if !errors.As(outer, rule) { + t.Fatal("TestWrapInRuleError: Outer should contain RuleError in it") + } + if rule.message != "ErrMissingTxOut" { + t.Fatalf("TestWrapInRuleError: Expected message = 'ErrMissingTxOut', found: '%s'", rule.message) + } + if errors.Is(rule.inner, inner) { + t.Fatal("TestWrapInRuleError: rule.inner should contain the ErrMissingTxOut in it") + } + + if outer.Error() != expectedOuterErr { + t.Fatalf("TestWrapInRuleError: Expected %s. 
found: %s", expectedOuterErr, outer.Error()) + } +} + +func TestNewErrInvalidTransactionsInNewBlock(t *testing.T) { + tx := &externalapi.DomainTransaction{Fee: 1337} + txID := consensushashing.TransactionID(tx) + outer := NewErrInvalidTransactionsInNewBlock([]InvalidTransaction{{tx, &ErrNoTxInputs}}) + //TODO: Implement Stringer for `DomainTransaction` + expectedOuterErr := fmt.Sprintf("ErrInvalidTransactionsInNewBlock: [(%s: ErrNoTxInputs)]", txID) + inner := &ErrInvalidTransactionsInNewBlock{} + if !errors.As(outer, inner) { + t.Fatal("TestNewErrInvalidTransactionsInNewBlock: Outer should contain ErrInvalidTransactionsInNewBlock in it") + } + + if len(inner.InvalidTransactions) != 1 { + t.Fatalf("TestNewErrInvalidTransactionsInNewBlock: Expected len(inner.MissingOutpoints) 1, found: %d", len(inner.InvalidTransactions)) + } + if *inner.InvalidTransactions[0].Error != ErrNoTxInputs { + t.Fatalf("TestNewErrInvalidTransactionsInNewBlock: Expected ErrNoTxInputs. found: %v", inner.InvalidTransactions[0].Error) + } + if inner.InvalidTransactions[0].Transaction.Fee != 1337 { + t.Fatalf("TestNewErrInvalidTransactionsInNewBlock: Expected 1337. found: %v", inner.InvalidTransactions[0].Transaction.Fee) + } + + rule := &RuleError{} + if !errors.As(outer, rule) { + t.Fatal("TestNewErrInvalidTransactionsInNewBlock: Outer should contain RuleError in it") + } + if rule.message != "ErrInvalidTransactionsInNewBlock" { + t.Fatalf("TestNewErrInvalidTransactionsInNewBlock: Expected message = 'ErrInvalidTransactionsInNewBlock', found: '%s'", rule.message) + } + if errors.Is(rule.inner, inner) { + t.Fatal("TestNewErrInvalidTransactionsInNewBlock: rule.inner should contain the ErrInvalidTransactionsInNewBlock in it") + } + + if outer.Error() != expectedOuterErr { + t.Fatalf("TestNewErrInvalidTransactionsInNewBlock: Expected %s. 
found: %s", expectedOuterErr, outer.Error()) + } +} diff --git a/domain/consensus/test_consensus.go b/domain/consensus/test_consensus.go new file mode 100644 index 0000000..ea35ddc --- /dev/null +++ b/domain/consensus/test_consensus.go @@ -0,0 +1,293 @@ +package consensus + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/utils/hashset" + "github.com/spectre-project/spectred/util/staging" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +type testConsensus struct { + *consensus + dagParams *dagconfig.Params + database database.Database + + testBlockBuilder testapi.TestBlockBuilder + testReachabilityManager testapi.TestReachabilityManager + testConsensusStateManager testapi.TestConsensusStateManager + testTransactionValidator testapi.TestTransactionValidator + + buildBlockConsensus *consensus +} + +func (tc *testConsensus) DAGParams() *dagconfig.Params { + return tc.dagParams +} + +func (tc *testConsensus) BuildBlockWithParents(parentHashes []*externalapi.DomainHash, + coinbaseData *externalapi.DomainCoinbaseData, transactions []*externalapi.DomainTransaction) ( + *externalapi.DomainBlock, externalapi.UTXODiff, error) { + + // Require write lock because BuildBlockWithParents stages temporary data + tc.lock.Lock() + defer tc.lock.Unlock() + + block, diff, err := tc.testBlockBuilder.BuildBlockWithParents(parentHashes, coinbaseData, transactions) + if err != nil { + return nil, nil, err + } + + return block, diff, nil +} + +func (tc *testConsensus) AddBlock(parentHashes []*externalapi.DomainHash, coinbaseData 
*externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) { + + // Require write lock because BuildBlockWithParents stages temporary data + tc.lock.Lock() + defer tc.lock.Unlock() + + block, _, err := tc.testBlockBuilder.BuildBlockWithParents(parentHashes, coinbaseData, transactions) + if err != nil { + return nil, nil, err + } + + virtualChangeSet, err := tc.validateAndInsertBlockNoLock(block, true) + if err != nil { + return nil, nil, err + } + + return consensushashing.BlockHash(block), virtualChangeSet, nil +} + +func (tc *testConsensus) AddBlockOnTips(coinbaseData *externalapi.DomainCoinbaseData, + transactions []*externalapi.DomainTransaction) (*externalapi.DomainHash, *externalapi.VirtualChangeSet, error) { + + tips, err := tc.Tips() + if err != nil { + return nil, nil, err + } + + return tc.AddBlock(tips, coinbaseData, transactions) +} + +func (tc *testConsensus) AddUTXOInvalidHeader(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash, + *externalapi.VirtualChangeSet, error) { + + // Require write lock because BuildBlockWithParents stages temporary data + tc.lock.Lock() + defer tc.lock.Unlock() + + header, err := tc.testBlockBuilder.BuildUTXOInvalidHeader(parentHashes) + if err != nil { + return nil, nil, err + } + + virtualChangeSet, err := tc.validateAndInsertBlockNoLock(&externalapi.DomainBlock{ + Header: header, + Transactions: nil, + }, true) + if err != nil { + return nil, nil, err + } + + return consensushashing.HeaderHash(header), virtualChangeSet, nil +} + +func (tc *testConsensus) AddUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainHash, + *externalapi.VirtualChangeSet, error) { + + // Require write lock because BuildBlockWithParents stages temporary data + tc.lock.Lock() + defer tc.lock.Unlock() + + block, err := tc.testBlockBuilder.BuildUTXOInvalidBlock(parentHashes) + if err != nil { + return nil, nil, err + } + + 
virtualChangeSet, err := tc.validateAndInsertBlockNoLock(block, true) + if err != nil { + return nil, nil, err + } + + return consensushashing.BlockHash(block), virtualChangeSet, nil +} + +func (tc *testConsensus) ResolveVirtualWithMaxParam(maxBlocksToResolve uint64) (*externalapi.VirtualChangeSet, bool, error) { + tc.lock.Lock() + defer tc.lock.Unlock() + + return tc.resolveVirtualChunkNoLock(maxBlocksToResolve) +} + +// jsonBlock is a json representation of a block in mine format +type jsonBlock struct { + ID string `json:"id"` + Parents []string `json:"parents"` +} + +func (tc *testConsensus) MineJSON(r io.Reader, blockType testapi.MineJSONBlockType) (tips []*externalapi.DomainHash, err error) { + tipSet := map[externalapi.DomainHash]*externalapi.DomainHash{} + tipSet[*tc.dagParams.GenesisHash] = tc.dagParams.GenesisHash + + parentsMap := make(map[string]*externalapi.DomainHash) + parentsMap["0"] = tc.dagParams.GenesisHash + + decoder := json.NewDecoder(r) + // read open bracket + _, err = decoder.Token() + if err != nil { + return nil, err + } + // while the array contains values + for decoder.More() { + var block jsonBlock + // decode an array value (Message) + err := decoder.Decode(&block) + if err != nil { + return nil, err + } + if block.ID == "0" { + continue + } + parentHashes := make([]*externalapi.DomainHash, len(block.Parents)) + var ok bool + for i, parentID := range block.Parents { + parentHashes[i], ok = parentsMap[parentID] + if !ok { + return nil, errors.Errorf("Couldn't find blockID: %s", parentID) + } + delete(tipSet, *parentHashes[i]) + } + + var blockHash *externalapi.DomainHash + switch blockType { + case testapi.MineJSONBlockTypeUTXOValidBlock: + blockHash, _, err = tc.AddBlock(parentHashes, nil, nil) + if err != nil { + return nil, err + } + case testapi.MineJSONBlockTypeUTXOInvalidBlock: + blockHash, _, err = tc.AddUTXOInvalidBlock(parentHashes) + if err != nil { + return nil, err + } + case testapi.MineJSONBlockTypeUTXOInvalidHeader: + 
blockHash, _, err = tc.AddUTXOInvalidHeader(parentHashes) + if err != nil { + return nil, err + } + default: + return nil, errors.Errorf("unknwon block type %v", blockType) + } + + parentsMap[block.ID] = blockHash + tipSet[*blockHash] = blockHash + } + + tips = make([]*externalapi.DomainHash, len(tipSet)) + i := 0 + for _, v := range tipSet { + tips[i] = v + i++ + } + return tips, nil +} + +func (tc *testConsensus) ToJSON(w io.Writer) error { + hashToID := make(map[externalapi.DomainHash]string) + lastID := 0 + + encoder := json.NewEncoder(w) + visited := hashset.New() + queue := tc.dagTraversalManager.NewUpHeap(model.NewStagingArea()) + err := queue.Push(tc.dagParams.GenesisHash) + if err != nil { + return err + } + + blocksToAdd := make([]jsonBlock, 0) + for queue.Len() > 0 { + current := queue.Pop() + if visited.Contains(current) { + continue + } + + visited.Add(current) + + if current.Equal(model.VirtualBlockHash) { + continue + } + + header, err := tc.blockHeaderStore.BlockHeader(tc.databaseContext, model.NewStagingArea(), current) + if err != nil { + return err + } + + directParents := header.DirectParents() + + parentIDs := make([]string, len(directParents)) + for i, parent := range directParents { + parentIDs[i] = hashToID[*parent] + } + lastIDStr := fmt.Sprintf("%d", lastID) + blocksToAdd = append(blocksToAdd, jsonBlock{ + ID: lastIDStr, + Parents: parentIDs, + }) + hashToID[*current] = lastIDStr + lastID++ + + children, err := tc.dagTopologyManagers[0].Children(model.NewStagingArea(), current) + if err != nil { + return err + } + + err = queue.PushSlice(children) + if err != nil { + return err + } + } + + return encoder.Encode(blocksToAdd) +} + +func (tc *testConsensus) BuildUTXOInvalidBlock(parentHashes []*externalapi.DomainHash) (*externalapi.DomainBlock, error) { + // Require write lock because BuildBlockWithParents stages temporary data + tc.lock.Lock() + defer tc.lock.Unlock() + + return tc.testBlockBuilder.BuildUTXOInvalidBlock(parentHashes) +} + 
+func (tc *testConsensus) BuildHeaderWithParents(parentHashes []*externalapi.DomainHash) (externalapi.BlockHeader, error) { + // Require write lock because BuildUTXOInvalidHeader stages temporary data + tc.lock.Lock() + defer tc.lock.Unlock() + + return tc.testBlockBuilder.BuildUTXOInvalidHeader(parentHashes) +} + +func (tc *testConsensus) UpdatePruningPointByVirtual() error { + tc.lock.Lock() + defer tc.lock.Unlock() + + stagingArea := model.NewStagingArea() + err := tc.pruningManager.UpdatePruningPointByVirtual(stagingArea) + if err != nil { + return err + } + + return staging.CommitAllChanges(tc.databaseContext, stagingArea) +} diff --git a/domain/consensus/test_consensus_getters.go b/domain/consensus/test_consensus_getters.go new file mode 100644 index 0000000..cba0b58 --- /dev/null +++ b/domain/consensus/test_consensus_getters.go @@ -0,0 +1,152 @@ +package consensus + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func (tc *testConsensus) DatabaseContext() model.DBManager { + return tc.databaseContext +} + +func (tc *testConsensus) Database() database.Database { + return tc.database +} + +func (tc *testConsensus) AcceptanceDataStore() model.AcceptanceDataStore { + return tc.acceptanceDataStore +} + +func (tc *testConsensus) BlockHeaderStore() model.BlockHeaderStore { + return tc.blockHeaderStore +} + +func (tc *testConsensus) BlockRelationStore() model.BlockRelationStore { + return tc.blockRelationStores[0] +} + +func (tc *testConsensus) BlockStatusStore() model.BlockStatusStore { + return tc.blockStatusStore +} + +func (tc *testConsensus) BlockStore() model.BlockStore { + return tc.blockStore +} + +func (tc *testConsensus) ConsensusStateStore() model.ConsensusStateStore { + return tc.consensusStateStore +} + +func (tc 
*testConsensus) GHOSTDAGDataStore() model.GHOSTDAGDataStore { + return tc.ghostdagDataStores[0] +} + +func (tc *testConsensus) GHOSTDAGDataStores() []model.GHOSTDAGDataStore { + return tc.ghostdagDataStores +} + +func (tc *testConsensus) HeaderTipsStore() model.HeaderSelectedTipStore { + return tc.headersSelectedTipStore +} + +func (tc *testConsensus) MultisetStore() model.MultisetStore { + return tc.multisetStore +} + +func (tc *testConsensus) PruningStore() model.PruningStore { + return tc.pruningStore +} + +func (tc *testConsensus) ReachabilityDataStore() model.ReachabilityDataStore { + return tc.reachabilityDataStore +} + +func (tc *testConsensus) UTXODiffStore() model.UTXODiffStore { + return tc.utxoDiffStore +} + +func (tc *testConsensus) BlockBuilder() testapi.TestBlockBuilder { + return tc.testBlockBuilder +} + +func (tc *testConsensus) BlockProcessor() model.BlockProcessor { + return tc.blockProcessor +} + +func (tc *testConsensus) BlockValidator() model.BlockValidator { + return tc.blockValidator +} + +func (tc *testConsensus) CoinbaseManager() model.CoinbaseManager { + return tc.coinbaseManager +} + +func (tc *testConsensus) ConsensusStateManager() testapi.TestConsensusStateManager { + return tc.testConsensusStateManager +} + +func (tc *testConsensus) DAGTopologyManager() model.DAGTopologyManager { + return tc.dagTopologyManagers[0] +} + +func (tc *testConsensus) DAGTraversalManager() model.DAGTraversalManager { + return tc.dagTraversalManager +} + +func (tc *testConsensus) DifficultyManager() model.DifficultyManager { + return tc.difficultyManager +} + +func (tc *testConsensus) GHOSTDAGManager() model.GHOSTDAGManager { + return tc.ghostdagManagers[0] +} + +func (tc *testConsensus) HeaderTipsManager() model.HeadersSelectedTipManager { + return tc.headerTipsManager +} + +func (tc *testConsensus) MergeDepthManager() model.MergeDepthManager { + return tc.mergeDepthManager +} + +func (tc *testConsensus) PastMedianTimeManager() model.PastMedianTimeManager { + 
return tc.pastMedianTimeManager +} + +func (tc *testConsensus) PruningManager() model.PruningManager { + return tc.pruningManager +} + +func (tc *testConsensus) ReachabilityManager() testapi.TestReachabilityManager { + return tc.testReachabilityManager +} + +func (tc *testConsensus) SyncManager() model.SyncManager { + return tc.syncManager +} + +func (tc *testConsensus) TransactionValidator() testapi.TestTransactionValidator { + return tc.testTransactionValidator +} + +func (tc *testConsensus) FinalityManager() model.FinalityManager { + return tc.finalityManager +} + +func (tc *testConsensus) FinalityStore() model.FinalityStore { + return tc.finalityStore +} + +func (tc *testConsensus) HeadersSelectedChainStore() model.HeadersSelectedChainStore { + return tc.headersSelectedChainStore +} + +func (tc *testConsensus) DAABlocksStore() model.DAABlocksStore { + return tc.daaBlocksStore +} + +func (tc *testConsensus) Consensus() externalapi.Consensus { + return tc +} diff --git a/domain/consensus/test_consensus_render_to_dot.go b/domain/consensus/test_consensus_render_to_dot.go new file mode 100644 index 0000000..d13bb29 --- /dev/null +++ b/domain/consensus/test_consensus_render_to_dot.go @@ -0,0 +1,83 @@ +package consensus + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os/exec" + "strings" + + "github.com/spectre-project/spectred/domain/consensus/model" +) + +// RenderDAGToDot is a helper function for debugging tests. +// It requires graphviz installed. 
+func (tc *testConsensus) RenderDAGToDot(filename string) error { + dotScript, _ := tc.convertToDot() + return renderDotScript(dotScript, filename) +} + +func (tc *testConsensus) convertToDot() (string, error) { + var dotScriptBuilder strings.Builder + dotScriptBuilder.WriteString("digraph {\n\trankdir = TB; \n") + + edges := []string{} + + blocksIterator, err := tc.blockStore.AllBlockHashesIterator(tc.databaseContext) + if err != nil { + return "", err + } + defer blocksIterator.Close() + + stagingArea := model.NewStagingArea() + for ok := blocksIterator.First(); ok; ok = blocksIterator.Next() { + hash, err := blocksIterator.Get() + if err != nil { + return "", err + } + dotScriptBuilder.WriteString(fmt.Sprintf("\t\"%s\";\n", hash)) + + parents, err := tc.dagTopologyManagers[0].Parents(stagingArea, hash) + if err != nil { + return "", err + } + + for _, parentHash := range parents { + edges = append(edges, fmt.Sprintf("\t\"%s\" -> \"%s\";", hash, parentHash)) + } + } + + dotScriptBuilder.WriteString("\n") + + dotScriptBuilder.WriteString(strings.Join(edges, "\n")) + + dotScriptBuilder.WriteString("\n}") + + return dotScriptBuilder.String(), nil +} + +func renderDotScript(dotScript string, filename string) error { + command := exec.Command("dot", "-Tsvg") + stdin, err := command.StdinPipe() + if err != nil { + return fmt.Errorf("Error creating stdin pipe: %s", err) + } + spawn("renderDotScript", func() { + defer stdin.Close() + + _, err = io.WriteString(stdin, dotScript) + if err != nil { + panic(fmt.Errorf("Error writing dotScript into stdin pipe: %s", err)) + } + }) + + var stderr bytes.Buffer + command.Stderr = &stderr + svg, err := command.Output() + if err != nil { + return fmt.Errorf("Error getting output of dot: %s\nstderr:\n%s", err, stderr.String()) + } + + return ioutil.WriteFile(filename, svg, 0600) +} diff --git a/domain/consensus/testdata/dags/dag0.json b/domain/consensus/testdata/dags/dag0.json new file mode 100644 index 0000000..8c36f23 --- /dev/null 
+++ b/domain/consensus/testdata/dags/dag0.json @@ -0,0 +1,252 @@ +{ + "K": 4, + "GenesisID": "A", + "ExpectedReds": [ + "Q", + "H", + "I" + ], + "Blocks": [ + { + "ID": "B", + "ExpectedScore": 1, + "ExpectedSelectedParent": "A", + "ExpectedReds": [], + "ExpectedBlues": [ + "A" + ], + "Parents": [ + "A" + ] + }, + { + "ID": "C", + "ExpectedScore": 2, + "ExpectedSelectedParent": "B", + "ExpectedReds": [], + "ExpectedBlues": [ + "B" + ], + "Parents": [ + "B" + ] + }, + { + "ID": "D", + "ExpectedScore": 1, + "ExpectedSelectedParent": "A", + "ExpectedReds": [], + "ExpectedBlues": [ + "A" + ], + "Parents": [ + "A" + ] + }, + { + "ID": "E", + "ExpectedScore": 4, + "ExpectedSelectedParent": "C", + "ExpectedReds": [], + "ExpectedBlues": [ + "C", + "D" + ], + "Parents": [ + "C", + "D" + ] + }, + { + "ID": "F", + "ExpectedScore": 1, + "ExpectedSelectedParent": "A", + "ExpectedReds": [], + "ExpectedBlues": [ + "A" + ], + "Parents": [ + "A" + ] + }, + { + "ID": "G", + "ExpectedScore": 2, + "ExpectedSelectedParent": "F", + "ExpectedReds": [], + "ExpectedBlues": [ + "F" + ], + "Parents": [ + "F" + ] + }, + { + "ID": "H", + "ExpectedScore": 1, + "ExpectedSelectedParent": "A", + "ExpectedReds": [], + "ExpectedBlues": [ + "A" + ], + "Parents": [ + "A" + ] + }, + { + "ID": "I", + "ExpectedScore": 1, + "ExpectedSelectedParent": "A", + "ExpectedReds": [], + "ExpectedBlues": [ + "A" + ], + "Parents": [ + "A" + ] + }, + { + "ID": "J", + "ExpectedScore": 7, + "ExpectedSelectedParent": "E", + "ExpectedReds": [], + "ExpectedBlues": [ + "E", + "F", + "G" + ], + "Parents": [ + "E", + "G" + ] + }, + { + "ID": "K", + "ExpectedScore": 8, + "ExpectedSelectedParent": "J", + "ExpectedReds": [], + "ExpectedBlues": [ + "J" + ], + "Parents": [ + "J" + ] + }, + { + "ID": "L", + "ExpectedScore": 9, + "ExpectedSelectedParent": "K", + "ExpectedReds": ["I"], + "ExpectedBlues": [ + "K" + ], + "Parents": [ + "I", + "K" + ] + }, + { + "ID": "M", + "ExpectedScore": 10, + "ExpectedSelectedParent": "L", + 
"ExpectedReds": [], + "ExpectedBlues": [ + "L" + ], + "Parents": [ + "L" + ] + }, + { + "ID": "N", + "ExpectedScore": 11, + "ExpectedSelectedParent": "M", + "ExpectedReds": [], + "ExpectedBlues": [ + "M" + ], + "Parents": [ + "M" + ] + }, + { + "ID": "O", + "ExpectedScore": 11, + "ExpectedSelectedParent": "M", + "ExpectedReds": [], + "ExpectedBlues": [ + "M" + ], + "Parents": [ + "M" + ] + }, + { + "ID": "2", + "ExpectedScore": 11, + "ExpectedSelectedParent": "M", + "ExpectedReds": [], + "ExpectedBlues": [ + "M" + ], + "Parents": [ + "M" + ] + }, + { + "ID": "Q", + "ExpectedScore": 11, + "ExpectedSelectedParent": "M", + "ExpectedReds": [], + "ExpectedBlues": [ + "M" + ], + "Parents": [ + "M" + ] + }, + { + "ID": "R", + "ExpectedScore": 11, + "ExpectedSelectedParent": "M", + "ExpectedReds": [], + "ExpectedBlues": [ + "M" + ], + "Parents": [ + "M" + ] + }, + { + "ID": "S", + "ExpectedScore": 12, + "ExpectedSelectedParent": "R", + "ExpectedReds": [], + "ExpectedBlues": [ + "R" + ], + "Parents": [ + "R" + ] + }, + { + "ID": "T", + "ExpectedScore": 16, + "ExpectedSelectedParent": "S", + "ExpectedReds": ["Q"], + "ExpectedBlues": [ + "S", + "2", + "N", + "O" + ], + "Parents": [ + "N", + "O", + "2", + "Q", + "S" + ] + } + ] +} diff --git a/domain/consensus/testdata/dags/dag1.json b/domain/consensus/testdata/dags/dag1.json new file mode 100644 index 0000000..58db801 --- /dev/null +++ b/domain/consensus/testdata/dags/dag1.json @@ -0,0 +1,436 @@ +{ + "K": 4, + "GenesisID": "0", + "Blocks": [ + { + "ID": "a1", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "2", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "3", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "4", + 
"ExpectedScore": 2, + "ExpectedSelectedParent": "a1", + "ExpectedReds": [], + "ExpectedBlues": [ + "a1" + ], + "Parents": [ + "a1" + ] + }, + { + "ID": "5", + "ExpectedScore": 3, + "ExpectedSelectedParent": "3", + "ExpectedReds": [], + "ExpectedBlues": [ + "3", + "2" + ], + "Parents": [ + "2", + "3" + ] + }, + { + "ID": "6", + "ExpectedScore": 2, + "ExpectedSelectedParent": "3", + "ExpectedReds": [], + "ExpectedBlues": [ + "3" + ], + "Parents": [ + "3" + ] + }, + { + "ID": "7", + "ExpectedScore": 3, + "ExpectedSelectedParent": "6", + "ExpectedReds": [], + "ExpectedBlues": [ + "6" + ], + "Parents": [ + "6" + ] + }, + { + "ID": "8", + "ExpectedScore": 3, + "ExpectedSelectedParent": "a1", + "ExpectedReds": [], + "ExpectedBlues": [ + "a1", + "2" + ], + "Parents": [ + "a1", + "2" + ] + }, + { + "ID": "9", + "ExpectedScore": 5, + "ExpectedSelectedParent": "5", + "ExpectedReds": [], + "ExpectedBlues": [ + "5", + "6" + ], + "Parents": [ + "5", + "6" + ] + }, + { + "ID": "a10", + "ExpectedScore": 5, + "ExpectedSelectedParent": "8", + "ExpectedReds": [], + "ExpectedBlues": [ + "8", + "4" + ], + "Parents": [ + "8", + "4" + ] + }, + { + "ID": "11", + "ExpectedScore": 7, + "ExpectedSelectedParent": "9", + "ExpectedReds": [], + "ExpectedBlues": [ + "9", + "7" + ], + "Parents": [ + "7", + "9" + ] + }, + { + "ID": "12", + "ExpectedScore": 8, + "ExpectedSelectedParent": "a10", + "ExpectedReds": [ + "3", + "6" + ], + "ExpectedBlues": [ + "a10", + "5", + "9" + ], + "Parents": [ + "a10", + "9" + ] + }, + { + "ID": "13", + "ExpectedScore": 6, + "ExpectedSelectedParent": "8", + "ExpectedReds": [], + "ExpectedBlues": [ + "8", + "3", + "5" + ], + "Parents": [ + "5", + "8" + ] + }, + { + "ID": "14", + "ExpectedScore": 8, + "ExpectedSelectedParent": "13", + "ExpectedReds": [ + "4" + ], + "ExpectedBlues": [ + "13", + "a10" + ], + "Parents": [ + "13", + "a10" + ] + }, + { + "ID": "15", + "ExpectedScore": 9, + "ExpectedSelectedParent": "11", + "ExpectedReds": [ + "a1", + "8" + ], + 
"ExpectedBlues": [ + "11", + "13" + ], + "Parents": [ + "11", + "13" + ] + }, + { + "ID": "16", + "ExpectedScore": 8, + "ExpectedSelectedParent": "11", + "ExpectedReds": [], + "ExpectedBlues": [ + "11" + ], + "Parents": [ + "11" + ] + }, + { + "ID": "17", + "ExpectedScore": 9, + "ExpectedSelectedParent": "14", + "ExpectedReds": [], + "ExpectedBlues": [ + "14" + ], + "Parents": [ + "14" + ] + }, + { + "ID": "18", + "ExpectedScore": 7, + "ExpectedSelectedParent": "13", + "ExpectedReds": [], + "ExpectedBlues": [ + "13" + ], + "Parents": [ + "13" + ] + }, + { + "ID": "19", + "ExpectedScore": 10, + "ExpectedSelectedParent": "15", + "ExpectedReds": [ + "18" + ], + "ExpectedBlues": [ + "15" + ], + "Parents": [ + "18", + "15" + ] + }, + { + "ID": "20", + "ExpectedScore": 10, + "ExpectedSelectedParent": "17", + "ExpectedReds": [ + "6", + "7", + "9", + "11", + "16" + ], + "ExpectedBlues": [ + "17" + ], + "Parents": [ + "16", + "17" + ] + }, + { + "ID": "21", + "ExpectedScore": 12, + "ExpectedSelectedParent": "20", + "ExpectedReds": [], + "ExpectedBlues": [ + "20", + "18" + ], + "Parents": [ + "18", + "20" + ] + }, + { + "ID": "22", + "ExpectedScore": 13, + "ExpectedSelectedParent": "21", + "ExpectedReds": [ + "15", + "19" + ], + "ExpectedBlues": [ + "21" + ], + "Parents": [ + "19", + "21" + ] + }, + { + "ID": "23", + "ExpectedScore": 11, + "ExpectedSelectedParent": "17", + "ExpectedReds": [ + "6", + "9" + ], + "ExpectedBlues": [ + "17", + "12" + ], + "Parents": [ + "12", + "17" + ] + }, + { + "ID": "24", + "ExpectedScore": 13, + "ExpectedSelectedParent": "23", + "ExpectedReds": [ + "7", + "11", + "16" + ], + "ExpectedBlues": [ + "23", + "20" + ], + "Parents": [ + "20", + "23" + ] + }, + { + "ID": "25555", + "ExpectedScore": 13, + "ExpectedSelectedParent": "21", + "ExpectedReds": [], + "ExpectedBlues": [ + "21" + ], + "Parents": [ + "21" + ] + }, + { + "ID": "26", + "ExpectedScore": 15, + "ExpectedSelectedParent": "25555", + "ExpectedReds": [ + "12", + "15", + "19", + "23", + 
"24" + ], + "ExpectedBlues": [ + "25555", + "22" + ], + "Parents": [ + "22", + "24", + "25555" + ] + }, + { + "ID": "27", + "ExpectedScore": 9, + "ExpectedSelectedParent": "16", + "ExpectedReds": [], + "ExpectedBlues": [ + "16" + ], + "Parents": [ + "16" + ] + }, + { + "ID": "28", + "ExpectedScore": 14, + "ExpectedSelectedParent": "25555", + "ExpectedReds": [ + "12", + "23" + ], + "ExpectedBlues": [ + "25555" + ], + "Parents": [ + "23", + "25555" + ] + }, + { + "ID": "29", + "ExpectedScore": 17, + "ExpectedSelectedParent": "26", + "ExpectedReds": [], + "ExpectedBlues": [ + "26", + "28" + ], + "Parents": [ + "26", + "28" + ] + }, + { + "ID": "30", + "ExpectedScore": 10, + "ExpectedSelectedParent": "27", + "ExpectedReds": [], + "ExpectedBlues": [ + "27" + ], + "Parents": [ + "27" + ] + } + ] +} diff --git a/domain/consensus/testdata/dags/dag2.json b/domain/consensus/testdata/dags/dag2.json new file mode 100644 index 0000000..f1c5c62 --- /dev/null +++ b/domain/consensus/testdata/dags/dag2.json @@ -0,0 +1,126 @@ +{ + "K": 18, + "GenesisID": "786", + "ExpectedReds": [], + "Blocks": [ + { + "ID": "21d", + "ExpectedScore": 1, + "ExpectedSelectedParent": "786", + "ExpectedReds": [], + "ExpectedBlues": [ + "786" + ], + "Parents": [ + "786" + ] + }, + { + "ID": "6ef", + "ExpectedScore": 2, + "ExpectedSelectedParent": "21d", + "ExpectedReds": [], + "ExpectedBlues": [ + "21d" + ], + "Parents": [ + "21d" + ] + }, + { + "ID": "c98", + "ExpectedScore": 3, + "ExpectedSelectedParent": "6ef", + "ExpectedReds": [], + "ExpectedBlues": [ + "6ef" + ], + "Parents": [ + "6ef" + ] + }, + { + "ID": "d1c", + "ExpectedScore": 1, + "ExpectedSelectedParent": "786", + "ExpectedReds": [], + "ExpectedBlues": [ + "786" + ], + "Parents": [ + "786" + ] + }, + { + "ID": "ec9", + "ExpectedScore": 5, + "ExpectedSelectedParent": "c98", + "ExpectedReds": [], + "ExpectedBlues": [ + "c98", + "d1c" + ], + "Parents": [ + "d1c", + "c98" + ] + }, + { + "ID": "f154", + "ExpectedScore": 1, + 
"ExpectedSelectedParent": "786", + "ExpectedReds": [], + "ExpectedBlues": [ + "786" + ], + "Parents": [ + "786" + ] + }, + { + "ID": "6c7", + "ExpectedScore": 4, + "ExpectedSelectedParent": "f154", + "ExpectedReds": [], + "ExpectedBlues": [ + "f154", + "21d", + "d1c" + ], + "Parents": [ + "d1c", + "21d", + "f154" + ] + }, + { + "ID": "015", + "ExpectedScore": 8, + "ExpectedSelectedParent": "ec9", + "ExpectedReds": [], + "ExpectedBlues": [ + "ec9", + "f154", + "6c7" + ], + "Parents": [ + "ec9", + "6c7" + ] + }, + { + "ID": "crash", + "ExpectedScore": 6, + "ExpectedSelectedParent": "6c7", + "ExpectedReds": [], + "ExpectedBlues": [ + "6c7", + "6ef" + ], + "Parents": [ + "6ef", + "6c7" + ] + } + ] +} diff --git a/domain/consensus/testdata/dags/dag3.json b/domain/consensus/testdata/dags/dag3.json new file mode 100644 index 0000000..1f0b96c --- /dev/null +++ b/domain/consensus/testdata/dags/dag3.json @@ -0,0 +1,132 @@ +{ + "K": 3, + "GenesisID": "0", + "Blocks": [ + { + "ID": "1", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "2", + "ExpectedScore": 2, + "ExpectedSelectedParent": "1", + "ExpectedReds": [], + "ExpectedBlues": [ + "1" + ], + "Parents": [ + "1" + ] + }, + { + "ID": "3", + "ExpectedScore": 2, + "ExpectedSelectedParent": "1", + "ExpectedReds": [], + "ExpectedBlues": [ + "1" + ], + "Parents": [ + "1" + ] + }, + { + "ID": "4", + "ExpectedScore": 2, + "ExpectedSelectedParent": "1", + "ExpectedReds": [], + "ExpectedBlues": [ + "1" + ], + "Parents": [ + "1" + ] + }, + { + "ID": "5", + "ExpectedScore": 5, + "ExpectedSelectedParent": "4", + "ExpectedReds": [], + "ExpectedBlues": [ + "4", + "2", + "3" + ], + "Parents": [ + "4", + "2", + "3" + ] + }, + { + "ID": "6", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "7", + "ExpectedScore": 2, + 
"ExpectedSelectedParent": "6", + "ExpectedReds": [], + "ExpectedBlues": [ + "6" + ], + "Parents": [ + "6" + ] + }, + { + "ID": "8", + "ExpectedScore": 3, + "ExpectedSelectedParent": "7", + "ExpectedReds": [], + "ExpectedBlues": [ + "7" + ], + "Parents": [ + "7" + ] + }, + { + "ID": "9", + "ExpectedScore": 4, + "ExpectedSelectedParent": "8", + "ExpectedReds": [], + "ExpectedBlues": [ + "8" + ], + "Parents": [ + "8" + ] + }, + { + "ID": "10", + "ExpectedScore": 6, + "ExpectedSelectedParent": "5", + "ExpectedReds": ["6", "7", "8", "9"], + "ExpectedBlues": [ + "5" + ], + "Parents": [ + "5", + "9" + ] + } + + ] +} diff --git a/domain/consensus/testdata/dags/dag4.json b/domain/consensus/testdata/dags/dag4.json new file mode 100644 index 0000000..4ddf93a --- /dev/null +++ b/domain/consensus/testdata/dags/dag4.json @@ -0,0 +1,121 @@ +{ + "K": 2, + "GenesisID": "0", + "Blocks": [ + { + "ID": "1", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "2", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "3", + "ExpectedScore": 2, + "ExpectedSelectedParent": "2", + "ExpectedReds": [], + "ExpectedBlues": [ + "2" + ], + "Parents": [ + "2" + ] + }, + { + "ID": "4", + "ExpectedScore": 2, + "ExpectedSelectedParent": "2", + "ExpectedReds": [], + "ExpectedBlues": [ + "2" + ], + "Parents": [ + "2" + ] + }, + { + "ID": "5", + "ExpectedScore": 2, + "ExpectedSelectedParent": "1", + "ExpectedReds": [], + "ExpectedBlues": [ + "1" + ], + "Parents": [ + "1" + ] + }, + { + "ID": "6", + "ExpectedScore": 5, + "ExpectedSelectedParent": "5", + "ExpectedReds": [], + "ExpectedBlues": [ + "5", + "2", + "4" + ], + "Parents": [ + "4", + "5" + ] + }, + { + "ID": "7", + "ExpectedScore": 5, + "ExpectedSelectedParent": "5", + "ExpectedReds": [], + "ExpectedBlues": [ + "5", + "2", + "3" + ], + "Parents": [ + 
"3", + "5" + ] + }, + { + "ID": "8", + "ExpectedScore": 3, + "ExpectedSelectedParent": "3", + "ExpectedReds": [], + "ExpectedBlues": [ + "3" + ], + "Parents": [ + "3" + ] + }, + { + "ID": "9", + "ExpectedScore": 6, + "ExpectedSelectedParent": "7", + "ExpectedReds": ["4", "8", "6"], + "ExpectedBlues": [ + "7" + ], + "Parents": [ + "6","7","8" + ] + } + + ] +} diff --git a/domain/consensus/testdata/dags/dag5.json b/domain/consensus/testdata/dags/dag5.json new file mode 100644 index 0000000..a4250ee --- /dev/null +++ b/domain/consensus/testdata/dags/dag5.json @@ -0,0 +1,93 @@ +{ + "K": 3, + "GenesisID": "0", + "Blocks": [ + { + "ID": "1", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "2", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "3", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "4", + "ExpectedScore": 1, + "ExpectedSelectedParent": "0", + "ExpectedReds": [], + "ExpectedBlues": [ + "0" + ], + "Parents": [ + "0" + ] + }, + { + "ID": "5", + "ExpectedScore": 4, + "ExpectedSelectedParent": "3", + "ExpectedReds": [], + "ExpectedBlues": [ + "3", "1", "2" + ], + "Parents": [ + "1", + "2", + "3" + ] + }, + { + "ID": "6", + "ExpectedScore": 5, + "ExpectedSelectedParent": "4", + "ExpectedReds": [], + "ExpectedBlues": [ + "4", "1", "2", "3" + ], + "Parents": [ + "1", "2", "3", "4" + ] + }, + { + "ID": "7", + "ExpectedScore": 6, + "ExpectedSelectedParent": "6", + "ExpectedReds": ["5"], + "ExpectedBlues": [ + "6" + ], + "Parents": [ + "5", + "6" + ] + } + ] +} diff --git a/domain/consensus/testdata/reachability/attack-dag-blocks--2^12-delay-factor--1-k--18.json.gz b/domain/consensus/testdata/reachability/attack-dag-blocks--2^12-delay-factor--1-k--18.json.gz new file 
mode 100644 index 0000000..b26317f Binary files /dev/null and b/domain/consensus/testdata/reachability/attack-dag-blocks--2^12-delay-factor--1-k--18.json.gz differ diff --git a/domain/consensus/testdata/reachability/attack-dag-blocks--2^13-delay-factor--1-k--18.json.gz b/domain/consensus/testdata/reachability/attack-dag-blocks--2^13-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..3bc5c3c Binary files /dev/null and b/domain/consensus/testdata/reachability/attack-dag-blocks--2^13-delay-factor--1-k--18.json.gz differ diff --git a/domain/consensus/testdata/reachability/attack-dag-blocks--2^14-delay-factor--1-k--18.json.gz b/domain/consensus/testdata/reachability/attack-dag-blocks--2^14-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..1ed8b67 Binary files /dev/null and b/domain/consensus/testdata/reachability/attack-dag-blocks--2^14-delay-factor--1-k--18.json.gz differ diff --git a/domain/consensus/testdata/reachability/noattack-dag-blocks--2^12-delay-factor--1-k--18.json.gz b/domain/consensus/testdata/reachability/noattack-dag-blocks--2^12-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..47362ca Binary files /dev/null and b/domain/consensus/testdata/reachability/noattack-dag-blocks--2^12-delay-factor--1-k--18.json.gz differ diff --git a/domain/consensus/testdata/reachability/noattack-dag-blocks--2^13-delay-factor--1-k--18.json.gz b/domain/consensus/testdata/reachability/noattack-dag-blocks--2^13-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..2f370b3 Binary files /dev/null and b/domain/consensus/testdata/reachability/noattack-dag-blocks--2^13-delay-factor--1-k--18.json.gz differ diff --git a/domain/consensus/testdata/reachability/noattack-dag-blocks--2^14-delay-factor--1-k--18.json.gz b/domain/consensus/testdata/reachability/noattack-dag-blocks--2^14-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..43d081f Binary files /dev/null and 
b/domain/consensus/testdata/reachability/noattack-dag-blocks--2^14-delay-factor--1-k--18.json.gz differ diff --git a/domain/consensus/timelock_CLTV_test.go b/domain/consensus/timelock_CLTV_test.go new file mode 100644 index 0000000..303f55b --- /dev/null +++ b/domain/consensus/timelock_CLTV_test.go @@ -0,0 +1,537 @@ +package consensus_test + +import ( + "errors" + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +// TestCheckLockTimeVerifyConditionedByDAAScore verifies that an output locked by the CLTV script is spendable only after +// the DAA score reached the set target. 
+func TestCheckLockTimeVerifyConditionedByDAAScore(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckLockTimeVerifyConditionedByDAAScore") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + blockAHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{testConsensus.DAGParams().GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockA: %v", err) + } + blockBHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockAHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockB: %v", err) + } + blockCHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockBHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockC: %v", err) + } + blockDHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockCHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockD: %v", err) + } + blockD, _, err := testConsensus.GetBlock(blockDHash) + if err != nil { + t.Fatalf("Failed getting blockD: %v", err) + } + fees := uint64(1) + //Create a CLTV script: + targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30) + redeemScriptCLTV, err := createScriptCLTV(targetDAAScore) + if err != nil { + t.Fatalf("Failed to create a script using createScriptCLTV: %v", err) + } + p2shScriptCLTV, err := txscript.PayToScriptHashScript(redeemScriptCLTV) + if err != nil { + t.Fatalf("Failed to create a pay-to-script-hash script : %v", err) + } + scriptPublicKeyCLTV := externalapi.ScriptPublicKey{ + Version: constants.MaxScriptPublicKeyVersion, + Script: p2shScriptCLTV, + } + transactionWithLockedOutput, err := createTransactionWithLockedOutput(blockD.Transactions[transactionhelper.CoinbaseTransactionIndex], + fees, 
&scriptPublicKeyCLTV) + if err != nil { + t.Fatalf("Error in createTransactionWithLockedOutput: %v", err) + } + // BlockE contains the locked output (locked by CLTV). + // This block should be valid since CLTV script locked only the output. + blockEHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockDHash}, nil, + []*externalapi.DomainTransaction{transactionWithLockedOutput}) + if err != nil { + t.Fatalf("Error creating blockE: %v", err) + } + // Create a transaction that tries to spend the locked output. + transactionThatSpentTheLockedOutput, err := createTransactionThatSpentTheLockedOutput(transactionWithLockedOutput, + fees, redeemScriptCLTV, targetDAAScore) + if err != nil { + t.Fatalf("Error creating transactionThatSpentTheLockedOutput: %v", err) + } + // Add a block that contains a transaction that spends the locked output before the time, and therefore should be failed. + // (The DAA score should be x, before the output will be spendable, where x = 'targetDAAScore' ). + _, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{blockEHash}, nil, + []*externalapi.DomainTransaction{transactionThatSpentTheLockedOutput}) + if err == nil || !errors.Is(err, ruleerrors.ErrUnfinalizedTx) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrUnfinalizedTx, err) + } + + // Add blocks to release the locked output, the DAA score should be 'numOfBlocksToWait'. 
+ tipHash := blockEHash + stagingArea := model.NewStagingArea() + currentDAAScore, err := testConsensus.DAABlocksStore().DAAScore(testConsensus.DatabaseContext(), stagingArea, tipHash) + if err != nil { + t.Fatalf("Failed getting DAA score: %v", err) + } + numOfBlocksToAdd := targetDAAScore - currentDAAScore + for numOfBlocksToAdd > 0 { + tipHash, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating tip: %v", err) + } + numOfBlocksToAdd-- + } + // Tries to spend the output that should be no longer locked + validBlock, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, + []*externalapi.DomainTransaction{transactionThatSpentTheLockedOutput}) + if err != nil { + t.Fatalf("The block should be valid since the output is not locked anymore. but got an error: %v", err) + } + validBlockStatus, err := testConsensus.BlockStatusStore().Get(testConsensus.DatabaseContext(), stagingArea, + validBlock) + if err != nil { + t.Fatalf("Failed getting the status for validBlock: %v", err) + } + if !validBlockStatus.Equal(externalapi.StatusUTXOValid) { + t.Fatalf("The status of validBlock should be: %v, but got: %v", externalapi.StatusUTXOValid, + validBlockStatus) + } + }) +} + +// TestCheckLockTimeVerifyConditionedByDAAScoreWithWrongLockTime verifies that in case of wrong lock time(lower than expected) +// the block status will be StatusDisqualifiedFromChain. 
+func TestCheckLockTimeVerifyConditionedByDAAScoreWithWrongLockTime(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, + "TestCheckLockTimeVerifyConditionedByDAAScoreWithWrongLockTime") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + blockAHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{testConsensus.DAGParams().GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockA: %v", err) + } + blockBHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockAHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockB: %v", err) + } + blockCHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockBHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockC: %v", err) + } + blockDHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockCHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockD: %v", err) + } + blockD, _, err := testConsensus.GetBlock(blockDHash) + if err != nil { + t.Fatalf("Failed getting blockD: %v", err) + } + fees := uint64(1) + //Create a CLTV script: + targetDAAScore := consensusConfig.GenesisBlock.Header.DAAScore() + uint64(30) + redeemScriptCLTV, err := createScriptCLTV(targetDAAScore) + if err != nil { + t.Fatalf("Failed to create a script using createScriptCLTV: %v", err) + } + p2shScriptCLTV, err := txscript.PayToScriptHashScript(redeemScriptCLTV) + if err != nil { + t.Fatalf("Failed to create a pay-to-script-hash script : %v", err) + } + scriptPublicKeyCLTV := externalapi.ScriptPublicKey{ + Version: constants.MaxScriptPublicKeyVersion, + Script: p2shScriptCLTV, + } + transactionWithLockedOutput, err := 
createTransactionWithLockedOutput(blockD.Transactions[transactionhelper.CoinbaseTransactionIndex], + fees, &scriptPublicKeyCLTV) + if err != nil { + t.Fatalf("Error in createTransactionWithLockedOutput: %v", err) + } + // BlockE contains the locked output (locked by CLTV). + // This block should be valid since CLTV script locked only the output. + blockEHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockDHash}, nil, + []*externalapi.DomainTransaction{transactionWithLockedOutput}) + if err != nil { + t.Fatalf("Error creating blockE: %v", err) + } + // Create a transaction that tries to spend the locked output. + // Decreased the lock time to get wrong lock time. + transactionWithWrongLockTime, err := createTransactionThatSpentTheLockedOutput(transactionWithLockedOutput, + fees, redeemScriptCLTV, targetDAAScore-1) + if err != nil { + t.Fatalf("Error creating transactionWithWrongLockTime: %v", err) + } + // Add blocks to release the locked output, the DAA score should be 'numOfBlocksToWait'. + tipHash := blockEHash + stagingArea := model.NewStagingArea() + currentDAAScore, err := testConsensus.DAABlocksStore().DAAScore(testConsensus.DatabaseContext(), stagingArea, tipHash) + if err != nil { + t.Fatalf("Failed getting DAA score for tip: %v", err) + } + numOfBlocksToAdd := targetDAAScore - currentDAAScore + for numOfBlocksToAdd > 0 { + tipHash, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating tip: %v", err) + } + numOfBlocksToAdd-- + } + // Tries to spend the output, the output is not locked anymore but since the lock time is wrong the block status should + // be 'disqualifiedFromChain'. + blockWithWrongLockTime, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, + []*externalapi.DomainTransaction{transactionWithWrongLockTime}) + if err != nil { + t.Fatalf("The block should be valid. 
but got an error: %v", err) + } + blockWithWrongLockTimeStatus, err := testConsensus.BlockStatusStore().Get(testConsensus.DatabaseContext(), stagingArea, + blockWithWrongLockTime) + if err != nil { + t.Fatalf("Failed getting the status for blockWithWrongLockTime: %v", err) + } + if !blockWithWrongLockTimeStatus.Equal(externalapi.StatusDisqualifiedFromChain) { + t.Fatalf("The status of blockWithWrongLockTime should be: %v, but got: %v", externalapi.StatusDisqualifiedFromChain, + blockWithWrongLockTimeStatus) + } + }) +} + +// TestCheckLockTimeVerifyConditionedByAbsoluteTime verifies that an output locked by the CLTV script has spendable only after +// the time has reached to the set target (compared to the past median time). +func TestCheckLockTimeVerifyConditionedByAbsoluteTime(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckLockTimeVerifyConditionedByAbsoluteTime") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + blockAHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{testConsensus.DAGParams().GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockA: %v", err) + } + blockBHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockAHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockB: %v", err) + } + blockCHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockBHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockC: %v", err) + } + fees := uint64(1) + blockDHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockCHash}, nil, + []*externalapi.DomainTransaction{}) + if err != nil { + t.Fatalf("Error creating blockD: %v", err) + } + blockD, _, err := testConsensus.GetBlock(blockDHash) + if err != 
nil { + t.Fatalf("Failed getting blockD: %v", err) + } + //Create a CLTV script: + timeToWait := uint64(12 * 1000) + lockTimeTarget := uint64(blockD.Header.TimeInMilliseconds()) + timeToWait + redeemScriptCLTV, err := createScriptCLTV(lockTimeTarget) + if err != nil { + t.Fatalf("Failed to create a script using createScriptCLTV: %v", err) + } + p2shScriptCLTV, err := txscript.PayToScriptHashScript(redeemScriptCLTV) + if err != nil { + t.Fatalf("Failed to create a pay-to-script-hash script : %v", err) + } + scriptPublicKeyCLTV := externalapi.ScriptPublicKey{ + Version: constants.MaxScriptPublicKeyVersion, + Script: p2shScriptCLTV, + } + transactionWithLockedOutput, err := createTransactionWithLockedOutput(blockD.Transactions[transactionhelper.CoinbaseTransactionIndex], + fees, &scriptPublicKeyCLTV) + if err != nil { + t.Fatalf("Error in createTransactionWithLockedOutput: %v", err) + } + // BlockE contains the locked output (locked by CLTV). + // This block should be valid since CLTV script locked only the output. + blockEHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockDHash}, nil, + []*externalapi.DomainTransaction{transactionWithLockedOutput}) + if err != nil { + t.Fatalf("Error creating blockE: %v", err) + } + blockE, _, err := testConsensus.GetBlock(blockEHash) + if err != nil { + t.Fatalf("Failed getting blockE: %v", err) + } + // Create a transaction that tries to spend the locked output. + transactionThatSpentTheLockedOutput, err := createTransactionThatSpentTheLockedOutput(transactionWithLockedOutput, + fees, redeemScriptCLTV, lockTimeTarget) + if err != nil { + t.Fatalf("Error creating transactionThatSpentTheLockedOutput: %v", err) + } + // Add a block that contains a transaction that tries to spend the locked output before the time, and therefore should be failed. 
+ _, _, err = testConsensus.AddBlock([]*externalapi.DomainHash{blockEHash}, nil, + []*externalapi.DomainTransaction{transactionThatSpentTheLockedOutput}) + if err == nil || !errors.Is(err, ruleerrors.ErrUnfinalizedTx) { + t.Fatalf("Expected block to be invalid with err: %v, instead found: %v", ruleerrors.ErrUnfinalizedTx, err) + } + + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + var tipHash *externalapi.DomainHash + timeStampBlockE := blockE.Header.TimeInMilliseconds() + stagingArea := model.NewStagingArea() + // Make sure the time limitation has passed. + for i := int64(0); ; i++ { + tipBlock, err := testConsensus.BuildBlock(&emptyCoinbase, nil) + if err != nil { + t.Fatalf("Error creating tip using BuildBlock: %+v", err) + } + blockHeader := tipBlock.Header.ToMutable() + blockHeader.SetTimeInMilliseconds(timeStampBlockE + i*1000) + tipBlock.Header = blockHeader.ToImmutable() + err = testConsensus.ValidateAndInsertBlock(tipBlock, true) + if err != nil { + t.Fatalf("Error validating and inserting tip block: %v", err) + } + tipHash = consensushashing.BlockHash(tipBlock) + pastMedianTime, err := testConsensus.PastMedianTimeManager().PastMedianTime(stagingArea, tipHash) + if err != nil { + t.Fatalf("Failed getting pastMedianTime: %v", err) + } + if uint64(pastMedianTime) > lockTimeTarget { + break + } + } + // Tries to spend the output that should be no longer locked + validBlock, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, + []*externalapi.DomainTransaction{transactionThatSpentTheLockedOutput}) + if err != nil { + t.Fatalf("The block should be valid since the output is not locked anymore. 
but got an error: %v", err) + } + validBlockStatus, err := testConsensus.BlockStatusStore().Get(testConsensus.DatabaseContext(), stagingArea, + validBlock) + if err != nil { + t.Fatalf("Failed getting the status for validBlock: %v", err) + } + if !validBlockStatus.Equal(externalapi.StatusUTXOValid) { + t.Fatalf("The status of validBlock should be: %v, but got: %v", externalapi.StatusUTXOValid, + validBlockStatus) + } + }) +} + +// TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime verifies that in case of wrong lock time(lower than expected) +// the block status will be StatusDisqualifiedFromChain +func TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + testConsensus, teardown, err := factory.NewTestConsensus(consensusConfig, + "TestCheckLockTimeVerifyConditionedByAbsoluteTimeWithWrongLockTime") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + blockAHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{testConsensus.DAGParams().GenesisHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockA: %v", err) + } + blockBHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockAHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockB: %v", err) + } + blockCHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockBHash}, nil, nil) + if err != nil { + t.Fatalf("Error creating blockC: %v", err) + } + fees := uint64(1) + blockDHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockCHash}, nil, + []*externalapi.DomainTransaction{}) + if err != nil { + t.Fatalf("Error creating blockD: %v", err) + } + blockD, _, err := testConsensus.GetBlock(blockDHash) + if err != nil { + t.Fatalf("Failed getting blockD: %v", err) + } + //Create a CLTV 
script: + timeToWait := uint64(12 * 1000) + lockTimeTarget := uint64(blockD.Header.TimeInMilliseconds()) + timeToWait + redeemScriptCLTV, err := createScriptCLTV(lockTimeTarget) + if err != nil { + t.Fatalf("Failed to create a script using createScriptCLTV: %v", err) + } + p2shScriptCLTV, err := txscript.PayToScriptHashScript(redeemScriptCLTV) + if err != nil { + t.Fatalf("Failed to create a pay-to-script-hash script : %v", err) + } + scriptPublicKeyCLTV := externalapi.ScriptPublicKey{ + Version: constants.MaxScriptPublicKeyVersion, + Script: p2shScriptCLTV, + } + transactionWithLockedOutput, err := createTransactionWithLockedOutput(blockD.Transactions[transactionhelper.CoinbaseTransactionIndex], + fees, &scriptPublicKeyCLTV) + if err != nil { + t.Fatalf("Error in createTransactionWithLockedOutput: %v", err) + } + // BlockE contains the locked output (locked by CLTV). + // This block should be valid since CLTV script locked only the output. + blockEHash, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{blockDHash}, nil, + []*externalapi.DomainTransaction{transactionWithLockedOutput}) + if err != nil { + t.Fatalf("Error creating blockE: %v", err) + } + blockE, _, err := testConsensus.GetBlock(blockEHash) + if err != nil { + t.Fatalf("Failed getting blockE: %v", err) + } + // Create a transaction that tries to spend the locked output. + transactionWithWrongLockTime, err := createTransactionThatSpentTheLockedOutput(transactionWithLockedOutput, + fees, redeemScriptCLTV, lockTimeTarget-1) + if err != nil { + t.Fatalf("Error creating transactionWithWrongLockTime: %v", err) + } + emptyCoinbase := externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + var tipHash *externalapi.DomainHash + timeStampBlockE := blockE.Header.TimeInMilliseconds() + stagingArea := model.NewStagingArea() + // Make sure the time limitation has passed. 
+ for i := int64(0); ; i++ { + tipBlock, err := testConsensus.BuildBlock(&emptyCoinbase, nil) + if err != nil { + t.Fatalf("Error creating tip using BuildBlock: %v", err) + } + blockHeader := tipBlock.Header.ToMutable() + blockHeader.SetTimeInMilliseconds(timeStampBlockE + i*1000) + tipBlock.Header = blockHeader.ToImmutable() + err = testConsensus.ValidateAndInsertBlock(tipBlock, true) + if err != nil { + t.Fatalf("Error validating and inserting tip block: %v", err) + } + tipHash = consensushashing.BlockHash(tipBlock) + pastMedianTime, err := testConsensus.PastMedianTimeManager().PastMedianTime(stagingArea, tipHash) + if err != nil { + t.Fatalf("Failed getting pastMedianTime: %v", err) + } + if uint64(pastMedianTime) > lockTimeTarget { + break + } + } + // Tries to spend the output, the output is not locked anymore but since the lock time is wrong the block status should + // be 'disqualifiedFromChain'. + blockWithWrongLockTime, _, err := testConsensus.AddBlock([]*externalapi.DomainHash{tipHash}, nil, + []*externalapi.DomainTransaction{transactionWithWrongLockTime}) + if err != nil { + t.Fatalf("The block should be valid. 
but got an error: %v", err) + } + blockWithWrongLockTimeStatus, err := testConsensus.BlockStatusStore().Get(testConsensus.DatabaseContext(), stagingArea, + blockWithWrongLockTime) + if err != nil { + t.Fatalf("Failed getting the status for blockWithWrongLockTime: %v", err) + } + if !blockWithWrongLockTimeStatus.Equal(externalapi.StatusDisqualifiedFromChain) { + t.Fatalf("The status of blockWithWrongLockTime should be: %v, but got: %v", externalapi.StatusDisqualifiedFromChain, + blockWithWrongLockTimeStatus) + } + }) +} + +func createScriptCLTV(absoluteTimeOrDAAScoreTarget uint64) ([]byte, error) { + scriptBuilder := txscript.NewScriptBuilder() + scriptBuilder.AddLockTimeNumber(absoluteTimeOrDAAScoreTarget) + scriptBuilder.AddOp(txscript.OpCheckLockTimeVerify) + scriptBuilder.AddOp(txscript.OpTrue) + return scriptBuilder.Script() +} + +func createTransactionWithLockedOutput(txToSpend *externalapi.DomainTransaction, fee uint64, + scriptPublicKeyCLTV *externalapi.ScriptPublicKey) (*externalapi.DomainTransaction, error) { + + _, redeemScript := testutils.OpTrueScript() + signatureScript, err := txscript.PayToScriptHashSignatureScript(redeemScript, nil) + if err != nil { + return nil, err + } + input := &externalapi.DomainTransactionInput{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(txToSpend), + Index: 0, + }, + SignatureScript: signatureScript, + Sequence: constants.MaxTxInSequenceNum, + } + output := &externalapi.DomainTransactionOutput{ + ScriptPublicKey: scriptPublicKeyCLTV, + Value: txToSpend.Outputs[0].Value - fee, + } + return &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: []*externalapi.DomainTransactionOutput{output}, + Payload: []byte{}, + }, nil +} + +func createTransactionThatSpentTheLockedOutput(txToSpend *externalapi.DomainTransaction, fee uint64, + redeemScript []byte, lockTime uint64) 
(*externalapi.DomainTransaction, error) { + + signatureScript, err := txscript.PayToScriptHashSignatureScript(redeemScript, []byte{}) + if err != nil { + return nil, err + } + scriptPublicKeyOutput, _ := testutils.OpTrueScript() + input := &externalapi.DomainTransactionInput{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(txToSpend), + Index: 0, + }, + SignatureScript: signatureScript, + Sequence: constants.MaxTxInSequenceNum - 1, + } + output := &externalapi.DomainTransactionOutput{ + ScriptPublicKey: scriptPublicKeyOutput, + Value: txToSpend.Outputs[0].Value - fee, + } + return &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: []*externalapi.DomainTransactionOutput{output}, + Payload: []byte{}, + LockTime: lockTime, // less than 500 billion interpreted as a DAA score, and above as an UNIX timestamp. + }, nil +} diff --git a/domain/consensus/utils/blockheader/blockheader.go b/domain/consensus/utils/blockheader/blockheader.go new file mode 100644 index 0000000..f65006d --- /dev/null +++ b/domain/consensus/utils/blockheader/blockheader.go @@ -0,0 +1,226 @@ +package blockheader + +import ( + "math/big" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" +) + +type blockHeader struct { + version uint16 + parents []externalapi.BlockLevelParents + hashMerkleRoot *externalapi.DomainHash + acceptedIDMerkleRoot *externalapi.DomainHash + utxoCommitment *externalapi.DomainHash + timeInMilliseconds int64 + bits uint32 + nonce uint64 + daaScore uint64 + blueScore uint64 + blueWork *big.Int + pruningPoint *externalapi.DomainHash + + isBlockLevelCached bool + blockLevel int +} + +func (bh *blockHeader) BlueScore() uint64 { + return bh.blueScore +} + +func (bh *blockHeader) PruningPoint() *externalapi.DomainHash { + return bh.pruningPoint +} + +func 
(bh *blockHeader) DAAScore() uint64 { + return bh.daaScore +} + +func (bh *blockHeader) BlueWork() *big.Int { + return bh.blueWork +} + +func (bh *blockHeader) ToImmutable() externalapi.BlockHeader { + return bh.clone() +} + +func (bh *blockHeader) SetNonce(nonce uint64) { + bh.isBlockLevelCached = false + bh.nonce = nonce +} + +func (bh *blockHeader) SetTimeInMilliseconds(timeInMilliseconds int64) { + bh.isBlockLevelCached = false + bh.timeInMilliseconds = timeInMilliseconds +} + +func (bh *blockHeader) SetHashMerkleRoot(hashMerkleRoot *externalapi.DomainHash) { + bh.isBlockLevelCached = false + bh.hashMerkleRoot = hashMerkleRoot +} + +func (bh *blockHeader) Version() uint16 { + return bh.version +} + +func (bh *blockHeader) Parents() []externalapi.BlockLevelParents { + return bh.parents +} + +func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents { + if len(bh.parents) == 0 { + return externalapi.BlockLevelParents{} + } + + return bh.parents[0] +} + +func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash { + return bh.hashMerkleRoot +} + +func (bh *blockHeader) AcceptedIDMerkleRoot() *externalapi.DomainHash { + return bh.acceptedIDMerkleRoot +} + +func (bh *blockHeader) UTXOCommitment() *externalapi.DomainHash { + return bh.utxoCommitment +} + +func (bh *blockHeader) TimeInMilliseconds() int64 { + return bh.timeInMilliseconds +} + +func (bh *blockHeader) Bits() uint32 { + return bh.bits +} + +func (bh *blockHeader) Nonce() uint64 { + return bh.nonce +} + +func (bh *blockHeader) Equal(other externalapi.BaseBlockHeader) bool { + if bh == nil || other == nil { + return bh == other + } + + // If only the underlying value of other is nil it'll + // make `other == nil` return false, so we check it + // explicitly. 
+ downcastedOther := other.(*blockHeader) + if bh == nil || downcastedOther == nil { + return bh == downcastedOther + } + + if bh.version != other.Version() { + return false + } + + if !externalapi.ParentsEqual(bh.parents, other.Parents()) { + return false + } + + if !bh.hashMerkleRoot.Equal(other.HashMerkleRoot()) { + return false + } + + if !bh.acceptedIDMerkleRoot.Equal(other.AcceptedIDMerkleRoot()) { + return false + } + + if !bh.utxoCommitment.Equal(other.UTXOCommitment()) { + return false + } + + if bh.timeInMilliseconds != other.TimeInMilliseconds() { + return false + } + + if bh.bits != other.Bits() { + return false + } + + if bh.nonce != other.Nonce() { + return false + } + + if bh.daaScore != other.DAAScore() { + return false + } + + if bh.blueScore != other.BlueScore() { + return false + } + + if bh.blueWork.Cmp(other.BlueWork()) != 0 { + return false + } + + if !bh.pruningPoint.Equal(other.PruningPoint()) { + return false + } + + return true +} + +func (bh *blockHeader) clone() *blockHeader { + return &blockHeader{ + version: bh.version, + parents: externalapi.CloneParents(bh.parents), + hashMerkleRoot: bh.hashMerkleRoot, + acceptedIDMerkleRoot: bh.acceptedIDMerkleRoot, + utxoCommitment: bh.utxoCommitment, + timeInMilliseconds: bh.timeInMilliseconds, + bits: bh.bits, + nonce: bh.nonce, + daaScore: bh.daaScore, + blueScore: bh.blueScore, + blueWork: bh.blueWork, + pruningPoint: bh.pruningPoint, + } +} + +func (bh *blockHeader) ToMutable() externalapi.MutableBlockHeader { + return bh.clone() +} + +func (bh *blockHeader) BlockLevel(maxBlockLevel int) int { + if !bh.isBlockLevelCached { + bh.blockLevel = pow.BlockLevel(bh, maxBlockLevel) + bh.isBlockLevelCached = true + } + + return bh.blockLevel +} + +// NewImmutableBlockHeader returns a new immutable header +func NewImmutableBlockHeader( + version uint16, + parents []externalapi.BlockLevelParents, + hashMerkleRoot *externalapi.DomainHash, + acceptedIDMerkleRoot *externalapi.DomainHash, + utxoCommitment 
*externalapi.DomainHash, + timeInMilliseconds int64, + bits uint32, + nonce uint64, + daaScore uint64, + blueScore uint64, + blueWork *big.Int, + pruningPoint *externalapi.DomainHash, +) externalapi.BlockHeader { + return &blockHeader{ + version: version, + parents: parents, + hashMerkleRoot: hashMerkleRoot, + acceptedIDMerkleRoot: acceptedIDMerkleRoot, + utxoCommitment: utxoCommitment, + timeInMilliseconds: timeInMilliseconds, + bits: bits, + nonce: nonce, + daaScore: daaScore, + blueScore: blueScore, + blueWork: blueWork, + pruningPoint: pruningPoint, + } +} diff --git a/domain/consensus/utils/blockheader/blockheader_test.go b/domain/consensus/utils/blockheader/blockheader_test.go new file mode 100644 index 0000000..6cdfde2 --- /dev/null +++ b/domain/consensus/utils/blockheader/blockheader_test.go @@ -0,0 +1,354 @@ +package blockheader + +import ( + "math/big" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func TestDomainBlockHeader_Equal(t *testing.T) { + type headerToCompare struct { + header *blockHeader + expectedResult bool + } + tests := []struct { + baseHeader *blockHeader + headersToCompareTo []headerToCompare + }{ + { + baseHeader: nil, + headersToCompareTo: []headerToCompare{ + { + header: nil, + expectedResult: true, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + 4, + 5, + 6, + 7, + 8, + big.NewInt(9), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{10}), + false, + 0, + }, + expectedResult: false, + }, + }, + }, + { + baseHeader: &blockHeader{ + 0, + 
[]externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + headersToCompareTo: []headerToCompare{ + { + header: nil, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: true, + }, + { + header: &blockHeader{ + 100, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 100, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 100, + 7, + 8, + 9, + big.NewInt(10), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 100, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 100, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 100, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + 
[]externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(100), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{11}), + false, + 0, + }, + expectedResult: false, + }, + { + header: &blockHeader{ + 0, + []externalapi.BlockLevelParents{[]*externalapi.DomainHash{externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1})}}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{4}), + 5, + 6, + 7, + 8, + 9, + big.NewInt(10), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{100}), + false, + 0, + }, + expectedResult: false, + }, + }, + }, + } + + for i, test := range tests { + for j, subTest := range test.headersToCompareTo { + result1 := test.baseHeader.Equal(subTest.header) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + + result2 := subTest.header.Equal(test.baseHeader) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} diff --git a/domain/consensus/utils/consensushashing/block.go b/domain/consensus/utils/consensushashing/block.go new file mode 100644 index 0000000..ffb769b --- /dev/null +++ b/domain/consensus/utils/consensushashing/block.go @@ -0,0 +1,55 @@ +package consensushashing + +import ( + "io" + + "github.com/spectre-project/spectred/domain/consensus/utils/serialization" + + 
"github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" +) + +// BlockHash returns the given block's hash +func BlockHash(block *externalapi.DomainBlock) *externalapi.DomainHash { + return HeaderHash(block.Header) +} + +// HeaderHash returns the given header's hash +func HeaderHash(header externalapi.BaseBlockHeader) *externalapi.DomainHash { + // Encode the header and hash everything prior to the number of + // transactions. + writer := hashes.NewBlockHashWriter() + err := serializeHeader(writer, header) + if err != nil { + // It seems like this could only happen if the writer returned an error. + // and this writer should never return an error (no allocations or possible failures) + // the only non-writer error path here is unknown types in `WriteElement` + panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error")) + } + + return writer.Finalize() +} + +func serializeHeader(w io.Writer, header externalapi.BaseBlockHeader) error { + timestamp := header.TimeInMilliseconds() + blueWork := header.BlueWork().Bytes() + + numParents := len(header.Parents()) + if err := serialization.WriteElements(w, header.Version(), uint64(numParents)); err != nil { + return err + } + for _, blockLevelParents := range header.Parents() { + numBlockLevelParents := len(blockLevelParents) + if err := serialization.WriteElements(w, uint64(numBlockLevelParents)); err != nil { + return err + } + for _, hash := range blockLevelParents { + if err := serialization.WriteElement(w, hash); err != nil { + return err + } + } + } + return serialization.WriteElements(w, header.HashMerkleRoot(), header.AcceptedIDMerkleRoot(), header.UTXOCommitment(), timestamp, + header.Bits(), header.Nonce(), header.DAAScore(), header.BlueScore(), blueWork, header.PruningPoint()) +} diff --git a/domain/consensus/utils/consensushashing/calculate_signature_hash.go 
b/domain/consensus/utils/consensushashing/calculate_signature_hash.go new file mode 100644 index 0000000..88f4615 --- /dev/null +++ b/domain/consensus/utils/consensushashing/calculate_signature_hash.go @@ -0,0 +1,244 @@ +package consensushashing + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + "github.com/spectre-project/spectred/domain/consensus/utils/serialization" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" +) + +// SigHashType represents hash type bits at the end of a signature. +type SigHashType uint8 + +// Hash type bits from the end of a signature. +const ( + SigHashAll SigHashType = 0b00000001 + SigHashNone SigHashType = 0b00000010 + SigHashSingle SigHashType = 0b00000100 + SigHashAnyOneCanPay SigHashType = 0b10000000 + + // SigHashMask defines the number of bits of the hash type which is used + // to identify which outputs are signed. + SigHashMask = 0b00000111 +) + +// IsStandardSigHashType returns true if sht represents a standard SigHashType +func (sht SigHashType) IsStandardSigHashType() bool { + switch sht { + case SigHashAll, SigHashNone, SigHashSingle, + SigHashAll | SigHashAnyOneCanPay, SigHashNone | SigHashAnyOneCanPay, SigHashSingle | SigHashAnyOneCanPay: + return true + default: + return false + } +} + +func (sht SigHashType) isSigHashAll() bool { + return sht&SigHashMask == SigHashAll +} +func (sht SigHashType) isSigHashNone() bool { + return sht&SigHashMask == SigHashNone +} +func (sht SigHashType) isSigHashSingle() bool { + return sht&SigHashMask == SigHashSingle +} +func (sht SigHashType) isSigHashAnyOneCanPay() bool { + return sht&SigHashAnyOneCanPay == SigHashAnyOneCanPay +} + +// SighashReusedValues holds all fields used in the calculation of a transaction's sigHash, that are +// the same for all transaction inputs. 
+// Reuse of such values prevents the quadratic hashing problem. +type SighashReusedValues struct { + previousOutputsHash *externalapi.DomainHash + sequencesHash *externalapi.DomainHash + sigOpCountsHash *externalapi.DomainHash + outputsHash *externalapi.DomainHash + payloadHash *externalapi.DomainHash +} + +// CalculateSignatureHashSchnorr will, given a script and hash type calculate the signature hash +// to be used for signing and verification for Schnorr. +// This returns error only if one of the provided parameters are consensus-invalid. +func CalculateSignatureHashSchnorr(tx *externalapi.DomainTransaction, inputIndex int, hashType SigHashType, + reusedValues *SighashReusedValues) (*externalapi.DomainHash, error) { + + if !hashType.IsStandardSigHashType() { + return nil, errors.Errorf("SigHashType %d is not a valid SigHash type", hashType) + } + + txIn := tx.Inputs[inputIndex] + prevScriptPublicKey := txIn.UTXOEntry.ScriptPublicKey() + return calculateSignatureHash(tx, inputIndex, txIn, prevScriptPublicKey, hashType, reusedValues) +} + +// CalculateSignatureHashECDSA will, given a script and hash type calculate the signature hash +// to be used for signing and verification for ECDSA. +// This returns error only if one of the provided parameters are consensus-invalid. 
+func CalculateSignatureHashECDSA(tx *externalapi.DomainTransaction, inputIndex int, hashType SigHashType, + reusedValues *SighashReusedValues) (*externalapi.DomainHash, error) { + + hash, err := CalculateSignatureHashSchnorr(tx, inputIndex, hashType, reusedValues) + if err != nil { + return nil, err + } + + hashWriter := hashes.NewTransactionSigningHashECDSAWriter() + hashWriter.InfallibleWrite(hash.ByteSlice()) + + return hashWriter.Finalize(), nil +} + +func calculateSignatureHash(tx *externalapi.DomainTransaction, inputIndex int, txIn *externalapi.DomainTransactionInput, + prevScriptPublicKey *externalapi.ScriptPublicKey, hashType SigHashType, reusedValues *SighashReusedValues) ( + *externalapi.DomainHash, error) { + + hashWriter := hashes.NewTransactionSigningHashWriter() + infallibleWriteElement(hashWriter, tx.Version) + + previousOutputsHash := getPreviousOutputsHash(tx, hashType, reusedValues) + infallibleWriteElement(hashWriter, previousOutputsHash) + + sequencesHash := getSequencesHash(tx, hashType, reusedValues) + infallibleWriteElement(hashWriter, sequencesHash) + + sigOpCountsHash := getSigOpCountsHash(tx, hashType, reusedValues) + infallibleWriteElement(hashWriter, sigOpCountsHash) + + hashOutpoint(hashWriter, txIn.PreviousOutpoint) + + infallibleWriteElement(hashWriter, prevScriptPublicKey.Version) + infallibleWriteElement(hashWriter, prevScriptPublicKey.Script) + + infallibleWriteElement(hashWriter, txIn.UTXOEntry.Amount()) + + infallibleWriteElement(hashWriter, txIn.Sequence) + + infallibleWriteElement(hashWriter, txIn.SigOpCount) + + outputsHash := getOutputsHash(tx, inputIndex, hashType, reusedValues) + infallibleWriteElement(hashWriter, outputsHash) + + infallibleWriteElement(hashWriter, tx.LockTime) + + infallibleWriteElement(hashWriter, tx.SubnetworkID) + infallibleWriteElement(hashWriter, tx.Gas) + + payloadHash := getPayloadHash(tx, reusedValues) + infallibleWriteElement(hashWriter, payloadHash) + + infallibleWriteElement(hashWriter, 
uint8(hashType)) + + return hashWriter.Finalize(), nil +} + +func getPreviousOutputsHash(tx *externalapi.DomainTransaction, hashType SigHashType, reusedValues *SighashReusedValues) *externalapi.DomainHash { + if hashType.isSigHashAnyOneCanPay() { + return externalapi.NewZeroHash() + } + + if reusedValues.previousOutputsHash == nil { + hashWriter := hashes.NewTransactionSigningHashWriter() + for _, txIn := range tx.Inputs { + hashOutpoint(hashWriter, txIn.PreviousOutpoint) + } + reusedValues.previousOutputsHash = hashWriter.Finalize() + } + + return reusedValues.previousOutputsHash +} + +func getSequencesHash(tx *externalapi.DomainTransaction, hashType SigHashType, reusedValues *SighashReusedValues) *externalapi.DomainHash { + if hashType.isSigHashSingle() || hashType.isSigHashAnyOneCanPay() || hashType.isSigHashNone() { + return externalapi.NewZeroHash() + } + + if reusedValues.sequencesHash == nil { + hashWriter := hashes.NewTransactionSigningHashWriter() + for _, txIn := range tx.Inputs { + infallibleWriteElement(hashWriter, txIn.Sequence) + } + reusedValues.sequencesHash = hashWriter.Finalize() + } + + return reusedValues.sequencesHash +} + +func getSigOpCountsHash(tx *externalapi.DomainTransaction, hashType SigHashType, reusedValues *SighashReusedValues) *externalapi.DomainHash { + if hashType.isSigHashAnyOneCanPay() { + return externalapi.NewZeroHash() + } + + if reusedValues.sigOpCountsHash == nil { + hashWriter := hashes.NewTransactionSigningHashWriter() + for _, txIn := range tx.Inputs { + infallibleWriteElement(hashWriter, txIn.SigOpCount) + } + reusedValues.sigOpCountsHash = hashWriter.Finalize() + } + + return reusedValues.sigOpCountsHash +} + +func getOutputsHash(tx *externalapi.DomainTransaction, inputIndex int, hashType SigHashType, reusedValues *SighashReusedValues) *externalapi.DomainHash { + // SigHashNone: return zero-hash + if hashType.isSigHashNone() { + return externalapi.NewZeroHash() + } + + // SigHashSingle: If the relevant output exists - 
return it's hash, otherwise return zero-hash + if hashType.isSigHashSingle() { + if inputIndex >= len(tx.Outputs) { + return externalapi.NewZeroHash() + } + hashWriter := hashes.NewTransactionSigningHashWriter() + hashTxOut(hashWriter, tx.Outputs[inputIndex]) + return hashWriter.Finalize() + } + + // SigHashAll: Return hash of all outputs. Re-use hash if available. + if reusedValues.outputsHash == nil { + hashWriter := hashes.NewTransactionSigningHashWriter() + for _, txOut := range tx.Outputs { + hashTxOut(hashWriter, txOut) + } + reusedValues.outputsHash = hashWriter.Finalize() + } + + return reusedValues.outputsHash +} + +func getPayloadHash(tx *externalapi.DomainTransaction, reusedValues *SighashReusedValues) *externalapi.DomainHash { + if tx.SubnetworkID.Equal(&subnetworks.SubnetworkIDNative) { + return externalapi.NewZeroHash() + } + + if reusedValues.payloadHash == nil { + hashWriter := hashes.NewTransactionSigningHashWriter() + infallibleWriteElement(hashWriter, tx.Payload) + reusedValues.payloadHash = hashWriter.Finalize() + } + return reusedValues.payloadHash +} + +func hashTxOut(hashWriter hashes.HashWriter, txOut *externalapi.DomainTransactionOutput) { + infallibleWriteElement(hashWriter, txOut.Value) + infallibleWriteElement(hashWriter, txOut.ScriptPublicKey.Version) + infallibleWriteElement(hashWriter, txOut.ScriptPublicKey.Script) +} + +func hashOutpoint(hashWriter hashes.HashWriter, outpoint externalapi.DomainOutpoint) { + infallibleWriteElement(hashWriter, outpoint.TransactionID) + infallibleWriteElement(hashWriter, outpoint.Index) +} + +func infallibleWriteElement(hashWriter hashes.HashWriter, element interface{}) { + err := serialization.WriteElement(hashWriter, element) + if err != nil { + // It seems like this could only happen if the writer returned an error. 
+ // and this writer should never return an error (no allocations or possible failures) + // the only non-writer error path here is unknown types in `WriteElement` + panic(errors.Wrap(err, "TransactionHashForSigning() failed. this should never fail for structurally-valid transactions")) + } +} diff --git a/domain/consensus/utils/consensushashing/calculate_signature_hash_test.go b/domain/consensus/utils/consensushashing/calculate_signature_hash_test.go new file mode 100644 index 0000000..e8c1019 --- /dev/null +++ b/domain/consensus/utils/consensushashing/calculate_signature_hash_test.go @@ -0,0 +1,518 @@ +package consensushashing_test + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + + "github.com/spectre-project/go-secp256k1" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// shortened versions of SigHash types to fit in single line of test case +const ( + all = consensushashing.SigHashAll + none = consensushashing.SigHashNone + single = consensushashing.SigHashSingle + allAnyoneCanPay = consensushashing.SigHashAll | consensushashing.SigHashAnyOneCanPay + noneAnyoneCanPay = consensushashing.SigHashNone | consensushashing.SigHashAnyOneCanPay + singleAnyoneCanPay = consensushashing.SigHashSingle | consensushashing.SigHashAnyOneCanPay +) + +func modifyOutput(outputIndex int) func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + return func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + clone.Outputs[outputIndex].Value = 100 + return clone + } +} + +func 
modifyInput(inputIndex int) func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + return func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + clone.Inputs[inputIndex].PreviousOutpoint.Index = 2 + return clone + } +} + +func modifyAmountSpent(inputIndex int) func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + return func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + utxoEntry := clone.Inputs[inputIndex].UTXOEntry + clone.Inputs[inputIndex].UTXOEntry = utxo.NewUTXOEntry(666, utxoEntry.ScriptPublicKey(), false, 100) + return clone + } +} + +func modifyScriptPublicKey(inputIndex int) func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + return func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + utxoEntry := clone.Inputs[inputIndex].UTXOEntry + scriptPublicKey := utxoEntry.ScriptPublicKey() + scriptPublicKey.Script = append(scriptPublicKey.Script, 1, 2, 3) + clone.Inputs[inputIndex].UTXOEntry = utxo.NewUTXOEntry(utxoEntry.Amount(), scriptPublicKey, false, 100) + return clone + } +} + +func modifySequence(inputIndex int) func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + return func(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + clone.Inputs[inputIndex].Sequence = 12345 + return clone + } +} + +func modifyPayload(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + clone.Payload = []byte{6, 6, 6, 4, 2, 0, 1, 3, 3, 7} + return clone +} + +func modifyGas(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + clone.Gas = 1234 + return clone +} + +func modifySubnetworkID(tx *externalapi.DomainTransaction) *externalapi.DomainTransaction { + clone := tx.Clone() + clone.SubnetworkID = externalapi.DomainSubnetworkID{6, 6, 6, 4, 2, 0, 1, 3, 
3, 7} + return clone +} + +func TestCalculateSignatureHashSchnorr(t *testing.T) { + nativeTx, subnetworkTx, err := generateTxs() + if err != nil { + t.Fatalf("Error from generateTxs: %+v", err) + } + + // Note: Expected values were generated by the same code that they test, + // As long as those were not verified using 3rd-party code they only check for regression, not correctness + tests := []struct { + name string + tx *externalapi.DomainTransaction + hashType consensushashing.SigHashType + inputIndex int + modificationFunction func(*externalapi.DomainTransaction) *externalapi.DomainTransaction + expectedSignatureHash string + }{ + // native transactions + + // sigHashAll + {name: "native-all-0", tx: nativeTx, hashType: all, inputIndex: 0, + expectedSignatureHash: "c95d809f186a8287663fe9b26f41665f0b05858a2e0879f8d5e2415f6a12e671"}, + {name: "native-all-0-modify-input-1", tx: nativeTx, hashType: all, inputIndex: 0, + modificationFunction: modifyInput(1), // should change the hash + expectedSignatureHash: "8c78a488c4f2bf4d509eb583ac333aeb35e162a4f23bbb3962522b415ca907e2"}, + {name: "native-all-0-modify-output-1", tx: nativeTx, hashType: all, inputIndex: 0, + modificationFunction: modifyOutput(1), // should change the hash + expectedSignatureHash: "a160201d1f07475f82c0359d44cb3ff8deaba16b2c2bf9209806ef6732f6fe7d"}, + {name: "native-all-0-modify-sequence-1", tx: nativeTx, hashType: all, inputIndex: 0, + modificationFunction: modifySequence(1), // should change the hash + expectedSignatureHash: "530148177aa2cfaae134efc4f700d6f413975657462588908680ae873bc4ceb4"}, + {name: "native-all-anyonecanpay-0", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + expectedSignatureHash: "264aa3a337079fc08752e0ef3e09d728d4924e0fc7a01a35b74ecec63c15c13a"}, + {name: "native-all-anyonecanpay-0-modify-input-0", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyInput(0), // should change the hash + expectedSignatureHash: 
"699682065d07c8de000d69f0dcf70ff93ee3debb31cfd3d475e97d52f71c7047"}, + {name: "native-all-anyonecanpay-0-modify-input-1", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyInput(1), // shouldn't change the hash + expectedSignatureHash: "264aa3a337079fc08752e0ef3e09d728d4924e0fc7a01a35b74ecec63c15c13a"}, + {name: "native-all-anyonecanpay-0-modify-sequence", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + modificationFunction: modifySequence(1), // shouldn't change the hash + expectedSignatureHash: "264aa3a337079fc08752e0ef3e09d728d4924e0fc7a01a35b74ecec63c15c13a"}, + + // sigHashNone + {name: "native-none-0", tx: nativeTx, hashType: none, inputIndex: 0, + expectedSignatureHash: "5fab356d3bfe6a0ee5fbc9b93594b8339c05325a7aec2d8f9753604e4c2009c1"}, + {name: "native-none-0-modify-output-1", tx: nativeTx, hashType: none, inputIndex: 0, + modificationFunction: modifyOutput(1), // shouldn't change the hash + expectedSignatureHash: "5fab356d3bfe6a0ee5fbc9b93594b8339c05325a7aec2d8f9753604e4c2009c1"}, + {name: "native-none-0-modify-sequence-0", tx: nativeTx, hashType: none, inputIndex: 0, + modificationFunction: modifySequence(0), // should change the hash + expectedSignatureHash: "103fb84b3edf1a25799c3b6e82310aeef322b7613e84b808b417c659342cb118"}, + {name: "native-none-0-modify-sequence-1", tx: nativeTx, hashType: none, inputIndex: 0, + modificationFunction: modifySequence(1), // shouldn't change the hash + expectedSignatureHash: "5fab356d3bfe6a0ee5fbc9b93594b8339c05325a7aec2d8f9753604e4c2009c1"}, + {name: "native-none-anyonecanpay-0", tx: nativeTx, hashType: noneAnyoneCanPay, inputIndex: 0, + expectedSignatureHash: "afc8d205d8f732fbd7fa2cd00f6c9541bf3fc3b44629a1119d8e660632b3753b"}, + {name: "native-none-anyonecanpay-0-modify-amount-spent", tx: nativeTx, hashType: noneAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyAmountSpent(0), // should change the hash + expectedSignatureHash: 
"38d1201b6862e6ea50bd60505525f5adb3606f5043a9b49c149d6568e3e6a3cd"}, + {name: "native-none-anyonecanpay-0-modify-script-public-key", tx: nativeTx, hashType: noneAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyScriptPublicKey(0), // should change the hash + expectedSignatureHash: "53e9748199cd84ecc98e1cadbc631029ecb758150750d95eedd3c3d2ac0380ce"}, + + // sigHashSingle + {name: "native-single-0", tx: nativeTx, hashType: single, inputIndex: 0, + expectedSignatureHash: "566273f743cb23346c9abb09b98532e1f685de9b789924bdd785c9239297b0e7"}, + {name: "native-single-0-modify-output-0", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifyOutput(0), // should change the hash + expectedSignatureHash: "d1791747f3b1291f8c526822cea921e8d19a2df626bf9ff30e29b2bf7822e6b0"}, + {name: "native-single-0-modify-output-1", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifyOutput(1), // shouldn't change the hash + expectedSignatureHash: "566273f743cb23346c9abb09b98532e1f685de9b789924bdd785c9239297b0e7"}, + {name: "native-single-0-modify-sequence-0", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifySequence(0), // should change the hash + expectedSignatureHash: "24b1a805f9224f9398e0bdde170c316f0b2844e5f699c4c814759235070e7aaf"}, + {name: "native-single-0-modify-sequence-1", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifySequence(1), // shouldn't change the hash + expectedSignatureHash: "566273f743cb23346c9abb09b98532e1f685de9b789924bdd785c9239297b0e7"}, + {name: "native-single-2-no-corresponding-output", tx: nativeTx, hashType: single, inputIndex: 2, + expectedSignatureHash: "b57b719a451f8edc8ea92543cba52f5b494b7632709758deb30d0dc9135d4cf3"}, + {name: "native-single-2-no-corresponding-output-modify-output-1", tx: nativeTx, hashType: single, inputIndex: 2, + modificationFunction: modifyOutput(1), // shouldn't change the hash + expectedSignatureHash: 
"b57b719a451f8edc8ea92543cba52f5b494b7632709758deb30d0dc9135d4cf3"}, + {name: "native-single-anyonecanpay-0", tx: nativeTx, hashType: singleAnyoneCanPay, inputIndex: 0, + expectedSignatureHash: "8f6a97df5c2e05502e970e477b85d0a479447efe0ac0d1445f03f690f56fab15"}, + {name: "native-single-anyonecanpay-2-no-corresponding-output", tx: nativeTx, hashType: singleAnyoneCanPay, inputIndex: 2, + expectedSignatureHash: "9c47f2fa63b93735a314e49609074e9e0db8e20d6da27749705de1b1a996b0f4"}, + + // subnetwork transaction + {name: "subnetwork-all-0", tx: subnetworkTx, hashType: all, inputIndex: 0, + expectedSignatureHash: "c43dd1ecf5075076c9e7e596f8805e40bbcef5355141de8ddc749d3ea7d3f0b6"}, + {name: "subnetwork-all-modify-payload", tx: subnetworkTx, hashType: all, inputIndex: 0, + modificationFunction: modifyPayload, // should change the hash + expectedSignatureHash: "a74a3b0a37e28f091ddbc747d30d65b0b3ac3c423552c16d49fa366b5d1a66ee"}, + {name: "subnetwork-all-modify-gas", tx: subnetworkTx, hashType: all, inputIndex: 0, + modificationFunction: modifyGas, // should change the hash + expectedSignatureHash: "282ea720987bbd344af2cfd0cbc5d90aca66a7f702d84461e34b9c8de561e3e3"}, + {name: "subnetwork-all-subnetwork-id", tx: subnetworkTx, hashType: all, inputIndex: 0, + modificationFunction: modifySubnetworkID, // should change the hash + expectedSignatureHash: "baea9e020dac798714ff2a05995d8183656b78ce669cd15db935db9a39600bf4"}, + } + + for _, test := range tests { + tx := test.tx + if test.modificationFunction != nil { + tx = test.modificationFunction(tx) + } + + actualSignatureHash, err := consensushashing.CalculateSignatureHashSchnorr( + tx, test.inputIndex, test.hashType, &consensushashing.SighashReusedValues{}) + if err != nil { + t.Errorf("%s: Error from CalculateSignatureHashSchnorr: %+v", test.name, err) + continue + } + + if actualSignatureHash.String() != test.expectedSignatureHash { + t.Errorf("%s: expected signature hash: '%s'; but got: '%s'", + test.name, 
test.expectedSignatureHash, actualSignatureHash) + } + } +} + +func TestCalculateSignatureHashECDSA(t *testing.T) { + nativeTx, subnetworkTx, err := generateTxs() + if err != nil { + t.Fatalf("Error from generateTxs: %+v", err) + } + + // Note: Expected values were generated by the same code that they test, + // As long as those were not verified using 3rd-party code they only check for regression, not correctness + tests := []struct { + name string + tx *externalapi.DomainTransaction + hashType consensushashing.SigHashType + inputIndex int + modificationFunction func(*externalapi.DomainTransaction) *externalapi.DomainTransaction + expectedSignatureHash string + }{ + // native transactions + + // sigHashAll + {name: "native-all-0", tx: nativeTx, hashType: all, inputIndex: 0, + expectedSignatureHash: "0f392dc3a17a16dc27d5aed6dc6a29078267836c124b1e1e3a97e27c6b42bb9b"}, + {name: "native-all-0-modify-input-1", tx: nativeTx, hashType: all, inputIndex: 0, + modificationFunction: modifyInput(1), // should change the hash + expectedSignatureHash: "998b1f5e80fe8872928c76b8562df04c12e9717f90eb1d4eae416ae8c2f84989"}, + {name: "native-all-0-modify-output-1", tx: nativeTx, hashType: all, inputIndex: 0, + modificationFunction: modifyOutput(1), // should change the hash + expectedSignatureHash: "2b31a6408b7cae812fc565f1c8f077562d690c3bfc0002a003bf2c5616e1cb99"}, + {name: "native-all-0-modify-sequence-1", tx: nativeTx, hashType: all, inputIndex: 0, + modificationFunction: modifySequence(1), // should change the hash + expectedSignatureHash: "a153b42ad946eb83113cfa08709ad6d0a12bbc87ff195dab8e28b7d80752fc7a"}, + {name: "native-all-anyonecanpay-0", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + expectedSignatureHash: "29f85e18054bcdc15d9ee57c55b9a5f6fdaebb8b85e11d2bd9e910f29226f24a"}, + {name: "native-all-anyonecanpay-0-modify-input-0", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyInput(0), // should change the hash + 
expectedSignatureHash: "31583790d9476fd8e8f97beacfd9a3c81e0d70855e5f55ec55fea801a1a1e060"}, + {name: "native-all-anyonecanpay-0-modify-input-1", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyInput(1), // shouldn't change the hash + expectedSignatureHash: "29f85e18054bcdc15d9ee57c55b9a5f6fdaebb8b85e11d2bd9e910f29226f24a"}, + {name: "native-all-anyonecanpay-0-modify-sequence", tx: nativeTx, hashType: allAnyoneCanPay, inputIndex: 0, + modificationFunction: modifySequence(1), // shouldn't change the hash + expectedSignatureHash: "29f85e18054bcdc15d9ee57c55b9a5f6fdaebb8b85e11d2bd9e910f29226f24a"}, + + // sigHashNone + {name: "native-none-0", tx: nativeTx, hashType: none, inputIndex: 0, + expectedSignatureHash: "d7601287982d62ec9eb76f600a7f86a8779f0971bb607800a00c35bce4016bd2"}, + {name: "native-none-0-modify-output-1", tx: nativeTx, hashType: none, inputIndex: 0, + modificationFunction: modifyOutput(1), // shouldn't change the hash + expectedSignatureHash: "d7601287982d62ec9eb76f600a7f86a8779f0971bb607800a00c35bce4016bd2"}, + {name: "native-none-0-modify-sequence-0", tx: nativeTx, hashType: none, inputIndex: 0, + modificationFunction: modifySequence(0), // should change the hash + expectedSignatureHash: "6412f1d36914b5aa716c0d60c54e3a4dfa6807bb007ce889f8344822c1be2c4e"}, + {name: "native-none-0-modify-sequence-1", tx: nativeTx, hashType: none, inputIndex: 0, + modificationFunction: modifySequence(1), // shouldn't change the hash + expectedSignatureHash: "d7601287982d62ec9eb76f600a7f86a8779f0971bb607800a00c35bce4016bd2"}, + {name: "native-none-anyonecanpay-0", tx: nativeTx, hashType: noneAnyoneCanPay, inputIndex: 0, + expectedSignatureHash: "c52e38429bc2d2a9f81000570cf42bb112427ea295c17fb104384b2bdc6dbc60"}, + {name: "native-none-anyonecanpay-0-modify-amount-spent", tx: nativeTx, hashType: noneAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyAmountSpent(0), // should change the hash + expectedSignatureHash: 
"7559a680d27ee9da46df8550c8da7979677b207865a46cd2a3520ebae26bd0d0"}, + {name: "native-none-anyonecanpay-0-modify-script-public-key", tx: nativeTx, hashType: noneAnyoneCanPay, inputIndex: 0, + modificationFunction: modifyScriptPublicKey(0), // should change the hash + expectedSignatureHash: "3510f7680401713da521f143b257ffb84a3b57c534d97069427dfa4c76b6aa91"}, + + // sigHashSingle + {name: "native-single-0", tx: nativeTx, hashType: single, inputIndex: 0, + expectedSignatureHash: "c80cf7b68f1595a9c8f725022d872d15611b399c281656a73fe0b27f08e3bf29"}, + {name: "native-single-0-modify-output-0", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifyOutput(0), // should change the hash + expectedSignatureHash: "5eec4b7a0764c3d30b912395abc7d4835f4a5ee732452673c97bb140cf440be0"}, + {name: "native-single-0-modify-output-1", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifyOutput(1), // shouldn't change the hash + expectedSignatureHash: "c80cf7b68f1595a9c8f725022d872d15611b399c281656a73fe0b27f08e3bf29"}, + {name: "native-single-0-modify-sequence-0", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifySequence(0), // should change the hash + expectedSignatureHash: "d2747fa260931c165a9df19449c00eafe552d826e80738246fa553c90030ddee"}, + {name: "native-single-0-modify-sequence-1", tx: nativeTx, hashType: single, inputIndex: 0, + modificationFunction: modifySequence(1), // shouldn't change the hash + expectedSignatureHash: "c80cf7b68f1595a9c8f725022d872d15611b399c281656a73fe0b27f08e3bf29"}, + {name: "native-single-2-no-corresponding-output", tx: nativeTx, hashType: single, inputIndex: 2, + expectedSignatureHash: "4b6a072a43403657d2b9b1e6cf6ae33803f106e67c5d0aaaf9149f189f7dce50"}, + {name: "native-single-2-no-corresponding-output-modify-output-1", tx: nativeTx, hashType: single, inputIndex: 2, + modificationFunction: modifyOutput(1), // shouldn't change the hash + expectedSignatureHash: 
"4b6a072a43403657d2b9b1e6cf6ae33803f106e67c5d0aaaf9149f189f7dce50"}, + {name: "native-single-anyonecanpay-0", tx: nativeTx, hashType: singleAnyoneCanPay, inputIndex: 0, + expectedSignatureHash: "e8751360200f73836dd1a1c7a1eb19d28aa515796b73785c6e6280321ea4dc4d"}, + {name: "native-single-anyonecanpay-2-no-corresponding-output", tx: nativeTx, hashType: singleAnyoneCanPay, inputIndex: 2, + expectedSignatureHash: "d65cd66de93608941a2f142642a837bee4b1ec46678a9ff60dbb513b4cec5f03"}, + + // subnetwork transaction + {name: "subnetwork-all-0", tx: subnetworkTx, hashType: all, inputIndex: 0, + expectedSignatureHash: "aa5c340d283799d8e6ecfb15dc88101349524ea220b53265e6de6d2d42fe3a01"}, + {name: "subnetwork-all-modify-payload", tx: subnetworkTx, hashType: all, inputIndex: 0, + modificationFunction: modifyPayload, // should change the hash + expectedSignatureHash: "e4d8dab094310f49784afe0d435c4964e2c4f2158e8c4c2952095559f676adfc"}, + {name: "subnetwork-all-modify-gas", tx: subnetworkTx, hashType: all, inputIndex: 0, + modificationFunction: modifyGas, // should change the hash + expectedSignatureHash: "d622a95da7e6cae688ee26f5906276627d937bd8570d8b089b1c740606db64bb"}, + {name: "subnetwork-all-subnetwork-id", tx: subnetworkTx, hashType: all, inputIndex: 0, + modificationFunction: modifySubnetworkID, // should change the hash + expectedSignatureHash: "fe884f10a46f9f8bb934dde4975a00a9ad1959659ece1af766518b9a6d67bd90"}, + } + + for _, test := range tests { + tx := test.tx + if test.modificationFunction != nil { + tx = test.modificationFunction(tx) + } + + actualSignatureHash, err := consensushashing.CalculateSignatureHashECDSA( + tx, test.inputIndex, test.hashType, &consensushashing.SighashReusedValues{}) + if err != nil { + t.Errorf("%s: Error from CalculateSignatureHashECDSA: %+v", test.name, err) + continue + } + + if actualSignatureHash.String() != test.expectedSignatureHash { + t.Errorf("%s: expected signature hash: '%s'; but got: '%s'", + test.name, test.expectedSignatureHash, 
actualSignatureHash) + } + } +} + +func generateTxs() (nativeTx, subnetworkTx *externalapi.DomainTransaction, err error) { + genesisCoinbase := dagconfig.SimnetParams.GenesisBlock.Transactions[0] + genesisCoinbaseTransactionID := consensushashing.TransactionID(genesisCoinbase) + + address1Str := "spectresim:qzpj2cfa9m40w9m2cmr8pvfuqpp32mzzwsuw6ukhfduqpp32mzzwsdyy3h0f6" + address1, err := util.DecodeAddress(address1Str, util.Bech32PrefixSpectreSim) + if err != nil { + return nil, nil, fmt.Errorf("error decoding address1: %+v", err) + } + address1ToScript, err := txscript.PayToAddrScript(address1) + if err != nil { + return nil, nil, fmt.Errorf("error generating script: %+v", err) + } + + address2Str := "spectresim:qr7w7nqsdnc3zddm6u8s9fex4ysk95hm3v30q353ymuqpp32mzzwsdyy3h0f6" + address2, err := util.DecodeAddress(address2Str, util.Bech32PrefixSpectreSim) + if err != nil { + return nil, nil, fmt.Errorf("error decoding address2: %+v", err) + } + address2ToScript, err := txscript.PayToAddrScript(address2) + if err != nil { + return nil, nil, fmt.Errorf("error generating script: %+v", err) + } + + txIns := []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: *externalapi.NewDomainOutpoint(genesisCoinbaseTransactionID, 0), + Sequence: 0, + UTXOEntry: utxo.NewUTXOEntry(100, address1ToScript, false, 0), + }, + { + PreviousOutpoint: *externalapi.NewDomainOutpoint(genesisCoinbaseTransactionID, 1), + Sequence: 1, + UTXOEntry: utxo.NewUTXOEntry(200, address2ToScript, false, 0), + }, + { + PreviousOutpoint: *externalapi.NewDomainOutpoint(genesisCoinbaseTransactionID, 2), + Sequence: 2, + UTXOEntry: utxo.NewUTXOEntry(300, address2ToScript, false, 0), + }, + } + + txOuts := []*externalapi.DomainTransactionOutput{ + { + Value: 300, + ScriptPublicKey: address2ToScript, + }, + { + Value: 300, + ScriptPublicKey: address1ToScript, + }, + } + + nativeTx = &externalapi.DomainTransaction{ + Version: 0, + Inputs: txIns, + Outputs: txOuts, + LockTime: 1615462089000, + 
SubnetworkID: subnetworks.SubnetworkIDNative, + } + subnetworkTx = &externalapi.DomainTransaction{ + Version: 0, + Inputs: txIns, + Outputs: txOuts, + LockTime: 1615462089000, + SubnetworkID: externalapi.DomainSubnetworkID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Gas: 250, + Payload: []byte{10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + } + + return nativeTx, subnetworkTx, nil +} + +func BenchmarkCalculateSignatureHashSchnorr(b *testing.B) { + sigHashTypes := []consensushashing.SigHashType{ + consensushashing.SigHashAll, + consensushashing.SigHashNone, + consensushashing.SigHashSingle, + consensushashing.SigHashAll | consensushashing.SigHashAnyOneCanPay, + consensushashing.SigHashNone | consensushashing.SigHashAnyOneCanPay, + consensushashing.SigHashSingle | consensushashing.SigHashAnyOneCanPay} + + for _, size := range []int{10, 100, 1000} { + tx := generateTransaction(b, sigHashTypes, size) + + b.Run(fmt.Sprintf("%d-inputs-and-outputs", size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + reusedValues := &consensushashing.SighashReusedValues{} + for inputIndex := range tx.Inputs { + sigHashType := sigHashTypes[inputIndex%len(sigHashTypes)] + _, err := consensushashing.CalculateSignatureHashSchnorr(tx, inputIndex, sigHashType, reusedValues) + if err != nil { + b.Fatalf("Error from CalculateSignatureHashSchnorr: %+v", err) + } + } + } + }) + } +} + +func generateTransaction(b *testing.B, sigHashTypes []consensushashing.SigHashType, inputAndOutputSizes int) *externalapi.DomainTransaction { + sourceScript := getSourceScript(b) + tx := &externalapi.DomainTransaction{ + Version: 0, + Inputs: generateInputs(inputAndOutputSizes, sourceScript), + Outputs: generateOutputs(inputAndOutputSizes, sourceScript), + LockTime: 123456789, + SubnetworkID: externalapi.DomainSubnetworkID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + Gas: 125, + Payload: []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}, + Fee: 0, + Mass: 0, + ID: nil, + } + signTx(b, tx, sigHashTypes) + return tx +} + +func signTx(b *testing.B, 
tx *externalapi.DomainTransaction, sigHashTypes []consensushashing.SigHashType) { + sourceAddressPKStr := "a4d85b7532123e3dd34e58d7ce20895f7ca32349e29b01700bb5a3e72d2570eb" + privateKeyBytes, err := hex.DecodeString(sourceAddressPKStr) + if err != nil { + b.Fatalf("Error parsing private key hex: %+v", err) + } + keyPair, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes) + if err != nil { + b.Fatalf("Error deserializing private key: %+v", err) + } + for i, txIn := range tx.Inputs { + signatureScript, err := txscript.SignatureScript( + tx, i, sigHashTypes[i%len(sigHashTypes)], keyPair, &consensushashing.SighashReusedValues{}) + if err != nil { + b.Fatalf("Error from SignatureScript: %+v", err) + } + txIn.SignatureScript = signatureScript + } + +} + +func generateInputs(size int, sourceScript *externalapi.ScriptPublicKey) []*externalapi.DomainTransactionInput { + inputs := make([]*externalapi.DomainTransactionInput, size) + + for i := 0; i < size; i++ { + inputs[i] = &externalapi.DomainTransactionInput{ + PreviousOutpoint: *externalapi.NewDomainOutpoint( + externalapi.NewDomainTransactionIDFromByteArray(&[32]byte{12, 3, 4, 5}), 1), + SignatureScript: nil, + Sequence: uint64(i), + UTXOEntry: utxo.NewUTXOEntry(uint64(i), sourceScript, false, 12), + } + } + + return inputs +} + +func getSourceScript(b *testing.B) *externalapi.ScriptPublicKey { + sourceAddressStr := "spectresim:qz6f9z6l3x4v3lf9mgf0t934th4nx5kgzu663x9yjh" + + sourceAddress, err := util.DecodeAddress(sourceAddressStr, util.Bech32PrefixSpectreSim) + if err != nil { + b.Fatalf("Error from DecodeAddress: %+v", err) + } + + sourceScript, err := txscript.PayToAddrScript(sourceAddress) + if err != nil { + b.Fatalf("Error from PayToAddrScript: %+v", err) + } + return sourceScript +} + +func generateOutputs(size int, script *externalapi.ScriptPublicKey) []*externalapi.DomainTransactionOutput { + outputs := make([]*externalapi.DomainTransactionOutput, size) + + for i := 0; i < size; i++ { + 
outputs[i] = &externalapi.DomainTransactionOutput{ + Value: uint64(i), + ScriptPublicKey: script, + } + } + + return outputs +} diff --git a/domain/consensus/utils/consensushashing/transaction.go b/domain/consensus/utils/consensushashing/transaction.go new file mode 100644 index 0000000..e28edb7 --- /dev/null +++ b/domain/consensus/utils/consensushashing/transaction.go @@ -0,0 +1,190 @@ +package consensushashing + +import ( + "io" + + "github.com/spectre-project/spectred/domain/consensus/utils/serialization" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/util/binaryserializer" +) + +// txEncoding is a bitmask defining which transaction fields we +// want to encode and which to ignore. +type txEncoding uint8 + +const ( + txEncodingFull txEncoding = 0 + + txEncodingExcludeSignatureScript = 1 << iota +) + +// TransactionHash returns the transaction hash. +func TransactionHash(tx *externalapi.DomainTransaction) *externalapi.DomainHash { + // Encode the header and hash everything prior to the number of + // transactions. + writer := hashes.NewTransactionHashWriter() + err := serializeTransaction(writer, tx, txEncodingFull) + if err != nil { + // It seems like this could only happen if the writer returned an error. + // and this writer should never return an error (no allocations or possible failures) + // the only non-writer error path here is unknown types in `WriteElement` + panic(errors.Wrap(err, "TransactionHash() failed. this should never fail for structurally-valid transactions")) + } + + return writer.Finalize() +} + +// TransactionID generates the Hash for the transaction without the signature script and payload field. 
+func TransactionID(tx *externalapi.DomainTransaction) *externalapi.DomainTransactionID { + // If transaction ID is already cached, return it + if tx.ID != nil { + return tx.ID + } + + // Encode the transaction, replace signature script with zeroes, cut off + // payload and hash the result. + var encodingFlags txEncoding + if !transactionhelper.IsCoinBase(tx) { + encodingFlags = txEncodingExcludeSignatureScript + } + writer := hashes.NewTransactionIDWriter() + err := serializeTransaction(writer, tx, encodingFlags) + if err != nil { + // this writer never returns errors (no allocations or possible failures) so errors can only come from validity checks, + // and we assume we never construct malformed transactions. + panic(errors.Wrap(err, "TransactionID() failed. this should never fail for structurally-valid transactions")) + } + transactionID := externalapi.DomainTransactionID(*writer.Finalize()) + + tx.ID = &transactionID + + return tx.ID +} + +// TransactionIDs converts the provided slice of DomainTransactions to a corresponding slice of TransactionIDs +func TransactionIDs(txs []*externalapi.DomainTransaction) []*externalapi.DomainTransactionID { + txIDs := make([]*externalapi.DomainTransactionID, len(txs)) + for i, tx := range txs { + txIDs[i] = TransactionID(tx) + } + return txIDs +} + +func serializeTransaction(w io.Writer, tx *externalapi.DomainTransaction, encodingFlags txEncoding) error { + err := binaryserializer.PutUint16(w, tx.Version) + if err != nil { + return err + } + + count := uint64(len(tx.Inputs)) + err = serialization.WriteElement(w, count) + if err != nil { + return err + } + + for _, ti := range tx.Inputs { + err = writeTransactionInput(w, ti, encodingFlags) + if err != nil { + return err + } + } + + count = uint64(len(tx.Outputs)) + err = serialization.WriteElement(w, count) + if err != nil { + return err + } + + for _, output := range tx.Outputs { + err = writeTxOut(w, output) + if err != nil { + return err + } + } + + err = 
binaryserializer.PutUint64(w, tx.LockTime) + if err != nil { + return err + } + + _, err = w.Write(tx.SubnetworkID[:]) + if err != nil { + return err + } + + err = binaryserializer.PutUint64(w, tx.Gas) + if err != nil { + return err + } + + err = writeVarBytes(w, tx.Payload) + if err != nil { + return err + } + + return nil +} + +// writeTransactionInput encodes ti to the spectre protocol encoding for a transaction +// input to w. +func writeTransactionInput(w io.Writer, ti *externalapi.DomainTransactionInput, encodingFlags txEncoding) error { + err := writeOutpoint(w, &ti.PreviousOutpoint) + if err != nil { + return err + } + + if encodingFlags&txEncodingExcludeSignatureScript != txEncodingExcludeSignatureScript { + err = writeVarBytes(w, ti.SignatureScript) + if err != nil { + return err + } + + _, err = w.Write([]byte{ti.SigOpCount}) + if err != nil { + return err + } + } else { + err = writeVarBytes(w, []byte{}) + if err != nil { + return err + } + } + + return binaryserializer.PutUint64(w, ti.Sequence) +} + +func writeOutpoint(w io.Writer, outpoint *externalapi.DomainOutpoint) error { + _, err := w.Write(outpoint.TransactionID.ByteSlice()) + if err != nil { + return err + } + + return binaryserializer.PutUint32(w, outpoint.Index) +} + +func writeVarBytes(w io.Writer, data []byte) error { + dataLength := uint64(len(data)) + err := serialization.WriteElement(w, dataLength) + if err != nil { + return err + } + + _, err = w.Write(data) + return err +} + +func writeTxOut(w io.Writer, to *externalapi.DomainTransactionOutput) error { + err := binaryserializer.PutUint64(w, to.Value) + if err != nil { + return err + } + err = binaryserializer.PutUint16(w, to.ScriptPublicKey.Version) + if err != nil { + return err + } + return writeVarBytes(w, to.ScriptPublicKey.Script) +} diff --git a/domain/consensus/utils/constants/constants.go b/domain/consensus/utils/constants/constants.go new file mode 100644 index 0000000..508af4d --- /dev/null +++ 
b/domain/consensus/utils/constants/constants.go @@ -0,0 +1,41 @@ +package constants + +import "math" + +const ( + // BlockVersion represents the current block version + BlockVersion uint16 = 1 + + // MaxTransactionVersion is the current latest supported transaction version. + MaxTransactionVersion uint16 = 0 + + // MaxScriptPublicKeyVersion is the current latest supported public key script version. + MaxScriptPublicKeyVersion uint16 = 0 + + // SompiPerSpectre is the number of sompi in one spectre (1 SPR). + SompiPerSpectre = 100_000_000 + + // MaxSompi is the maximum transaction amount allowed in sompi. + MaxSompi = uint64(1_161_000_000 * SompiPerSpectre) + + // MaxTxInSequenceNum is the maximum sequence number the sequence field + // of a transaction input can be. + MaxTxInSequenceNum uint64 = math.MaxUint64 + + // SequenceLockTimeDisabled is a flag that if set on a transaction + // input's sequence number, the sequence number will not be interpreted + // as a relative locktime. + SequenceLockTimeDisabled uint64 = 1 << 63 + + // SequenceLockTimeMask is a mask that extracts the relative locktime + // when masked against the transaction input sequence number. + SequenceLockTimeMask uint64 = 0x00000000ffffffff + + // LockTimeThreshold is the number below which a lock time is + // interpreted to be a DAA score. + LockTimeThreshold = 5e11 // Tue Nov 5 00:53:20 1985 UTC + + // UnacceptedDAAScore is used for UTXOEntries that were created by transactions in the mempool, or otherwise + not-yet-accepted transactions. 
+ UnacceptedDAAScore = math.MaxUint64 +) diff --git a/domain/consensus/utils/hashes/domains.go b/domain/consensus/utils/hashes/domains.go new file mode 100644 index 0000000..30d703c --- /dev/null +++ b/domain/consensus/utils/hashes/domains.go @@ -0,0 +1,89 @@ +package hashes + +import ( + "crypto/sha256" + + "github.com/pkg/errors" + "golang.org/x/crypto/blake2b" + "golang.org/x/crypto/sha3" +) + +const ( + transcationHashDomain = "TransactionHash" + transcationIDDomain = "TransactionID" + transcationSigningDomain = "TransactionSigningHash" + transcationSigningECDSADomain = "TransactionSigningHashECDSA" + blockDomain = "BlockHash" + proofOfWorkDomain = "ProofOfWorkHash" + heavyHashDomain = "HeavyHash" + merkleBranchDomain = "MerkleBranchHash" +) + +// transactionSigningECDSADomainHash is a hashed version of transcationSigningECDSADomain that is used +// to make it a constant size. This is needed because this domain is used by sha256 hash writer, and +// sha256 doesn't support variable size domain separation. +var transactionSigningECDSADomainHash = sha256.Sum256([]byte(transcationSigningECDSADomain)) + +// NewTransactionHashWriter Returns a new HashWriter used for transaction hashes +func NewTransactionHashWriter() HashWriter { + blake, err := blake2b.New256([]byte(transcationHashDomain)) + if err != nil { + panic(errors.Wrapf(err, "this should never happen. %s is less than 64 bytes", transcationHashDomain)) + } + return HashWriter{blake} +} + +// NewTransactionIDWriter Returns a new HashWriter used for transaction IDs +func NewTransactionIDWriter() HashWriter { + blake, err := blake2b.New256([]byte(transcationIDDomain)) + if err != nil { + panic(errors.Wrapf(err, "this should never happen. 
%s is less than 64 bytes", transcationIDDomain)) + } + return HashWriter{blake} +} + +// NewTransactionSigningHashWriter Returns a new HashWriter used for signing on a transaction +func NewTransactionSigningHashWriter() HashWriter { + blake, err := blake2b.New256([]byte(transcationSigningDomain)) + if err != nil { + panic(errors.Wrapf(err, "this should never happen. %s is less than 64 bytes", transcationSigningDomain)) + } + return HashWriter{blake} +} + +// NewTransactionSigningHashECDSAWriter Returns a new HashWriter used for signing on a transaction with ECDSA +func NewTransactionSigningHashECDSAWriter() HashWriter { + hashWriter := HashWriter{sha256.New()} + hashWriter.InfallibleWrite(transactionSigningECDSADomainHash[:]) + return hashWriter +} + +// NewBlockHashWriter Returns a new HashWriter used for hashing blocks +func NewBlockHashWriter() HashWriter { + blake, err := blake2b.New256([]byte(blockDomain)) + if err != nil { + panic(errors.Wrapf(err, "this should never happen. %s is less than 64 bytes", blockDomain)) + } + return HashWriter{blake} +} + +// NewPoWHashWriter Returns a new HashWriter used for the PoW function +func NewPoWHashWriter() ShakeHashWriter { + shake256 := sha3.NewCShake256(nil, []byte(proofOfWorkDomain)) + return ShakeHashWriter{shake256} +} + +// NewHeavyHashWriter Returns a new HashWriter used for the HeavyHash function +func NewHeavyHashWriter() ShakeHashWriter { + shake256 := sha3.NewCShake256(nil, []byte(heavyHashDomain)) + return ShakeHashWriter{shake256} +} + +// NewMerkleBranchHashWriter Returns a new HashWriter used for a merkle tree branch +func NewMerkleBranchHashWriter() HashWriter { + blake, err := blake2b.New256([]byte(merkleBranchDomain)) + if err != nil { + panic(errors.Wrapf(err, "this should never happen. 
%s is less than 64 bytes", merkleBranchDomain)) + } + return HashWriter{blake} +} diff --git a/domain/consensus/utils/hashes/strings.go b/domain/consensus/utils/hashes/strings.go new file mode 100644 index 0000000..a280b14 --- /dev/null +++ b/domain/consensus/utils/hashes/strings.go @@ -0,0 +1,14 @@ +package hashes + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// ToStrings converts a slice of hashes into a slice of the corresponding strings +func ToStrings(hashes []*externalapi.DomainHash) []string { + strings := make([]string, len(hashes)) + for i, hash := range hashes { + strings[i] = hash.String() + } + return strings +} diff --git a/domain/consensus/utils/hashes/writers.go b/domain/consensus/utils/hashes/writers.go new file mode 100644 index 0000000..ddfe615 --- /dev/null +++ b/domain/consensus/utils/hashes/writers.go @@ -0,0 +1,59 @@ +package hashes + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "golang.org/x/crypto/sha3" + "hash" +) + +// HashWriter is used to incrementally hash data without concatenating all of the data to a single buffer +// it exposes an io.Writer api and a Finalize function to get the resulting hash. +// The used hash function is blake2b. +// This can only be created via one of the domain separated constructors +type HashWriter struct { + hash.Hash +} + +// InfallibleWrite is just like write but doesn't return anything +func (h HashWriter) InfallibleWrite(p []byte) { + // This write can never return an error, this is part of the hash.Hash interface contract. + _, err := h.Write(p) + if err != nil { + panic(errors.Wrap(err, "this should never happen. 
hash.Hash interface promises to not return errors.")) + } +} + +// Finalize returns the resulting hash +func (h HashWriter) Finalize() *externalapi.DomainHash { + var sum [externalapi.DomainHashSize]byte + // This should prevent `Sum` from allocating an output buffer, by using the DomainHash buffer. We still copy because we don't want to rely on that. + copy(sum[:], h.Sum(sum[:0])) + return externalapi.NewDomainHashFromByteArray(&sum) +} + +// ShakeHashWriter is exactly the same as HashWriter but for CShake256 +type ShakeHashWriter struct { + sha3.ShakeHash +} + +// InfallibleWrite is just like write but doesn't return anything +func (h *ShakeHashWriter) InfallibleWrite(p []byte) { + // This write can never return an error, this is part of the hash.Hash interface contract. + _, err := h.Write(p) + if err != nil { + panic(errors.Wrap(err, "this should never happen. sha3.ShakeHash interface promises to not return errors.")) + } +} + +// Finalize returns the resulting hash +func (h *ShakeHashWriter) Finalize() *externalapi.DomainHash { + var sum [externalapi.DomainHashSize]byte + // This should prevent `Sum` from allocating an output buffer, by using the DomainHash buffer. We still copy because we don't want to rely on that. + _, err := h.Read(sum[:]) + if err != nil { + panic(errors.Wrap(err, "this should never happen. 
sha3.ShakeHash interface promises to not return errors.")) + } + h.ShakeHash = nil // prevent double reading as it will return a different hash + return externalapi.NewDomainHashFromByteArray(&sum) +} diff --git a/domain/consensus/utils/hashes/writers_test.go b/domain/consensus/utils/hashes/writers_test.go new file mode 100644 index 0000000..c1a76bd --- /dev/null +++ b/domain/consensus/utils/hashes/writers_test.go @@ -0,0 +1,120 @@ +package hashes + +import ( + "fmt" + "math/rand" + "testing" +) + +func TestNewBlockHash(t *testing.T) { + datas := [][]byte{ + {}, + {1}, + {5, 199, 126, 44, 71, 32, 82, 139, 122, 217, 43, 48, 52, 112, 40, 209, 180, 83, 139, 231, 72, 48, 136, 48, 168, 226, 133, 7, 60, 4, 160, 205}, + {42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42}, + {0, 0, 0, 0, 0, 0, 0, 0}, + } + tests := []struct { + hasher HashWriter + expected []string + }{ + {NewTransactionHashWriter(), []string{ + "50272a9e37c728026f93d0eda6ab4467f627338b879076483c88d291193cb3bf", + "f9bf7e04c712621a0f4bb75d763f9ef5f73af6c438fd15b80744393bc96398ad", + "8e791f3edcc92b71b8de2778efbc4666ee5bd146acbe8723a55bca26b022b0e0", + "a6dab1a3088548c62d13a082fa28e870fdbbe51adcd8c364e2ea37e473c04d81", + "3b79b78b967233843ad30f707b165eb3d6a91af8338076be8755c46a963c3d1d", + }}, + {NewTransactionIDWriter(), []string{ + "e5f65efda0894d2b0590c2e9e46e9acc03032f505a1522f5e8c78c5ec70b1d9c", + "aea52cf5e5a13da13a52dd69abd636eb1b0f86e58bc1dda6b17886b94593415a", + "a50a2f87bdce075740189e9e23907ae22b5addbd875ccb70c116811b1fa5fb18", + "0db7a485f7013a346a8f7f5caf73d52ca3c3b5ee101ad8753adedd4235b7236b", + "2afc9c855854b0a6e94a722c3451d0cdfc8c11748b78ef65b9786f87b48d0d07", + }}, + {NewTransactionSigningHashWriter(), []string{ + "34c75037ad62740d4b3228f88f844f7901c07bfacd55a045be518eabc15e52ce", + 
"8523b0471bcbea04575ccaa635eef9f9114f2890bda54367e5ff8caa3878bf82", + "a51c49d9eb3d13f9de16e1aa8d1ff17668d55633ce00f36a643ac714b0fb137f", + "487f199ef74c3e893e85bd37770e6334575a2d4d113b2e10474593c49807de93", + "6392adc33a8e24e9a0a0c4c5f07f9c1cc958ad40c16d7a9a276e374cebb4e32b", + }}, + {NewTransactionSigningHashECDSAWriter(), []string{ + "b31ad1fbbe41b0e2a90e07c84708b38ba581f0c0e9185416913a04fb6d342027", + "c43e1f75ea9df6379b56a95074c2b6289ed8c5a01fff2d49d9d44ad5575c164b", + "49085f99fa0084b5436663f757a5916b1e4290c3321707fb76921ed4e47844ec", + "3f887e866428de813c1d0463b14eef3ca1363c8187e917dda1eee0ec5996490b", + "56de89a8c75f0fee2de61b11ab05d0d42e29ed50879467cf128dd80800a52ada", + }}, + {NewBlockHashWriter(), []string{ + "a80b6aa20f20b15ebabe2b1949527f78a257594a732e774de637d85e6973a768", + "5643023add641f9421187b8c9aa3c6c73227d5ec34131c61a08d35b43e7e4b65", + "4dc3bf72045431e46f8839a7d390898f27c887fddd8637149bfb70f732f04334", + "15d7648e69023dca65c949a61ea166192049f449c604523494813873b19918a7", + "3ac41af8385ea5d902ce6d47f509b7accc9c631f1d57a719d777874467f6d877", + }}, + {NewMerkleBranchHashWriter(), []string{ + "4de3617db456d01248173f17ec58196e92fbd994b636476db4b875ed2ec84054", + "5737cd8b6fca5a30c19a491323a14e6b7021641cb3f8875f10c7a2eafd3cf43f", + "a49eeda61cc75e0a8e5915829752fe0ad97620d6d32de7c9883595b0810ca33e", + "28f33681dcff1313674e07dacc2d74c3089f6d8cea7a4f8792a71fd870988ee5", + "2d53a43a42020a5091c125230bcd8a4cf0eeb188333e68325d4bce58a1c75ca3", + }}, + } + + for _, testVector := range tests { + hasher := testVector.hasher + for i, data := range datas { + hasher.InfallibleWrite(data) + res := hasher.Finalize().String() + if res != testVector.expected[i] { + panic(fmt.Sprintf("expected: %s, got: %s", testVector.expected[i], res)) + } + } + } + +} + +func BenchmarkNewBlockHashWriterSmall(b *testing.B) { + r := rand.New(rand.NewSource(0)) + var someBytes [32]byte + r.Read(someBytes[:]) + for i := 0; i < b.N; i++ { + hasher := NewBlockHashWriter() + 
hasher.InfallibleWrite(someBytes[:]) + hasher.Finalize() + } +} + +func BenchmarkNewBlockHashWriterBig(b *testing.B) { + r := rand.New(rand.NewSource(0)) + var someBytes [1024]byte + r.Read(someBytes[:]) + for i := 0; i < b.N; i++ { + hasher := NewBlockHashWriter() + hasher.InfallibleWrite(someBytes[:]) + hasher.Finalize() + } +} + +func BenchmarkNewHeavyHashWriterSmall(b *testing.B) { + r := rand.New(rand.NewSource(0)) + var someBytes [32]byte + r.Read(someBytes[:]) + for i := 0; i < b.N; i++ { + hasher := NewHeavyHashWriter() + hasher.InfallibleWrite(someBytes[:]) + hasher.Finalize() + } +} + +func BenchmarkNewHeavyHashWriterBig(b *testing.B) { + r := rand.New(rand.NewSource(0)) + var someBytes [1024]byte + r.Read(someBytes[:]) + for i := 0; i < b.N; i++ { + hasher := NewHeavyHashWriter() + hasher.InfallibleWrite(someBytes[:]) + hasher.Finalize() + } +} diff --git a/domain/consensus/utils/hashset/hash_set.go b/domain/consensus/utils/hashset/hash_set.go new file mode 100644 index 0000000..9826e22 --- /dev/null +++ b/domain/consensus/utils/hashset/hash_set.go @@ -0,0 +1,93 @@ +package hashset + +import ( + "strings" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// HashSet is an unsorted unique collection of DomainHashes +type HashSet map[externalapi.DomainHash]struct{} + +// New creates and returns an empty HashSet +func New() HashSet { + return HashSet{} +} + +// NewFromSlice creates and returns a HashSet with contents according to provided slice +func NewFromSlice(hashes ...*externalapi.DomainHash) HashSet { + set := New() + + for _, hash := range hashes { + set.Add(hash) + } + + return set +} + +// String returns a string representation of this hash set +func (hs HashSet) String() string { + hashStrings := make([]string, 0, len(hs)) + for hash := range hs { + hashStrings = append(hashStrings, hash.String()) + } + return strings.Join(hashStrings, ", ") +} + +// Add appends a hash to this HashSet. 
If given hash already exists - does nothing +func (hs HashSet) Add(hash *externalapi.DomainHash) { + hs[*hash] = struct{}{} +} + +// Remove removes a hash from this HashSet. If given hash does not exist in HashSet - does nothing. +func (hs HashSet) Remove(hash *externalapi.DomainHash) { + delete(hs, *hash) +} + +// Contains returns true if this HashSet contains the given hash. +func (hs HashSet) Contains(hash *externalapi.DomainHash) bool { + _, ok := hs[*hash] + return ok +} + +// Subtract creates and returns a new HashSet that contains all hashes in this HashSet minus the ones in `other` +func (hs HashSet) Subtract(other HashSet) HashSet { + diff := New() + + for hash := range hs { + hashCopy := hash + if !other.Contains(&hashCopy) { + diff.Add(&hashCopy) + } + } + + return diff +} + +// ContainsAllInSlice returns true if this HashSet contains all hashes in given slice +func (hs HashSet) ContainsAllInSlice(slice []*externalapi.DomainHash) bool { + for _, hash := range slice { + if !hs.Contains(hash) { + return false + } + } + + return true +} + +// ToSlice converts this HashSet into a slice of hashes +func (hs HashSet) ToSlice() []*externalapi.DomainHash { + slice := make([]*externalapi.DomainHash, 0, len(hs)) + + for hash := range hs { + hashCopy := hash + slice = append(slice, &hashCopy) + } + + return slice +} + +// Length returns the length of this HashSet +func (hs HashSet) Length() int { + return len(hs) +} diff --git a/domain/consensus/utils/lrucache/lrucache.go b/domain/consensus/utils/lrucache/lrucache.go new file mode 100644 index 0000000..ae4222e --- /dev/null +++ b/domain/consensus/utils/lrucache/lrucache.go @@ -0,0 +1,65 @@ +package lrucache + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// LRUCache is a least-recently-used cache for any type +// that's able to be indexed by DomainHash +type LRUCache struct { + cache map[externalapi.DomainHash]interface{} + capacity int +} + +// New creates a new LRUCache 
+func New(capacity int, preallocate bool) *LRUCache { + var cache map[externalapi.DomainHash]interface{} + if preallocate { + cache = make(map[externalapi.DomainHash]interface{}, capacity+1) + } else { + cache = make(map[externalapi.DomainHash]interface{}) + } + return &LRUCache{ + cache: cache, + capacity: capacity, + } +} + +// Add adds an entry to the LRUCache +func (c *LRUCache) Add(key *externalapi.DomainHash, value interface{}) { + c.cache[*key] = value + + if len(c.cache) > c.capacity { + c.evictRandom() + } +} + +// Get returns the entry for the given key, or (nil, false) otherwise +func (c *LRUCache) Get(key *externalapi.DomainHash) (interface{}, bool) { + value, ok := c.cache[*key] + if !ok { + return nil, false + } + return value, true +} + +// Has returns whether the LRUCache contains the given key +func (c *LRUCache) Has(key *externalapi.DomainHash) bool { + _, ok := c.cache[*key] + return ok +} + +// Remove removes the entry for the given key. Does nothing if +// the entry does not exist +func (c *LRUCache) Remove(key *externalapi.DomainHash) { + delete(c.cache, *key) +} + +func (c *LRUCache) evictRandom() { + var keyToEvict externalapi.DomainHash + for key := range c.cache { + keyToEvict = key + break + } + c.Remove(&keyToEvict) +} diff --git a/domain/consensus/utils/lrucacheghostdagdata/lrucacheghostdagdata.go b/domain/consensus/utils/lrucacheghostdagdata/lrucacheghostdagdata.go new file mode 100644 index 0000000..13a9eff --- /dev/null +++ b/domain/consensus/utils/lrucacheghostdagdata/lrucacheghostdagdata.go @@ -0,0 +1,79 @@ +package lrucacheghostdagdata + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +type lruKey struct { + blockHash externalapi.DomainHash + isTrustedData bool +} + +func newKey(blockHash *externalapi.DomainHash, isTrustedData bool) lruKey { + return lruKey{ + blockHash: *blockHash, + isTrustedData: isTrustedData, + } +} + +// LRUCache is a least-recently-used cache from +// lruKey to 
*externalapi.BlockGHOSTDAGData +type LRUCache struct { + cache map[lruKey]*externalapi.BlockGHOSTDAGData + capacity int +} + +// New creates a new LRUCache +func New(capacity int, preallocate bool) *LRUCache { + var cache map[lruKey]*externalapi.BlockGHOSTDAGData + if preallocate { + cache = make(map[lruKey]*externalapi.BlockGHOSTDAGData, capacity+1) + } else { + cache = make(map[lruKey]*externalapi.BlockGHOSTDAGData) + } + return &LRUCache{ + cache: cache, + capacity: capacity, + } +} + +// Add adds an entry to the LRUCache +func (c *LRUCache) Add(blockHash *externalapi.DomainHash, isTrustedData bool, value *externalapi.BlockGHOSTDAGData) { + key := newKey(blockHash, isTrustedData) + c.cache[key] = value + + if len(c.cache) > c.capacity { + c.evictRandom() + } +} + +// Get returns the entry for the given key, or (nil, false) otherwise +func (c *LRUCache) Get(blockHash *externalapi.DomainHash, isTrustedData bool) (*externalapi.BlockGHOSTDAGData, bool) { + key := newKey(blockHash, isTrustedData) + value, ok := c.cache[key] + if !ok { + return nil, false + } + return value, true +} + +// Has returns whether the LRUCache contains the given key +func (c *LRUCache) Has(blockHash *externalapi.DomainHash, isTrustedData bool) bool { + key := newKey(blockHash, isTrustedData) + _, ok := c.cache[key] + return ok +} + +// Remove removes the entry for the the given key. 
Does nothing if +// the entry does not exist +func (c *LRUCache) Remove(blockHash *externalapi.DomainHash, isTrustedData bool) { + key := newKey(blockHash, isTrustedData) + delete(c.cache, key) +} + +func (c *LRUCache) evictRandom() { + var keyToEvict lruKey + for key := range c.cache { + keyToEvict = key + break + } + c.Remove(&keyToEvict.blockHash, keyToEvict.isTrustedData) +} diff --git a/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs/lrucachehashandwindowsizetoblockghostdagdatahashpairs.go b/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs/lrucachehashandwindowsizetoblockghostdagdatahashpairs.go new file mode 100644 index 0000000..f7155a1 --- /dev/null +++ b/domain/consensus/utils/lrucachehashandwindowsizetoblockghostdagdatahashpairs/lrucachehashandwindowsizetoblockghostdagdatahashpairs.go @@ -0,0 +1,79 @@ +package lrucachehashandwindowsizetoblockghostdagdatahashpairs + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +type lruKey struct { + blockHash externalapi.DomainHash + windowSize int +} + +func newKey(blockHash *externalapi.DomainHash, windowSize int) lruKey { + return lruKey{ + blockHash: *blockHash, + windowSize: windowSize, + } +} + +// LRUCache is a least-recently-used cache from +// lruKey to *externalapi.BlockGHOSTDAGDataHashPair +type LRUCache struct { + cache map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair + capacity int +} + +// New creates a new LRUCache +func New(capacity int, preallocate bool) *LRUCache { + var cache map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair + if preallocate { + cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair, capacity+1) + } else { + cache = make(map[lruKey][]*externalapi.BlockGHOSTDAGDataHashPair) + } + return &LRUCache{ + cache: cache, + capacity: capacity, + } +} + +// Add adds an entry to the LRUCache +func (c *LRUCache) Add(blockHash *externalapi.DomainHash, windowSize int, value 
[]*externalapi.BlockGHOSTDAGDataHashPair) { + key := newKey(blockHash, windowSize) + c.cache[key] = value + + if len(c.cache) > c.capacity { + c.evictRandom() + } +} + +// Get returns the entry for the given key, or (nil, false) otherwise +func (c *LRUCache) Get(blockHash *externalapi.DomainHash, windowSize int) ([]*externalapi.BlockGHOSTDAGDataHashPair, bool) { + key := newKey(blockHash, windowSize) + value, ok := c.cache[key] + if !ok { + return nil, false + } + return value, true +} + +// Has returns whether the LRUCache contains the given key +func (c *LRUCache) Has(blockHash *externalapi.DomainHash, windowSize int) bool { + key := newKey(blockHash, windowSize) + _, ok := c.cache[key] + return ok +} + +// Remove removes the entry for the given key. Does nothing if +// the entry does not exist +func (c *LRUCache) Remove(blockHash *externalapi.DomainHash, windowSize int) { + key := newKey(blockHash, windowSize) + delete(c.cache, key) +} + +func (c *LRUCache) evictRandom() { + var keyToEvict lruKey + for key := range c.cache { + keyToEvict = key + break + } + c.Remove(&keyToEvict.blockHash, keyToEvict.windowSize) +} diff --git a/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair/lrucachehashpairtoblockghostdagdatahashpair.go b/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair/lrucachehashpairtoblockghostdagdatahashpair.go new file mode 100644 index 0000000..1d34f6b --- /dev/null +++ b/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair/lrucachehashpairtoblockghostdagdatahashpair.go @@ -0,0 +1,79 @@ +package lrucachehashpairtoblockghostdagdatahashpair + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +type lruKey struct { + blockHash externalapi.DomainHash + index uint64 +} + +func newKey(blockHash *externalapi.DomainHash, index uint64) lruKey { + return lruKey{ + blockHash: *blockHash, + index: index, + } +} + +// LRUCache is a least-recently-used cache from +// lruKey to 
*externalapi.BlockGHOSTDAGDataHashPair +type LRUCache struct { + cache map[lruKey]*externalapi.BlockGHOSTDAGDataHashPair + capacity int +} + +// New creates a new LRUCache +func New(capacity int, preallocate bool) *LRUCache { + var cache map[lruKey]*externalapi.BlockGHOSTDAGDataHashPair + if preallocate { + cache = make(map[lruKey]*externalapi.BlockGHOSTDAGDataHashPair, capacity+1) + } else { + cache = make(map[lruKey]*externalapi.BlockGHOSTDAGDataHashPair) + } + return &LRUCache{ + cache: cache, + capacity: capacity, + } +} + +// Add adds an entry to the LRUCache +func (c *LRUCache) Add(blockHash *externalapi.DomainHash, index uint64, value *externalapi.BlockGHOSTDAGDataHashPair) { + key := newKey(blockHash, index) + c.cache[key] = value + + if len(c.cache) > c.capacity { + c.evictRandom() + } +} + +// Get returns the entry for the given key, or (nil, false) otherwise +func (c *LRUCache) Get(blockHash *externalapi.DomainHash, index uint64) (*externalapi.BlockGHOSTDAGDataHashPair, bool) { + key := newKey(blockHash, index) + value, ok := c.cache[key] + if !ok { + return nil, false + } + return value, true +} + +// Has returns whether the LRUCache contains the given key +func (c *LRUCache) Has(blockHash *externalapi.DomainHash, index uint64) bool { + key := newKey(blockHash, index) + _, ok := c.cache[key] + return ok +} + +// Remove removes the entry for the the given key. 
Does nothing if +// the entry does not exist +func (c *LRUCache) Remove(blockHash *externalapi.DomainHash, index uint64) { + key := newKey(blockHash, index) + delete(c.cache, key) +} + +func (c *LRUCache) evictRandom() { + var keyToEvict lruKey + for key := range c.cache { + keyToEvict = key + break + } + c.Remove(&keyToEvict.blockHash, keyToEvict.index) +} diff --git a/domain/consensus/utils/lrucacheuint64tohash/lrucacheuint64tohash.go b/domain/consensus/utils/lrucacheuint64tohash/lrucacheuint64tohash.go new file mode 100644 index 0000000..ce38062 --- /dev/null +++ b/domain/consensus/utils/lrucacheuint64tohash/lrucacheuint64tohash.go @@ -0,0 +1,63 @@ +package lrucacheuint64tohash + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// LRUCache is a least-recently-used cache from +// uint64 to DomainHash +type LRUCache struct { + cache map[uint64]*externalapi.DomainHash + capacity int +} + +// New creates a new LRUCache +func New(capacity int, preallocate bool) *LRUCache { + var cache map[uint64]*externalapi.DomainHash + if preallocate { + cache = make(map[uint64]*externalapi.DomainHash, capacity+1) + } else { + cache = make(map[uint64]*externalapi.DomainHash) + } + return &LRUCache{ + cache: cache, + capacity: capacity, + } +} + +// Add adds an entry to the LRUCache +func (c *LRUCache) Add(key uint64, value *externalapi.DomainHash) { + c.cache[key] = value + + if len(c.cache) > c.capacity { + c.evictRandom() + } +} + +// Get returns the entry for the given key, or (nil, false) otherwise +func (c *LRUCache) Get(key uint64) (*externalapi.DomainHash, bool) { + value, ok := c.cache[key] + if !ok { + return nil, false + } + return value, true +} + +// Has returns whether the LRUCache contains the given key +func (c *LRUCache) Has(key uint64) bool { + _, ok := c.cache[key] + return ok +} + +// Remove removes the entry for the the given key. 
Does nothing if +// the entry does not exist +func (c *LRUCache) Remove(key uint64) { + delete(c.cache, key) +} + +func (c *LRUCache) evictRandom() { + var keyToEvict uint64 + for key := range c.cache { + keyToEvict = key + break + } + c.Remove(keyToEvict) +} diff --git a/domain/consensus/utils/merkle/merkle.go b/domain/consensus/utils/merkle/merkle.go new file mode 100644 index 0000000..c3322b2 --- /dev/null +++ b/domain/consensus/utils/merkle/merkle.go @@ -0,0 +1,100 @@ +package merkle + +import ( + "math" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" +) + +// nextPowerOfTwo returns the next highest power of two from a given number if +// it is not already a power of two. This is a helper function used during the +// calculation of a merkle tree. +func nextPowerOfTwo(n int) int { + // Return the number if it's already a power of 2. + if n&(n-1) == 0 { + return n + } + + // Figure out and return the next power of two. + exponent := uint(math.Log2(float64(n))) + 1 + return 1 << exponent // 2^exponent +} + +// hashMerkleBranches takes two hashes, treated as the left and right tree +// nodes, and returns the hash of their concatenation. This is a helper +// function used to aid in the generation of a merkle tree. +func hashMerkleBranches(left, right *externalapi.DomainHash) *externalapi.DomainHash { + // Concatenate the left and right nodes. + w := hashes.NewMerkleBranchHashWriter() + + w.InfallibleWrite(left.ByteSlice()) + w.InfallibleWrite(right.ByteSlice()) + + return w.Finalize() +} + +// CalculateHashMerkleRoot calculates the merkle root of a tree consisted of the given transaction hashes. +// See `merkleRoot` for more info. 
+func CalculateHashMerkleRoot(transactions []*externalapi.DomainTransaction) *externalapi.DomainHash { + txHashes := make([]*externalapi.DomainHash, len(transactions)) + for i, tx := range transactions { + txHashes[i] = consensushashing.TransactionHash(tx) + } + return merkleRoot(txHashes) +} + +// CalculateIDMerkleRoot calculates the merkle root of a tree consisted of the given transaction IDs. +// See `merkleRoot` for more info. +func CalculateIDMerkleRoot(transactions []*externalapi.DomainTransaction) *externalapi.DomainHash { + if len(transactions) == 0 { + return &externalapi.DomainHash{} + } + + txIDs := make([]*externalapi.DomainHash, len(transactions)) + for i, tx := range transactions { + txIDs[i] = (*externalapi.DomainHash)(consensushashing.TransactionID(tx)) + } + return merkleRoot(txIDs) +} + +// merkleRoot creates a merkle tree from a slice of hashes, and returns its root. +func merkleRoot(hashes []*externalapi.DomainHash) *externalapi.DomainHash { + // Calculate how many entries are required to hold the binary merkle + // tree as a linear array and create an array of that size. + nextPoT := nextPowerOfTwo(len(hashes)) + arraySize := nextPoT*2 - 1 + merkles := make([]*externalapi.DomainHash, arraySize) + + // Create the base transaction hashes and populate the array with them. + for i, hash := range hashes { + merkles[i] = hash + } + + // Start the array offset after the last transaction and adjusted to the + // next power of two. + offset := nextPoT + for i := 0; i < arraySize-1; i += 2 { + switch { + // When there is no left child node, the parent is nil too. + case merkles[i] == nil: + merkles[offset] = nil + + // When there is no right child, the parent is generated by + // hashing the concatenation of the left child with zeros. 
+ case merkles[i+1] == nil: + newHash := hashMerkleBranches(merkles[i], &externalapi.DomainHash{}) + merkles[offset] = newHash + + // The normal case sets the parent node to the hash + // of the concatenation of the left and right children. + default: + newHash := hashMerkleBranches(merkles[i], merkles[i+1]) + merkles[offset] = newHash + } + offset++ + } + + return merkles[len(merkles)-1] +} diff --git a/domain/consensus/utils/mining/solve.go b/domain/consensus/utils/mining/solve.go new file mode 100644 index 0000000..c860721 --- /dev/null +++ b/domain/consensus/utils/mining/solve.go @@ -0,0 +1,25 @@ +package mining + +import ( + "math" + "math/rand" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" +) + +// SolveBlock increments the given block's nonce until it matches the difficulty requirements in its bits field +func SolveBlock(block *externalapi.DomainBlock, rd *rand.Rand) { + header := block.Header.ToMutable() + state := pow.NewState(header) + for state.Nonce = rd.Uint64(); state.Nonce < math.MaxUint64; state.Nonce++ { + if state.CheckProofOfWork() { + header.SetNonce(state.Nonce) + block.Header = header.ToImmutable() + return + } + } + + panic(errors.New("went over all the nonce space and couldn't find a single one that gives a valid block")) +} diff --git a/domain/consensus/utils/multiset/multiset.go b/domain/consensus/utils/multiset/multiset.go new file mode 100644 index 0000000..13c2d16 --- /dev/null +++ b/domain/consensus/utils/multiset/multiset.go @@ -0,0 +1,54 @@ +package multiset + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/go-muhash" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type multiset struct { + ms *muhash.MuHash +} + +func (m multiset) Add(data []byte) { + m.ms.Add(data) +} + +func (m multiset) 
Remove(data []byte) { + m.ms.Remove(data) +} + +func (m multiset) Hash() *externalapi.DomainHash { + finalizedHash := m.ms.Finalize() + return externalapi.NewDomainHashFromByteArray(finalizedHash.AsArray()) +} + +func (m multiset) Serialize() []byte { + return m.ms.Serialize()[:] +} + +func (m multiset) Clone() model.Multiset { + return &multiset{ms: m.ms.Clone()} +} + +// FromBytes deserializes the given bytes slice and returns a multiset. +func FromBytes(multisetBytes []byte) (model.Multiset, error) { + serialized := &muhash.SerializedMuHash{} + if len(serialized) != len(multisetBytes) { + return nil, errors.Errorf("mutliset bytes expected to be in length of %d but got %d", + len(serialized), len(multisetBytes)) + } + copy(serialized[:], multisetBytes) + ms, err := muhash.DeserializeMuHash(serialized) + if err != nil { + return nil, err + } + + return &multiset{ms: ms}, nil +} + +// New returns a new model.Multiset +func New() model.Multiset { + return &multiset{ms: muhash.NewMuHash()} +} diff --git a/domain/consensus/utils/pow/heavyhash.go b/domain/consensus/utils/pow/heavyhash.go new file mode 100644 index 0000000..dd6cf1d --- /dev/null +++ b/domain/consensus/utils/pow/heavyhash.go @@ -0,0 +1,92 @@ +package pow + +import ( + "math" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" +) + +const eps float64 = 1e-9 + +type matrix [64][64]uint16 + +func generateMatrix(hash *externalapi.DomainHash) *matrix { + var mat matrix + generator := newxoShiRo256PlusPlus(hash) + for { + for i := range mat { + for j := 0; j < 64; j += 16 { + val := generator.Uint64() + for shift := 0; shift < 16; shift++ { + mat[i][j+shift] = uint16(val >> (4 * shift) & 0x0F) + } + } + } + if mat.computeRank() == 64 { + return &mat + } + } +} + +func (mat *matrix) computeRank() int { + var B [64][64]float64 + for i := range B { + for j := range B[0] { + B[i][j] = float64(mat[i][j]) + } + } + var 
rank int + var rowSelected [64]bool + for i := 0; i < 64; i++ { + var j int + for j = 0; j < 64; j++ { + if !rowSelected[j] && math.Abs(B[j][i]) > eps { + break + } + } + if j != 64 { + rank++ + rowSelected[j] = true + for p := i + 1; p < 64; p++ { + B[j][p] /= B[j][i] + } + for k := 0; k < 64; k++ { + if k != j && math.Abs(B[k][i]) > eps { + for p := i + 1; p < 64; p++ { + B[k][p] -= B[j][p] * B[k][i] + } + } + } + } + } + return rank +} + +func (mat *matrix) HeavyHash(hash *externalapi.DomainHash) *externalapi.DomainHash { + hashBytes := hash.ByteArray() + var vector [64]uint16 + var product [64]uint16 + for i := 0; i < 32; i++ { + vector[2*i] = uint16(hashBytes[i] >> 4) + vector[2*i+1] = uint16(hashBytes[i] & 0x0F) + } + // Matrix-vector multiplication, and convert to 4 bits. + for i := 0; i < 64; i++ { + var sum uint16 + for j := 0; j < 64; j++ { + sum += mat[i][j] * vector[j] + } + product[i] = sum >> 10 + } + + // Concatenate 4 LSBs back to 8 bit xor with sum1 + var res [32]byte + for i := range res { + res[i] = hashBytes[i] ^ (byte(product[2*i]<<4) | byte(product[2*i+1])) + } + // Hash again + writer := hashes.NewHeavyHashWriter() + writer.InfallibleWrite(res[:]) + return writer.Finalize() +} diff --git a/domain/consensus/utils/pow/heavyhash_test.go b/domain/consensus/utils/pow/heavyhash_test.go new file mode 100644 index 0000000..08dd8e7 --- /dev/null +++ b/domain/consensus/utils/pow/heavyhash_test.go @@ -0,0 +1,234 @@ +package pow + +import ( + "bytes" + "encoding/hex" + "math/rand" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/hashes" +) + +func BenchmarkMatrix_HeavyHash(b *testing.B) { + input := []byte("BenchmarkMatrix_HeavyHash") + writer := hashes.NewPoWHashWriter() + writer.InfallibleWrite(input) + hash := writer.Finalize() + matrix := generateMatrix(hash) + for i := 0; i < b.N; i++ { + hash = matrix.HeavyHash(hash) + } +} + +func 
BenchmarkMatrix_Generate(b *testing.B) { + r := rand.New(rand.NewSource(0)) + h := [32]byte{} + r.Read(h[:]) + hash := externalapi.NewDomainHashFromByteArray(&h) + for i := 0; i < b.N; i++ { + generateMatrix(hash) + } +} + +func BenchmarkMatrix_Rank(b *testing.B) { + r := rand.New(rand.NewSource(0)) + h := [32]byte{} + r.Read(h[:]) + hash := externalapi.NewDomainHashFromByteArray(&h) + matrix := generateMatrix(hash) + + for i := 0; i < b.N; i++ { + matrix.computeRank() + } +} + +func TestMatrix_Rank(t *testing.T) { + var mat matrix + if mat.computeRank() != 0 { + t.Fatalf("The zero matrix should have rank 0, instead got: %d", mat.computeRank()) + } + + r := rand.New(rand.NewSource(0)) + h := [32]byte{} + r.Read(h[:]) + hash := externalapi.NewDomainHashFromByteArray(&h) + mat = *generateMatrix(hash) + + if mat.computeRank() != 64 { + t.Fatalf("generateMatrix() should always return full rank matrix, instead got: %d", mat.computeRank()) + } + + for i := range mat { + mat[0][i] = mat[1][i] * 2 + } + if mat.computeRank() != 63 { + t.Fatalf("Made a linear depenency between 2 rows, rank should be 63, instead got: %d", mat.computeRank()) + } + + for i := range mat { + mat[33][i] = mat[32][i] * 3 + } + if mat.computeRank() != 62 { + t.Fatalf("Made anoter linear depenency between 2 rows, rank should be 62, instead got: %d", mat.computeRank()) + } +} + +func TestGenerateMatrix(t *testing.T) { + hashBytes := [32]byte{42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + hash := externalapi.NewDomainHashFromByteArray(&hashBytes) + expectedMatrix := matrix{ + {4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 15, 3, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 2, 10, 14, 1, 2, 2, 14, 10, 4, 12, 4, 12, 10, 10, 10, 10, 10, 10}, + {9, 11, 1, 11, 1, 11, 9, 11, 9, 11, 9, 3, 12, 13, 11, 5, 15, 15, 5, 0, 6, 8, 1, 8, 6, 11, 15, 5, 3, 6, 7, 3, 2, 15, 14, 3, 7, 
11, 14, 7, 3, 6, 14, 12, 3, 9, 5, 1, 1, 0, 8, 4, 10, 15, 9, 10, 6, 13, 1, 1, 7, 4, 4, 6}, + {2, 6, 0, 8, 11, 15, 4, 0, 5, 2, 7, 13, 15, 3, 11, 12, 6, 2, 1, 8, 13, 4, 11, 4, 10, 14, 13, 2, 6, 15, 10, 6, 6, 5, 6, 9, 3, 3, 3, 1, 9, 12, 12, 15, 6, 0, 1, 5, 7, 13, 14, 1, 10, 10, 5, 14, 4, 0, 12, 13, 2, 15, 8, 4}, + {8, 6, 5, 1, 0, 6, 4, 8, 13, 0, 8, 12, 7, 2, 4, 3, 10, 5, 9, 3, 12, 13, 2, 4, 13, 14, 7, 7, 9, 12, 10, 8, 11, 6, 14, 3, 12, 8, 8, 0, 2, 10, 0, 9, 1, 9, 7, 8, 5, 2, 9, 13, 15, 6, 13, 10, 1, 9, 1, 10, 6, 2, 10, 9}, + {4, 2, 6, 14, 4, 2, 5, 7, 15, 6, 0, 4, 11, 9, 12, 0, 3, 2, 0, 4, 10, 5, 12, 3, 3, 4, 10, 1, 0, 13, 3, 12, 15, 0, 7, 10, 2, 2, 15, 0, 2, 15, 8, 2, 15, 12, 10, 6, 6, 2, 13, 3, 8, 14, 3, 13, 10, 5, 4, 5, 1, 6, 5, 10}, + {0, 3, 13, 12, 11, 4, 11, 13, 1, 12, 4, 11, 15, 14, 13, 4, 7, 1, 3, 0, 10, 3, 8, 8, 1, 2, 5, 14, 4, 5, 14, 1, 1, 3, 3, 1, 5, 15, 7, 5, 11, 8, 8, 12, 10, 5, 7, 9, 2, 10, 13, 11, 4, 2, 12, 15, 10, 6, 6, 0, 6, 6, 3, 12}, + {9, 12, 3, 3, 5, 8, 12, 13, 7, 4, 5, 11, 4, 0, 7, 2, 2, 15, 12, 14, 12, 5, 4, 2, 8, 8, 8, 13, 6, 1, 1, 5, 0, 15, 12, 13, 8, 5, 0, 4, 13, 1, 6, 1, 12, 14, 1, 0, 13, 12, 10, 10, 1, 4, 13, 13, 8, 4, 15, 13, 6, 6, 14, 10}, + {14, 15, 8, 0, 7, 2, 5, 10, 5, 3, 12, 0, 11, 3, 4, 2, 8, 11, 6, 14, 14, 3, 3, 12, 3, 7, 6, 2, 6, 12, 15, 1, 1, 13, 0, 6, 9, 9, 7, 7, 13, 4, 4, 2, 15, 5, 2, 15, 13, 13, 10, 6, 9, 15, 2, 9, 6, 10, 6, 14, 14, 3, 5, 11}, + {6, 4, 7, 8, 11, 0, 13, 11, 0, 7, 0, 0, 13, 6, 3, 11, 15, 14, 10, 2, 7, 8, 13, 14, 8, 15, 10, 8, 14, 6, 10, 14, 3, 11, 5, 11, 13, 5, 3, 12, 3, 0, 2, 0, 6, 14, 4, 12, 4, 4, 8, 15, 7, 8, 12, 11, 3, 9, 5, 13, 10, 14, 13, 4}, + {10, 0, 0, 15, 1, 4, 13, 3, 15, 10, 2, 5, 11, 2, 9, 14, 7, 3, 2, 8, 6, 15, 0, 12, 1, 4, 1, 9, 3, 0, 15, 8, 9, 13, 0, 7, 9, 10, 6, 14, 3, 7, 9, 7, 4, 0, 11, 8, 4, 6, 5, 8, 8, 0, 5, 14, 7, 12, 12, 2, 5, 6, 5, 6}, + {12, 0, 0, 14, 8, 3, 0, 3, 13, 10, 5, 13, 5, 7, 2, 4, 13, 11, 3, 1, 11, 2, 14, 5, 10, 5, 5, 9, 12, 15, 12, 8, 1, 0, 11, 13, 8, 1, 1, 11, 10, 0, 11, 15, 13, 9, 
12, 14, 5, 4, 5, 14, 2, 7, 2, 1, 4, 12, 11, 11, 9, 12, 11, 15}, + {3, 15, 9, 8, 13, 12, 15, 7, 8, 7, 14, 6, 10, 3, 0, 5, 2, 2, 6, 6, 3, 2, 5, 12, 11, 2, 10, 11, 13, 3, 9, 7, 7, 6, 8, 15, 14, 14, 11, 11, 9, 7, 1, 3, 8, 5, 11, 11, 1, 2, 15, 8, 13, 8, 11, 4, 1, 5, 3, 12, 5, 3, 7, 7}, + {13, 13, 2, 14, 4, 3, 15, 2, 0, 15, 1, 5, 4, 1, 5, 1, 4, 14, 5, 1, 11, 13, 15, 1, 3, 3, 5, 13, 14, 1, 0, 4, 6, 1, 15, 7, 7, 0, 15, 8, 15, 3, 14, 7, 7, 8, 12, 10, 2, 14, 9, 2, 11, 11, 7, 10, 4, 3, 12, 13, 4, 13, 0, 14}, + {12, 14, 15, 15, 2, 0, 0, 13, 4, 6, 4, 2, 14, 11, 5, 6, 14, 8, 14, 7, 13, 15, 6, 15, 7, 9, 1, 0, 11, 9, 9, 0, 2, 12, 8, 8, 14, 11, 7, 5, 3, 0, 11, 12, 9, 2, 8, 9, 0, 0, 9, 8, 9, 8, 2, 14, 12, 2, 0, 14, 13, 8, 4, 10}, + {7, 10, 1, 15, 12, 14, 7, 4, 7, 13, 4, 8, 13, 12, 1, 7, 10, 6, 5, 14, 14, 3, 14, 4, 11, 14, 6, 12, 15, 12, 15, 12, 4, 5, 9, 8, 7, 7, 3, 0, 5, 7, 3, 8, 4, 4, 7, 5, 6, 12, 13, 0, 12, 10, 2, 5, 14, 9, 6, 4, 13, 13, 14, 5}, + {14, 5, 8, 3, 4, 15, 13, 14, 14, 10, 7, 14, 15, 2, 11, 14, 13, 13, 12, 10, 6, 9, 5, 5, 6, 13, 15, 13, 7, 0, 15, 11, 4, 12, 15, 7, 7, 4, 3, 11, 8, 14, 5, 10, 2, 4, 4, 12, 3, 6, 1, 9, 15, 1, 1, 13, 7, 5, 0, 14, 15, 7, 8, 6}, + {1, 2, 10, 5, 2, 13, 1, 11, 15, 10, 4, 9, 9, 12, 14, 13, 3, 5, 0, 3, 7, 11, 10, 3, 12, 5, 10, 2, 13, 7, 1, 7, 13, 8, 2, 8, 3, 14, 10, 3, 5, 12, 0, 9, 3, 9, 11, 2, 10, 9, 0, 6, 4, 0, 1, 14, 11, 0, 8, 6, 1, 15, 3, 10}, + {13, 9, 0, 5, 8, 7, 12, 15, 10, 10, 5, 1, 1, 7, 6, 1, 14, 5, 15, 2, 3, 5, 3, 5, 7, 3, 7, 7, 1, 4, 3, 14, 5, 0, 12, 0, 12, 10, 10, 6, 12, 6, 3, 5, 5, 11, 10, 1, 11, 3, 13, 3, 9, 11, 1, 7, 14, 14, 0, 8, 15, 5, 2, 7}, + {8, 5, 11, 6, 15, 0, 1, 13, 1, 6, 7, 15, 4, 3, 14, 12, 9, 3, 11, 6, 4, 12, 1, 11, 6, 12, 5, 11, 1, 12, 2, 3, 1, 2, 11, 12, 0, 5, 11, 5, 3, 13, 11, 3, 11, 14, 10, 8, 3, 9, 4, 8, 13, 11, 9, 11, 2, 4, 12, 3, 0, 14, 7, 11}, + {10, 11, 4, 10, 7, 8, 3, 14, 15, 8, 15, 6, 9, 8, 5, 6, 12, 1, 15, 6, 5, 5, 14, 13, 2, 12, 14, 6, 5, 5, 14, 9, 1, 10, 11, 14, 8, 6, 14, 11, 1, 15, 6, 11, 11, 8, 1, 2, 8, 
5, 4, 15, 6, 8, 0, 8, 0, 11, 0, 1, 0, 7, 8, 15}, + {0, 15, 5, 0, 11, 4, 4, 2, 0, 4, 8, 12, 2, 2, 0, 8, 1, 2, 6, 5, 6, 12, 3, 1, 12, 1, 6, 10, 2, 5, 0, 2, 0, 11, 8, 6, 13, 4, 14, 4, 15, 5, 8, 11, 9, 6, 2, 6, 9, 1, 4, 2, 14, 10, 4, 4, 1, 1, 11, 8, 6, 11, 11, 9}, + {7, 3, 6, 5, 9, 1, 11, 0, 15, 13, 13, 13, 4, 14, 14, 12, 3, 7, 9, 3, 1, 6, 5, 9, 7, 6, 2, 11, 10, 4, 11, 14, 10, 13, 11, 8, 11, 8, 1, 15, 5, 0, 10, 5, 6, 0, 5, 15, 11, 6, 6, 4, 10, 11, 8, 12, 0, 10, 11, 11, 11, 1, 13, 6}, + {7, 15, 0, 0, 11, 5, 7, 13, 3, 7, 3, 2, 5, 12, 6, 11, 14, 4, 9, 8, 9, 9, 13, 0, 15, 2, 13, 2, 15, 6, 15, 1, 1, 7, 4, 0, 10, 1, 8, 14, 0, 10, 12, 4, 5, 13, 9, 0, 7, 12, 13, 11, 11, 8, 8, 15, 2, 15, 4, 4, 9, 3, 10, 7}, + {0, 9, 3, 5, 14, 6, 7, 14, 7, 2, 13, 7, 3, 15, 9, 15, 2, 8, 0, 4, 6, 0, 15, 6, 2, 1, 14, 8, 5, 8, 2, 4, 2, 11, 9, 2, 15, 13, 11, 12, 8, 15, 3, 13, 2, 2, 10, 13, 1, 8, 7, 15, 13, 6, 7, 7, 4, 3, 14, 7, 0, 9, 15, 11}, + {8, 13, 7, 7, 8, 8, 7, 8, 1, 4, 10, 1, 12, 4, 14, 11, 7, 12, 15, 0, 10, 15, 9, 2, 14, 2, 14, 2, 4, 5, 13, 3, 2, 10, 0, 15, 7, 6, 8, 11, 7, 6, 10, 10, 4, 7, 10, 6, 6, 14, 10, 4, 14, 6, 12, 2, 8, 1, 9, 13, 3, 4, 3, 14}, + {10, 10, 6, 3, 8, 5, 10, 7, 11, 10, 9, 4, 8, 14, 9, 10, 0, 9, 8, 14, 11, 15, 8, 13, 13, 7, 13, 13, 13, 9, 12, 11, 6, 3, 9, 6, 0, 0, 6, 6, 11, 6, 4, 8, 1, 5, 1, 7, 9, 6, 13, 4, 3, 8, 8, 11, 9, 10, 6, 11, 12, 13, 14, 14}, + {14, 10, 0, 15, 14, 4, 3, 0, 12, 4, 0, 14, 11, 9, 0, 6, 4, 6, 0, 9, 8, 14, 4, 4, 6, 8, 2, 8, 10, 3, 8, 0, 1, 1, 15, 4, 2, 4, 13, 9, 9, 4, 0, 5, 5, 1, 2, 5, 11, 6, 2, 1, 7, 8, 10, 10, 1, 5, 8, 6, 7, 0, 4, 14}, + {0, 15, 10, 11, 13, 12, 7, 7, 4, 0, 9, 5, 2, 8, 0, 10, 6, 6, 7, 5, 6, 7, 9, 0, 1, 4, 8, 14, 10, 3, 5, 5, 11, 5, 1, 10, 6, 10, 0, 14, 1, 15, 11, 12, 8, 2, 7, 8, 4, 0, 3, 11, 9, 15, 3, 5, 15, 15, 14, 15, 3, 4, 5, 14}, + {5, 12, 12, 8, 0, 0, 14, 1, 4, 15, 3, 2, 2, 6, 1, 10, 7, 10, 14, 5, 14, 0, 8, 5, 9, 0, 12, 8, 9, 10, 3, 12, 3, 2, 0, 0, 12, 12, 7, 13, 2, 6, 4, 7, 10, 10, 14, 1, 11, 6, 10, 3, 12, 2, 1, 10, 7, 13, 10, 12, 
14, 11, 14, 8}, + {9, 5, 3, 12, 4, 3, 10, 14, 7, 5, 11, 12, 2, 13, 9, 8, 5, 2, 6, 2, 4, 9, 10, 10, 4, 3, 4, 0, 11, 1, 10, 9, 4, 10, 4, 5, 8, 11, 1, 7, 13, 7, 6, 6, 3, 12, 0, 0, 15, 6, 12, 12, 13, 7, 14, 14, 11, 15, 7, 14, 12, 6, 15, 2}, + {15, 2, 0, 12, 15, 14, 8, 14, 7, 14, 0, 3, 3, 11, 12, 2, 3, 14, 13, 5, 12, 9, 6, 11, 7, 4, 5, 1, 7, 12, 0, 11, 1, 5, 6, 6, 8, 6, 12, 2, 12, 3, 10, 3, 4, 10, 3, 3, 3, 10, 10, 14, 3, 13, 15, 0, 7, 6, 15, 6, 13, 7, 4, 11}, + {11, 15, 5, 14, 0, 1, 1, 14, 2, 3, 15, 14, 4, 3, 11, 1, 6, 6, 0, 12, 3, 5, 15, 6, 3, 11, 13, 11, 7, 7, 8, 11, 5, 9, 10, 10, 9, 14, 7, 1, 7, 2, 8, 6, 6, 5, 1, 9, 6, 5, 8, 14, 2, 14, 2, 9, 3, 3, 4, 15, 13, 5, 2, 7}, + {7, 8, 13, 9, 15, 8, 11, 7, 1, 9, 15, 12, 6, 9, 3, 1, 10, 10, 11, 0, 0, 8, 14, 5, 11, 12, 14, 4, 3, 9, 12, 9, 14, 0, 0, 9, 12, 4, 1, 13, 3, 6, 3, 4, 13, 10, 2, 9, 3, 7, 7, 10, 7, 10, 10, 3, 5, 15, 8, 9, 11, 7, 1, 14}, + {5, 5, 9, 1, 15, 3, 3, 11, 6, 11, 13, 13, 4, 12, 7, 12, 4, 8, 14, 13, 7, 12, 13, 8, 10, 2, 1, 12, 11, 7, 0, 8, 10, 9, 15, 1, 3, 9, 10, 0, 9, 1, 14, 1, 1, 9, 2, 2, 8, 9, 5, 6, 3, 2, 15, 9, 15, 6, 3, 11, 14, 4, 0, 4}, + {9, 2, 10, 2, 0, 9, 6, 13, 13, 0, 13, 14, 3, 12, 1, 15, 9, 3, 12, 2, 5, 15, 6, 6, 15, 11, 7, 11, 0, 4, 0, 11, 10, 12, 7, 9, 3, 0, 2, 2, 13, 13, 9, 6, 9, 2, 6, 4, 3, 6, 5, 10, 10, 9, 7, 2, 4, 9, 13, 11, 2, 13, 6, 8}, + {13, 15, 9, 8, 6, 2, 3, 2, 2, 12, 5, 3, 8, 6, 11, 6, 15, 7, 10, 3, 15, 8, 7, 5, 3, 8, 4, 2, 11, 1, 0, 4, 1, 1, 6, 1, 13, 6, 5, 1, 2, 6, 7, 10, 4, 3, 10, 6, 2, 0, 7, 13, 15, 1, 13, 0, 12, 10, 15, 6, 2, 4, 14, 3}, + {5, 11, 14, 4, 0, 7, 12, 4, 4, 14, 12, 3, 4, 10, 7, 14, 6, 4, 14, 7, 0, 12, 5, 9, 15, 6, 15, 6, 3, 12, 0, 10, 11, 7, 1, 14, 13, 5, 1, 14, 5, 15, 12, 1, 9, 13, 9, 13, 14, 5, 10, 11, 12, 10, 15, 11, 9, 13, 2, 14, 9, 12, 2, 11}, + {2, 12, 5, 7, 1, 5, 2, 11, 8, 4, 15, 6, 9, 14, 5, 1, 15, 4, 3, 1, 11, 4, 2, 1, 4, 5, 4, 4, 7, 3, 3, 12, 4, 3, 2, 15, 13, 1, 14, 15, 1, 4, 6, 11, 13, 15, 6, 12, 12, 13, 6, 8, 10, 0, 10, 12, 1, 10, 3, 2, 9, 8, 2, 8}, + {10, 
12, 12, 6, 8, 5, 4, 4, 5, 3, 6, 7, 15, 5, 10, 3, 8, 15, 14, 5, 6, 2, 14, 4, 1, 7, 1, 3, 12, 3, 12, 4, 10, 15, 6, 6, 0, 6, 6, 8, 6, 9, 5, 7, 5, 1, 9, 2, 4, 9, 0, 8, 1, 1, 14, 3, 7, 14, 8, 9, 0, 4, 11, 7}, + {13, 11, 14, 7, 0, 4, 0, 10, 12, 11, 10, 8, 6, 12, 13, 15, 9, 2, 14, 9, 3, 0, 12, 14, 11, 15, 4, 7, 15, 14, 4, 8, 15, 12, 9, 14, 7, 7, 9, 13, 14, 14, 4, 9, 13, 8, 1, 13, 6, 3, 12, 7, 0, 15, 6, 15, 7, 2, 3, 0, 9, 5, 13, 0}, + {3, 8, 12, 11, 5, 9, 9, 14, 8, 14, 14, 5, 9, 9, 12, 10, 3, 12, 13, 0, 0, 0, 6, 7, 12, 4, 2, 3, 8, 8, 9, 15, 11, 1, 12, 13, 10, 15, 11, 1, 2, 13, 10, 1, 7, 2, 7, 11, 8, 15, 7, 6, 4, 6, 5, 11, 11, 15, 2, 1, 11, 1, 1, 8}, + {10, 7, 7, 1, 4, 13, 9, 10, 2, 2, 3, 7, 12, 8, 5, 5, 5, 5, 3, 1, 5, 6, 8, 2, 8, 11, 5, 0, 4, 12, 12, 6, 7, 9, 14, 10, 11, 8, 0, 9, 11, 4, 14, 7, 7, 8, 2, 15, 12, 7, 4, 4, 13, 2, 0, 3, 14, 0, 1, 5, 2, 15, 7, 11}, + {3, 8, 10, 4, 1, 7, 3, 13, 5, 14, 0, 9, 3, 1, 0, 11, 2, 15, 4, 9, 6, 5, 14, 0, 2, 8, 1, 14, 7, 6, 1, 5, 5, 7, 2, 0, 5, 3, 4, 15, 13, 10, 9, 13, 13, 12, 5, 11, 11, 14, 13, 10, 8, 14, 0, 8, 1, 7, 2, 10, 12, 12, 1, 11}, + {11, 14, 4, 13, 3, 11, 10, 6, 15, 2, 5, 10, 14, 4, 13, 3, 12, 7, 12, 10, 4, 0, 0, 1, 14, 6, 1, 2, 2, 12, 9, 2, 3, 11, 1, 4, 10, 4, 4, 7, 7, 12, 4, 3, 12, 11, 9, 3, 15, 13, 6, 13, 7, 11, 5, 12, 5, 13, 15, 12, 0, 13, 12, 9}, + {8, 7, 2, 2, 5, 3, 10, 15, 10, 8, 1, 0, 4, 5, 7, 6, 15, 13, 2, 14, 6, 2, 9, 5, 9, 0, 5, 12, 8, 6, 4, 12, 6, 8, 14, 15, 7, 15, 11, 2, 2, 12, 7, 9, 7, 11, 15, 7, 0, 4, 5, 13, 7, 2, 5, 9, 0, 5, 7, 6, 7, 12, 4, 1}, + {11, 4, 2, 13, 6, 10, 9, 4, 12, 9, 9, 6, 4, 2, 14, 14, 9, 5, 5, 15, 15, 9, 8, 11, 4, 2, 8, 11, 14, 3, 8, 10, 14, 9, 6, 6, 4, 7, 11, 2, 3, 7, 5, 1, 14, 2, 9, 4, 0, 1, 10, 7, 6, 7, 1, 3, 13, 7, 3, 2, 12, 3, 6, 6}, + {11, 1, 14, 3, 14, 3, 9, 9, 0, 11, 14, 6, 14, 7, 14, 8, 4, 2, 5, 6, 13, 3, 4, 10, 8, 8, 10, 11, 5, 1, 15, 15, 7, 0, 4, 14, 15, 13, 14, 13, 3, 2, 1, 6, 0, 6, 6, 4, 15, 6, 0, 12, 5, 11, 1, 7, 3, 3, 13, 12, 12, 6, 3, 2}, + {7, 2, 10, 14, 14, 13, 4, 14, 10, 6, 0, 2, 
7, 7, 2, 5, 14, 1, 5, 14, 15, 1, 2, 9, 2, 13, 1, 3, 6, 1, 3, 13, 10, 6, 11, 13, 1, 7, 13, 15, 2, 11, 9, 6, 13, 7, 9, 2, 3, 13, 10, 10, 6, 2, 5, 9, 1, 3, 0, 3, 1, 5, 3, 12}, + {11, 14, 4, 2, 10, 11, 15, 5, 9, 7, 8, 11, 10, 9, 5, 7, 14, 3, 12, 2, 7, 15, 12, 15, 4, 15, 12, 9, 2, 6, 6, 6, 8, 5, 0, 7, 14, 15, 14, 14, 3, 12, 7, 12, 2, 4, 1, 7, 1, 3, 4, 7, 1, 9, 11, 15, 15, 3, 7, 1, 10, 9, 14, 14}, + {4, 13, 11, 1, 9, 6, 5, 1, 11, 6, 6, 8, 3, 9, 8, 15, 13, 12, 3, 13, 5, 9, 10, 5, 12, 1, 15, 14, 12, 1, 10, 11, 5, 7, 3, 12, 9, 12, 0, 2, 2, 3, 14, 4, 2, 13, 1, 15, 11, 8, 3, 13, 0, 10, 5, 4, 6, 0, 14, 8, 1, 0, 6, 15}, + {15, 2, 0, 5, 2, 14, 9, 0, 10, 5, 12, 8, 5, 6, 0, 1, 9, 4, 4, 1, 4, 6, 14, 5, 3, 0, 2, 2, 14, 9, 7, 0, 2, 15, 12, 0, 10, 12, 9, 12, 15, 1, 9, 4, 15, 3, 0, 13, 0, 6, 5, 0, 2, 6, 11, 9, 13, 15, 6, 3, 5, 4, 0, 8}, + {4, 14, 8, 14, 13, 4, 4, 10, 6, 12, 15, 11, 7, 2, 15, 6, 9, 9, 1, 11, 13, 2, 7, 10, 4, 4, 5, 12, 14, 15, 8, 5, 6, 1, 11, 15, 4, 11, 5, 2, 5, 7, 3, 4, 5, 7, 3, 8, 10, 13, 7, 5, 6, 5, 10, 1, 12, 13, 3, 6, 2, 8, 7, 15}, + {3, 15, 4, 9, 14, 12, 6, 1, 7, 0, 7, 15, 10, 6, 5, 5, 15, 5, 9, 4, 7, 6, 14, 2, 1, 4, 10, 3, 12, 1, 7, 1, 0, 10, 2, 11, 14, 13, 7, 10, 5, 11, 5, 11, 15, 5, 0, 3, 15, 1, 2, 14, 13, 13, 10, 9, 15, 12, 10, 5, 2, 10, 0, 6}, + {4, 6, 5, 13, 11, 10, 15, 4, 2, 15, 13, 6, 7, 7, 4, 0, 4, 6, 7, 4, 9, 1, 6, 7, 6, 1, 4, 2, 0, 11, 6, 3, 14, 5, 9, 2, 2, 10, 1, 2, 13, 14, 4, 11, 4, 7, 12, 9, 8, 2, 2, 9, 5, 7, 9, 12, 8, 15, 0, 9, 12, 11, 1, 12}, + {11, 12, 11, 9, 8, 15, 4, 12, 13, 10, 6, 6, 6, 12, 3, 0, 6, 15, 15, 10, 6, 12, 5, 7, 10, 2, 7, 1, 6, 12, 9, 11, 11, 14, 1, 12, 15, 0, 6, 2, 12, 15, 4, 15, 14, 8, 3, 4, 15, 4, 13, 3, 14, 1, 3, 7, 6, 13, 9, 1, 0, 12, 4, 14}, + {12, 11, 13, 10, 10, 10, 3, 7, 12, 3, 13, 9, 6, 0, 12, 10, 4, 11, 5, 4, 11, 5, 7, 14, 6, 10, 12, 12, 13, 15, 12, 1, 13, 15, 15, 7, 1, 2, 8, 6, 1, 12, 12, 0, 4, 3, 3, 3, 7, 8, 9, 10, 7, 7, 0, 0, 11, 13, 15, 4, 9, 5, 10, 9}, + {6, 12, 3, 0, 9, 11, 6, 4, 9, 9, 1, 5, 9, 14, 3, 7, 15, 3, 5, 0, 
// TestMatrix_HeavyHash pins HeavyHash against a fixed test vector: a short
// input is PoW-hashed and then passed through the hard-coded testMatrix,
// and the result must match the expected hex digest exactly. This guards
// the consensus-critical HeavyHash implementation against regressions.
func TestMatrix_HeavyHash(t *testing.T) {
	expected, err := hex.DecodeString("87689f379943eaf9b7475ca95325687772bfcc68fc7899caeb4409ec4590c325")
	if err != nil {
		t.Fatal(err)
	}
	input := []byte{0xC1, 0xEC, 0xFD, 0xFC}
	// First-stage PoW hash of the raw input, as in real mining.
	writer := hashes.NewPoWHashWriter()
	writer.InfallibleWrite(input)
	hashed := testMatrix.HeavyHash(writer.Finalize())

	if !bytes.Equal(expected, hashed.ByteSlice()) {
		t.Fatalf("expected: %x == %s", expected, hashed)
	}

}
4, 14, 3, 11, 1, 7, 5, 9, 4, 3, 15, 7, 3, 15, 9, 8, 3, 8, 3, 3, 6, 7, 6, 9, 2}, + {10, 4, 6, 10, 5, 2, 15, 12, 0, 14, 14, 15, 14, 0, 12, 9, 1, 12, 4, 5, 5, 2, 10, 4, 2, 13, 11, 3, 1, 8, 10, 0, 7, 0, 12, 4, 11, 1, 14, 6, 14, 5, 5, 11, 11, 1, 3, 8, 0, 6, 11, 11, 8, 4, 7, 6, 14, 4, 9, 14, 9, 7, 13, 9}, + {12, 7, 9, 8, 2, 3, 3, 5, 14, 8, 0, 9, 7, 4, 2, 15, 15, 3, 11, 11, 8, 5, 7, 5, 0, 15, 10, 8, 0, 13, 1, 14, 8, 10, 1, 4, 13, 1, 13, 3, 11, 11, 2, 3, 10, 6, 8, 14, 15, 2, 10, 10, 12, 7, 7, 6, 6, 3, 13, 8, 1, 14, 2, 1}, + {2, 11, 6, 9, 13, 3, 12, 6, 0, 4, 6, 13, 8, 14, 6, 9, 10, 2, 10, 8, 4, 13, 6, 5, 0, 13, 15, 4, 2, 2, 1, 7, 5, 3, 3, 13, 7, 3, 5, 9, 15, 14, 14, 6, 0, 15, 11, 2, 4, 15, 6, 9, 8, 9, 15, 2, 6, 9, 15, 8, 4, 4, 11, 1}, + {10, 11, 8, 3, 11, 13, 10, 2, 2, 5, 2, 14, 15, 10, 2, 11, 0, 1, 8, 2, 14, 1, 10, 0, 3, 7, 5, 10, 7, 8, 15, 7, 2, 5, 13, 4, 10, 3, 6, 2, 3, 9, 6, 11, 7, 14, 1, 11, 9, 3, 3, 7, 6, 0, 9, 11, 4, 10, 4, 1, 9, 7, 4, 15}, + {13, 8, 15, 14, 11, 12, 5, 3, 9, 14, 1, 5, 14, 13, 14, 5, 13, 5, 4, 10, 9, 9, 0, 0, 6, 12, 5, 7, 2, 7, 2, 6, 6, 6, 1, 12, 9, 15, 7, 11, 11, 10, 11, 1, 10, 10, 0, 8, 1, 4, 5, 5, 8, 10, 10, 15, 6, 8, 13, 11, 11, 3, 15, 5}, + {8, 11, 5, 10, 1, 10, 9, 1, 12, 7, 6, 11, 1, 1, 4, 1, 2, 8, 4, 4, 7, 7, 8, 2, 7, 1, 14, 1, 8, 15, 15, 12, 10, 4, 15, 11, 3, 6, 10, 7, 4, 0, 10, 9, 11, 7, 1, 14, 4, 14, 3, 14, 10, 4, 13, 12, 5, 3, 12, 7, 10, 8, 0, 3}, + {9, 11, 6, 15, 14, 10, 0, 4, 7, 7, 6, 0, 7, 7, 12, 15, 5, 4, 12, 3, 7, 3, 0, 12, 2, 7, 11, 6, 7, 3, 2, 8, 5, 11, 9, 4, 3, 8, 11, 12, 3, 5, 14, 12, 4, 13, 12, 0, 3, 14, 4, 9, 1, 1, 9, 14, 10, 14, 8, 15, 6, 14, 10, 15}, + {10, 14, 10, 0, 10, 12, 15, 0, 3, 9, 11, 10, 3, 5, 1, 1, 9, 1, 7, 15, 7, 8, 10, 10, 12, 11, 5, 1, 10, 3, 6, 6, 13, 0, 13, 1, 4, 5, 9, 4, 9, 15, 8, 4, 13, 13, 4, 5, 5, 11, 1, 13, 15, 3, 10, 15, 7, 11, 10, 15, 8, 12, 10, 3}, + {8, 5, 11, 3, 8, 13, 15, 15, 3, 12, 1, 13, 1, 7, 1, 5, 6, 13, 7, 8, 5, 1, 12, 3, 10, 7, 12, 6, 14, 12, 15, 5, 3, 12, 2, 15, 11, 13, 1, 13, 8, 5, 8, 0, 13, 
15, 7, 13, 6, 13, 10, 1, 11, 0, 8, 9, 5, 11, 2, 9, 9, 10, 4, 15}, + {0, 4, 12, 14, 3, 1, 7, 5, 11, 13, 5, 3, 11, 12, 6, 8, 10, 15, 11, 8, 7, 10, 0, 2, 5, 15, 6, 10, 4, 2, 3, 1, 13, 7, 6, 12, 14, 7, 6, 14, 12, 10, 6, 14, 12, 0, 12, 11, 6, 9, 3, 1, 12, 15, 15, 3, 5, 5, 10, 11, 7, 15, 13, 3}, + {12, 14, 2, 14, 13, 6, 15, 7, 8, 8, 14, 13, 9, 2, 2, 10, 3, 15, 6, 10, 11, 7, 13, 0, 12, 1, 5, 8, 8, 12, 1, 11, 1, 3, 2, 4, 10, 7, 7, 7, 3, 10, 7, 2, 2, 3, 0, 1, 13, 5, 8, 2, 14, 0, 11, 13, 9, 3, 13, 2, 14, 2, 15, 4}, + {0, 0, 13, 6, 9, 12, 15, 7, 8, 0, 7, 4, 12, 15, 3, 2, 7, 1, 14, 4, 9, 3, 13, 12, 11, 12, 9, 9, 3, 7, 10, 9, 1, 9, 10, 2, 10, 14, 11, 0, 14, 4, 15, 12, 12, 9, 9, 8, 14, 1, 9, 14, 0, 6, 1, 0, 13, 9, 7, 6, 13, 2, 3, 9}, + {8, 0, 10, 13, 0, 7, 9, 7, 5, 1, 0, 3, 7, 10, 3, 15, 1, 15, 3, 11, 2, 6, 3, 10, 0, 10, 10, 3, 4, 15, 8, 6, 11, 11, 7, 5, 8, 5, 7, 15, 1, 11, 7, 13, 13, 6, 13, 13, 4, 2, 3, 15, 9, 5, 10, 6, 6, 6, 3, 11, 15, 13, 1, 15}, + {1, 1, 2, 10, 2, 2, 9, 5, 9, 2, 0, 1, 14, 2, 11, 6, 11, 6, 1, 0, 13, 7, 14, 1, 15, 14, 13, 7, 12, 11, 8, 11, 2, 11, 6, 10, 2, 3, 0, 0, 15, 0, 4, 6, 4, 12, 5, 5, 7, 14, 10, 6, 0, 3, 13, 0, 8, 1, 13, 10, 5, 1, 7, 5}, + {0, 5, 2, 12, 10, 2, 5, 1, 14, 0, 1, 4, 15, 11, 8, 7, 11, 14, 15, 6, 4, 1, 6, 6, 7, 13, 12, 5, 13, 2, 1, 6, 2, 13, 5, 15, 0, 8, 8, 6, 5, 5, 2, 0, 3, 13, 14, 2, 10, 5, 7, 6, 14, 5, 1, 4, 11, 2, 11, 1, 8, 15, 2, 4}, + {9, 9, 4, 5, 2, 5, 3, 12, 14, 5, 1, 3, 3, 0, 0, 6, 7, 14, 0, 15, 14, 11, 3, 10, 1, 9, 4, 14, 7, 14, 1, 0, 15, 11, 5, 9, 4, 0, 0, 10, 4, 4, 0, 7, 8, 15, 12, 8, 10, 8, 1, 2, 1, 11, 12, 14, 14, 14, 8, 10, 1, 5, 13, 10}, + {5, 10, 4, 4, 11, 10, 0, 6, 0, 12, 10, 5, 9, 11, 8, 10, 11, 3, 11, 14, 12, 9, 4, 6, 11, 12, 8, 7, 6, 14, 0, 6, 12, 4, 5, 3, 9, 0, 11, 6, 1, 3, 2, 12, 8, 9, 7, 12, 14, 7, 12, 6, 11, 13, 0, 2, 1, 3, 1, 8, 12, 2, 15, 15}, + {10, 11, 2, 3, 11, 10, 1, 7, 1, 10, 10, 14, 5, 13, 10, 3, 11, 15, 9, 14, 11, 11, 3, 15, 11, 6, 15, 13, 13, 1, 1, 10, 5, 1, 5, 11, 10, 3, 9, 12, 12, 1, 5, 6, 3, 3, 1, 1, 12, 
8, 3, 15, 6, 2, 8, 14, 3, 4, 10, 9, 7, 13, 2, 6}, + {12, 0, 1, 0, 4, 3, 3, 6, 8, 3, 1, 13, 6, 12, 1, 1, 1, 4, 12, 4, 4, 9, 9, 14, 15, 3, 6, 4, 11, 1, 12, 5, 6, 0, 10, 9, 1, 8, 14, 5, 2, 8, 4, 15, 12, 13, 7, 14, 12, 2, 6, 9, 4, 13, 0, 15, 10, 10, 6, 12, 7, 12, 9, 10}, + {0, 8, 5, 11, 12, 12, 11, 7, 2, 9, 2, 15, 1, 1, 0, 0, 6, 5, 10, 1, 11, 12, 8, 7, 1, 7, 10, 4, 2, 8, 2, 5, 1, 1, 2, 9, 2, 0, 3, 7, 5, 1, 5, 5, 3, 1, 4, 3, 14, 8, 11, 7, 8, 0, 2, 13, 3, 15, 1, 13, 14, 15, 11, 13}, + {8, 13, 5, 14, 2, 9, 9, 13, 15, 8, 2, 14, 4, 2, 6, 0, 1, 13, 10, 13, 6, 12, 15, 11, 6, 11, 9, 9, 2, 9, 6, 14, 2, 9, 12, 1, 13, 9, 5, 11, 10, 4, 4, 5, 8, 9, 13, 10, 9, 0, 5, 15, 4, 12, 7, 10, 6, 5, 5, 15, 8, 8, 11, 14}, + {6, 9, 6, 7, 1, 15, 0, 1, 4, 15, 5, 3, 10, 9, 15, 9, 14, 12, 7, 6, 3, 0, 12, 8, 12, 2, 11, 8, 11, 8, 1, 10, 10, 7, 7, 5, 3, 5, 1, 2, 13, 11, 2, 5, 2, 10, 10, 1, 14, 14, 8, 1, 11, 1, 2, 6, 15, 10, 8, 7, 10, 7, 0, 3}, + {12, 6, 11, 1, 1, 7, 8, 1, 5, 5, 8, 4, 6, 5, 6, 4, 2, 8, 4, 1, 0, 0, 14, 2, 10, 14, 14, 11, 2, 9, 14, 15, 12, 14, 9, 3, 7, 14, 4, 7, 12, 9, 3, 5, 1, 0, 12, 9, 10, 5, 11, 12, 10, 10, 6, 14, 6, 13, 13, 5, 5, 10, 13, 10}, + {12, 6, 13, 0, 8, 0, 10, 6, 15, 15, 7, 3, 0, 10, 13, 14, 10, 13, 5, 13, 15, 14, 3, 4, 10, 10, 9, 6, 6, 15, 2, 7, 0, 10, 6, 14, 2, 9, 11, 7, 5, 5, 13, 14, 11, 15, 9, 4, 2, 0, 15, 5, 4, 14, 14, 1, 3, 4, 5, 8, 1, 1, 10, 12}, + {2, 5, 0, 4, 11, 5, 5, 6, 10, 4, 6, 7, 10, 3, 0, 14, 14, 0, 12, 15, 11, 12, 13, 7, 6, 3, 9, 1, 9, 8, 8, 8, 4, 10, 3, 1, 7, 10, 3, 2, 12, 6, 15, 14, 0, 6, 8, 10, 1, 9, 12, 12, 15, 7, 1, 11, 15, 13, 0, 4, 10, 0, 12, 11}, + {8, 12, 14, 15, 14, 15, 10, 0, 2, 14, 3, 1, 2, 6, 0, 2, 1, 7, 9, 0, 15, 13, 5, 14, 6, 8, 15, 4, 15, 6, 10, 6, 15, 3, 12, 8, 5, 4, 10, 5, 3, 0, 4, 13, 10, 9, 8, 4, 6, 3, 9, 6, 12, 11, 9, 13, 8, 10, 9, 9, 8, 12, 1, 2}, + {11, 10, 15, 15, 5, 14, 15, 7, 5, 9, 14, 14, 7, 11, 6, 6, 3, 8, 2, 3, 4, 14, 11, 1, 12, 15, 11, 6, 0, 0, 13, 7, 14, 3, 12, 14, 0, 15, 6, 1, 11, 2, 11, 8, 3, 13, 4, 12, 10, 13, 7, 14, 9, 13, 
3, 10, 2, 14, 13, 4, 12, 13, 14, 10}, + {1, 11, 2, 12, 1, 10, 7, 12, 3, 3, 14, 9, 1, 10, 0, 11, 8, 10, 12, 12, 4, 12, 2, 11, 5, 0, 3, 15, 8, 2, 14, 3, 10, 2, 1, 13, 6, 14, 0, 0, 8, 11, 6, 13, 15, 10, 12, 7, 7, 11, 14, 9, 2, 7, 6, 8, 14, 9, 14, 10, 11, 9, 9, 12}, + {5, 10, 14, 2, 1, 4, 11, 5, 10, 2, 13, 9, 6, 12, 11, 5, 13, 4, 5, 14, 8, 7, 15, 9, 8, 4, 5, 2, 9, 11, 5, 3, 12, 2, 6, 1, 7, 4, 11, 4, 15, 0, 5, 2, 13, 11, 11, 2, 15, 10, 0, 12, 5, 8, 10, 1, 4, 11, 3, 13, 11, 7, 9, 14}, + {9, 8, 10, 5, 0, 2, 5, 8, 7, 3, 3, 6, 11, 1, 13, 15, 4, 4, 11, 6, 2, 6, 13, 11, 2, 6, 9, 4, 5, 13, 12, 2, 8, 7, 7, 12, 14, 15, 5, 12, 7, 0, 15, 15, 0, 5, 15, 0, 3, 9, 10, 15, 9, 11, 10, 10, 5, 3, 9, 3, 12, 13, 0, 13}, + {1, 11, 15, 0, 10, 5, 3, 5, 6, 7, 1, 11, 4, 11, 4, 2, 5, 12, 2, 5, 5, 6, 1, 5, 14, 9, 1, 5, 14, 12, 6, 10, 0, 8, 5, 11, 11, 11, 12, 10, 8, 10, 10, 1, 14, 1, 0, 8, 4, 7, 0, 11, 3, 1, 11, 12, 11, 8, 14, 15, 9, 3, 1, 14}, + {14, 11, 12, 12, 4, 6, 8, 14, 15, 1, 11, 2, 13, 3, 6, 2, 7, 1, 8, 1, 4, 9, 11, 15, 8, 1, 10, 13, 4, 13, 2, 7, 7, 10, 5, 2, 12, 12, 12, 3, 10, 8, 2, 11, 0, 3, 8, 9, 4, 2, 15, 7, 15, 6, 4, 6, 12, 7, 14, 9, 9, 8, 14, 12}, + {15, 4, 8, 12, 11, 11, 9, 5, 0, 0, 7, 6, 10, 5, 8, 2, 5, 6, 14, 11, 13, 0, 13, 15, 5, 4, 9, 15, 13, 12, 14, 15, 10, 2, 3, 6, 10, 14, 1, 8, 6, 7, 10, 1, 14, 9, 12, 13, 7, 2, 12, 10, 6, 11, 15, 1, 15, 11, 13, 0, 6, 13, 7, 15}, + {3, 3, 12, 5, 14, 9, 14, 14, 8, 0, 9, 1, 2, 2, 14, 11, 7, 1, 3, 1, 14, 15, 12, 8, 14, 2, 4, 13, 10, 5, 10, 8, 1, 7, 6, 5, 4, 2, 11, 5, 4, 13, 14, 6, 13, 15, 6, 6, 7, 12, 11, 5, 13, 10, 9, 13, 9, 14, 5, 6, 7, 14, 11, 7}, + {14, 12, 11, 5, 0, 5, 10, 5, 7, 1, 7, 11, 1, 0, 13, 6, 5, 14, 3, 0, 5, 14, 6, 7, 8, 5, 8, 6, 6, 3, 6, 1, 8, 3, 10, 7, 15, 6, 11, 6, 6, 7, 13, 2, 2, 0, 0, 11, 1, 15, 2, 14, 5, 1, 4, 8, 0, 1, 8, 0, 1, 1, 2, 2}, + {10, 13, 13, 3, 15, 14, 9, 12, 15, 15, 8, 5, 8, 10, 5, 9, 6, 6, 7, 15, 1, 0, 14, 9, 1, 11, 6, 11, 13, 4, 6, 14, 9, 12, 13, 8, 14, 6, 14, 2, 3, 15, 4, 4, 14, 4, 9, 12, 8, 0, 9, 11, 13, 10, 8, 
14, 3, 5, 7, 11, 6, 7, 15, 2}, + {9, 9, 11, 6, 11, 0, 5, 4, 8, 10, 8, 11, 2, 12, 8, 7, 11, 13, 6, 1, 13, 13, 11, 4, 5, 7, 7, 9, 6, 4, 12, 0, 11, 8, 6, 12, 11, 4, 15, 11, 12, 8, 11, 11, 1, 3, 6, 14, 9, 6, 7, 5, 0, 10, 3, 15, 13, 7, 0, 1, 13, 15, 1, 14}, + {10, 6, 8, 7, 3, 6, 9, 15, 1, 3, 10, 14, 9, 0, 0, 10, 0, 15, 2, 0, 0, 0, 6, 0, 13, 9, 9, 1, 8, 6, 13, 2, 1, 9, 14, 9, 1, 4, 8, 4, 2, 0, 8, 5, 0, 11, 12, 15, 13, 1, 14, 14, 15, 7, 8, 4, 4, 12, 1, 12, 8, 3, 9, 5}, + {12, 11, 1, 4, 10, 14, 8, 12, 2, 4, 15, 2, 9, 7, 7, 11, 15, 12, 10, 11, 7, 4, 13, 0, 8, 6, 8, 8, 10, 5, 5, 13, 3, 7, 9, 13, 13, 14, 6, 8, 1, 5, 7, 12, 4, 4, 6, 9, 13, 1, 6, 1, 6, 14, 5, 8, 2, 10, 4, 10, 1, 9, 6, 15}, + {4, 13, 4, 9, 6, 11, 1, 8, 7, 11, 11, 1, 3, 10, 12, 11, 1, 10, 6, 10, 0, 7, 3, 0, 0, 6, 3, 9, 2, 1, 4, 8, 2, 10, 2, 15, 9, 15, 14, 14, 15, 14, 3, 2, 7, 6, 6, 10, 8, 8, 4, 11, 1, 13, 6, 0, 2, 10, 0, 11, 15, 14, 6, 9}, + {15, 0, 12, 13, 0, 9, 10, 4, 11, 5, 10, 0, 8, 7, 3, 2, 12, 6, 3, 8, 5, 15, 14, 2, 13, 13, 6, 11, 5, 6, 9, 10, 14, 5, 14, 4, 9, 7, 5, 11, 13, 2, 7, 1, 14, 9, 0, 7, 8, 12, 11, 15, 2, 1, 5, 11, 3, 7, 5, 1, 6, 3, 8, 6}, + {0, 3, 8, 1, 4, 6, 3, 1, 3, 8, 2, 0, 15, 15, 14, 15, 13, 10, 11, 9, 2, 11, 5, 12, 3, 3, 0, 1, 5, 3, 11, 6, 10, 11, 8, 5, 7, 15, 4, 12, 8, 8, 12, 12, 12, 1, 9, 4, 11, 6, 10, 11, 1, 12, 8, 12, 5, 6, 1, 14, 2, 10, 3, 0}, + {10, 13, 6, 9, 11, 1, 4, 10, 0, 13, 8, 7, 4, 12, 15, 5, 14, 12, 6, 9, 0, 0, 10, 5, 13, 10, 15, 3, 0, 8, 7, 0, 9, 8, 10, 6, 11, 8, 10, 13, 11, 7, 5, 5, 9, 13, 1, 15, 0, 5, 15, 5, 4, 7, 9, 9, 15, 8, 2, 6, 3, 8, 5, 8}, + {14, 0, 6, 2, 4, 12, 2, 13, 6, 10, 5, 2, 2, 1, 6, 11, 1, 6, 9, 13, 0, 13, 9, 3, 12, 4, 3, 8, 7, 0, 9, 12, 0, 1, 7, 10, 10, 7, 3, 9, 13, 5, 15, 4, 13, 0, 8, 5, 4, 14, 11, 3, 3, 13, 15, 9, 9, 12, 9, 5, 2, 0, 1, 14}, + {4, 14, 13, 0, 14, 15, 11, 10, 11, 1, 3, 3, 9, 1, 12, 8, 6, 5, 15, 11, 1, 7, 5, 3, 8, 13, 0, 13, 11, 5, 8, 1, 8, 6, 13, 4, 13, 7, 12, 6, 5, 5, 7, 0, 12, 1, 1, 8, 1, 6, 4, 2, 8, 8, 15, 11, 11, 11, 4, 4, 4, 7, 13, 12}, + 
{14, 15, 10, 0, 4, 3, 1, 9, 13, 7, 9, 9, 15, 5, 0, 3, 9, 6, 4, 7, 13, 11, 3, 2, 7, 1, 6, 8, 13, 7, 10, 4, 3, 9, 5, 9, 2, 6, 10, 7, 9, 13, 2, 14, 2, 14, 7, 2, 14, 2, 8, 8, 0, 9, 0, 9, 12, 6, 7, 7, 6, 8, 12, 13}, + {5, 15, 8, 12, 11, 3, 13, 4, 5, 14, 10, 4, 15, 15, 1, 10, 9, 14, 6, 6, 4, 12, 4, 9, 12, 2, 15, 13, 2, 5, 12, 2, 3, 2, 15, 11, 12, 2, 6, 2, 11, 6, 7, 9, 12, 10, 5, 1, 1, 5, 9, 6, 14, 11, 3, 11, 6, 10, 11, 11, 0, 12, 15, 1}, + {12, 6, 8, 10, 2, 5, 7, 9, 8, 14, 15, 15, 13, 10, 15, 3, 10, 10, 6, 10, 14, 10, 7, 5, 3, 7, 6, 12, 11, 12, 8, 9, 12, 9, 15, 15, 15, 7, 8, 3, 15, 14, 1, 12, 0, 0, 4, 0, 9, 10, 8, 7, 14, 10, 8, 14, 6, 2, 8, 1, 11, 10, 0, 1}, + {12, 1, 2, 12, 7, 10, 4, 11, 5, 14, 10, 2, 2, 9, 4, 13, 3, 14, 3, 15, 5, 0, 14, 7, 7, 15, 6, 5, 2, 8, 15, 9, 6, 6, 13, 10, 9, 8, 6, 3, 14, 7, 12, 9, 7, 8, 13, 12, 14, 13, 6, 0, 5, 1, 9, 12, 14, 0, 11, 11, 6, 3, 11, 7}, + {15, 4, 8, 12, 8, 11, 4, 15, 1, 6, 2, 13, 1, 7, 7, 12, 0, 8, 14, 14, 10, 14, 0, 12, 0, 3, 3, 11, 7, 4, 2, 13, 0, 0, 11, 2, 5, 8, 12, 11, 6, 5, 6, 0, 0, 4, 0, 0, 1, 9, 9, 11, 3, 2, 13, 4, 13, 9, 15, 4, 7, 8, 3, 2}, + {3, 13, 8, 8, 12, 10, 5, 4, 7, 13, 10, 13, 14, 3, 2, 12, 11, 0, 9, 5, 6, 4, 14, 4, 6, 9, 2, 5, 10, 3, 9, 10, 5, 0, 12, 5, 15, 5, 15, 15, 2, 12, 3, 11, 0, 15, 9, 14, 1, 5, 6, 6, 14, 5, 8, 0, 5, 9, 3, 7, 7, 12, 15, 1}, + {1, 11, 7, 4, 13, 3, 0, 8, 11, 9, 15, 1, 4, 12, 2, 12, 10, 4, 14, 3, 9, 14, 14, 2, 3, 11, 12, 4, 5, 10, 6, 15, 2, 13, 13, 9, 9, 1, 11, 12, 12, 14, 1, 5, 15, 1, 7, 14, 12, 10, 11, 13, 13, 5, 2, 4, 7, 7, 9, 4, 14, 15, 13, 10}, + {14, 15, 9, 14, 9, 5, 13, 2, 0, 0, 14, 8, 6, 2, 0, 7, 11, 10, 2, 13, 2, 14, 9, 6, 4, 11, 5, 14, 6, 1, 6, 14, 6, 3, 9, 5, 2, 9, 3, 11, 1, 14, 5, 4, 12, 5, 3, 5, 11, 3, 11, 6, 13, 7, 13, 7, 4, 9, 4, 13, 8, 3, 5, 11}, + {13, 12, 12, 13, 8, 2, 4, 2, 10, 6, 3, 5, 7, 7, 6, 13, 8, 6, 15, 4, 12, 7, 15, 4, 3, 9, 8, 15, 0, 3, 12, 1, 9, 8, 13, 10, 15, 4, 14, 1, 6, 15, 0, 4, 8, 9, 3, 1, 3, 15, 5, 5, 1, 11, 11, 10, 11, 10, 8, 8, 5, 4, 13, 0}, + {8, 4, 15, 9, 14, 
// State is an intermediate data structure with pre-computed values to speed up mining.
type State struct {
	mat        matrix                 // HeavyHash matrix derived from the pre-PoW header hash
	Timestamp  int64                  // header timestamp in milliseconds, serialized into the PoW input
	Nonce      uint64                 // current nonce, serialized into the PoW input
	Target     big.Int                // difficulty target decoded from the header's Bits field
	prePowHash externalapi.DomainHash // header hash computed with time and nonce zeroed
}

// NewState creates a new state with pre-computed values to speed up mining
// It takes the target from the Bits field
func NewState(header externalapi.MutableBlockHeader) *State {
	target := difficulty.CompactToBig(header.Bits())
	// Zero out the time and nonce so the pre-PoW hash stays constant while
	// iterating nonces; the original values are restored immediately after.
	timestamp, nonce := header.TimeInMilliseconds(), header.Nonce()
	header.SetTimeInMilliseconds(0)
	header.SetNonce(0)
	prePowHash := consensushashing.HeaderHash(header)
	header.SetTimeInMilliseconds(timestamp)
	header.SetNonce(nonce)

	return &State{
		Target:     *target,
		prePowHash: *prePowHash,
		mat:        *generateMatrix(prePowHash),
		Timestamp:  timestamp,
		Nonce:      nonce,
	}
}

// CalculateProofOfWorkValue hashes the internal header and returns its big.Int value
func (state *State) CalculateProofOfWorkValue() *big.Int {
	// PRE_POW_HASH || TIME || 32 zero byte padding || NONCE
	// NOTE(review): this serialization layout is consensus-critical — the
	// field order and the 32-byte zero padding must not change.
	writer := hashes.NewPoWHashWriter()
	writer.InfallibleWrite(state.prePowHash.ByteSlice())
	err := serialization.WriteElement(writer, state.Timestamp)
	if err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	zeroes := [32]byte{}
	writer.InfallibleWrite(zeroes[:])
	err = serialization.WriteElement(writer, state.Nonce)
	if err != nil {
		panic(errors.Wrap(err, "this should never happen. Hash digest should never return an error"))
	}
	powHash := writer.Finalize()
	// Second stage: pass the PoW hash through the pre-computed HeavyHash matrix.
	heavyHash := state.mat.HeavyHash(powHash)
	return toBig(heavyHash)
}

// IncrementNonce increments the nonce in State by 1.
func (state *State) IncrementNonce() {
	state.Nonce++
}
+ return powNum.Cmp(&state.Target) <= 0 +} + +// CheckProofOfWorkByBits check's if the block has a valid PoW according to its Bits field +// it does not check if the difficulty itself is valid or less than the maximum for the appropriate network +func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool { + return NewState(header).CheckProofOfWork() +} + +// ToBig converts a externalapi.DomainHash into a big.Int treated as a little endian string. +func toBig(hash *externalapi.DomainHash) *big.Int { + // We treat the Hash as little-endian for PoW purposes, but the big package wants the bytes in big-endian, so reverse them. + buf := hash.ByteSlice() + blen := len(buf) + for i := 0; i < blen/2; i++ { + buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i] + } + + return new(big.Int).SetBytes(buf) +} + +// BlockLevel returns the block level of the given header. +func BlockLevel(header externalapi.BlockHeader, maxBlockLevel int) int { + // Genesis is defined to be the root of all blocks at all levels, so we define it to be the maximal + // block level. + if len(header.DirectParents()) == 0 { + return maxBlockLevel + } + + proofOfWorkValue := NewState(header.ToMutable()).CalculateProofOfWorkValue() + level := maxBlockLevel - proofOfWorkValue.BitLen() + // If the block has a level lower than genesis make it zero. 
+ if level < 0 { + level = 0 + } + return level +} diff --git a/domain/consensus/utils/pow/xoshiro.go b/domain/consensus/utils/pow/xoshiro.go new file mode 100644 index 0000000..e23e6f8 --- /dev/null +++ b/domain/consensus/utils/pow/xoshiro.go @@ -0,0 +1,38 @@ +package pow + +import ( + "encoding/binary" + "math/bits" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type xoShiRo256PlusPlus struct { + s0 uint64 + s1 uint64 + s2 uint64 + s3 uint64 +} + +func newxoShiRo256PlusPlus(hash *externalapi.DomainHash) *xoShiRo256PlusPlus { + hashArray := hash.ByteArray() + return &xoShiRo256PlusPlus{ + s0: binary.LittleEndian.Uint64(hashArray[:8]), + s1: binary.LittleEndian.Uint64(hashArray[8:16]), + s2: binary.LittleEndian.Uint64(hashArray[16:24]), + s3: binary.LittleEndian.Uint64(hashArray[24:32]), + } +} + +func (x *xoShiRo256PlusPlus) Uint64() uint64 { + res := bits.RotateLeft64(x.s0+x.s3, 23) + x.s0 + t := x.s1 << 17 + x.s2 ^= x.s0 + x.s3 ^= x.s1 + x.s1 ^= x.s2 + x.s0 ^= x.s3 + + x.s2 ^= t + x.s3 = bits.RotateLeft64(x.s3, 45) + return res +} diff --git a/domain/consensus/utils/pow/xoshiro_test.go b/domain/consensus/utils/pow/xoshiro_test.go new file mode 100644 index 0000000..6f98159 --- /dev/null +++ b/domain/consensus/utils/pow/xoshiro_test.go @@ -0,0 +1,17 @@ +package pow + +import "testing" + +// Test vectors are from here: https://github.com/rust-random/rngs/blob/17aa826cc38d3e8408c9489ac859fa9397acd479/rand_xoshiro/src/xoshiro256plusplus.rs#L121 +func TestXoShiRo256PlusPlus_Uint64(t *testing.T) { + state := xoShiRo256PlusPlus{1, 2, 3, 4} + expected := []uint64{41943041, 58720359, 3588806011781223, 3591011842654386, + 9228616714210784205, 9973669472204895162, 14011001112246962877, + 12406186145184390807, 15849039046786891736, 10450023813501588000} + for _, ex := range expected { + val := state.Uint64() + if val != ex { + t.Errorf("expected: %d, found: %d", ex, val) + } + } +} diff --git 
a/domain/consensus/utils/reachabilitydata/reachability_data.go b/domain/consensus/utils/reachabilitydata/reachability_data.go new file mode 100644 index 0000000..91484be --- /dev/null +++ b/domain/consensus/utils/reachabilitydata/reachability_data.go @@ -0,0 +1,112 @@ +package reachabilitydata + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type reachabilityData struct { + children []*externalapi.DomainHash + parent *externalapi.DomainHash + interval *model.ReachabilityInterval + futureCoveringSet model.FutureCoveringTreeNodeSet +} + +// If this doesn't compile, it means the type definition has been changed, so it's +// an indication to update Equal and Clone accordingly. +var _ = &reachabilityData{ + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{}, +} + +// EmptyReachabilityData constructs an empty MutableReachabilityData object +func EmptyReachabilityData() model.MutableReachabilityData { + return &reachabilityData{} +} + +// New constructs a ReachabilityData object filled with given fields +func New(children []*externalapi.DomainHash, + parent *externalapi.DomainHash, + interval *model.ReachabilityInterval, + futureCoveringSet model.FutureCoveringTreeNodeSet) model.ReachabilityData { + + return &reachabilityData{ + children: children, + parent: parent, + interval: interval, + futureCoveringSet: futureCoveringSet, + } +} + +func (rd *reachabilityData) Children() []*externalapi.DomainHash { + return rd.children +} + +func (rd *reachabilityData) Parent() *externalapi.DomainHash { + return rd.parent +} + +func (rd *reachabilityData) Interval() *model.ReachabilityInterval { + return rd.interval +} + +func (rd *reachabilityData) FutureCoveringSet() model.FutureCoveringTreeNodeSet { + return rd.futureCoveringSet +} + +func (rd *reachabilityData) CloneMutable() 
model.MutableReachabilityData { + //return rd + return &reachabilityData{ + children: externalapi.CloneHashes(rd.children), + parent: rd.parent, + interval: rd.interval.Clone(), + futureCoveringSet: rd.futureCoveringSet.Clone(), + } +} + +func (rd *reachabilityData) AddChild(child *externalapi.DomainHash) { + rd.children = append(rd.children, child) +} + +func (rd *reachabilityData) SetParent(parent *externalapi.DomainHash) { + rd.parent = parent +} + +func (rd *reachabilityData) SetInterval(interval *model.ReachabilityInterval) { + rd.interval = interval +} + +func (rd *reachabilityData) SetFutureCoveringSet(futureCoveringSet model.FutureCoveringTreeNodeSet) { + rd.futureCoveringSet = futureCoveringSet +} + +// Equal returns whether rd equals to other +func (rd *reachabilityData) Equal(other model.ReachabilityData) bool { + otherReachabilityData, ok := other.(*reachabilityData) + if !ok { + return false + } + if rd == nil || otherReachabilityData == nil { + return rd == otherReachabilityData + } + + if !externalapi.HashesEqual(rd.children, otherReachabilityData.Children()) { + return false + } + + if !rd.parent.Equal(otherReachabilityData.Parent()) { + return false + } + + if !rd.interval.Equal(otherReachabilityData.Interval()) { + return false + } + + if !rd.futureCoveringSet.Equal(otherReachabilityData.FutureCoveringSet()) { + return false + } + + return true +} diff --git a/domain/consensus/utils/reachabilitydata/reachabilitydata_equal_clone_test.go b/domain/consensus/utils/reachabilitydata/reachabilitydata_equal_clone_test.go new file mode 100644 index 0000000..79b8aa9 --- /dev/null +++ b/domain/consensus/utils/reachabilitydata/reachabilitydata_equal_clone_test.go @@ -0,0 +1,313 @@ +package reachabilitydata + +import ( + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func TestReachabilityData_Equal(t *testing.T) { + type dataToCompare struct 
{ + data *reachabilityData + expectedResult bool + } + tests := []struct { + baseData *reachabilityData + dataToCompareTo []dataToCompare + }{ + // Test nil data + { + baseData: nil, + dataToCompareTo: nil, + }, + // Test empty data + { + baseData: &reachabilityData{ + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{}, + }, + dataToCompareTo: []dataToCompare{ + { + data: &reachabilityData{ + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{}, + }, + expectedResult: true, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + }, // Changed + &externalapi.DomainHash{}, + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), // Changed + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{100, 0}, // Changed start + model.FutureCoveringTreeNodeSet{}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{0, 100}, // Changed end + model.FutureCoveringTreeNodeSet{}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + 
externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, // Changed + }, + expectedResult: false, + }, + }, + }, + // Test filled data + { + baseData: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + dataToCompareTo: []dataToCompare{ + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: true, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{}, // Changed + }, + expectedResult: false, + }, + { + data: 
&reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{200, 200}, // Changed start + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + nil, //Changed + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 100}, // Changed end + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + 
[]*externalapi.DomainHash{}, // Changed + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + &externalapi.DomainHash{}, // Changed + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{}, // Changed + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{}, // Changed + }, + expectedResult: false, + }, + { + data: &reachabilityData{ + nil, + nil, + nil, + 
model.FutureCoveringTreeNodeSet{}, + }, + expectedResult: false, + }, + { + data: nil, + expectedResult: false, + }, + }, + }, + } + + for i, test := range tests { + for j, subTest := range test.dataToCompareTo { + result1 := test.baseData.Equal(subTest.data) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + + result2 := subTest.data.Equal(test.baseData) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} + +func TestReachabilityData_CloneWritable(t *testing.T) { + testData := []*reachabilityData{ + { + []*externalapi.DomainHash{}, + &externalapi.DomainHash{}, + &model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{}, + }, + { + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + { + []*externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + { + []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + 
&model.ReachabilityInterval{}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + { + []*externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + &model.ReachabilityInterval{100, 200}, + model.FutureCoveringTreeNodeSet{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2})}, + }, + } + + for i, data := range testData { + clone := data.CloneMutable() + if !clone.Equal(data) { + t.Fatalf("Test #%d: clone should be equal to the original", i) + } + + if !reflect.DeepEqual(data, clone) { + t.Fatalf("Test #%d: clone should be equal to the original", i) + } + } +} diff --git a/domain/consensus/utils/serialization/common.go b/domain/consensus/utils/serialization/common.go new file mode 100644 index 0000000..6e383b6 --- /dev/null +++ b/domain/consensus/utils/serialization/common.go @@ -0,0 +1,236 @@ +package serialization + +import ( + "io" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/binaryserializer" +) + +// errNoEncodingForType signifies that there's no encoding for the given type. +var errNoEncodingForType = errors.New("there's no encoding for this type") + +var errMalformed = errors.New("errMalformed") + +// WriteElement writes the little endian representation of element to w. +func WriteElement(w io.Writer, element interface{}) error { + // Attempt to write the element based on the concrete type via fast + // type assertions first. 
+ switch e := element.(type) { + case []byte: + err := WriteElement(w, uint64(len(e))) + if err != nil { + return err + } + _, err = w.Write(e) + if err != nil { + return err + } + return nil + case int16: + err := binaryserializer.PutUint16(w, uint16(e)) + if err != nil { + return err + } + return nil + case uint16: + err := binaryserializer.PutUint16(w, e) + if err != nil { + return err + } + return nil + case int32: + err := binaryserializer.PutUint32(w, uint32(e)) + if err != nil { + return err + } + return nil + + case uint32: + err := binaryserializer.PutUint32(w, e) + if err != nil { + return err + } + return nil + + case int64: + err := binaryserializer.PutUint64(w, uint64(e)) + if err != nil { + return err + } + return nil + + case uint64: + err := binaryserializer.PutUint64(w, e) + if err != nil { + return err + } + return nil + + case uint8: + err := binaryserializer.PutUint8(w, e) + if err != nil { + return err + } + return nil + + case bool: + var err error + if e { + err = binaryserializer.PutUint8(w, 0x01) + } else { + err = binaryserializer.PutUint8(w, 0x00) + } + if err != nil { + return err + } + return nil + + case externalapi.DomainHash: + _, err := w.Write(e.ByteSlice()) + if err != nil { + return err + } + return nil + + case *externalapi.DomainHash: + _, err := w.Write(e.ByteSlice()) + if err != nil { + return err + } + return nil + + case externalapi.DomainTransactionID: + _, err := w.Write(e.ByteSlice()) + if err != nil { + return err + } + return nil + + case externalapi.DomainSubnetworkID: + _, err := w.Write(e[:]) + if err != nil { + return err + } + return nil + + case *externalapi.DomainSubnetworkID: + _, err := w.Write(e[:]) + if err != nil { + return err + } + return nil + } + + return errors.Wrapf(errNoEncodingForType, "couldn't find a way to write type %T", element) +} + +// WriteElements writes multiple items to w. It is equivalent to multiple +// calls to writeElement. 
+func WriteElements(w io.Writer, elements ...interface{}) error { + for _, element := range elements { + err := WriteElement(w, element) + if err != nil { + return err + } + } + return nil +} + +// ReadElement reads the next sequence of bytes from r using little endian +// depending on the concrete type of element pointed to. +func ReadElement(r io.Reader, element interface{}) error { + // Attempt to read the element based on the concrete type via fast + // type assertions first. + switch e := element.(type) { + case *int16: + rv, err := binaryserializer.Uint16(r) + if err != nil { + return err + } + *e = int16(rv) + return nil + + case *uint16: + rv, err := binaryserializer.Uint16(r) + if err != nil { + return err + } + *e = rv + return nil + case *int32: + rv, err := binaryserializer.Uint32(r) + if err != nil { + return err + } + *e = int32(rv) + return nil + + case *uint32: + rv, err := binaryserializer.Uint32(r) + if err != nil { + return err + } + *e = rv + return nil + + case *int64: + rv, err := binaryserializer.Uint64(r) + if err != nil { + return err + } + *e = int64(rv) + return nil + + case *uint64: + rv, err := binaryserializer.Uint64(r) + if err != nil { + return err + } + *e = rv + return nil + + case *uint8: + rv, err := binaryserializer.Uint8(r) + if err != nil { + return err + } + *e = rv + return nil + + case *bool: + rv, err := binaryserializer.Uint8(r) + if err != nil { + return err + } + if rv == 0x00 { + *e = false + } else if rv == 0x01 { + *e = true + } else { + return errors.Wrapf(errMalformed, "in order to keep serialization canonical, true has to"+ + " always be 0x01") + } + return nil + } + + return errors.Wrapf(errNoEncodingForType, "couldn't find a way to read type %T", element) +} + +// ReadElements reads multiple items from r. It is equivalent to multiple +// calls to ReadElement. 
+func ReadElements(r io.Reader, elements ...interface{}) error { + for _, element := range elements { + err := ReadElement(r, element) + if err != nil { + return err + } + } + return nil +} + +// IsMalformedError returns whether the error indicates a malformed data source +func IsMalformedError(err error) bool { + return errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, errMalformed) +} diff --git a/domain/consensus/utils/sorters/timesorter.go b/domain/consensus/utils/sorters/timesorter.go new file mode 100644 index 0000000..e17e96c --- /dev/null +++ b/domain/consensus/utils/sorters/timesorter.go @@ -0,0 +1,32 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package sorters + +import "sort" + +// Int64Slice implements sort.Interface to allow a slice of timestamps to +// be sorted. +type Int64Slice []int64 + +// Len returns the number of timestamps in the slice. It is part of the +// sort.Interface implementation. +func (s Int64Slice) Len() int { + return len(s) +} + +// Swap swaps the timestamps at the passed indices. It is part of the +// sort.Interface implementation. +func (s Int64Slice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less returns whether the timstamp with index i should sort before the +// timestamp with index j. It is part of the sort.Interface implementation. +func (s Int64Slice) Less(i, j int) bool { + return s[i] < s[j] +} + +// Sort is a convenience method: s.Sort() calls sort.Sort(s). 
+func (s Int64Slice) Sort() { sort.Sort(s) } diff --git a/domain/consensus/utils/subnetworks/compare.go b/domain/consensus/utils/subnetworks/compare.go new file mode 100644 index 0000000..1869707 --- /dev/null +++ b/domain/consensus/utils/subnetworks/compare.go @@ -0,0 +1,12 @@ +package subnetworks + +import ( + "bytes" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// Less returns true iff id a is less than id b +func Less(a, b externalapi.DomainSubnetworkID) bool { + return bytes.Compare(a[:], b[:]) < 0 +} diff --git a/domain/consensus/utils/subnetworks/from_bytes.go b/domain/consensus/utils/subnetworks/from_bytes.go new file mode 100644 index 0000000..e81d665 --- /dev/null +++ b/domain/consensus/utils/subnetworks/from_bytes.go @@ -0,0 +1,17 @@ +package subnetworks + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// FromBytes creates a DomainSubnetworkID from the given byte slice +func FromBytes(subnetworkIDBytes []byte) (*externalapi.DomainSubnetworkID, error) { + if len(subnetworkIDBytes) != externalapi.DomainSubnetworkIDSize { + return nil, errors.Errorf("invalid hash size. 
Want: %d, got: %d", + externalapi.DomainSubnetworkIDSize, len(subnetworkIDBytes)) + } + var domainSubnetworkID externalapi.DomainSubnetworkID + copy(domainSubnetworkID[:], subnetworkIDBytes) + return &domainSubnetworkID, nil +} diff --git a/domain/consensus/utils/subnetworks/from_string.go b/domain/consensus/utils/subnetworks/from_string.go new file mode 100644 index 0000000..f929fad --- /dev/null +++ b/domain/consensus/utils/subnetworks/from_string.go @@ -0,0 +1,16 @@ +package subnetworks + +import ( + "encoding/hex" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// FromString creates a DomainSubnetworkID from the given byte slice +func FromString(str string) (*externalapi.DomainSubnetworkID, error) { + subnetworkIDBytes, err := hex.DecodeString(str) + if err != nil { + return nil, err + } + return FromBytes(subnetworkIDBytes) +} diff --git a/domain/consensus/utils/subnetworks/subnetworks.go b/domain/consensus/utils/subnetworks/subnetworks.go new file mode 100644 index 0000000..7af256f --- /dev/null +++ b/domain/consensus/utils/subnetworks/subnetworks.go @@ -0,0 +1,27 @@ +package subnetworks + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +var ( + // SubnetworkIDNative is the default subnetwork ID which is used for transactions without related payload data + SubnetworkIDNative = externalapi.DomainSubnetworkID{} + + // SubnetworkIDCoinbase is the subnetwork ID which is used for the coinbase transaction + SubnetworkIDCoinbase = externalapi.DomainSubnetworkID{1} + + // SubnetworkIDRegistry is the subnetwork ID which is used for adding new sub networks to the registry + SubnetworkIDRegistry = externalapi.DomainSubnetworkID{2} +) + +// IsBuiltIn returns true if the subnetwork is a built in subnetwork, which +// means all nodes, including partial nodes, must validate it, and its transactions +// always use 0 gas. 
+func IsBuiltIn(id externalapi.DomainSubnetworkID) bool { + return id == SubnetworkIDCoinbase || id == SubnetworkIDRegistry +} + +// IsBuiltInOrNative returns true if the subnetwork is the native or a built in subnetwork, +// see IsBuiltIn for further details +func IsBuiltInOrNative(id externalapi.DomainSubnetworkID) bool { + return id == SubnetworkIDNative || IsBuiltIn(id) +} diff --git a/domain/consensus/utils/testutils/create_transaction.go b/domain/consensus/utils/testutils/create_transaction.go new file mode 100644 index 0000000..005d6d4 --- /dev/null +++ b/domain/consensus/utils/testutils/create_transaction.go @@ -0,0 +1,38 @@ +package testutils + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +// CreateTransaction create a transaction that spends the first output of provided transaction. 
+// Assumes that the output being spent has opTrueScript as it's scriptPublicKey +// Creates the value of the spent output minus 1 sompi +func CreateTransaction(txToSpend *externalapi.DomainTransaction, fee uint64) (*externalapi.DomainTransaction, error) { + scriptPublicKey, redeemScript := OpTrueScript() + + signatureScript, err := txscript.PayToScriptHashSignatureScript(redeemScript, nil) + if err != nil { + return nil, err + } + input := &externalapi.DomainTransactionInput{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(txToSpend), + Index: 0, + }, + SignatureScript: signatureScript, + Sequence: constants.MaxTxInSequenceNum, + } + output := &externalapi.DomainTransactionOutput{ + ScriptPublicKey: scriptPublicKey, + Value: txToSpend.Outputs[0].Value - fee, + } + return &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: []*externalapi.DomainTransactionOutput{output}, + Payload: []byte{}, + }, nil +} diff --git a/domain/consensus/utils/testutils/for_all_nets.go b/domain/consensus/utils/testutils/for_all_nets.go new file mode 100644 index 0000000..f8f369e --- /dev/null +++ b/domain/consensus/utils/testutils/for_all_nets.go @@ -0,0 +1,29 @@ +package testutils + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +// ForAllNets runs the passed testFunc with all available networks +// if setDifficultyToMinumum = true - will modify the net params to have minimal difficulty, like in SimNet +func ForAllNets(t *testing.T, skipPow bool, testFunc func(*testing.T, *consensus.Config)) { + allParams := []dagconfig.Params{ + dagconfig.MainnetParams, + dagconfig.TestnetParams, + dagconfig.SimnetParams, + dagconfig.DevnetParams, + } + + for _, params := range allParams { + consensusConfig := consensus.Config{Params: params} + 
t.Run(consensusConfig.Name, func(t *testing.T) { + t.Parallel() + consensusConfig.SkipProofOfWork = skipPow + t.Logf("Running test for %s", consensusConfig.Name) + testFunc(t, &consensusConfig) + }) + } +} diff --git a/domain/consensus/utils/testutils/op_true_script.go b/domain/consensus/utils/testutils/op_true_script.go new file mode 100644 index 0000000..1f16412 --- /dev/null +++ b/domain/consensus/utils/testutils/op_true_script.go @@ -0,0 +1,21 @@ +package testutils + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" +) + +// OpTrueScript returns a P2SH script paying to an anyone-can-spend address, +// The second return value is a redeemScript to be used with txscript.PayToScriptHashSignatureScript +func OpTrueScript() (*externalapi.ScriptPublicKey, []byte) { + var err error + redeemScript := []byte{txscript.OpTrue} + scriptPublicKeyScript, err := txscript.PayToScriptHashScript(redeemScript) + if err != nil { + panic(errors.Wrapf(err, "Couldn't parse opTrueScript. 
This should never happen")) + } + scriptPublicKey := &externalapi.ScriptPublicKey{Script: scriptPublicKeyScript, Version: constants.MaxScriptPublicKeyVersion} + return scriptPublicKey, redeemScript +} diff --git a/domain/consensus/utils/testutils/test_ghostdag.go b/domain/consensus/utils/testutils/test_ghostdag.go new file mode 100644 index 0000000..349819a --- /dev/null +++ b/domain/consensus/utils/testutils/test_ghostdag.go @@ -0,0 +1,49 @@ +package testutils + +import ( + "sort" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" +) + +type testGhostDAGSorter struct { + slice []*externalapi.DomainHash + tc testapi.TestConsensus + test testing.TB + stagingArea *model.StagingArea +} + +// NewTestGhostDAGSorter returns a sort.Interface over the slice, so you can sort it via GhostDAG ordering +func NewTestGhostDAGSorter(stagingArea *model.StagingArea, slice []*externalapi.DomainHash, tc testapi.TestConsensus, + t testing.TB) sort.Interface { + + return testGhostDAGSorter{ + slice: slice, + tc: tc, + test: t, + stagingArea: stagingArea, + } +} + +func (sorter testGhostDAGSorter) Len() int { + return len(sorter.slice) +} + +func (sorter testGhostDAGSorter) Less(i, j int) bool { + ghostdagDataI, err := sorter.tc.GHOSTDAGDataStore().Get(sorter.tc.DatabaseContext(), sorter.stagingArea, sorter.slice[i], false) + if err != nil { + sorter.test.Fatalf("TestGhostDAGSorter: Failed getting ghostdag data for %s", err) + } + ghostdagDataJ, err := sorter.tc.GHOSTDAGDataStore().Get(sorter.tc.DatabaseContext(), sorter.stagingArea, sorter.slice[j], false) + if err != nil { + sorter.test.Fatalf("TestGhostDAGSorter: Failed getting ghostdag data for %s", err) + } + return !sorter.tc.GHOSTDAGManager().Less(sorter.slice[i], ghostdagDataI, sorter.slice[j], ghostdagDataJ) +} + +func (sorter testGhostDAGSorter) Swap(i, j 
int) { + sorter.slice[i], sorter.slice[j] = sorter.slice[j], sorter.slice[i] +} diff --git a/domain/consensus/utils/transactionhelper/coinbase.go b/domain/consensus/utils/transactionhelper/coinbase.go new file mode 100644 index 0000000..be15a0f --- /dev/null +++ b/domain/consensus/utils/transactionhelper/coinbase.go @@ -0,0 +1,19 @@ +package transactionhelper + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" +) + +// CoinbaseTransactionIndex is the index of the coinbase transaction in every block +const CoinbaseTransactionIndex = 0 + +// IsCoinBase determines whether or not a transaction is a coinbase transaction. A coinbase +// transaction is a special transaction created by miners that distributes fees and block subsidy +// to the previous blocks' miners, and to specify the scriptPubKey that will be used to pay the current +// miner in future blocks. Each input of the coinbase transaction should set index to maximum +// value and reference the relevant block id, instead of previous transaction id. 
+func IsCoinBase(tx *externalapi.DomainTransaction) bool { + // A coinbase transaction must have subnetwork id SubnetworkIDCoinbase + return tx.SubnetworkID == subnetworks.SubnetworkIDCoinbase +} diff --git a/domain/consensus/utils/transactionhelper/new.go b/domain/consensus/utils/transactionhelper/new.go new file mode 100644 index 0000000..4122f38 --- /dev/null +++ b/domain/consensus/utils/transactionhelper/new.go @@ -0,0 +1,40 @@ +package transactionhelper + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" +) + +// NewSubnetworkTransaction returns a new trsnactions in the specified subnetwork with specified gas and payload +func NewSubnetworkTransaction(version uint16, inputs []*externalapi.DomainTransactionInput, + outputs []*externalapi.DomainTransactionOutput, subnetworkID *externalapi.DomainSubnetworkID, + gas uint64, payload []byte) *externalapi.DomainTransaction { + + return &externalapi.DomainTransaction{ + Version: version, + Inputs: inputs, + Outputs: outputs, + LockTime: 0, + SubnetworkID: *subnetworkID, + Gas: gas, + Payload: payload, + Fee: 0, + Mass: 0, + } +} + +// NewNativeTransaction returns a new native transaction +func NewNativeTransaction(version uint16, inputs []*externalapi.DomainTransactionInput, + outputs []*externalapi.DomainTransactionOutput) *externalapi.DomainTransaction { + return &externalapi.DomainTransaction{ + Version: version, + Inputs: inputs, + Outputs: outputs, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Payload: []byte{}, + Fee: 0, + Mass: 0, + } +} diff --git a/domain/consensus/utils/transactionid/from_bytes.go b/domain/consensus/utils/transactionid/from_bytes.go new file mode 100644 index 0000000..c141b52 --- /dev/null +++ b/domain/consensus/utils/transactionid/from_bytes.go @@ -0,0 +1,14 @@ +package transactionid + +import ( + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// FromBytes creates a DomainTransactionID from the given byte slice +func FromBytes(transactionIDBytes []byte) (*externalapi.DomainTransactionID, error) { + hash, err := externalapi.NewDomainHashFromByteSlice(transactionIDBytes) + if err != nil { + return nil, err + } + return (*externalapi.DomainTransactionID)(hash), nil +} diff --git a/domain/consensus/utils/transactionid/from_string.go b/domain/consensus/utils/transactionid/from_string.go new file mode 100644 index 0000000..72b08f3 --- /dev/null +++ b/domain/consensus/utils/transactionid/from_string.go @@ -0,0 +1,11 @@ +package transactionid + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// FromString creates a new DomainTransactionID from the given string +func FromString(str string) (*externalapi.DomainTransactionID, error) { + hash, err := externalapi.NewDomainHashFromString(str) + return (*externalapi.DomainTransactionID)(hash), err +} diff --git a/domain/consensus/utils/txscript/README.md b/domain/consensus/utils/txscript/README.md new file mode 100644 index 0000000..56806de --- /dev/null +++ b/domain/consensus/utils/txscript/README.md @@ -0,0 +1,24 @@ +# Transaction Script Language + +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) +[![GoDoc](https://godoc.org/github.com/spectre-project/spectred/txscript?status.png)](http://godoc.org/github.com/spectre-project/spectred/txscript) + +Package txscript implements the spectre transaction script language. +There is a comprehensive test suite. + +## Spectre Scripts + +Spectre provides a stack-based, FORTH-like language for the scripts in +the spectre transactions. This language is not turing complete although +it is still fairly powerful. 
+ +## Examples + +* [Standard Pay-to-pubkey Script](http://godoc.org/github.com/spectre-project/spectred/txscript#example-PayToAddrScript) + Demonstrates creating a script which pays to a spectre address. It + also prints the created script hex and uses the DisasmString + function to display the disassembled script. + +* [Extracting Details from Standard Scripts](http://godoc.org/github.com/spectre-project/spectred/txscript#example-ExtractPkScriptAddrs) + Demonstrates extracting information from a standard public key + script. diff --git a/domain/consensus/utils/txscript/data/LICENSE b/domain/consensus/utils/txscript/data/LICENSE new file mode 100644 index 0000000..9cb24e2 --- /dev/null +++ b/domain/consensus/utils/txscript/data/LICENSE @@ -0,0 +1,8 @@ +The json files in this directory originate from the bitcoind project +(https://github.com/bitcoin/bitcoin) and is released under the following +license: + + Copyright (c) 2012-2014 The Bitcoin Core developers + Distributed under the MIT/X11 software license, see the accompanying + file COPYING or http://www.opensource.org/licenses/mit-license.php. + diff --git a/domain/consensus/utils/txscript/data/script_tests.json b/domain/consensus/utils/txscript/data/script_tests.json new file mode 100644 index 0000000..5bc5e7a --- /dev/null +++ b/domain/consensus/utils/txscript/data/script_tests.json @@ -0,0 +1,5309 @@ +[ + [ + "Format is: [[wit..., amount]?, scriptSig, scriptPubKey, flags, expected_scripterror, ... comments]" + ], + [ + "It is evaluated as if there was a crediting coinbase transaction with two 0" + ], + [ + "pushes as scriptSig, and one output of 0 satoshi and given scriptPubKey," + ], + [ + "followed by a spending transaction which spends this output as only input (and" + ], + [ + "correct prevout hash), using the given scriptSig. All nLockTimes are 0, all" + ], + [ + "nSequences are max." 
+ ], + [ + "", + "DEPTH 0 EQUAL", + "", + "OK", + "Test the test: we should have an empty stack after scriptSig evaluation" + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK", + "and multiple spaces should not change that." + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK" + ], + [ + " ", + "DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK", + "Similarly whitespace around and between symbols" + ], + [ + "1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + " 1 2", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "1 2 ", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + " 1 2 ", + "2 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "1", + "", + "", + "OK" + ], + [ + "0x02 0x01 0x00", + "", + "", + "OK", + "all bytes are significant, not only the last one" + ], + [ + "0x09 0x00000000 0x00000000 0x10", + "", + "", + "OK", + "equals zero when cast to Int64" + ], + [ + "0x01 0x11", + "17 EQUAL", + "", + "OK", + "push 1 byte" + ], + [ + "0x02 0x417a", + "'Az' EQUAL", + "", + "OK" + ], + [ + "0x4b 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "push 75 bytes" + ], + [ + "0x4c 0x4c 0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "0x4c is OP_PUSHDATA1 (push 76 bytes)" + ], + [ + "0x4d 0x0001 
0x417a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a", + "'Azzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' EQUAL", + "", + "OK", + "0x4d is OP_PUSHDATA2" + ], + [ + "0x4f 1000", + "ADD 999 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 0x50 ENDIF 1", + "", + "OK", + "0x50 is reserved (ok if not executed)" + ], + [ + "0x51", + "0x5f ADD 0x60 EQUAL", + "", + "OK", + "0x51 through 0x60 push 1 through 16 onto stack" + ], + [ + "1", + "NOP", + "", + "OK" + ], + [ + "0", + "IF VER ELSE 1 ENDIF", + "", + "OK", + "VER non-functional (ok if not executed)" + ], + [ + "0", + "IF RESERVED RESERVED1 RESERVED2 ELSE 1 ENDIF", + "", + "OK", + "RESERVED ok in un-executed IF" + ], + [ + "1", + "DUP IF ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ENDIF", + "", + "OK" + ], + [ + "1", + "DUP IF ELSE ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ELSE ENDIF", + "", + "OK" + ], + [ + "0", + "IF ELSE 1 ENDIF", + "", + "OK" + ], + [ + "1 1", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 1", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0 0", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 0", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "OK" + ], + [ + "1 1", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + 
"", + "OK" + ], + [ + "1 0", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0 1", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0 ELSE 1 ELSE 0 ENDIF", + "", + "OK", + "Multiple ELSE's are valid and executed inverts on each ELSE encountered" + ], + [ + "1", + "IF 1 ELSE 0 ELSE ENDIF", + "", + "OK" + ], + [ + "1", + "IF ELSE 0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "1", + "IF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "'' 1", + "IF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", + "", + "OK" + ], + [ + "1", + "NOTIF 0 ELSE 1 ELSE 0 ENDIF", + "", + "OK", + "Multiple ELSE's are valid and execution inverts on each ELSE encountered" + ], + [ + "0", + "NOTIF 1 ELSE 0 ELSE ENDIF", + "", + "OK" + ], + [ + "0", + "NOTIF ELSE 0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "NOTIF 1 ELSE 0 ELSE 1 ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "'' 0", + "NOTIF SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ELSE ELSE SHA256 ENDIF 0x20 0x2c49a55fe0ca3e7a005420c19a527865df8f17e468d234f562ef238d4236a632 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 1 IF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 1 IF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", + "", + "OK", + "Nested ELSE ELSE" + ], + [ + "1", + "NOTIF 0 NOTIF RETURN ELSE RETURN ELSE RETURN ENDIF ELSE 0 
NOTIF 1 ELSE RETURN ELSE 1 ENDIF ELSE RETURN ENDIF ADD 2 EQUAL", + "", + "OK" + ], + [ + "0", + "IF RETURN ENDIF 1", + "", + "OK", + "RETURN only works if executed" + ], + [ + "1 1", + "VERIFY", + "", + "OK" + ], + [ + "1 0x05 0x01 0x00 0x00 0x00 0x00", + "VERIFY", + "", + "OK", + "values >4 bytes can be cast to boolean" + ], + [ + "0x01 0x80", + "VERIFY TRUE", + "", + "VERIFY", + "negative 0 is false" + ], + [ + "10 0 11", + "TOALTSTACK DROP FROMALTSTACK ADD 21 EQUAL", + "", + "OK" + ], + [ + "'gavin_was_here'", + "TOALTSTACK 11 FROMALTSTACK 'gavin_was_here' EQUALVERIFY 11 EQUAL", + "", + "OK" + ], + [ + "0", + "IFDUP DEPTH 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "1", + "IFDUP DEPTH 2 EQUALVERIFY 1 EQUALVERIFY 1 EQUAL", + "", + "OK" + ], + [ + "0x05 0x0100000000", + "IFDUP DEPTH 2 EQUALVERIFY 0x05 0x0100000000 EQUALVERIFY DROP TRUE", + "", + "OK", + "IFDUP dups non ints" + ], + [ + "0", + "DROP DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "0", + "DUP 1 ADD 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "0 1", + "NIP", + "", + "OK" + ], + [ + "1 0", + "OVER DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "0 PICK 20 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "1 PICK 21 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "2 PICK 22 EQUALVERIFY DEPTH 3 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "2 ROLL 22 EQUALVERIFY DEPTH 2 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT 22 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT DROP 20 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "22 21 20", + "ROT DROP DROP 21 EQUAL", + "", + "OK" + ], + [ + "22 21 20", + "ROT ROT 21 EQUAL 
2DROP", + "", + "OK" + ], + [ + "22 21 20", + "ROT ROT ROT 20 EQUALVERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 24 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT DROP 25 EQUALVERIFY DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 20 EQUALVERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP DROP 21 EQUALVERIFY 2DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 2DROP 22 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2DROP 2DROP DROP 23 EQUALVERIFY TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2ROT 22 EQUALVERIFY 2DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "25 24 23 22 21 20", + "2ROT 2ROT 2ROT 20 EQUALVERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "SWAP 1 EQUALVERIFY 0 EQUAL", + "", + "OK" + ], + [ + "0 1", + "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", + "", + "OK" + ], + [ + "13 14", + "2DUP ROT EQUALVERIFY EQUAL", + "", + "OK" + ], + [ + "-1 0 1 2", + "3DUP DEPTH 7 EQUALVERIFY ADD ADD 3 EQUALVERIFY 2DROP 0 EQUALVERIFY", + "", + "OK" + ], + [ + "1 2 3 5", + "2OVER ADD ADD 8 EQUALVERIFY ADD ADD 6 EQUAL", + "", + "OK" + ], + [ + "1 3 5 7", + "2SWAP ADD 4 EQUALVERIFY ADD 12 EQUAL", + "", + "OK" + ], + [ + "0", + "SIZE 0 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "1", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "127", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "128", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "32767", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "32768", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "8388607", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "8388608", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "2147483647", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "2147483648", + "SIZE 5 
EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "549755813887", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "549755813888", + "SIZE 6 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "9223372036854775807", + "SIZE 8 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-1", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-127", + "SIZE 1 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-128", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-32767", + "SIZE 2 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-32768", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-8388607", + "SIZE 3 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-8388608", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-2147483647", + "SIZE 4 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-2147483648", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-549755813887", + "SIZE 5 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-549755813888", + "SIZE 6 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "-9223372036854775807", + "SIZE 8 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SIZE 26 EQUALVERIFY DROP TRUE", + "", + "OK" + ], + [ + "42", + "SIZE 1 EQUALVERIFY 42 EQUAL", + "", + "OK", + "SIZE does not consume argument" + ], + [ + "2 -2", + "ADD 0 EQUAL", + "", + "OK" + ], + [ + "2147483647 -2147483647", + "ADD 0 EQUAL", + "", + "OK" + ], + [ + "-1 -1", + "ADD -2 EQUAL", + "", + "OK" + ], + [ + "0 0", + "EQUAL", + "", + "OK" + ], + [ + "1 1", + "ADD 2 EQUAL", + "", + "OK" + ], + [ + "1", + "1ADD 2 EQUAL", + "", + "OK" + ], + [ + "111", + "1SUB 110 EQUAL", + "", + "OK" + ], + [ + "111 1", + "ADD 12 SUB 100 EQUAL", + "", + "OK" + ], + [ + "0", + "ABS 0 EQUAL", + "", + "OK" + ], + [ + "16", + "ABS 16 EQUAL", + "", + "OK" + ], + [ + "-16", + "ABS -16 NEGATE EQUAL", + "", + "OK" + ], + [ + "0", + "NOT NOP", + "", + "OK" + ], + [ + "1", + "NOT 0 EQUAL", + "", + "OK" + ], + [ + "11", + "NOT 0 EQUAL", + "", 
+ "OK" + ], + [ + "0", + "0NOTEQUAL 0 EQUAL", + "", + "OK" + ], + [ + "1", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "111", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "-111", + "0NOTEQUAL 1 EQUAL", + "", + "OK" + ], + [ + "1 1", + "BOOLAND NOP", + "", + "OK" + ], + [ + "1 0", + "BOOLAND NOT", + "", + "OK" + ], + [ + "0 1", + "BOOLAND NOT", + "", + "OK" + ], + [ + "0 0", + "BOOLAND NOT", + "", + "OK" + ], + [ + "16 17", + "BOOLAND NOP", + "", + "OK" + ], + [ + "1 1", + "BOOLOR NOP", + "", + "OK" + ], + [ + "1 0", + "BOOLOR NOP", + "", + "OK" + ], + [ + "0 1", + "BOOLOR NOP", + "", + "OK" + ], + [ + "0 0", + "BOOLOR NOT", + "", + "OK" + ], + [ + "16 17", + "BOOLOR NOP", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMEQUAL", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMEQUALVERIFY 1", + "", + "OK" + ], + [ + "11 10 1", + "ADD NUMNOTEQUAL NOT", + "", + "OK" + ], + [ + "111 10 1", + "ADD NUMNOTEQUAL", + "", + "OK" + ], + [ + "11 10", + "LESSTHAN NOT", + "", + "OK" + ], + [ + "4 4", + "LESSTHAN NOT", + "", + "OK" + ], + [ + "10 11", + "LESSTHAN", + "", + "OK" + ], + [ + "-11 11", + "LESSTHAN", + "", + "OK" + ], + [ + "-11 -10", + "LESSTHAN", + "", + "OK" + ], + [ + "11 10", + "GREATERTHAN", + "", + "OK" + ], + [ + "4 4", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "10 11", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "-11 11", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "-11 -10", + "GREATERTHAN NOT", + "", + "OK" + ], + [ + "11 10", + "LESSTHANOREQUAL NOT", + "", + "OK" + ], + [ + "4 4", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "10 11", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "-11 11", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "-11 -10", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "11 10", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "4 4", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "10 11", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "-11 11", + "GREATERTHANOREQUAL NOT", + "", + "OK" + ], + [ + "-11 -10", + "GREATERTHANOREQUAL 
NOT", + "", + "OK" + ], + [ + "1 0", + "MIN 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 1", + "MIN 0 NUMEQUAL", + "", + "OK" + ], + [ + "-1 0", + "MIN -1 NUMEQUAL", + "", + "OK" + ], + [ + "0 -2147483647", + "MIN -2147483647 NUMEQUAL", + "", + "OK" + ], + [ + "2147483647 0", + "MAX 2147483647 NUMEQUAL", + "", + "OK" + ], + [ + "0 100", + "MAX 100 NUMEQUAL", + "", + "OK" + ], + [ + "-100 0", + "MAX 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 -2147483647", + "MAX 0 NUMEQUAL", + "", + "OK" + ], + [ + "0 0 1", + "WITHIN", + "", + "OK" + ], + [ + "1 0 1", + "WITHIN NOT", + "", + "OK" + ], + [ + "0 -2147483647 2147483647", + "WITHIN", + "", + "OK" + ], + [ + "-1 -100 100", + "WITHIN", + "", + "OK" + ], + [ + "11 -100 100", + "WITHIN", + "", + "OK" + ], + [ + "-2147483647 -100 100", + "WITHIN NOT", + "", + "OK" + ], + [ + "2147483647 -100 100", + "WITHIN NOT", + "", + "OK" + ], + [ + "2147483647 2147483647", + "SUB 0 EQUAL", + "", + "OK" + ], + [ + "2147483647", + "DUP ADD 4294967294 EQUAL", + "", + "OK", + ">32 bit EQUAL is valid" + ], + [ + "2147483647", + "NEGATE DUP ADD -4294967294 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + "OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "SHA256 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 EQUAL", + "", + 
"OK" + ], + [ + "'a'", + "SHA256 0x20 0xca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "SHA256 0x20 0x71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 EQUAL", + "", + "OK" + ], + [ + "''", + "NOP BLAKE2B 0x20 0x0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8 EQUAL", + "", + "OK" + ], + [ + "'a'", + "BLAKE2B NOP 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", + "", + "OK" + ], + [ + "'abcdefghijklmnopqrstuvwxyz'", + "NOP BLAKE2B 0x20 0x117ad6b940f5e8292c007d9c7e7350cd33cf85b5887e8da71c7957830f536e7c EQUAL", + "", + "OK", + "The NOP is added so the script won't be interpreted as P2SH" + ], + [ + "'a'", + "NOP BLAKE2B 0x20 0x8928aae63c84d87ea098564d1e03ad813f107add474e56aedd286349c0c03ea4 EQUAL", + "", + "OK" + ], + [ + "0", + "IF 0xb2 ELSE 1 ENDIF", + "", + "OK", + "opcodes above OP_CHECKSEQUENCEVERIFY invalid if executed" + ], + [ + "0", + "IF 0xbd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xbe ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xbf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xc9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xca ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xce ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xcf 
ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xd9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xda ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xde ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xdf ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe3 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xe9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xea ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xeb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xec ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xed ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xee ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xef ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf0 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf1 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf2 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf3 ELSE 1 ENDIF", + 
"", + "OK" + ], + [ + "0", + "IF 0xf4 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf5 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf6 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf7 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf8 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xf9 ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfa ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfb ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfc ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfd ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xfe ELSE 1 ENDIF", + "", + "OK" + ], + [ + "0", + "IF 0xff ELSE 1 ENDIF", + "", + "OK" + ], + [ + "", + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "", + "OK", + "520 byte push" + ], + [ + "1", + "0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "OK", + "201 opcodes executed. 
0x61 is NOP" + ], + [ + "1 2 3 4 5", + "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 1 2 3 4 5 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d75", + "", + "OK", + "244 stack size (0x6f is 3DUP, 0x6d is 2DROP, and 0x75 is DROP)" + ], + [ + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP DROP 0x6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d 0x61616161", + "", + "OK", + "Max-size (10,000-byte), max-push(520 bytes), max-opcodes(201), max stack size(244 items). 
0x6f is 3DUP, 0x61 is NOP, 0x6d is 2DROP" + ], + [ + "0", + "IF 0x5050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050505050 ENDIF 1", + "", + "OK", + ">201 opcodes, but RESERVED (0x50) doesn't count towards opcode limit." + ], + [ + "", + "1", + "", + "OK" + ], + [ + "127", + "0x01 0x7F EQUAL", + "", + "OK" + ], + [ + "128", + "0x02 0x8000 EQUAL", + "", + "OK", + "Leave room for the sign bit" + ], + [ + "32767", + "0x02 0xFF7F EQUAL", + "", + "OK" + ], + [ + "32768", + "0x03 0x008000 EQUAL", + "", + "OK" + ], + [ + "8388607", + "0x03 0xFFFF7F EQUAL", + "", + "OK" + ], + [ + "8388608", + "0x04 0x00008000 EQUAL", + "", + "OK" + ], + [ + "2147483647", + "0x04 0xFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "2147483648", + "0x05 0x0000008000 EQUAL", + "", + "OK" + ], + [ + "549755813887", + "0x05 0xFFFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "549755813888", + "0x06 0xFFFFFFFF7F EQUALVERIFY 2DROP TRUE", + "", + "OK" + ], + [ + "9223372036854775807", + "0x08 0xFFFFFFFFFFFFFF7F EQUAL", + "", + "OK" + ], + [ + "-2", + "0x01 0x82 EQUAL", + "", + "OK", + "Numbers are little-endian with the MSB being a sign bit" + ], + [ + "-127", + "0x01 0xFF EQUAL", + "", + "OK" + ], + [ + "-128", + "0x02 0x8080 EQUAL", + "", + "OK" + ], + [ + "-32767", + "0x02 0xFFFF EQUAL", + "", + "OK" + ], + [ + "-32768", + "0x03 0x008080 EQUAL", + "", + "OK" + ], + [ + "-8388607", + "0x03 0xFFFFFF EQUAL", + "", + "OK" + ], + [ + "-8388608", + "0x04 0x00008080 EQUAL", + "", + "OK" + ], + [ + "-2147483647", + "0x04 0xFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "-2147483648", + "0x05 0x0000008080 EQUAL", + "", + "OK" + ], + [ + "-4294967295", + "0x05 0xFFFFFFFF80 EQUAL", + "", 
+ "OK" + ], + [ + "-549755813887", + "0x05 0xFFFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "-549755813888", + "0x06 0x000000008080 EQUAL", + "", + "OK" + ], + [ + "-9223372036854775807", + "0x08 0xFFFFFFFFFFFFFFFF EQUAL", + "", + "OK" + ], + [ + "2147483647", + "1ADD 2147483648 EQUAL", + "", + "OK", + "We can do math on 4-byte integers, and compare 5-byte ones" + ], + [ + "2147483647", + "1ADD DROP 1", + "", + "OK" + ], + [ + "-2147483647", + "1ADD DROP 1", + "", + "OK" + ], + [ + "1", + "0x02 0x0100 EQUAL NOT", + "", + "OK", + "Not the same byte array..." + ], + [ + "0", + "0x01 0x80 EQUAL NOT", + "", + "OK" + ], + [ + "", + "NOP 1", + "", + "OK", + "The following tests check the if(stack.size() < N) tests in each opcode" + ], + [ + "1", + "IF 1 ENDIF", + "", + "OK", + "They are here to catch copy-and-paste errors" + ], + [ + "0", + "NOTIF 1 ENDIF", + "", + "OK", + "Most of them are duplicated elsewhere," + ], + [ + "1", + "VERIFY 1", + "", + "OK", + "but, hey, more is always better, right?" 
+ ], + [ + "0", + "TOALTSTACK 1", + "", + "OK" + ], + [ + "1", + "TOALTSTACK FROMALTSTACK", + "", + "OK" + ], + [ + "0 0", + "2DROP 1", + "", + "OK" + ], + [ + "0 1", + "2DUP VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 0 1", + "3DUP VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0", + "2OVER VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0 0 0", + "2ROT VERIFY DROP DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "0 1 0 0", + "2SWAP VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1", + "IFDUP VERIFY", + "", + "OK" + ], + [ + "", + "DEPTH 1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0", + "DROP 1", + "", + "OK" + ], + [ + "1", + "DUP VERIFY", + "", + "OK" + ], + [ + "0 1", + "NIP", + "", + "OK" + ], + [ + "1 0", + "OVER VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0 0 0 3", + "PICK VERIFY DROP DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "PICK VERIFY DROP TRUE", + "", + "OK" + ], + [ + "1 0 0 0 3", + "ROLL VERIFY DROP DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "ROLL", + "", + "OK" + ], + [ + "1 0 0", + "ROT VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1 0", + "SWAP VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0 1", + "TUCK VERIFY DROP DROP TRUE", + "", + "OK" + ], + [ + "1", + "SIZE VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0 0", + "EQUAL", + "", + "OK" + ], + [ + "0 0", + "EQUALVERIFY 1", + "", + "OK" + ], + [ + "0 0 1", + "EQUAL EQUAL", + "", + "OK", + "OP_0 and bools must have identical byte representations" + ], + [ + "0", + "1ADD", + "", + "OK" + ], + [ + "2", + "1SUB", + "", + "OK" + ], + [ + "-1", + "NEGATE", + "", + "OK" + ], + [ + "-1", + "ABS", + "", + "OK" + ], + [ + "0", + "NOT", + "", + "OK" + ], + [ + "-1", + "0NOTEQUAL", + "", + "OK" + ], + [ + "1 0", + "ADD", + "", + "OK" + ], + [ + "1 0", + "SUB", + "", + "OK" + ], + [ + "-1 -1", + "BOOLAND", + "", + "OK" + ], + [ + "-1 0", + "BOOLOR", + "", + "OK" + ], + [ + "0 0", + "NUMEQUAL", + "", + "OK" + ], + [ + "5 
4", + "NUMEQUAL FALSE EQUAL", + "", + "OK" + ], + [ + "0 0", + "NUMEQUALVERIFY 1", + "", + "OK" + ], + [ + "-1 0", + "NUMNOTEQUAL", + "", + "OK" + ], + [ + "-1 0", + "LESSTHAN", + "", + "OK" + ], + [ + "1 0", + "GREATERTHAN", + "", + "OK" + ], + [ + "0 0", + "LESSTHANOREQUAL", + "", + "OK" + ], + [ + "0 0", + "GREATERTHANOREQUAL", + "", + "OK" + ], + [ + "-1 0", + "MIN", + "", + "OK" + ], + [ + "1 0", + "MAX", + "", + "OK" + ], + [ + "-1 -1 0", + "WITHIN", + "", + "OK" + ], + [ + "0", + "SHA256", + "", + "OK" + ], + [ + "0", + "BLAKE2B", + "", + "OK" + ], + [ + "", + "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "CHECKMULTISIG is allowed to have zero keys and/or sigs" + ], + [ + "", + "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Zero sigs means no sigs are checked" + ], + [ + "", + "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "CHECKMULTISIG is allowed to have zero keys and/or sigs" + ], + [ + "", + "0 0 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 0 1 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Zero sigs means no sigs are checked" + ], + [ + "", + "0 0 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 2 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK", + "Test from up to 20 pubkeys, all not checked" + ], + [ + "", + "0 'a' 'b' 'c' 3 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIG 
VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG VERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 1 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 2 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 3 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 4 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 5 CHECKMULTISIGVERIFY DEPTH 
0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 6 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 7 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 8 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 9 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 10 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 11 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 12 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 13 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 14 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 15 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 16 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 17 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 18 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 19 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "", + "0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DEPTH 0 EQUAL", + "", + "OK" + ], + [ + "1", + "0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 
CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY 0 0 CHECKMULTISIGVERIFY", + "", + "OK", + "nOpCount is incremented by the number of keys evaluated in addition to the usual one op per op. In this case we have zero keys, so we can execute 201 CHECKMULTISIGS" + ], + [ + "", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY DROP DROP DROP DROP DROP DROP DROP TRUE", + "", + "OK", + "Even though there are no signatures being checked nOpCount is incremented by the number of keys." 
+ ], + [ + "1", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", + "", + "OK" + ], + [ + "0x01 1", + "BLAKE2B 0x20 0xce57216285125006ec18197bd8184221cefa559bb0798410d99a5bba5b07cd1d EQUAL", + "", + "OK", + "Very basic P2SH" + ], + [ + "0x00", + "SIZE 0 EQUALVERIFY DROP TRUE", + "", + "OK", + "Basic OP_0 execution" + ], + [ + "Numeric pushes" + ], + [ + "-1", + "0x4f EQUAL", + "", + "OK", + "OP1_NEGATE pushes 0x81" + ], + [ + "1", + "0x51 EQUAL", + "", + "OK", + "OP_1 pushes 0x01" + ], + [ + "2", + "0x52 EQUAL", + "", + "OK", + "OP_2 pushes 0x02" + ], + [ + "3", + "0x53 EQUAL", + "", + "OK", + "OP_3 pushes 0x03" + ], + [ + "4", + "0x54 EQUAL", + "", + "OK", + "OP_4 pushes 0x04" + ], + [ + "5", + "0x55 EQUAL", + "", + "OK", + "OP_5 pushes 0x05" + ], + [ + "6", + "0x56 EQUAL", + "", + "OK", + "OP_6 pushes 0x06" + ], + [ + "7", + "0x57 EQUAL", + "", + "OK", + "OP_7 pushes 0x07" + ], + [ + "8", + "0x58 EQUAL", + "", + "OK", + "OP_8 pushes 0x08" + ], + [ + "9", + "0x59 EQUAL", + "", + "OK", + "OP_9 pushes 0x09" + ], + [ + "10", + "0x5a 
EQUAL", + "", + "OK", + "OP_10 pushes 0x0a" + ], + [ + "11", + "0x5b EQUAL", + "", + "OK", + "OP_11 pushes 0x0b" + ], + [ + "12", + "0x5c EQUAL", + "", + "OK", + "OP_12 pushes 0x0c" + ], + [ + "13", + "0x5d EQUAL", + "", + "OK", + "OP_13 pushes 0x0d" + ], + [ + "14", + "0x5e EQUAL", + "", + "OK", + "OP_14 pushes 0x0e" + ], + [ + "15", + "0x5f EQUAL", + "", + "OK", + "OP_15 pushes 0x0f" + ], + [ + "16", + "0x60 EQUAL", + "", + "OK", + "OP_16 pushes 0x10" + ], + [ + "Unevaluated non-minimal pushes are ignored" + ], + [ + "0", + "IF 0x4c 0x00 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA1 ignored" + ], + [ + "0", + "IF 0x4d 0x0000 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA2 ignored" + ], + [ + "0", + "IF 0x4c 0x00000000 ENDIF 1 ", + "", + "OK", + "non-minimal PUSHDATA4 ignored" + ], + [ + "0", + "IF 0x01 0x81 ENDIF 1 ", + "", + "OK", + "1NEGATE equiv" + ], + [ + "0", + "IF 0x01 0x01 ENDIF 1 ", + "", + "OK", + "OP_1 equiv" + ], + [ + "0", + "IF 0x01 0x02 ENDIF 1 ", + "", + "OK", + "OP_2 equiv" + ], + [ + "0", + "IF 0x01 0x03 ENDIF 1 ", + "", + "OK", + "OP_3 equiv" + ], + [ + "0", + "IF 0x01 0x04 ENDIF 1 ", + "", + "OK", + "OP_4 equiv" + ], + [ + "0", + "IF 0x01 0x05 ENDIF 1 ", + "", + "OK", + "OP_5 equiv" + ], + [ + "0", + "IF 0x01 0x06 ENDIF 1 ", + "", + "OK", + "OP_6 equiv" + ], + [ + "0", + "IF 0x01 0x07 ENDIF 1 ", + "", + "OK", + "OP_7 equiv" + ], + [ + "0", + "IF 0x01 0x08 ENDIF 1 ", + "", + "OK", + "OP_8 equiv" + ], + [ + "0", + "IF 0x01 0x09 ENDIF 1 ", + "", + "OK", + "OP_9 equiv" + ], + [ + "0", + "IF 0x01 0x0a ENDIF 1 ", + "", + "OK", + "OP_10 equiv" + ], + [ + "0", + "IF 0x01 0x0b ENDIF 1 ", + "", + "OK", + "OP_11 equiv" + ], + [ + "0", + "IF 0x01 0x0c ENDIF 1 ", + "", + "OK", + "OP_12 equiv" + ], + [ + "0", + "IF 0x01 0x0d ENDIF 1 ", + "", + "OK", + "OP_13 equiv" + ], + [ + "0", + "IF 0x01 0x0e ENDIF 1 ", + "", + "OK", + "OP_14 equiv" + ], + [ + "0", + "IF 0x01 0x0f ENDIF 1 ", + "", + "OK", + "OP_15 equiv" + ], + [ + "0", + "IF 0x01 0x10 ENDIF 1 ", 
+ "", + "OK", + "OP_16 equiv" + ], + [ + "Numeric minimaldata rules are only applied when a stack item is numerically evaluated; the push itself is allowed" + ], + [ + "0x01 0x00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x01 0x80", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0180", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0100", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0200", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0300", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0400", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0500", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0600", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0700", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0800", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0900", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0a00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0b00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0c00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0d00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0e00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x0f00", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "0x02 0x1000", + "1 VERIFY DROP TRUE", + "", + "OK" + ], + [ + "While not really correctly DER encoded, the empty signature is allowed" + ], + [ + "to provide a compact way to provide a delibrately invalid signature." 
+ ], + [ + "0", + "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", + "", + "OK" + ], + [ + "0", + "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", + "", + "OK" + ], + [ + "TRUE DATA_8 0x0000000000000080", + "CHECKSEQUENCEVERIFY", + "", + "OK", + "CSV passes if stack top bit 1 << 63 is set" + ], + [ + "", + "DEPTH", + "", + "EVAL_FALSE", + "Test the test: we should have an empty stack after scriptSig evaluation" + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE", + "and multiple spaces should not change that." + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + " ", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "DEPTH", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP", + "", + "EVAL_FALSE" + ], + [ + "", + "NOP DEPTH", + "", + "EVAL_FALSE" + ], + [ + "0x4c01", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA1 with not enough bytes" + ], + [ + "0x4d0200ff", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA2 with not enough bytes" + ], + [ + "0x4e03000000ffff", + "0x01 NOP", + "", + "BAD_OPCODE", + "PUSHDATA4 with not enough bytes" + ], + [ + "1", + "IF 0x50 ENDIF 1", + "", + "BAD_OPCODE", + "0x50 is reserved" + ], + [ + "0x52", + "0x5f ADD 0x60 EQUAL", + "", + "EVAL_FALSE", + "0x51 through 0x60 push 1 through 16 onto stack" + ], + [ + "0", + "NOP", + "", + "EVAL_FALSE", + "" + ], + [ + "1", + "IF VER ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VER non-functional" + ], + [ + "0", + "IF VERIF ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VERIF illegal everywhere" + ], + [ + "0", + "IF ELSE 1 ELSE VERIF ENDIF", + "", + "BAD_OPCODE", + "VERIF illegal everywhere" + ], + [ + "0", + "IF VERNOTIF ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "VERNOTIF illegal everywhere" + ], + [ + "0", + "IF ELSE 1 ELSE VERNOTIF ENDIF", + "", + "BAD_OPCODE", + "VERNOTIF illegal 
everywhere" + ], + [ + "0", + "DUP IF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "IF 1 ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "DUP IF ELSE ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "IF 1 ELSE ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0", + "NOTIF ELSE 1 ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "IF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1 0", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "IF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 1", + "NOTIF IF 1 ELSE 0 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1 1", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "0 0", + "NOTIF IF 1 ELSE 0 ENDIF ELSE IF 0 ELSE 1 ENDIF ENDIF", + "", + "EVAL_FALSE" + ], + [ + "1", + "IF RETURN ELSE ELSE 1 ENDIF", + "", + "OP_RETURN", + "Multiple ELSEs" + ], + [ + "1", + "IF 1 ELSE ELSE RETURN ENDIF", + "", + "OP_RETURN" + ], + [ + "1", + "ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "Malformed IF/ELSE/ENDIF sequence" + ], + [ + "1", + "ELSE ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "ENDIF ELSE", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "ENDIF ELSE IF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ENDIF ELSE", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ENDIF ELSE ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ENDIF ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "IF ELSE ELSE ENDIF ENDIF", + "", + "UNBALANCED_CONDITIONAL" + ], + [ + "1", + "RETURN", + "", + "OP_RETURN" + ], + [ + "1", + "DUP IF RETURN ENDIF", + "", + "OP_RETURN" + ], + [ + "1", + "RETURN 'data'", + "", + "OP_RETURN", + "canonical prunable txout format" + ], + [ + "0", + "VERIFY 1", + "", + "VERIFY" + ], + [ + "1", + 
"VERIFY", + "", + "EVAL_FALSE" + ], + [ + "1", + "VERIFY 0", + "", + "EVAL_FALSE" + ], + [ + "", + "IFDUP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DROP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DUP DEPTH 0 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "DUP 1 ADD 2 EQUALVERIFY 0 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "", + "NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 0 NIP", + "", + "EVAL_FALSE" + ], + [ + "", + "OVER 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "PICK 19 EQUALVERIFY DEPTH 2 EQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "0 PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "-1 PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "0 PICK 20 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "1 PICK 21 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "2 PICK 22 EQUALVERIFY DEPTH 3 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "", + "0 ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "-1 ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "19 20 21", + "0 ROLL 20 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "1 ROLL 21 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "19 20 21", + "2 ROLL 22 EQUALVERIFY DEPTH 2 EQUAL", + "", + "EQUALVERIFY" + ], + [ + "", + "ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1 2 ROT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0 1", + "SWAP 1 EQUALVERIFY", + "", + "EQUALVERIFY" + ], + [ + "", + "TUCK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "TUCK 1", + "", + 
"INVALID_STACK_OPERATION" + ], + [ + "1 0", + "TUCK DEPTH 3 EQUALVERIFY SWAP 2DROP", + "", + "EVAL_FALSE" + ], + [ + "", + "2DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 2", + "3DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "2OVER 1 VERIFY DROP DROP DROP DROP TRUE", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2 3 2OVER 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "2SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2 3 2SWAP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "'a' 'b'", + "CAT", + "", + "DISABLED_OPCODE", + "CAT disabled" + ], + [ + "'a' 'b' 0", + "IF CAT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "CAT disabled" + ], + [ + "'abc' 1 1", + "SUBSTR", + "", + "DISABLED_OPCODE", + "SUBSTR disabled" + ], + [ + "'abc' 1 1 0", + "IF SUBSTR ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "SUBSTR disabled" + ], + [ + "'abc' 2 0", + "IF LEFT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "LEFT disabled" + ], + [ + "'abc' 2 0", + "IF RIGHT ELSE 1 ENDIF", + "", + "DISABLED_OPCODE", + "RIGHT disabled" + ], + [ + "", + "SIZE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NOP", + "", + "EMPTY_STACK", + "Checks EMPTY_STACK error" + ], + [ + "'abc'", + "INVERT VERIFY TRUE", + "", + "DISABLED_OPCODE", + "INVERT disabled" + ], + [ + "1 2 0", + "IF AND ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "AND disabled" + ], + [ + "1 2 0", + "IF OR ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "OR disabled" + ], + [ + "1 2 0", + "IF XOR ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "XOR disabled" + ], + [ + "2 0", + "IF 2MUL ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "2MUL disabled" + ], + [ + "2 0", + "IF 2DIV ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "2DIV disabled" + ], + [ + "2 2 0", + "IF MUL ELSE 1 ENDIF NOP", + "", + 
"DISABLED_OPCODE", + "MUL disabled" + ], + [ + "2 2 0", + "IF DIV ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "DIV disabled" + ], + [ + "2 2 0", + "IF MOD ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "MOD disabled" + ], + [ + "2 2 0", + "IF LSHIFT ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "LSHIFT disabled" + ], + [ + "2 2 0", + "IF RSHIFT ELSE 1 ENDIF NOP", + "", + "DISABLED_OPCODE", + "RSHIFT disabled" + ], + [ + "", + "EQUAL NOT", + "", + "INVALID_STACK_OPERATION", + "EQUAL must error when there are no stack items" + ], + [ + "0", + "EQUAL NOT", + "", + "INVALID_STACK_OPERATION", + "EQUAL must error when there are not 2 stack items" + ], + [ + "0 1", + "EQUAL", + "", + "EVAL_FALSE" + ], + [ + "1 1", + "ADD 0 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "11 1", + "ADD 12 SUB 11 EQUAL", + "", + "EVAL_FALSE" + ], + [ + "2147483648 0", + "ADD NOP", + "", + "UNKNOWN_ERROR", + "arithmetic operands must be in range [-2^31...2^31] " + ], + [ + "-2147483648 0", + "ADD NOP", + "", + "UNKNOWN_ERROR", + "arithmetic operands must be in range [-2^31...2^31] " + ], + [ + "2147483647", + "DUP ADD 4294967294 NUMEQUAL", + "", + "UNKNOWN_ERROR", + "NUMEQUAL must be in numeric range" + ], + [ + "'abcdef'", + "NOT 0 EQUAL", + "", + "UNKNOWN_ERROR", + "NOT is an arithmetic operand" + ], + [ + "2", + "DUP MUL 4 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "DUP DIV 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "2MUL 4 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2", + "2DIV 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "7 3", + "MOD 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2 2", + "LSHIFT 8 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "2 1", + "RSHIFT 1 EQUAL", + "", + "DISABLED_OPCODE", + "disabled" + ], + [ + "0x50", + "1", + "", + "BAD_OPCODE", + "opcode 0x50 is reserved" + ], + [ + "1", + "IF 0xb2 ELSE 1 ENDIF", + "", + "BAD_OPCODE", + "opcodes above 
OP_CHECKSEQUENCEVERIFY invalid if executed" + ], + [ + "1", + "IF 0xb3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xb4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xb5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xb6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xb7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xb8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xb9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xba ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xbb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xbc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xbd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xbe ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xbf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xc9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xca ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xce ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xcf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + 
], + [ + "1", + "IF 0xd2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xd9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xda ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xde ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xdf ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe1 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xe9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xea ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xeb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xec ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xed ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xee ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xef ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf0 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf1 ELSE 1 ENDIF", + "", 
+ "BAD_OPCODE" + ], + [ + "1", + "IF 0xf2 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf3 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf4 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf5 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf6 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf7 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf8 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xf9 ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfa ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfb ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfc ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfd ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xfe ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "1", + "IF 0xff ELSE 1 ENDIF", + "", + "BAD_OPCODE" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'", + "", + "PUSH_SIZE", + ">520 byte push" + ], + [ + "0", + "IF 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' ENDIF 1", + "", + "PUSH_SIZE", + ">520 byte push in non-executed IF branch" + ], + [ + "1", + "0x61616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "OP_COUNT", + ">201 opcodes executed. 0x61 is NOP" + ], + [ + "0", + "IF 0x6161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161 ENDIF 1", + "", + "OP_COUNT", + ">201 opcodes including non-executed IF branch. 
0x61 is NOP" + ], + [ + "", + "1 2 3 4 5 6 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", + "", + "STACK_SIZE", + ">244 stack size (0x6f is 3DUP)" + ], + [ + "", + "1 TOALTSTACK 2 TOALTSTACK 3 4 5 6 7 8 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f", + "", + "STACK_SIZE", + ">244 stack+altstack size" + ], + [ + "", + "0 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 
'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb' 0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f 2DUP 0x616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161", + "", + "SCRIPT_SIZE", + "10,001-byte scriptPubKey" + ], + [ + "1", + "VER", + "", + "BAD_OPCODE", + "OP_VER is reserved" + ], + [ + "1", + "VERIF", + "", + "BAD_OPCODE", + "OP_VERIF is reserved" + ], + [ + "1", + "VERNOTIF", + "", + "BAD_OPCODE", + "OP_VERNOTIF is reserved" + ], + [ + "1", + "RESERVED", + "", + "BAD_OPCODE", + "OP_RESERVED is reserved" + ], + [ + "1", + "RESERVED1", + "", + "BAD_OPCODE", + "OP_RESERVED1 is reserved" + ], + [ + "1", + "RESERVED2", + "", + "BAD_OPCODE", + "OP_RESERVED2 is reserved" + ], + [ + "1", + 
"0xb2", + "", + "BAD_OPCODE", + "0xb2 == OP_CHECKSEQUENCEVERIFY + 1" + ], + [ + "2147483648", + "1ADD 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 5-byte integers" + ], + [ + "2147483648", + "NEGATE 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 5-byte integers" + ], + [ + "-2147483648", + "1ADD 1", + "", + "UNKNOWN_ERROR", + "Because we use a sign bit, -2147483648 is also 5 bytes" + ], + [ + "2147483647", + "1ADD 1SUB 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 5-byte integers, even if the result is 4-bytes" + ], + [ + "2147483648", + "1SUB 1", + "", + "UNKNOWN_ERROR", + "We cannot do math on 5-byte integers, even if the result is 4-bytes" + ], + [ + "2147483648 1", + "BOOLOR 1", + "", + "UNKNOWN_ERROR", + "We cannot do BOOLOR on 5-byte integers (but we can still do IF etc)" + ], + [ + "2147483648 1", + "BOOLAND 1", + "", + "UNKNOWN_ERROR", + "We cannot do BOOLAND on 5-byte integers" + ], + [ + "1", + "1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "ENDIF without IF" + ], + [ + "1", + "IF 1", + "", + "UNBALANCED_CONDITIONAL", + "IF without ENDIF" + ], + [ + "", + "IF 1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "The following tests check the if(stack.size() < N) tests in each opcode" + ], + [ + "", + "NOTIF 1 ENDIF", + "", + "UNBALANCED_CONDITIONAL", + "They are here to catch copy-and-paste errors" + ], + [ + "", + "VERIFY 1", + "", + "INVALID_STACK_OPERATION", + "Most of them are duplicated elsewhere," + ], + [ + "", + "TOALTSTACK 1", + "", + "INVALID_STACK_OPERATION", + "but, hey, more is always better, right?" 
+ ], + [ + "1", + "FROMALTSTACK", + "", + "INVALID_ALTSTACK_OPERATION" + ], + [ + "1", + "2DROP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "2DUP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "3DUP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1", + "2OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 1 1", + "2ROT", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1", + "2SWAP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "IFDUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DROP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "DUP 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NIP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "OVER", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 3", + "PICK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0", + "PICK 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1 1 3", + "ROLL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "0", + "ROLL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "ROT", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SWAP", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "TUCK", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SIZE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "EQUAL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "EQUALVERIFY 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1ADD 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "1SUB 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NEGATE 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "ABS 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "NOT 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "0NOTEQUAL 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "ADD", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "SUB", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "BOOLAND", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "BOOLOR", + "", + "INVALID_STACK_OPERATION" 
+ ], + [ + "1", + "NUMEQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMEQUALVERIFY 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "NUMNOTEQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "LESSTHAN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "GREATERTHAN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "LESSTHANOREQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "GREATERTHANOREQUAL", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "MIN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1", + "MAX", + "", + "INVALID_STACK_OPERATION" + ], + [ + "1 1", + "WITHIN", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "SHA256 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "", + "BLAKE2B 1", + "", + "INVALID_STACK_OPERATION" + ], + [ + "Increase CHECKSIG and CHECKMULTISIG negative test coverage" + ], + [ + "", + "CHECKSIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKSIG must error when there are no stack items" + ], + [ + "0", + "CHECKSIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKSIG must error when there are not 2 stack items" + ], + [ + "", + "CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are no stack items" + ], + [ + "", + "-1 CHECKMULTISIG NOT", + "", + "PUBKEY_COUNT", + "CHECKMULTISIG must error when the specified number of pubkeys is negative" + ], + [ + "", + "1 CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are not enough pubkeys on the stack" + ], + [ + "", + "-1 0 CHECKMULTISIG NOT", + "", + "SIG_COUNT", + "CHECKMULTISIG must error when the specified number of signatures is negative" + ], + [ + "", + "1 'pk1' 1 CHECKMULTISIG NOT", + "", + "INVALID_STACK_OPERATION", + "CHECKMULTISIG must error when there are not enough signatures on the stack" + ], + [ + "", + "0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 
CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 
CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG 0 0 CHECKMULTISIG", + "", + "OP_COUNT", + "202 CHECKMULTISIGS, fails due to 201 op limit" + ], + [ + "", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 
CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIG", + "", + "OP_COUNT", + "Fails due to 201 script operation limit" + ], + [ + "1", + "NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP NOP 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY 0 0 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 20 CHECKMULTISIGVERIFY", + "", + "OP_COUNT", + "" + ], + [ + "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21", + "21 CHECKMULTISIG 1", + "", + "PUBKEY_COUNT", + "nPubKeys > 20" + ], + [ + "0 'sig' 1 0", + "CHECKMULTISIG 1", + "", + "SIG_COUNT", + "nSigs > nPubKeys" + ], + [ + "NOP 0x01 1", + 
"BLAKE2B 0x20 0xda1745e9b549bd0bfa1a569971c77eba30cd5a4b EQUAL", + "", + "SIG_PUSHONLY", + "Tests for Script.IsPushOnly()" + ], + [ + "0 0x01 0x50", + "BLAKE2B 0x20 0xece424a6bb6ddf4db592c0faed60685047a361b1 EQUAL", + "", + "BAD_OPCODE", + "OP_RESERVED in P2SH should fail" + ], + [ + "0 0x01", + "VER BLAKE2B 0x20 0x0f4d7845db968f2a81b530b6f3c1d6246d4c7e01 EQUAL", + "", + "BAD_OPCODE", + "OP_VER in P2SH should fail" + ], + [ + "0x00", + "'00' EQUAL", + "", + "EVAL_FALSE", + "Basic OP_0 execution" + ], + [ + "MINIMALDATA enforcement for PUSHDATAs" + ], + [ + "0x4c 0x00", + "DROP 1", + "", + "MINIMALDATA", + "Empty vector minimally represented by OP_0" + ], + [ + "0x01 0x81", + "DROP 1", + "", + "MINIMALDATA", + "-1 minimally represented by OP_1NEGATE" + ], + [ + "0x01 0x01", + "DROP 1", + "", + "MINIMALDATA", + "1 to 16 minimally represented by OP_1 to OP_16" + ], + [ + "0x01 0x02", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x03", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x04", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x05", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x06", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x07", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x08", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x09", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0a", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0b", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0c", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0d", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0e", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x0f", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x01 0x10", + "DROP 1", + "", + "MINIMALDATA" + ], + [ + "0x4c 0x48 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA1 of 72 bytes minimally represented by 
direct push" + ], + [ + "0x4d 0xFF00 0x111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA2 of 255 bytes minimally represented by PUSHDATA1" + ], + [ + "0x4e 0x00010000 0x11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "DROP 1", + "", + "MINIMALDATA", + "PUSHDATA4 of 256 bytes minimally represented by PUSHDATA2" + ], + [ + "MINIMALDATA enforcement for numeric arguments" + ], + [ + "0x01 0x00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x02 0x0000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x01 0x80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "0x80 (negative zero) numequals 0" + ], + [ + "0x02 0x0080", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 0" + ], + [ + "0x02 0x0500", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 5" + ], + [ + "0x03 0x050000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals 5" + ], + [ + "0x02 0x0580", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "numequals -5" + ], + [ + "0x03 0x050080", + "NOT DROP 1", + 
"", + "UNKNOWN_ERROR", + "numequals -5" + ], + [ + "0x03 0xff7f80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffff" + ], + [ + "0x03 0xff7f00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xff7f" + ], + [ + "0x04 0xffff7f80", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffffff" + ], + [ + "0x04 0xffff7f00", + "NOT DROP 1", + "", + "UNKNOWN_ERROR", + "Minimal encoding is 0xffff7f" + ], + [ + "Test every numeric-accepting opcode for correct handling of the numeric minimal encoding rule" + ], + [ + "1 0x02 0x0000", + "PICK DROP", + "", + "UNKNOWN_ERROR" + ], + [ + "1 0x02 0x0000", + "ROLL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "1ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "1SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "NEGATE DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "ABS DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "NOT DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000", + "0NOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "ADD DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "SUB DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "BOOLAND DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "BOOLAND DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "BOOLOR DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "BOOLOR DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 1", + "NUMEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMEQUALVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "NUMEQUALVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "NUMNOTEQUAL DROP 
1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "NUMNOTEQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "LESSTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "LESSTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "GREATERTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "GREATERTHAN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "LESSTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "LESSTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "GREATERTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "GREATERTHANOREQUAL DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "MIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "MIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "MAX DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "MAX DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0 0", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000 0", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0 0x02 0x0000", + "WITHIN DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0 1", + "CHECKMULTISIG DROP 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0 0x02 0x0000", + "CHECKMULTISIGVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "0x02 0x0000 0", + "CHECKMULTISIGVERIFY 1", + "", + "UNKNOWN_ERROR" + ], + [ + "Check MINIMALIF" + ], + [ + "2", + "IF TRUE ELSE FALSE", + "", + "MINIMALIF" + ], + [ + "2", + "NOTIF TRUE ELSE FALSE", + "", + "MINIMALIF" + ], + [ + "Order of CHECKMULTISIG evaluation tests, inverted by swapping the order of" + ], + [ + "pubkeys/signatures so they fail due to the STRICTENC rules on validly encoded" + ], + [ + "signatures and pubkeys." 
+ ], + [ + "0x41 0x833682d4f60cc916a22a2c263e658fa662c49badb1e2a8c6208987bf99b1abd740498371480069e7a7a6e7471bf78c27bd9a1fd04fb212a92017346250ac187b01 0x41 0xea4a8d20562a950f4695dc24804565482e9fa111704886179d0c348f2b8a15fe691a305cd599c59c131677146661d5b98cb935330989a85f33afc70d0a21add101", + "2 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 0 2 CHECKMULTISIG NOT", + "", + "PUBKEYFORMAT", + "2-of-2 CHECKMULTISIG NOT with the first pubkey invalid, and both signatures validly encoded." + ], + [ + "CHECKSEQUENCEVERIFY tests" + ], + [ + "", + "CHECKSEQUENCEVERIFY", + "", + "INVALID_STACK_OPERATION", + "CSV automatically fails on a empty stack" + ], + [ + "0", + "CHECKSEQUENCEVERIFY", + "", + "UNSATISFIED_LOCKTIME", + "CSV fails if stack top bit 1 << 31 is set and the tx version < 2" + ], + [ + "4294967296", + "CHECKSEQUENCEVERIFY", + "", + "UNSATISFIED_LOCKTIME", + "CSV fails if stack top bit 1 << 31 is not set, and tx version < 2" + ], + [ + "NULLFAIL should cover all signatures and signatures only" + ], + [ + "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", + "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", + "", + "OK", + "BIP66 and NULLFAIL-compliant" + ], + [ + "0x09 0x300602010102010101 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0", + "0x01 0x14 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0x01 0x14 CHECKMULTISIG NOT", + "", + "NULLFAIL", + "BIP66-compliant but not NULLFAIL-compliant 4" + ], + [ + "The End" + ] +] diff --git a/domain/consensus/utils/txscript/data/sighash.json b/domain/consensus/utils/txscript/data/sighash.json new file mode 100644 index 0000000..35f7f8a --- /dev/null +++ b/domain/consensus/utils/txscript/data/sighash.json @@ -0,0 +1,3505 @@ +[ + [ + "raw_transaction, script, input_index, hashType, signature_hash (result)" + ], + [ + 
"907c2bc503ade11cc3b04eb2918b6f547b0630ab569273824748c87ea14b0696526c66ba74020000000165fd1f9bdd4ef073c7afc4ae00da8a66f429c917a0081ad1e1dabce28d373eab81d8628de802000000076a525352000052ad042b5f25efb33beec9f3364e8a9139e8439d9d7e26529c3c30b6c3fd89f8684cfd68ea020000000753526500636a52599ac2fe02a526ed040000000008535300516352515164370e010000000003006300ab2ec229", + "", + 2, + 1864164639, + "31af167a6cf3f9d5f6875caa4d31704ceb0eba078d132b78dab52c3b8997317e" + ], + [ + "a0aa3126041621a6dea5b800141aa696daf28408959dfb2df96095db9fa425ad3f427f2f6103000000015360290e9c6063fa26912c2e7fb6a0ad80f1c5fea1771d42f12976092e7a85a4229fdb6e890000000000c109f6e47688ac0e4682988785744602b8c87228fcef0695085edf19088af1a9db126e93000000000665516aac536affffffff8fe53e0806e12dfd05d67ac68f4768fdbe23fc48ace22a5aa8ba04c96d58e2750300000007ac51ac635153650524aa680455ce7b000000000000499e50030000000008636a00ac526563ac5051ee030000000001acd2b6fe000000000003516563910fb6b5", + "65", + 0, + -1391424484, + "b74712825433040244a0fcb8aff24d98addeac56dc0581744faa97f95981c0c4" + ], + [ + "6e7e9d4b04ce17afa1e8546b627bb8d89a6a7fefd9d892ec8a192d79c2ceafc01694a6a7e7030000000953ac6a51006353636a33bced1544f797f08ceed02f108da22cd24c9e7809a446c61eb3895914508ac91f07053a01000000045163516affffffff11dc54eee8f9e4ff0bcf6b1a1a35b1cd10d63389571375501af7444073bcec3c02000000036a53514a821f0ce3956e235f71e4c69d91abe1e93fb703bd33039ac567249ed339bf0ba0883ef30000000008006365000065ac654bec3cc504bcf4990200000000036a52ac64eb060100000000076a6a5351650053bbbc130100000000036a6a53d6e1380100000000026a51c4e509b8", + "ac655151", + 0, + 479279909, + "7249457d31183868014df9f90fb86ebc439fc6d387c0951b5ac7d2b4ad6b222e" + ], + [ + "73107cbd025c22ebc8c3e0a47b2a760739216a528de8d4dab5d45cbeb3051cebae73b01ca102000000066353656a636affffffffe26816dffc670841e6a6c8c61c586da401df1261a330a6c6b3dd9f9a0789bc9e000000000800ac6552ac6aac51ffffffff0174a8f0010000000004ac52515100000000", + "5163ac63635151ac", + 1, + 1190874345, + 
"06e328de263a87b09beabe222a21627a6ea5c7f560030da31610c4611f4a46bc" + ], + [ + "e93bbf6902be872933cb987fc26ba0f914fcfc2f6ce555258554dd9939d12032a8536c8802030000000453ac5353eabb6451e074e6fef9de211347d6a45900ea5aaf2636ef7967f565dce66fa451805c5cd10000000003525253ffffffff047dc3e6020000000006516565ac656aec9eea010000000001633e46e600000000000015080a03000000000000000000", + "5300ac6a536a", + 1, + -886562767, + "13b4c982c2e4943a4e5a2174ad3367878e599b3aea889effeb3d19db6daadc80" + ], + [ + "50818f4c01b464538b1e7e7f5ae4ed96ad23c68c830e78da9a845bc19b5c3b0b20bb82e5e9030000000763526a63655352ffffffff023b3f9c040000000008630051516a6a5163a83caf0100000000045365510000000000", + "6aac", + 0, + 946795545, + "5742901d59e51c1957fd30f907083609eb2d009bd6cb3a3a8c65517d33185a0c" + ], + [ + "a93e93440250f97012d466a6cc24839f572def241c814fe6ae94442cf58ea33eb0fdd9bcc1030000000600636a0065acffffffff5dee3a6e7e5ad6310dea3e5b3ddda1a56bf8de7d3b75889fc024b5e233ec10f80300000006ac5363525353ffffffff0160468b04000000000800526a5300ac526a00000000", + "ac00636a53", + 1, + 1773442520, + "5c9d3a2ce9365bb72cfabbaa4579c843bb8abf200944612cf8ae4b56a908bcbd" + ], + [ + "ce7d371f0476dda8b811d4bf3b64d5f86204725deeaa3937861869d5b2766ea7d17c57e40b0100000003535265ffffffff7e7e9188f76c34a46d0bbe856bde5cb32f089a07a70ea96e15e92abb37e479a101000000046552655225bcab06d1c2896709f364b1e372814d842c9c671356a1aa5ca4e060462c65ae55acc02d0000000005ac0063ac5281b33e332f96beebdbc6a379ebe6aea36af115c067461eb99d22ba1afbf59462b59ae0bd0200000003635365be15c23801724a1704000000000965006a65ac00000052ca555572", + "53530051", + 1, + 2030598449, + "c336b2f7d3702fbbdeffc014d106c69e3413c7c71e436ba7562d8a7a2871f181" + ], + [ + "d3b7421e011f4de0f1cea9ba7458bf3486bee722519efab711a963fa8c100970cf7488b7bb0200000003525352dcd61b300148be5d05000000000000000000", + "535251536aac536a", + 0, + -1960128125, + "29aa6d2d752d3310eba20442770ad345b7f6a35f96161ede5f07b33e92053e2a" + ], + [ + 
"04bac8c5033460235919a9c63c42b2db884c7c8f2ed8fcd69ff683a0a2cccd9796346a04050200000003655351fcad3a2c5a7cbadeb4ec7acc9836c3f5c3e776e5c566220f7f965cf194f8ef98efb5e3530200000007526a006552526526a2f55ba5f69699ece76692552b399ba908301907c5763d28a15b08581b23179cb01eac030000000653636a516351073942c2025aa98a05000000000565006aac65d7ffa6030000000004516a655200000000", + "53ac6365ac526a", + 1, + 764174870, + "f41e06c31cfcb5ee5b8e907fe8956af7891b1a61c05a7e81bd3f7e59eb4b8d91" + ], + [ + "c363a70c01ab174230bbe4afe0c3efa2d7f2feaf179431359adedccf30d1f69efe0c86ed39020000000151558648fe0231318b04000000000151662170000000000008ac5300006a63acac00000000", + "", + 0, + 2146479410, + "191ab180b0d753763671717d051f138d4866b7cb0d1d4811472e64de595d2c70" + ], + [ + "8d437a7304d8772210a923fd81187c425fc28c17a5052571501db05c7e89b11448b36618cd02000000026a6340fec14ad2c9298fde1477f1e8325e5747b61b7e2ff2a549f3d132689560ab6c45dd43c3010000000963ac00ac000051516a447ed907a7efffebeb103988bf5f947fc688aab2c6a7914f48238cf92c337fad4a79348102000000085352ac526a5152517436edf2d80e3ef06725227c970a816b25d0b58d2cd3c187a7af2cea66d6b27ba69bf33a0300000006000063526553f3f0d6140386815d0300000000026300de138f00000000000800525153515265ac1f87040300000000036aac6500000000", + "51", + 3, + -315779667, + "44d3386a47c4659b83316fc71735d0813fe3df6a336fd4a2665ff6510dc4b4fd" + ], + [ + "fd878840031e82fdbe1ad1d745d1185622b0060ac56638290ec4f66b1beef4450817114a2c0000000007516a6353650051ffffffff37b7a10322b5418bfd64fb09cd8a27ddf57731aeb1f1f920ffde7cb2dfb6cdb70300000008536a5365ac53515369ecc034f1594690dbe189094dc816d6d57ea75917de764cbf8eccce4632cbabe7e116cd0100000003515352ffffffff035777fc000000000003515200abe9140300000000050063005165bed6d1020000000006630053636365195e9110", + "635265", + 0, + 1729787658, + "b03c53df11ccf92ce40561f43170935d81acdc31e843fa6e964536c9b12583a7" + ], + [ + 
"f40a750702af06efff3ea68e5d56e42bc41cdb8b6065c98f1221fe04a325a898cb61f3d7ee030000000363acacffffffffb5788174aef79788716f96af779d7959147a0c2e0e5bfb6c2dba2df5b4b97894030000000965510065535163ac6affffffff0445e6fd0200000000096aac536365526a526aa6546b000000000007ac656a6552535141a0fd010000000000c897ea030000000007526500526a6a631b39dba3", + "005163ac", + 1, + -1778064747, + "cb5ff8056a9ee628c3a37bdeb46654924eed844f267121c8a713cb933325bb54" + ], + [ + "a63bc673049c75211aa2c09ecc38e360eaa571435fedd2af1116b5c1fa3d0629c269ecccbf0000000007ac65516352ac52ffffffffbf1a76fdda7f451a5f0baff0f9ccd0fe9136444c094bb8c544b1af0fa2774b06010000000463535253ffffffff13d6b7c3ddceef255d680d87181e100864eeb11a5bb6a3528cb0d70d7ee2bbbc02000000036a0052951241809623313b198bb520645c15ec96bfcc74a2b0f3db7ad61d455cc32db04afc5cc702000000016309c9ae25014d94730200000000026aac3bb1e803", + "", + 3, + -232881718, + "85eccb602ecba5358916c37f1fe4424a27bd543b591ff0f253117875277dacbd" + ], + [ + "4c565efe04e7d32bac03ae358d63140c1cfe95de15e30c5b84f31bb0b65bb542d637f49e0f010000000351536348ae32b31c7d3132030a510a1b1aacf7b7c3f19ce8dc49944ef93e5fa5fe2d356b4a73a00100000007ac635163ac00514c8bc57b6b844e04555c0a4f4fb426df139475cd2396ae418bc7015820e852f711519bc202000000076a00510000ac52488ff4aec72cbcfcc98759c58e20a8d2d9725aa4a80f83964e69bc4e793a4ff25cd75dc701000000086a52ac6aac5351532ec6b10802463e0200000000000553005265523e08680100000000002f39a6b0", + "", + 3, + 70712784, + "c6076b6a45e6fcfba14d3df47a34f6aadbacfba107e95621d8d7c9c0e40518ed" + ], + [ + "1233d5e703403b3b8b4dae84510ddfc126b4838dcb47d3b23df815c0b3a07b55bf3098110e010000000163c5c55528041f480f40cf68a8762d6ed3efe2bd402795d5233e5d94bf5ddee71665144898030000000965525165655151656affffffff6381667e78bb74d0880625993bec0ea3bd41396f2bcccc3cc097b240e5e92d6a01000000096363acac6a63536365ffffffff04610ad602000000000452516552e90d680200000000046351516ae30e9801000000000652520063656a671856010000000004ac6aac514c84e383", + "6a636300", + 1, + -114996813, + 
"544e8fa044e9dd49d7504be895038472026389a641108c80dfaa4bf5745e2536" + ], + [ + "0c69702103b25ceaed43122cc2672de84a3b9aa49872f2a5bb458e19a52f8cc75973abb9f102000000055365656aacffffffff3ffb1cf0f76d9e3397de0942038c856b0ebbea355dc9d8f2b06036e19044b0450100000000ffffffff4b7793f4169617c54b734f2cd905ed65f1ce3d396ecd15b6c426a677186ca0620200000008655263526551006a181a25b703240cce010000000003635253dee22903000000000865526a6a516a51005e12160200000000055252ac655200000000", + "6a516a63", + 1, + -2040012771, + "591e3bbba963126bd5f4f10b264e1696dfe5e2b40a90991127ad581ca6ab02d9" + ], + [ + "fd22692802db8ae6ab095aeae3867305a954278f7c076c542f0344b2591789e7e33e4d29f4020000000151ffffffffb9409129cfed9d3226f3b6bab7a2c83f99f48d039100eeb5796f00903b0e5e5e0100000005656552ac63d226abac0403e64900000000000551ac5100ac8035f10000000000095165006a63526a52510d42db030000000006635365ac6a6324ef59010000000003536a0000000000", + "536a52516aac6a", + 1, + 309309168, + "ecd699538b26e25365c8ab23c991f630bb221f2b325b3adc5d67aea8838f3d9a" + ], + [ + "a43f85f701ffa54a3cc57177510f3ea28ecb6db0d4431fc79171cad708a6054f6e5b4f89170000000008ac6a006a536551652bebeaa2013e779c05000000000665ac5363635100000000", + "ac", + 0, + 2028978692, + "58294f0d7f2e68fe1fd30c01764fe1619bcc7961d68968944a0e263af6550437" + ], + [ + "c2b0b99001acfecf7da736de0ffaef8134a9676811602a6299ba5a2563a23bb09e8cbedf9300000000026300ffffffff042997c50300000000045252536a2724370300000000066553536363ac6637520300000000016a6d5c900000000000046a6a526500000000", + "52ac525163515251", + 0, + -894181723, + "66b68d1cc93f59f362a2e0406b2ea86b33125e145e07ddb66129ca63d87b9b60" + ], + [ + 
"82f9f10304c17a9d954cf3380db817814a8c738d2c811f0412284b2c791ec75515f38c4f8c0200000001655729ca7db1b79abee66c8a757221f29280d0681355cb522149525f36da760548dbd7080a0100000001510b477bd9ce9ad5bb81c0306273a3a7d051e053f04ecf3a1dbeda543e20601a5755c0cfae030000000451ac656affffffff71141a04134f6c292c2e0d415e6705dfd8dcee892b0d0807828d5aeb7d11f5ef0300000001520b6c6dc802a6f3dd0000000000046a515163bfb6800300000000015300000000", + "", + 3, + -635779440, + "cb7c2a8e3ebbb54a41e4bae79a06183022bf018199eb7772cc0c357c5018892f" + ], + [ + "8edcf5a1014b604e53f0d12fe143cf4284f86dc79a634a9f17d7e9f8725f7beb95e8ffcd2403000000036aac52ffffffff01c402b50400000000046a63525100000000", + "6351525251ac6a", + 0, + 1520147826, + "6f1ebdd03eceef3201911027eeeec1a7ca696cb5fe56af6e9ce3690e9bebcafd" + ], + [ + "2074bad5011847f14df5ea7b4afd80cd56b02b99634893c6e3d5aaad41ca7c8ee8e5098df003000000026a6affffffff018ad59700000000000900ac656a526551635300000000", + "65635265", + 0, + -1804671183, + "663c999a52288c9999bff36c9da2f8b78d5c61b8347538f76c164ccba9868d0a" + ], + [ + "7100b11302e554d4ef249ee416e7510a485e43b2ba4b8812d8fe5529fe33ea75f36d392c4403000000020000ffffffff3d01a37e075e9a7715a657ae1bdf1e44b46e236ad16fd2f4c74eb9bf370368810000000007636553ac536365ffffffff01db696a0400000000065200ac656aac00000000", + "63005151", + 0, + -1210499507, + "b9c3aee8515a4a3b439de1ffc9c156824bda12cb75bfe5bc863164e8fd31bd7a" + ], + [ + "02c1017802091d1cb08fec512db7b012fe4220d57a5f15f9e7676358b012786e1209bcff950100000003ac6352ffffffff799bc282724a970a6fea1828984d0aeb0f16b67776fa213cbdc4838a2f1961a3010000000751516a5365526affffffff016c7b4b03000000000765ac5253ac5352b70195ad", + "65655200516a", + 0, + -241626954, + "53756e3d2ba9173c6688f69eed52c0af5acda26c2b075791774b962bb7c510aa" + ], + [ + "cb3178520136cd294568b83bb2520f78fecc507898f4a2db2674560d72fd69b9858f75b3b502000000066aac00515100ffffffff03ab005a01000000000563526363006e3836030000000000fbda32000000000005650065006500000000", + "516a0063006a5300", + 0, + 1182109299, + 
"9a97d60d09aaae9887640a41ae35eb6e37d28a025ee1168afb78c2f3c03a2891" + ], + [ + "18a4b0c004702cf0e39686ac98aab78ad788308f1d484b1ddfe70dc1997148ba0e28515c310300000000ffffffff05275a52a23c59da91129093364e275da5616c4070d8a05b96df5a2080ef259500000000086aac51656a6aac5366e64966b3b36a07dd2bb40242dd4a3743d3026e7e1e0d9e9e18f11d068464b989661321030000000265ac383339c4fae63379cafb63b0bab2eca70e1f5fc7d857eb5c88ccd6c0465093924bba8b2a000000000300636ab5e0545402bc2c4c010000000000cd41c002000000000000000000", + "ac635253656a00", + 3, + 2052372230, + "32db877b6b1ca556c9e859442329406f0f8246706522369839979a9f7a235a32" + ], + [ + "1d9c5df20139904c582285e1ea63dec934251c0f9cf5c47e86abfb2b394ebc57417a81f67c010000000353515222ba722504800d3402000000000353656a3c0b4a0200000000000fb8d20500000000066300005200516462f30400000000015200000000", + "65", + 0, + -210854112, + "ee638e53d2ffa8bd15bb833ee5046bb229508506d2a13817a62ec35a93d5e3f6" + ], + [ + "4504cb1904c7a4acf375ddae431a74de72d5436efc73312cf8e9921f431267ea6852f9714a01000000066a656a656553a2fbd587c098b3a1c5bd1d6480f730a0d6d9b537966e20efc0e352d971576d0f87df0d6d01000000016321aeec3c4dcc819f1290edb463a737118f39ab5765800547522708c425306ebfca3f396603000000055300ac656a1d09281d05bfac57b5eb17eb3fa81ffcedfbcd3a917f1be0985c944d473d2c34d245eb350300000007656a51525152ac263078d9032f470f0500000000066aac00000052e12da60200000000003488410200000000066365006300539981e432", + "52536a52526a", + 1, + -31909119, + "c7391b817e7c725f54f1a7f19d27bf014b86f83eb900d6c91a0c9c31133919e9" + ], + [ + "14bc7c3e03322ec0f1311f4327e93059c996275302554473104f3f7b46ca179bfac9ef753503000000016affffffff9d405eaeffa1ca54d9a05441a296e5cc3a3e32bb8307afaf167f7b57190b07e0030000000351526345533aa242c61bca90dd15d46079a0ab0841d85df67b29ba87f2393cd764a6997c372b55030000000452005263ffffffff0250f40e02000000000651516a0063630e95ab0000000000046a5151ac00000000", + "6a65005151", + 0, + -1460947095, + "aa418d096929394c9147be8818d8c9dafe6d105945ab9cd7ec682df537b5dd79" + ], + [ + 
"2b3bd0dd04a1832f893bf49a776cd567ec4b43945934f4786b615d6cb850dfc0349b33301a000000000565ac000051cf80c670f6ddafab63411adb4d91a69c11d9ac588898cbfb4cb16061821cc104325c895103000000025163ffffffffa9e2d7506d2d7d53b882bd377bbcc941f7a0f23fd15d2edbef3cd9df8a4c39d10200000009ac63006a52526a5265ffffffff44c099cdf10b10ce87d4b38658d002fd6ea17ae4a970053c05401d86d6e75f9900000000076353526a525263ffffffff035af69c01000000000100ba9b8b0400000000004cead10500000000026a520b77d667", + "52ac526553", + 3, + -1955078165, + "eb9ceecc3b401224cb79a44d23aa8f428e29f1405daf69b4e01910b848ef1523" + ], + [ + "35df11f004a48ba439aba878fe9df20cc935b4a761c262b1b707e6f2b33e2bb7565cd68b130000000000ffffffffb2a2f99abf64163bb57ca900500b863f40c02632dfd9ea2590854c5fb4811da90200000006ac006363636affffffffaf9d89b2a8d2670ca37c8f7c140600b81259f2e037cb4590578ec6e37af8bf200000000004ac6a655270a4751eb551f058a93301ffeda2e252b6614a1fdd0e283e1d9fe53c96c5bbaafaac57b8030000000153ffffffff020d9f3b02000000000100ed7008030000000003ac000000000000", + "ac", + 3, + 593793071, + "57037535cf3b9bcf7f4fa9f9d48c2d89c7f5c09aeac29ae3fa49ad799b88e5e8" + ], + [ + "a08ff466049fb7619e25502ec22fedfb229eaa1fe275aa0b5a23154b318441bf547989d05100000000045363636affffffff2b0e335cb5383886751cdbd993dc0720817745a6b1c9b8ab3d15547fc9aafd03000000000965656a536a52656a532b53d10584c290d3ac1ab74ab0a19201a4a039cb59dc58719821c024f6bf2eb26322b33f010000000865ac6aac00536353ffffffff048decba6ebbd2db81e416e39dde1f821ba69329725e702bcdea20c5cc0ecc64020000000763635351ac6551466e377b0468c0fa0000000000055153ac6a513461c6010000000008636a636365535100eeb3dc010000000006526a52ac516a43f362010000000005000063536500000000", + "0063516a", + 1, + -1158911348, + "90b014f63f99e2ab93bb7132f5b717bedceda3c5bf71be7c41d51f4e0b288971" + ], + [ + 
"5ac2f17d03bc902e2bac2469907ec7d01a62b5729340bc58c343b7145b66e6b97d434b30fa000000000163ffffffff44028aa674192caa0d0b4ebfeb969c284cb16b80c312d096efd80c6c6b094cca000000000663acac516a52ffffffff10c809106e04b10f9b43085855521270fb48ab579266e7474657c6c625062d2d030000000351636595a0a97004a1b69603000000000365005352ad68010000000008636a5263acac5100da7105010000000001ac90325200000000000000000000", + "6a6a516a63526353", + 2, + 1518400956, + "9821dc391a0efc812c783477fd94b696c3c12d4207ec73866f05411cc444ea51" + ], + [ + "aeb2e11902dc3770c218b97f0b1960d6ee70459ecb6a95eff3f05295dc1ef4a0884f10ba460300000005516352526393e9b1b3e6ae834102d699ddd3845a1e159aa7cf7635edb5c02003f7830fee3788b795f20100000008006a526553ac006ad8809c570469290e040000000003000000b10fd5040000000005655263ac53630b180300000000009d9993040000000002516300000000", + "5351ac6a65", + 0, + 1084852870, + "7e5d1656f7f92f7cd3a759e3c661f16c2b49b06ca53198d3130b598e557c6c2e" + ], + [ + "9860ca9a0294ff4812534def8c3a3e3db35b817e1a2ddb7f0bf673f70eab71bb79e90a2f3100000000086a636551acac5165ffffffffed4d6d3cd9ff9b2d490e0c089739121161a1445844c3e204296816ab06e0a83702000000035100ac88d0db5201c3b59a050000000004ac6a005100000000", + "535263006a526a", + 1, + -962088116, + "2c9b60742e1aa409973c70e0ba6a4ce0dcdb350125002f98bdee95da25ee2c56" + ], + [ + "4ddaa680026ec4d8060640304b86823f1ac760c260cef81d85bd847952863d629a3002b54b0200000007526365636a656a65457861fc6c24bdc760c8b2e906b6656edaf9ed22b5f50e1fb29ec076ceadd9e8ebcb6b000000000152ffffffff033ff04f00000000000551526a00657a1d900300000000002153af040000000003006a6300000000", + "526a53ac", + 0, + 1055317633, + "7f21b62267ed52462e371a917eb3542569a4049b9dfca2de3c75872b39510b26" + ], + [ + "01e76dcd02ad54cbc8c71d68eaf3fa7c883b65d74217b30ba81f1f5144ef80b706c0dc82ca0000000002526a078ec18bcd0514825feced2e8b8ea1ccb34429fae41c70cc0b73a2799e85603613c68700020000000763636365536a53ffffffff043acea90000000000016ad20e1803000000000100fa00830200000000056352515351e864ee000000000007655352536a6551d0c46672", + "6a6365ac", + 0, + 
-1420559003, + "769129ee13526557a2ff8ed9143a781b6c409291e05d2f0afafbe68f39469dca" + ], + [ + "fa00b26402670b97906203434aa967ce1559d9bd097d56dbe760469e6032e7ab61accb54160100000006635163630052fffffffffe0d3f4f0f808fd9cfb162e9f0c004601acf725cd7ea5683bbdc9a9a433ef15a020000000452536563d09c7bef049040f305000000000153a7c7b9020000000003ac6352847a250300000000045300655390ed8001000000000400655352860671d4", + "53656552", + 0, + 799022412, + "4137db1db2a52bf9b7d3f65d0ef847932b48601a9badb57cac403313793a6741" + ], + [ + "cb5c06dc01b022ee6105ba410f0eb12b9ce5b5aa185b28532492d839a10cef33d06134b91b010000000153ffffffff02cec0530400000000005e1e4504000000000865656551acacac6a00000000", + "53", + 0, + -1514251329, + "136beb95459fe6b126cd6cefd54eb5d971524b0e883e41a292a78f78015cb8d5" + ], + [ + "f10a0356031cd569d652dbca8e7a4d36c8da33cdff428d003338602b7764fe2c96c505175b010000000465ac516affffffffbb54563c71136fa944ee20452d78dc87073ac2365ba07e638dce29a5d179da600000000003635152ffffffff9a411d8e2d421b1e6085540ee2809901e590940bbb41532fa38bd7a16b68cc350100000007535251635365636195df1603b61c4501000000000165bf6a310400000000026352fcbba10200000000016aa30b7ff0", + "5351", + 0, + 1552495929, + "e4bbbeaa67cfd27f435e1b0f17ffe37707d1be50d933d06dbbbfd1a731c90265" + ], + [ + "c3325c9b012f659466626ca8f3c61dfd36f34670abc054476b7516a1839ec43cd0870aa0c0000000000753525265005351e7e3f04b0112650500000000000363ac6300000000", + "acac", + 0, + -68961433, + "5ca70e727d91b1a42b78488af2ed551642c32d3de4712a51679f60f1456a8647" + ], + [ + "2333e54c044370a8af16b9750ac949b151522ea6029bacc9a34261599549581c7b4e5ece470000000006510052006563ffffffff80630fc0155c750ce20d0ca4a3d0c8e8d83b014a5b40f0b0be0dd4c63ac28126020000000465000000ffffffff1b5f1433d38cdc494093bb1d62d84b10abbdae57e3d04e82e600857ab3b1dc990300000003515100b76564be13e4890a908ea7508afdad92ec1b200a9a67939fadce6eb7a29eb4550a0a28cb0300000001acffffffff02926c930300000000016373800201000000000153d27ee740", + "6365516a53", + 3, + 598653797, + 
"2be27a686eb7940dd32c44ff3a97c1b28feb7ab9c5c0b1593b2d762361cfc2db" + ], + [ + "b500ca48011ec57c2e5252e5da6432089130603245ffbafb0e4c5ffe6090feb629207eeb0e0100000004526a636a8302c9d2042b44f40500000000015278c05a050000000004ac5251524be080020000000007636aac63ac5252c93a9a040000000007656553636a5352d91f9ddb", + "52005100", + 0, + -2024394677, + "a1c7d6660d3382d6c93bd2c46b12eafb6513ec4be9a5a1489d4383524f1edcf9" + ], + [ + "f52ff64b02ee91adb01f3936cc42e41e1672778962b68cf013293d649536b519bc3271dd2c00000000020065afee11313784849a7c15f44a61cd5fd51ccfcdae707e5896d131b082dc9322a19e12858501000000036aac654e8ca882022deb7c020000000005006a515352d3defc0000000000016300000000", + "63520063", + 0, + 1130989496, + "ec2f130931c2d4acdf04556cbe9bab0c7297c410d010918a128640997de843af" + ], + [ + "ab7d6f36027a7adc36a5cf7528fe4fb5d94b2c96803a4b38a83a675d7806dda62b380df86a0000000003000000ffffffff5bc00131e29e22057c04be854794b4877dda42e416a7a24706b802ff9da521b20000000007ac6a0065ac52ac957cf45501b9f06501000000000400ac636325f1110b", + "00526500536a635253", + 0, + 911316637, + "1797caf6329f3d52afac7a58d81bc3228ae3d2fdec25c0bc43d84eec1003183c" + ], + [ + "f940888f023dce6360263c850372eb145b864228fdbbb4c1186174fa83aab890ff38f8c9a90300000000ffffffff01e80ccdb081e7bbae1c776531adcbfb77f2e5a7d0e5d0d0e2e6c8758470e85f00000000020053ffffffff03b49088050000000003656a52428bd60400000000085163006563ac636a0cbacf0400000000070063ac5265ac53d6e16604", + "ac63", + 0, + 39900215, + "96c6cca56df96cf63f98859d9be6fcedd2f97f25567c4e7e719869c96fd2cb17" + ], + [ + "530ecd0b01ec302d97ef6f1b5a6420b9a239714013e20d39aa3789d191ef623fc215aa8b940200000004ac53516a3823ab8202572eaa040000000006526a51526563fd8a270100000000036a006581a798f0", + "525153656a0063", + 0, + 1784562684, + "d7727f208a685cea80c63c66ac7613a3f0bf5081a6083950b2e022091e999a6b" + ], + [ + 
"5d781d9303acfcce964f50865ddfddab527ea971aee91234c88e184979985c00b4de15204b01000000026352a009c8ab01f93c8ef2447386c434b4498538f061845862c3f9d5751ad0fce52af442b3a902000000025165b909c66b5a3e7c81b3c45396b944be13b8aacfc0204f3f3c105a66fa8fa6402f1b5efddb01000000086a65ac636aac656ac3c677c402b79fa4050000000003006a5133e358020000000005516351630078c2e025", + "6aac51636a6a005265", + 0, + -882306874, + "2257287674ad60f1a3537aab37ad8a9b14de255e6f8dd6ce65ce0ff2f69c8d04" + ], + [ + "25ee54ef0187387564bb86e0af96baec54289ca8d15e81a507a2ed6668dc92683111dfb7a50100000004005263634cecf17d0429aa4d000000000005636a6a5263daa756010000000001514df70a01000000000151980a890400000000065253ac6a006377fd24e3", + "65", + 0, + 797877378, + "069f38fd5d47abff46f04ee3ae27db03275e9aa4737fa0d2f5394779f9654845" + ], + [ + "a9c57b1a018551bcbc781b256642532bbc09967f1cbe30a227d352a19365d219d3f11649a3030000000451655352b14094220318289403000000000400ac6a654add350400000000003d379505000000000453acac00e1739d36", + "5363", + 0, + -1069721025, + "067e85b25142810ca22954698263e485b1ee3e7fc8542dc90c4ac9cdf9370530" + ], + [ + "05c4fb94040f5119dc0b10aa9df054871ed23c98c890f1e931a98ffb0683dac45e98619fdc0200000006ac6a525263513e7495651c9794c4d60da835d303eb4ee6e871f8292f6ad0b32e85ef08c9dc7aa4e03c9c01000000040052acacfffffffffee953259cf14ced323fe8d567e4c57ba331021a1ef5ac2fa90f7789340d7c550100000007ac6aacac6a6a53ffffffff08d9dc820d00f18998af247319f9de5c0bbd52a475ea587f16101af3afab7c210100000003535363569bca7c0468e34f00000000000863536353ac51ac6584e3190100000000056500526a533debea030000000003ac0053ee7070020000000006ac52005253ac00000000", + "6351005253", + 2, + 1386916157, + "59d1078fc84ae1d50363c10b0052e39900c250d5b2ab47c5201bcbbe887083a6" + ], + [ + 
"c95ab19104b63986d7303f4363ca8f5d2fa87c21e3c5d462b99f1ebcb7c402fc012f5034780000000009006aac63ac65655265ffffffffbe91afa68af40a8700fd579c86d4b706c24e47f7379dad6133de389f815ef7f501000000036aac00ffffffff1520db0d81be4c631878494668d258369f30b8f2b7a71e257764e9a27f24b48701000000076a515100535300b0a989e1164db9499845bac01d07a3a7d6d2c2a76e4c04abe68f808b6e2ef5068ce6540e0100000008ac53636a6365656affffffff0309aac60500000000046563656a6067e8020000000003ac536aec91c803000000000865525165ac6a53acc7a45bc5", + "63526a65ac", + 1, + 512079270, + "7b43923888bcb249b559b3767057664aa9bfab4dd42351c143bbb85cdc3a8035" + ], + [ + "ca66ae10049533c2b39f1449791bd6d3f039efe0a121ab7339d39ef05d6dcb200ec3fb2b3b020000000465006a53ffffffff534b8f97f15cc7fb4f4cea9bf798472dc93135cd5b809e4ca7fe4617a61895980100000000ddd83c1dc96f640929dd5e6f1151dab1aa669128591f153310d3993e562cc7725b6ae3d903000000046a52536582f8ccddb8086d8550f09128029e1782c3f2624419abdeaf74ecb24889cc45ac1a64492a0100000002516a4867b41502ee6ccf03000000000552acac526a4b7ba800000000000651510052536300000000", + "6553", + 2, + -62969257, + "cf83f09721753dfe8da695ddcd2ae11028aebef4aa8f57c8d69437ac34ed2069" + ], + [ + "ba646d0b0453999f0c70cb0430d4cab0e2120457bb9128ed002b6e9500e9c7f8d7baa20abe0200000001652a4e42935b21db02b56bf6f08ef4be5adb13c38bc6a0c3187ed7f6197607ba6a2c47bc8a03000000040052516affffffffa55c3cbfc19b1667594ac8681ba5d159514b623d08ed4697f56ce8fcd9ca5b0b00000000086a6a5263ac65526366728c2720fdeabdfdf8d9fb2bfe88b295d3b87590e26a1e456bad5991964165f888c03a0200000006630051ac00acffffffff0176fafe0100000000070063acac65515200000000", + "63", + 1, + 2002322280, + "9db4e320208185ee70edb4764ee195deca00ba46412d5527d9700c1cf1c3d057" + ], + [ + "2ddb8f84039f983b45f64a7a79b74ff939e3b598b38f436def7edd57282d0803c7ef34968d02000000026a537eb00c4187de96e6e397c05f11915270bcc383959877868ba93bac417d9f6ed9f627a7930300000003516551ffffffffacc12f1bb67be3ae9f1d43e55fda8b885340a0df1175392a8bbd9f959ad3605003000000025163ffffffff02ff0f4700000000000070bd99040000000002ac53f8440b42", + 
"", + 2, + -393923011, + "c8798ae08d2cf6af56eb468f3c040be24866af35e03d2ea6d92d3b2aa735bc21" + ], + [ + "b21fc15403b4bdaa994204444b59323a7b8714dd471bd7f975a4e4b7b48787e720cbd1f5f00000000000ffffffff311533001cb85c98c1d58de0a5fbf27684a69af850d52e22197b0dc941bc6ca903000000056563635351a8ae2c2c7141ece9a4ff75c43b7ea9d94ec79b7e28f63e015ac584d984a526a73fe1e04e0100000007526352536a5365ffffffff02a0a9ea03000000000152cfc4f300000000000465525253e8e0f342", + "000000", + 1, + 1305253970, + "b3c229c947c27b41cece5549b8fba359a14ef7b06f81fa00532e75b18121361d" + ], + [ + "d1704d6601acf710b19fa753e307cfcee2735eada0d982b5df768573df690f460281aad12d0000000007656300005100acffffffff0232205505000000000251632ca1bc0300000000016300000000", + "ac656551", + 0, + 165179664, + "63fcd4f003a59d705a1ec2a2579f813c4c42bbf5f1aa5d3386503461c94ef884" + ], + [ + "d2f6c096025cc909952c2400bd83ac3d532bfa8a1f8f3e73c69b1fd7b8913379793f3ce92202000000066a006a53516ade5332d81d58b22ed47b2a249ab3a2cb3a6ce9a6b5a6810e18e3e1283c1a1b3bd73e3ab00300000001acffffffff01a9b2d4050000000003635200dc4b7f69", + "0065", + 0, + -78019184, + "3c5350ae0557040f042a1467ee9e667dba1c38c4aee2d6104e91348856c0b449" + ], + [ + "3e6db1a1019444dba461247224ad5933c997256d15c5d37ade3d700506a0ba0a57824930d7010000000652650000ac00ffffffff03389242020000000000a8465a0200000000076a6a636a510052394e6003000000000953ac51526351000053d21d9800", + "ac5365", + 0, + 1643661850, + "7a55a214a6c62555854cddbd278cbf0b249ea9e6a4821a37d363340c2b00bee5" + ], + [ + "f821a042036ad43634d29913b77c0fc87b4af593ac86e9a816a9d83fd18dfcfc84e1e1d57102000000076a63ac52006351ffffffffbcdaf490fc75086109e2f832c8985716b3a624a422cf9412fe6227c10585d212030000000752525352ac526affffffff2efed01a4b73ad46c7f7bc7fa3bc480f8e32d741252f389eaca889a2e9d2007e000000000353ac53ffffffff032ac8b3020000000009636300000063516300d3d9f2040000000006510065ac656aafa5de00000000000563525300ac9042b57d", + "525365", + 1, + 667065611, + "4055f568c1e384f78f54a55e1b423246874d6cd69a973e0e8e16df9054f40404" + ], + [ + 
"58e3f0f704a186ef55d3919061459910df5406a9121f375e7502f3be872a449c3f2bb058380100000000f0e858da3ac57b6c973f889ad879ffb2bd645e91b774006dfa366c74e2794aafc8bbc871010000000751ac65516a515131a68f120fd88ca08687ceb4800e1e3fbfea7533d34c84fef70cc5a96b648d580369526d000000000600ac00515363f6191d5b3e460fa541a30a6e83345dedfa3ed31ad8574d46d7bbecd3c9074e6ba5287c24020000000151e3e19d6604162602010000000004005100ac71e17101000000000065b5e9030000000003005353f6b7d101000000000200ac00000000", + "6563", + 1, + -669018604, + "0d29716180aaf3d362452d4fc6a0ecb268452643b3da574f8dc9b589be506bb3" + ], + [ + "efec1cce044a676c1a3d973f810edb5a9706eb4cf888a240f2b5fb08636bd2db482327cf50000000000451656a52ffffffff46ef019d7c03d9456e5134eb0a7b5408d274bd8e33e83df44fab94101f7c5b650200000009ac5100006353630051407aadf6f5aaffbd318fdbbc9cae4bd883e67d524df06bb006ce2f7c7e2725744afb76960100000004536a53acec0d64eae09e2fa1a7c4960354230d51146cf6dc45ee8a51f489e20508a785cbe6ca86fc000000000651536a516300ffffffff014ef598020000000006636aac655265a6ae1b75", + "53516a5363526563", + 2, + -1823982010, + "13e8b5ab4e5b2ceeff0045c625e19898bda2d39fd7af682e2d1521303cfe1154" + ], + [ + "3c436c2501442a5b700cbc0622ee5143b34b1b8021ea7bbc29e4154ab1f5bdfb3dff9d640501000000076a5251ac5252acffffffff0170b9a20300000000056a6351525114b13791", + "63ac5251ac65", + 0, + -2140612788, + "6c44dd81f45d721f4fb8f4b2b3235798721f0477348f69e73bb9bcaa34a3bf3a" + ], + [ + "d62f183e037e0d52dcf73f9b31f70554bce4f693d36d17552d0e217041e01f15ad3840c838000000000863acac6a6a6a6363ffffffffabdfb395b6b4e63e02a763830f536fc09a35ff8a0cf604021c3c751fe4c88f4d03000000046365ac53aa4d30de95a2327bccf9039fb1ad976f84e0b4a0936d82e67eafebc108993f1e57d8ae39000000000165ffffffff04364ad30500000000036a005179fd84010000000006636aac6363519b9023030000000008510065006563ac6acd2a4a02000000000000000000", + "52", + 1, + 595020383, + "43f13b3e823082f194d9eafe225f9622cd99fdb188e773cc710562f75a8f98c0" + ], + [ + 
"44c200a5021238de8de7d80e7cce905606001524e21c8d8627e279335554ca886454d692e6000000000400acac52bb8d1dc876abb1f514e96b21c6e83f429c66accd961860dc3aed5071e153e556e6cf076d02000000056553526a51870a928d0360a580040000000004516a535290e1e3020000000007516a00510065acdd7fc504000000000651536365636abb1ec182", + "6363", + 0, + -785766894, + "98866f7694e00b4b902ea968c971b7aadc08e972cb75bf792accb6f17a62e25a" + ], + [ + "d682d52d034e9b062544e5f8c60f860c18f029df8b47716cabb6c1b4a4b310a0705e754556020000000400656a0016eeb88eef6924fed207fba7ddd321ff3d84f09902ff958c815a2bf2bb692eb52032c4d803000000076365ac516a520099788831f8c8eb2552389839cfb81a9dc55ecd25367acad4e03cfbb06530f8cccf82802701000000085253655300656a53ffffffff02d543200500000000056a510052ac03978b05000000000700ac51525363acfdc4f784", + "", + 2, + -696035135, + "e1a256854099907050cfee7778f2018082e735a1f1a3d91437584850a74c87bb" + ], + [ + "e8c0dec5026575ddf31343c20aeeca8770afb33d4e562aa8ee52eeda6b88806fdfd4fe0a97030000000653ac65516552ffffffffdde122c2c3e9708874286465f8105f43019e837746686f442666629088a970e0010000000153ffffffff01f98eee0100000000025251fe87379a", + "63", + 1, + 633826334, + "abe441209165d25bc6d8368f2e7e7dc21019056719fef1ace45542aa2ef282e2" + ], + [ + "b288c331011c17569293c1e6448e33a64205fc9dc6e35bc756a1ac8b97d18e912ea88dc0770200000006635300ac6aacfc3c890903a3ccf8040000000004656500ac9c65c90400000000056a6a65ac63ac5f7702000000000365005200000000", + "526a63", + 0, + 1574937329, + "ea9f25d0dcf3b7bb2a98b8416f581569eef203dda8cea3fe8a3d92ea80cc50b0" + ], + [ + "fc0a092003cb275fa9a25a72cf85d69c19e4590bfde36c2b91cd2c9c56385f51cc545530210000000003530063ffffffff729b006eb6d14d6e5e32b1c376acf1c62830a5d9246da38dbdb4db9f51fd1c74020000000463636500ffffffff0ae695c6d12ab7dcb8d3d4b547b03f178c7268765d1de9af8523d244e3836b12030000000151ffffffff0115c1e20100000000056a6aac6a6a1ff59aec", + "0053ac", + 0, + 931831026, + "1a4207a784c0ab325f36f7632cd29a8f98c719a4f2794febfaa9915a38e9ff14" + ], + [ + 
"0fcae7e004a71a4a7c8f66e9450c0c1785268679f5f1a2ee0fb3e72413d70a9049ecff75de020000000452005251ffffffff99c8363c4b95e7ec13b8c017d7bb6e80f7c04b1187d6072961e1c2479b1dc0320200000000ffffffff7cf03b3d66ab53ed740a70c5c392b84f780fff5472aee82971ac3bfeeb09b2df02000000055265636a0058e4fe9257d7c7c7e82ff187757c6eadc14cceb6664dba2de03a018095fd3006682a5b9600000000056353536a636de26b2303ff76de010000000001acdc0a2e0200000000000a53ed02000000000653006351510088417307", + "ac6aac5165535253", + 2, + -902160694, + "da4a0ce0d5835d2b1c0346ca90c94a267345339db2c4634410e640f6c86cd173" + ], + [ + "612701500414271138e30a46b7a5d95c70c78cc45bf8e40491dac23a6a1b65a51af04e6b94020000000451655153ffffffffeb72dc0e49b2fad3075c19e1e6e4b387f1365dca43d510f6a02136318ddecb7f0200000003536352e115ffc4f9bae25ef5baf534a890d18106fb07055c4d7ec9553ba89ed1ac2101724e507303000000070063006563acac2ff07f69a080cf61a9d19f868239e6a4817c0eeb6a4f33fe254045d8af2bca289a8695de0300000000430736c404d317840500000000066a00ac535165306e05030000000006630051536a6a6c8aca010000000004655163515dcf960100000000016a00000000", + "", + 2, + -604581431, + "bc4865c1566a0cdf045abb6c938987da7c39dd582d0fba6fff8d1837ee4a7166" + ], + [ + "6b68ba00023bb4f446365ea04d68d48539aae66f5b04e31e6b38b594d2723ab82d44512460000000000200acffffffff5dfc6febb484fff69c9eeb7c7eb972e91b6d949295571b8235b1da8955f3137b020000000851ac6352516a535325828c8a03365da801000000000600636aac65510f594d03000000000963ac536365ac63636a45329e010000000004ac53526a00000000", + "005151", + 0, + 1317038910, + "0ae0a34d94e7876dd15da19879f9cea8d4d0bfaa5ce318506583a41abafefda4" + ], + [ + "aff5850c0168a67296cc790c1b04a9ed9ad1ba0469263a9432fcb53676d1bb4e0eea8ea1410100000005ac65526a537d5fcb1d01d9c26d02000000000552655153acc0617ca1", + "51650063", + 0, + 1712981774, + "87c894884c1e673ad986b2ff632c114c307b786e0568912b6612223eea18828a" + ], + [ + 
"e6d6b9d8042c27aec99af8c12b6c1f7a80453e2252c02515e1f391da185df0874e133696b50300000006ac5165650065ffffffff6a4b60a5bfe7af72b198eaa3cde2e02aa5fa36bdf5f24ebce79f6ecb51f3b554000000000452656aac2ec4c5a6cebf86866b1fcc4c5bd5f4b19785a8eea2cdfe58851febf87feacf6f355324a80100000001537100145149ac1e287cef62f6f5343579189fad849dd33f25c25bfca841cb696f10c5a34503000000046a636a63df9d7c4c018d96e20100000000015100000000", + "53", + 1, + -1924777542, + "f98f95d0c5ec3ac3e699d81f6c440d2e7843eab15393eb023bc5a62835d6dcea" + ], + [ + "046ac25e030a344116489cc48025659a363da60bc36b3a8784df137a93b9afeab91a04c1ed0200000008510000526a65ac51ffffffff6c094a03869fde55b9a8c4942a9906683f0a96e2d3e5a03c73614ea3223b2c29020000000400636a6affffffff3da7aa5ecef9071600866267674b54af1740c5aeb88a290c459caa257a2683cb000000000265657e2a1b900301b916030000000004ac63656308f4ed0300000000075253ac63ac51ac73d62002000000000200008deb1285", + "6a", + 2, + 1299505108, + "12680ed69294aee1203af681520d76a34f58d77a3656a2d170d2534c810545c1" + ], + [ + "bd515acd0130b0ac47c2d87f8d65953ec7d657af8d96af584fc13323d0c182a2e5f9a96573000000000652ac51acac65ffffffff0467aade000000000003655363dc577d0500000000055152525300137f60030000000007535163530065004cdc860500000000036a5265241bf53e", + "ac", + 0, + 621090621, + "9f65007e83f456f919bf343d1ae243f1f6acd25e80c08ccee9399c772141757a" + ], + [ + "ff1ae37103397245ac0fa1c115b079fa20930757f5b6623db3579cb7663313c2dc4a3ffdb300000000076353656a000053ffffffff83c59e38e5ad91216ee1a312d15b4267bae2dd2e57d1a3fd5c2f0f809eeb5d460100000005006a6a5351ffffffff9d5e706c032c1e0ca75915f8c6686f64ec995ebcd2539508b7dd8abc3e4d7d2a01000000006b2bdcda02a8fe070500000000045253000019e31d0400000000050063ac526a00000000", + "53656a6a525251", + 0, + 881938872, + "4a5fbdebe4a5bb645f4ccfb9bbc6397930d3d7842da1907e4adfc46e3854d8d8" + ], + [ + 
"ff5400dd02fec5beb9a396e1cbedc82bedae09ed44bae60ba9bef2ff375a6858212478844b03000000025253ffffffff01e46c203577a79d1172db715e9cc6316b9cfc59b5e5e4d9199fef201c6f9f0f0000000008006552656a5165acffffffff02e8ce62040000000002515312ce3e00000000000251513f119316", + "", + 0, + 1541581667, + "1e0da47eedbbb381b0e0debbb76e128d042e02e65b11125e17fd127305fc65cd" + ], + [ + "28e3daa603c03626ad91ffd0ff927a126e28d29db5012588b829a06a652ea4a8a57324070302000000036552acffffffff8e643146d3d0568fc2ad854fd7864d43f6f16b84e395db82b739f6f5c84d97b40000000004515165526b01c2dc1469db0198bd884e95d8f29056c48d7e74ff9fd37a9dec53e44b8769a6c99c030200000008006a516a53630065eea8738901002398000000000006ac5363516a51eaef12f5", + "5252515253", + 2, + 1687390463, + "b0951cf9846e21e7453f8143f08237c2c8d1894ae427564e4c363051cfec6a4d" + ], + [ + "b54bf5ac043b62e97817abb892892269231b9b220ba08bc8dbc570937cd1ea7cdc13d9676c0100000003515365a10adb7b35189e1e8c00b86250f769319668189b7993d6bdac012800f1749150415b2deb0200000003655300ffffffff60b9f4fb9a7e17069fd00416d421f804e2ef2f2c67de4ca04e0241b9f9c1cc5d02000000026aacfffffffff048168461cce1d40601b42fbc5c4f904ace0d35654b7cc1937ccf53fe78505a0100000007526563525265acffffffff01dbf4e6040000000007acac656553636500000000", + "63", + 2, + 882302077, + "f5b38b0f06e246e47ce622e5ee27d5512c509f8ac0e39651b3389815eff2ab93" + ], + [ + "ebf628b30360bab3fa4f47ce9e0dcbe9ceaf6675350e638baff0c2c197b2419f8e4fb17e16000000000452516365ac4d909a79be207c6e5fb44fbe348acc42fc7fe7ef1d0baa0e4771a3c4a6efdd7e2c118b0100000003acacacffffffffa6166e9101f03975721a3067f1636cc390d72617be72e5c3c4f73057004ee0ee010000000863636a6a516a5252c1b1e82102d8d54500000000000153324c900400000000015308384913", + "0063516a51", + 1, + -1658428367, + "eb2d8dea38e9175d4d33df41f4087c6fea038a71572e3bad1ea166353bf22184" + ], + [ + 
"d6a8500303f1507b1221a91adb6462fb62d741b3052e5e7684ea7cd061a5fc0b0e93549fa50100000003ac65acfffffffffdec79bf7e139c428c7cfd4b35435ae94336367c7b5e1f8e9826fcb0ebaaaea30300000000ffffffffd115fdc00713d52c35ea92805414bd57d1e59d0e6d3b79a77ee18a3228278ada020000000453005151ffffffff040231510300000000085100ac6a6a000063c6041c0400000000080000536a6563acac138a0b04000000000163d25fbe03000000000900656a00656aac510000000000", + "ac526aac6a00", + 1, + -2007972591, + "70466179ab02e0a0d1e30fb22e189fe37c317ca4022487f41b6b1e22f52213e3" + ], + [ + "658cb1c1049564e728291a56fa79987a4ed3146775fce078bd2e875d1a5ca83baf6166a82302000000046a6563512170e7d0826cbdb45fda0457ca7689745fd70541e2137bb4f52e7b432dcfe2112807bd720300000007006a0052536351ffffffff8715ca2977696abf86d433d5c920ef26974f50e9f4a20c584fecbb68e530af5101000000009e49d864155bf1d3c757186d29f3388fd89c7f55cc4d9158b4cf74ca27a35a1dd93f945502000000096a535353ac656351510d29fa870230b8090400000000056a6a526a633b41da0500000000036a6a65ed63bf62", + "52acac", + 2, + -1774073281, + "d0c0ad4eb89bb42b233d8b49bfddb58a2a94a5086fced93d5be526f3e26ba57c" + ], + [ + "e92492cc01aec4e62df67ea3bc645e2e3f603645b3c5b353e4ae967b562d23d6e043badecd0100000002ac65ffffffff02c7e5ea04000000000152e1e584010000000005536365515195d16047", + "6551", + 0, + -424930556, + "1b6e90535cc7b99ac9a707fc38fc4339b7ac98c80d7d788937c84d94cba37345" + ], + [ + "02e242db04be2d8ced9179957e98cee395d4767966f71448dd084426844cbc6d15f2182e85030000000200650c8ffce3db9de9c3f9cdb9104c7cb26647a7531ad1ebf7591c259a9c9985503be50f8de30000000007ac6a51636a6353ffffffffa2e33e7ff06fd6469987ddf8a626853dbf30c01719efb259ae768f051f803cd30300000000fffffffffd69d8aead941683ca0b1ee235d09eade960e0b1df3cd99f850afc0af1b73e07030000000060bb602a011659670100000000076363526300acac00000000", + "6353515251", + 3, + 1451100552, + "bbc9069b8615f3a52ac8a77359098dcc6c1ba88c8372d5d5fe080b99eb781e55" + ], + [ + 
"b28d5f5e015a7f24d5f9e7b04a83cd07277d452e898f78b50aae45393dfb87f94a26ef57720200000006ac630053ac52ffffffff046475ed0400000000075100526363ac65c9834a04000000000151ae26b30100000000040000ac65ceefb900000000000000000000", + "ac6551ac6a536553", + 0, + -1756558188, + "fa793d53ce3009538526eb1d1c0547cad63610aee4425a8c3c01f0e8ef3fbda1" + ], + [ + "efb8b09801f647553b91922a5874f8e4bb2ed8ddb3536ed2d2ed0698fac5e0e3a298012391030000000952ac005263ac52006affffffff04cdfa0f050000000005ac5351ac65b68d1b0200000000045365ac00d057d50000000000016a9e1fda010000000007ac63ac536552ac00000000", + "6aac", + 0, + 1947322973, + "fee40f24dd25601b57caedb968efb0940c31b686b4ca6976fedeb664cfdabf2c" + ], + [ + "68a59fb901c21946797e7d07a4a3ea86978ce43df0479860d7116ac514ba955460bae78fff0000000000ffffffff03979be80100000000036553639300bc040000000008006552006a656565cfa78d0000000000056552ac635100000000", + "65", + 0, + 995583673, + "f1eb78d44ea74232cf2e8555eddd3bae8ccbe9cf678ad3bd826144fe984ffd63" + ], + [ + "67761f2a014a16f3940dcb14a22ba5dc057fcffdcd2cf6150b01d516be00ef55ef7eb07a830100000004636a6a51ffffffff01af67bd050000000008526553526300510000000000", + "6a00", + 0, + 1570943676, + "079fa62e9d9d7654da8b74b065da3154f3e63c315f25751b4d896733a1d67807" + ], + [ + "e20fe96302496eb436eee98cd5a32e1c49f2a379ceb71ada8a48c5382df7c8cd88bdc47ced03000000016556aa0e180660925a841b457aed0aae47fca2a92fa1d7afeda647abf67198a3902a7c80dd00000000085152ac636a535265bd18335e01803c810100000000046500ac52f371025e", + "6363", + 1, + -651254218, + "2921a0e5e3ba83c57ba57c25569380c17986bf34c366ec216d4188d5ba8b0b47" + ], + [ + "4e1bd9fa011fe7aa14eee8e78f27c9fde5127f99f53d86bc67bdab23ca8901054ee8a8b6eb0300000009ac535153006a6a0063ffffffff044233670500000000000a667205000000000452636a51e5bf35030000000003535351d579e50500000000060063006551ac3419ac30", + "52ac52", + 0, + -1807563680, + "18f86c959cf34c3f4748ae43b15ac365986c340a004c3f6b461af4bd0e214f23" + ], + [ + 
"ec02fbee03120d02fde12574649660c441b40d330439183430c6feb404064d4f507e704f3c0100000000ffffffffe108d99c7a4e5f75cc35c05debb615d52fac6e3240a6964a29c1704d98017fb6020000000163fffffffff726ec890038977adfc9dadbeaf5e486d5fcb65dc23acff0dd90b61b8e2773410000000002ac65e9dace55010f881b010000000004ac00650000000000", + "51ac525152ac6552", + 2, + -1564046020, + "67cdf19c0e2207c39eba0c3227246f9a892b71657b657279289a0de48de36a84" + ], + [ + "23dbdcc1039c99bf11938d8e3ccec53b60c6c1d10c8eb6c31197d62c6c4e2af17f52115c3a0300000006636352000063ffffffff17823880e1df93e63ad98c29bfac12e36efd60254346cac9d3f8ada020afc062030000000263631c26f002ac66e86cd22a25e3ed3cb39d982f47c5118f03253054842daadc88a6c41a2e1500000000086a00636a53635163195314de015570fd0100000000086a5263ac5200005300000000", + "ac6a6553", + 1, + 11586329, + "c9175e72b4bac047b14e198e735c5f611770a704323f8298410ed233befc9a28" + ], + [ + "33b03bf00222c7ca35c2f8870bbdef2a543b70677e413ce50494ac9b22ea673287b6aa55c5000000000400006a52ee4d97b527eb0b427e4514ea4a76c81e68c34900a23838d3e57d0edb5410e62eeb8c92b6000000000553ac6aacac42e59e170326245c000000000007656553536a516ab1a10603000000000652526a516500cc89c802000000000763ac6a63ac516300000000", + "", + 0, + 557416556, + "1eb1b268555b020ce680b0d86c8e86e0ddfb072bd3be6db1193683145b1d4197" + ], + [ + "813eda1103ac8159850b4524ef65e4644e0fc30efe57a5db0c0365a30446d518d9b9aa8fdd0000000003656565c2f1e89448b374b8f12055557927d5b33339c52228f7108228149920e0b77ef0bcd69da60000000003ac006382cdb7978d28630c5e1dc630f332c4245581f787936f0b1e84d38d33892141974c75b4750300000003ac5365ffffffff0137edfb02000000000000000000", + "0063", + 1, + -1948560575, + "71dfcd2eb7f2e6473aed47b16a6d5fcbd0af22813d892e9765023151e07771ec" + ], + [ + "9e45d9aa0248c16dbd7f435e8c54ae1ad086de50c7b25795a704f3d8e45e1886386c653fbf01000000025352fb4a1acefdd27747b60d1fb79b96d14fb88770c75e0da941b7803a513e6d4c908c6445c7010000000163ffffffff014069a8010000000001520a794fb3", + "51ac005363", + 1, + -719113284, + 
"0d31a221c69bd322ef7193dd7359ddfefec9e0a1521d4a8740326d46e44a5d6a" + ], + [ + "36e42018044652286b19a90e5dd4f8d9f361d0760d080c5c5add1970296ff0f1de630233c8010000000200ac39260c7606017d2246ee14ddb7611586178067e6a4be38e788e33f39a3a95a55a13a6775010000000352ac638bea784f7c2354ed02ea0b93f0240cdfb91796fa77649beee6f7027caa70778b091deee700000000066a65ac656363ffffffff4d9d77ab676d711267ef65363f2d192e1bd55d3cd37f2280a34c72e8b4c559d700000000046a006a00001764e1020d30220100000000075252516aac00534720970400000000086353536a636a5100a56407a1", + "006a53655153", + 0, + 827296034, + "daec2af5622bbe220c762da77bab14dc75e7d28aa1ade9b7f100798f7f0fd97a" + ], + [ + "5e06159a02762b5f3a5edcdfc91fd88c3bff08b202e69eb5ba74743e9f4291c4059ab008200000000001ac348f5446bb069ef977f89dbe925795d59fb5d98562679bafd61f5f5f3150c3559582992d00000000065165515353ac762fc67703847ec6010000000000e200cf040000000001aca64b86010000000007520000515363acb82b491b", + "535253526a", + 0, + -61819505, + "db79951df96e717d7e2da948bf0d9471d6c21fcdadf66b114ba206af0f2655a1" + ], + [ + "a1948872013b543d6d902ccdeead231c585195214ccf5d39f136023855958436a43266911501000000086aac006a6a6a51514951c9b2038a538a04000000000452526563c0f345050000000007526a5252ac526af9be8e03000000000652acac51006306198db2", + "6353", + 0, + -326384076, + "ac7e7c73d421e8db3fde36c0bda6747e11841c2aa9bd35504c8c911601964168" + ], + [ + "c3efabba03cb656f154d1e159aa4a1a4bf9423a50454ebcef07bc3c42a35fb8ad84014864d0000000000d1cc73d260980775650caa272e9103dc6408bdacaddada6b9c67c88ceba6abaa9caa2f7d020000000553536a5265ffffffff9f946e8176d9b11ff854b76efcca0a4c236d29b69fb645ba29d406480427438e01000000066a0065005300ffffffff040419c00100000000026a63cdb5b60100000000080063005352656a63f9fe5e050000000004acac5352611b980100000000086a00acac00006a512d7f0c40", + "0053", + 0, + -59089911, + "5d0c4674661b9ec0295cebe0deb9997b37ca3cd582a28777758e532288c65584" + ], + [ + 
"efb55c2e04b21a0c25e0e29f6586be9ef09f2008389e5257ebf2f5251051cdc6a79fce2dac020000000351006affffffffaba73e5b6e6c62048ba5676d18c33ccbcb59866470bb7911ccafb2238cfd493802000000026563ffffffffe62d7cb8658a6eca8a8babeb0f1f4fa535b62f5fc0ec70eb0111174e72bbec5e0300000006ac516365526affffffffbf568789e681032d3e3be761642f25e46c20322fa80346c1146cb47ac999cf1b0300000000b3dbd559025288280100000000000aac7b0100000000015300000000", + "acac52", + 3, + 1638140535, + "6a9f07368fe6045aa9d8651087d8fb1c12988a958764c991651c7a1e819d3dbf" + ], + [ + "91d3b21903629209b877b3e1aef09cd59aca6a5a0db9b83e6b3472aceec3bc2109e64ab85a0200000003530065ffffffffca5f92de2f1b7d8478b8261eaf32e5656b9eabbc58dcb2345912e9079a33c4cd0100000005006500536ad530611da41bbd51a389788c46678a265fe85737b8d317a83a8ff7a839debd18892ae5c803000000056aac6551008b86c501038b8a9a05000000000263525b3f7a04000000000453535300d4e3ff04000000000565ac5165630b7b656f", + "6551525151516a00", + 2, + 499657927, + "1d5e3f025b461b1ce59e3c3564ec28633d6f691d7c03086a0eef58c7dfacdad4" + ], + [ + "5d5c41ad0317aa7e40a513f5141ad5fc6e17d3916eebee4ddb400ddab596175b41a111ead20100000005536a5265acffffffff900ecb5e355c5c9f278c2c6ea15ac1558b041738e4bffe5ae06a9346d66d5b2b00000000060000636a656affffffff99f4e08305fa5bd8e38fb9ca18b73f7a33c61ff7b3c68e696b30a04fea87f3ca000000000163d3d1760d019fc13a00000000000000000000", + "53ac6aac6a52", + 2, + 1007461922, + "4012f5ff2f1238a0eb84854074670b4703238ebc15bfcdcd47ffa8498105fcd9" + ], + [ + "ceecfa6c02b7e3345445b82226b15b7a097563fa7d15f3b0c979232b138124b62c0be007890200000008ac51536a63525253ffffffffbae481ccb4f15d94db5ec0d8854c24c1cc8642bd0c6300ede98a91ca13a4539a0200000001ac50b0813d023110f5020000000005acac526563e2b0d0040000000009656aac0063516a536300000000", + "0063526500", + 0, + -1862053821, + "ac45b766abbd5c40316d80844f939938005cf51b5e644fc4fde63b0d96453b03" + ], + [ + 
"ae62d5fd0380c4083a26642159f51af24bf55dc69008e6b7769442b6a69a603edd980a33000000000003510053ffffffff49d048324d899d4b8ed5e739d604f5806a1104fede4cb9f92cc825a7fa7b4bfe0200000005536a000053ffffffff42e5cea5673c650881d0b4005fa4550fd86de5f21509c4564a379a0b7252ac0e0000000007530000526a53525f26a68a03bfacc3010000000000e2496f0000000000085253acac52636563b11cc600000000000700510065526a6a00000000", + "", + 1, + -1600104856, + "cac54b3a39a48c9a02e39bb968394d08664a5508853aedd6319341307ac0e03c" + ], + [ + "f06f64af04fdcb830464b5efdb3d5ee25869b0744005375481d7b9d7136a0eb8828ad1f0240200000003516563fffffffffd3ba192dabe9c4eb634a1e3079fca4f072ee5ceb4b57deb6ade5527053a92c5000000000165ffffffff39f43401a36ba13a5c6dd7f1190e793933ae32ee3bf3e7bfb967be51e681af760300000009650000536552636a528e34f50b21183952cad945a83d4d56294b55258183e1627d6e8fb3beb8457ec36cadb0630000000003530052334a7128014bbfd10100000000075352006a63656afc424a7c", + "53650051635253ac00", + 2, + 313255000, + "d0b066d280df7df96d13d797a60425a38efbc5694086722202713c0e6c3b8a8d" + ], + [ + "6dfd2f98046b08e7e2ef5fff153e00545faf7076699012993c7a30cb1a50ec528281a9022f030000000152ffffffff1f535e4851920b968e6c437d84d6ecf586984ebddb7d5db6ae035bd02ba222a8010000000551006a5351605072acb3e17939fa0737bc3ee43bc393b4acd58451fc4ffeeedc06df9fc649828822d5010000000253525a4955221715f27788d302382112cf60719be9ae159c51f394519bd5f7e70a4f9816c7020200000008526a6a51636a656a36d3a5ff0445548e0100000000086a6a00516a52655167030b050000000004ac6a63525cfda8030000000000e158200000000000010000000000", + "535263ac6a65515153", + 3, + 585774166, + "72b7da10704c3ca7d1deb60c31b718ee12c70dc9dfb9ae3461edce50789fe2ba" + ], + [ + "187eafed01389a45e75e9dda526d3acbbd41e6414936b3356473d1f9793d161603efdb4567010000000100ffffffff04371c8202000000000563630063523b3bde02000000000753516563006300e9e765010000000005516aac656a373f9805000000000565525352ac08d46763", + "", + 0, + 122457992, + "6a7e59385244ed651f3a9fd3b79c91cef425233a6c1f399ca853ad25f7ac56db" + ], + [ + 
"7d50b977035d50411d814d296da9f7965ddc56f3250961ca5ba805cadd0454e7c521e31b0300000000003d0416c2cf115a397bacf615339f0e54f6c35ffec95aa009284d38390bdde1595cc7aa7c010000000452ac5365ffffffff4232c6e796544d5ac848c9dc8d25cfa74e32e847a5fc74c74d8f38ca51188562030000000653ac51006a51ffffffff016bd8bb000000000003655253163526f3", + "51526a00005353", + 1, + -1311316785, + "896b409b0dc8586307777d9ed6e47729d2ea5f2ecb4c3d46f275649a0ff3f1d1" + ], + [ + "2a45cd1001bf642a2315d4a427eddcc1e2b0209b1c6abd2db81a800c5f1af32812de42032702000000050051525200ffffffff032177db050000000004530051ac49186f0000000000026a00645c0000000000000665655263acac00000000", + "6a65", + 0, + -1774715722, + "23ef12379dbb35735b930e5fd2cc607d052cee29167d61cb783761f7aa30343f" + ], + [ + "479358c202427f3c8d19e2ea3def6d6d3ef2281b4a93cd76214f0c7d8f040aa042fe19f71f0300000000ffffffffa2709be556cf6ecaa5ef530df9e4d056d0ed57ce96de55a5b1f369fa40d4e74a020000000700006a51635365c426be3f02af57850500000000026363fd8f590500000000055153ac53632dfb14b3", + "52006351", + 1, + -763226778, + "f2b472c43f1a619df3abb4a83654a3f5cef07418868169ba7fd8139d69bb7914" + ], + [ + "76179a8e03bec40747ad65ab0f8a21bc0d125b5c3c17ad5565556d5cb03ade7c83b4f32d98030000000151ffffffff99b900504e0c02b97a65e24f3ad8435dfa54e3c368f4e654803b756d011d24150200000003ac5353617a04ac61bb6cf697cfa4726657ba35ed0031432da8c0ffb252a190278830f9bd54f0320100000006656551005153c8e8fc8803677c77020000000007ac6553535253ac70f442030000000001535be0f20200000000026300bf46cb3a", + "6a52", + 1, + -58495673, + "35e94b3776a6729d20aa2f3ddeeb06d3aad1c14cc4cde52fd21a4efc212ea16c" + ], + [ + 
"75ae53c2042f7546223ce5d5f9e00a968ddc68d52e8932ef2013fa40ce4e8c6ed0b6195cde01000000056563ac630079da0452c20697382e3dba6f4fc300da5f52e95a9dca379bb792907db872ba751b8024ee0300000009655151536500005163ffffffffe091b6d43f51ff00eff0ccfbc99b72d3aff208e0f44b44dfa5e1c7322cfc0c5f0100000006520000536363ffffffff7e96c3b83443260ac5cfd18258574fbc4225c630d3950df812bf51dceaeb0f9103000000065365655165639a6bf70b01b3e14305000000000563530063ac00000000", + "630000ac", + 2, + 982422189, + "ee4ea49d2aae0dbba05f0b9785172da54408eb1ec67d36759ff7ed25bfc28766" + ], + [ + "1cdfa01e01e1b8078e9c2b0ca5082249bd18fdb8b629ead659adedf9a0dd5a04031871ba1202000000075253515365656affffffff011e28430200000000076a5363636aac52b2febd4a", + "acac63656300", + 0, + 387396350, + "299dcaac2bdaa627eba0dfd74767ee6c6f27c9200b49da8ff6270b1041669e7e" + ], + [ + "cc28c1810113dfa6f0fcd9c7d9c9a30fb6f1d774356abeb527a8651f24f4e6b25cf763c4e00300000002636affffffff02dfc60500000000000700536363510052afd569030000000003535265f6c90d99", + "006551acacac", + 0, + 1299280838, + "de88653f77e3b6549285fc4fb791acf57e99943fa4e4589a05fa03dba27b877f" + ], + [ + "ca816e7802cd43d66b9374cd9bf99a8da09402d69c688d8dcc5283ace8f147e1672b757e020200000003516a5240fb06c95c922342279fcd88ba6cd915933e320d7becac03192e0941e0345b79223e89570300000004005151ac353ecb5d0264dfbd010000000003ac6aacd5d70001000000000752ac53ac6a5151ec257f71", + "63ac", + 1, + 774695685, + "c10f1c008fcc3db006822d9d3f66518d3cd96a1a08c79cd3557dfdd676a25f45" + ], + [ + "b42b955303942fedd7dc77bbd9040aa0de858afa100f399d63c7f167b7986d6c2377f66a7403000000066aac00525100ffffffff0577d04b64880425a3174055f94191031ad6b4ca6f34f6da9be7c3411d8b51fc000000000300526a6391e1cf0f22e45ef1c44298523b516b3e1249df153590f592fcb5c5fc432dc66f3b57cb03000000046a6aac65ffffffff0393a6c9000000000004516a65aca674ac0400000000046a525352c82c370000000000030053538e577f89", + "", + 1, + -1237094944, + "566953eb806d40a9fb684d46c1bf8c69dea86273424d562bd407b9461c8509af" + ], + [ + 
"92c9fe210201e781b72554a0ed5e22507fb02434ddbaa69aff6e74ea8bad656071f1923f3f02000000056a63ac6a514470cef985ba83dcb8eee2044807bedbf0d983ae21286421506ae276142359c8c6a34d68020000000863ac63525265006aa796dd0102ca3f9d0500000000050052535353cd5c83010000000007ac00525252005322ac75ee", + "5165", + 0, + 97879971, + "ddeff6de8056dbdbf2c0b921543d8a0c2fa3a4156968a6204ffb1f1c94c1bc9c" + ], + [ + "ccca1d5b01e40fe2c6b3ee24c660252134601dab785b8f55bd6201ffaf2fddc7b3e2192325030000000365535100496d4703b4b66603000000000665535253ac633013240000000000015212d2a502000000000851ac636353636a5337b82426", + "0052", + 0, + -1691630172, + "2588673ffa4397bfdd505127331f3ef08912aaedae468f013809b93e32456ac2" + ], + [ + "bc1a7a3c01691e2d0c4266136f12e391422f93655c71831d90935fbda7e840e50770c61da20000000007635253ac516353ffffffff031f32aa020000000003636563786dbc0200000000003e950f00000000000563516a655184b8a1de", + "51536a", + 0, + -1627072905, + "730bc25699b46703d7718fd5f5c34c4b5f00f594a9968ddc247fa7d5175124ed" + ], + [ + "076d209e02d904a6c40713c7225d23e7c25d4133c3c3477828f98c7d6dbd68744023dbb66b03000000065300536565acffffffff10975f1b8db8861ca94c8cc7c7cff086ddcd83e10b5fffd4fc8f2bdb03f9463c0100000000ffffffff029dff76010000000006526365530051a3be6004000000000000000000", + "515253ac65acacac", + 1, + -1207502445, + "66c488603b2bc53f0d22994a1f0f66fb2958203102eba30fe1d37b27a55de7a5" + ], + [ + "690fd1f80476db1f9eebe91317f2f130a60cbc1f4feadd9d6474d438e9cb7f91e4994600af0300000003536a63a15ce9fa6622d0c4171d895b42bff884dc6e8a7452f827fdc68a29c3c88e6fdee364eaf5000000000152ffffffff022dc39d3c0956b24d7f410b1e387859e7a72955f45d6ffb1e884d77888d18fe0300000005ac6a63656afffffffff10b06bce1800f5c49153d24748fdefb0bf514c12863247d1042d56018c3e25c03000000086a63ac6365536a52ffffffff031f162f0500000000050000655265ffbcd40500000000045151ac001a9c8c05000000000652ac53656a6300000000", + "ac5163acac", + 0, + -67986012, + "631c2af46a073eefd20c20a532d88d4b20ed13fe82548f89d6604f45b843bb15" + ], + [ + 
"49ac2af00216c0307a29e83aa5de19770e6b20845de329290bd69cf0e0db7aed61ae41b39002000000035163ac8b2558ef84635bfc59635150e90b61fc753d34acfd10d97531043053e229cd720133cd95000000000463516a51ffffffff02458471040000000006636a51ac0065545aa80000000000096a6553516a5263ac6a00000000", + "515263005363", + 1, + 1449668540, + "87567ef198b654c923fa489e8f058e5c584a5a3c01d76b29ee494d2301583cf9" + ], + [ + "fa4d868b024b010bd5dce46576c2fb489aa60bb797dac3c72a4836f49812c5c564c258414f03000000007a9b3a585e05027bdd89edbadf3c85ac61f8c3a04c773fa746517ae600ff1a9d6b6c02fb0200000003515163ffffffff01b17d020500000000046a65520000000000", + "53656565635363", + 0, + -1718953372, + "96c2b32f0a00a5925db7ba72d0b5d39922f30ea0f7443b22bc1b734808513c47" + ], + [ + "cac6382d0462375e83b67c7a86c922b569a7473bfced67f17afd96c3cd2d896cf113febf9e0300000003006a53ffffffffaa4913b7eae6821487dd3ca43a514e94dcbbf350f8cc4cafff9c1a88720711b800000000096a6a525300acac6353ffffffff184fc4109c34ea27014cc2c1536ef7ed1821951797a7141ddacdd6e429fae6ff01000000055251655200ffffffff9e7b79b4e6836e290d7b489ead931cba65d1030ccc06f20bd4ca46a40195b33c030000000008f6bc8304a09a2704000000000563655353511dbc73050000000000cf34c500000000000091f76e000000000006520000005100d07208cb", + "0063656a", + 2, + -1488731031, + "75f88c4a66ba5b54534fbb87f5127e2557750e44c418450ca083d16c78237122" + ], + [ + "1711146502c1a0b82eaa7893976fefe0fb758c3f0e560447cef6e1bde11e42de91a125f71c030000000015bd8c04703b4030496c7461482481f290c623be3e76ad23d57a955807c9e851aaaa20270300000000d04abaf20326dcb7030000000001632225350400000000075263ac00520063dddad9020000000000af23d148", + "52520053510063", + 0, + 1852122830, + "e33d5ee08c0f3c130a44d7ce29606450271b676f4a80c52ab9ffab00cecf67f8" + ], + [ + "8d5b124d0231fbfc640c706ddb1d57bb49a18ba8ca0e1101e32c7e6e65a0d4c7971d93ea360100000006acac0000ac65ffffffff8fe0fd7696597b845c079c3e7b87d4a44110c445a330d70342a5501955e17dd70100000003525363ef22e8a90346629f030000000009516a00ac63acac51657bd57b05000000000200acfd4288050000000006ac535200636300000000", + 
"53ac52655365", + 0, + 1253152975, + "8f0605526d02765e387a2514228dea36ce6cf56afe17c37980b07bdf8b7e7115" + ], + [ + "38146dc502c7430e92b6708e9e107b61cd38e5e773d9395e5c8ad8986e7e4c03ee1c1e1e760100000000c8962ce2ac1bb3b1285c0b9ba07f4d2e5ce87c738c42ac0548cd8cec1100e6928cd6b0b6010000000563636a52527cccefbd04e5f6f8020000000005006aacac65ab2c4a00000000000351635209a6f40100000000026aacce57dc0400000000065353516a516a00000000", + "", + 0, + -1205978252, + "c5d7082c3cf2ba8677fafb850fedf2aff1691ab8690e6b342330b151afd1dda0" + ], + [ + "22d81c740469695a6a83a9a4824f77ecff8804d020df23713990afce2b72591ed7de98500502000000065352526a6a6affffffff90dc85e118379b1005d7bbc7d2b8b0bab104dad7eaa49ff5bead892f17d8c3ba01000000056565630051ffffffff965193879e1d5628b52005d8560a35a2ba57a7f19201a4045b7cbab85133311d0200000003ac005348af21a13f9b4e0ad90ed20bf84e4740c8a9d7129632590349afc03799414b76fd6e826200000000025353ffffffff04a0d40d04000000000060702700000000000652655151516ad31f1502000000000365ac0069a1ac050000000008510065530053525100000000", + "51636a52ac", + 0, + -1644680765, + "add7f5da27262f13da6a1e2cc2feafdc809bd66a67fb8ae2a6f5e6be95373b6f" + ], + [ + "a27dcbc801e3475174a183586082e0914c314bc9d79d1570f29b54591e5e0dff07fbb45a7f0000000003ac5351ffffffff027347f502000000000453535163d0e5c9030000000007ac656a635152007cd632ed", + "ac63636553", + 0, + -686435306, + "895c2c8c653393c9f9b953816f1294d4b5b7941e42b7ae3157ae48480cf93dbe" + ], + [ + "b123ed2204410d4e8aaaa8cdb95234ca86dad9ff77fb4ae0fd4c06ebed36794f0215ede0040100000002ac63ffffffff3b58b81b19b90d8f402701389b238c3a84ff9ba9aeea298bbf15b41a6766d27a01000000046a655300151824d401786153b819831fb15926ff1944ea7b03d884935a8bde01ed069d5fd80220310200000000ffffffffa9c9d246f1eb8b7b382a9032b55567e9a93f86c77f4e32c092aa1738f7f756c3010000000165ffffffff011a2b48000000000000ed44d1fb", + "63005163", + 2, + -1118263883, + "b5dab912bcabedff5f63f6dd395fc2cf030d83eb4dd28214baba68a45b4bfff0" + ], + [ + 
"1339051503e196f730955c5a39acd6ed28dec89b4dadc3f7c79b203b344511270e5747fa9900000000045151636affffffff378c6090e08a3895cedf1d25453bbe955a274657172491fd2887ed5c9aceca7b0100000000ffffffffcf7cc3c36ddf9d4749edfa9cefed496d2f86e870deb814bfcd3b5637a5496461030000000451006300ffffffff04dcf3fa010000000007526a63005263acb41d84040000000003ac5153800eff020000000005656a535365106c5e00000000000000000000", + "ac5300", + 2, + 2013719928, + "5de3e11871729fc147fa6530bb5f50e17a5015034b680c722e8c9a4e81b671dd" + ], + [ + "0728c606014c1fd6005ccf878196ba71a54e86cc8c53d6db500c3cc0ac369a26fac6fcbc21000000000453ac5365ba9668290182d7870100000000066a000053655100000000", + "65", + 0, + 1789961588, + "ab6baa6da3b2bc853868d166f8996ad31d63ef981179f9104f49968fd61c8427" + ], + [ + "a1134397034bf4067b6c81c581e2b73fb63835a08819ba24e4e92df73074bf773c94577df7000000000465525251ffffffff8b6608feaa3c1f35f49c6330a769716fa01c5c6f6e0cdc2eb10dfc99bbc21e77010000000952656aac005352655180a0bda4bc72002c2ea8262e26e03391536ec36867258cab968a6fd6ec7523b64fa1d8c001000000056a53ac6353ffffffff04dbeeed05000000000453650052cd5d0e0100000000026351104b2e0500000000066aac53ac5165283ca701000000000353525200000000", + "515151516552", + 1, + -324598676, + "6c9fb41040b5e3af0590f6982d177c48f5f47ec2030f98073445cd91f672d3d5" + ], + [ + "bcdafbae04aa18eb75855aeb1f5124f30044741351b33794254a80070940cb10552fa4fa8e0300000001acd0423fe6e3f3f88ae606f2e8cfab7a5ef87caa2a8f0401765ff9a47d718afcfb40c0099b0000000007ac656553ac6aac645308009d680202d600e492b31ee0ab77c7c5883ebad5065f1ce87e4dfe6453e54023a0010000000151ffffffffb9d818b14245899e1d440152827c95268a676f14c3389fc47f5a11a7b38b1bde03000000026300ffffffff03cda22102000000000751ac535263005100a4d20400000000045200536ac8bef405000000000500516563ac00000000", + "6553516a526a", + 1, + -2111409753, + "b9e5621e89a8dd0c8a418db4cbe6b5950db3cffb9e211e882c53f38566cc8002" + ], + [ + 
"ed3bb93802ddbd08cb030ef60a2247f715a0226de390c9c1a81d52e83f8674879065b5f87d03000000026552ffffffff04d2c5e60a21fb6da8de20bf206db43b720e2a24ce26779bca25584c3f765d1e0200000005656a6aac006e946ded025a811d04000000000751ac6352ac005143cfa3030000000005635200636a00000000", + "5352ac650065535300", + 1, + -668727133, + "e9995065e1fddef72a796eef5274de62012249660dc9d233a4f24e02a2979c87" + ], + [ + "59f4629d030fa5d115c33e8d55a79ea3cba8c209821f979ed0e285299a9c72a73c5bba00150200000002636affffffffd8aca2176df3f7a96d0dc4ee3d24e6cecde1582323eec2ebef9a11f8162f17ac00000000056565ac6553ffffffffeebc10af4f99c7a21cbc1d1074bd9f0ee032482a71800f44f26ee67491208e0403000000065352ac656351ffffffff0434e955040000000003515152caf2b305000000000365ac007b1473030000000002530033da970500000000060051536a5253bb08ab51", + "", + 2, + 396340944, + "c8fd48ce40676668a083c80e6deea3db48fd54dde8c8299f1428f1ac9a358bed" + ], + [ + "286e3eb7043902bae5173ac3b39b44c5950bc363f474386a50b98c7bdab26f98dc83449c4a020000000752ac6a00510051ffffffff4339cd6a07f5a5a2cb5815e5845da70300f5c7833788363bf7fe67595d3225520100000000fffffffff9c2dd8b06ad910365ffdee1a966f124378a2b8021065c8764f6138bb1e9513802000000045153ac6affffffff0370202aba7a68df85436ea7c945139513384ef391fa33d16020420b8ad40e9a0000000007005165526353acffffffff020c1907000000000003ac526a1b490b040000000000df1528f7", + "5353", + 3, + -1407529517, + "71a48b2592d7571ccbac9a8d792889cbe17ebc09d7fa2d44baecdd812093d4ce" + ], + [ + "2e245cf80179e2e95cd1b34995c2aff49fe4519cd7cee93ad7587f7f7e8105fc2dff206cd30200000008006a63516a655352350435a201d5ed2d020000000002526558552c89", + "0053", + 0, + -233917810, + "3928fe71d4d3d96864b32b177ac1af449266829feacc820aec14d301bfdc0333" + ], + [ + "33a98004029d262f951881b20a8d746c8c707ea802cd2c8b02a33b7e907c58699f97e42be80100000006ac53536552acdee04cc01d205fd8a3687fdf265b064d42ab38046d76c736aad8865ca210824b7c622ecf02000000070065006a536a6affffffff01431c5d010000000000270d48ee", + "", + 1, + 921554116, + 
"ff9d7394002f3f196ea25472ea6c46f753bd879a7244795157bb7235c9322902" + ], + [ + "aac18f2b02b144ed481557c53f2146ae523f24fcde40f3445ab0193b6b276c315dc2894d2300000000075165650000636a233526947dbffc76aec7db1e1baa6868ad4799c76e14794dcbaaec9e713a83967f6a65170200000003ac655127d518be01b652a30000000000015300000000", + "52ac5353", + 1, + 1559377136, + "59fc2959bb7bb24576cc8a237961ed95bbb900679d94da6567734c4390cb6ef5" + ], + [ + "5ab79881033555b65fe58c928883f70ce7057426fbdd5c67d7260da0fe8b1b9e6a2674cb850300000009ac516aac6aac006a6affffffffa5be9223b43c2b1a4d120b5c5b6ec0484f637952a3252181d0f8e813e76e11580200000000e4b5ceb8118cb77215bbeedc9a076a4d087bb9cd1473ea32368b71daeeeacc451ec209010000000005acac5153aced7dc34e02bc5d11030000000005ac5363006a5418580300000000045200636a00000000", + "5100", + 1, + 1927062711, + "e7c0e71008397a8e6ca596d1439a0b8434142c532ee912052f7c364a1b258af3" + ], + [ + "6c2c8fac0124b0b7d4b610c3c5b91dee32b7c927ac71abdf2d008990ca1ac40de0dfd530660300000004ac5253656bd7eada01d847ec000000000004ac52006af4232ec8", + "6a6a6a0051", + 0, + -340809707, + "fb51eb9d7e47d32ff2086205214f90c7c139e08c257a64829ae4d2b301071c6a" + ], + [ + "6e3880af031735a0059c0bb5180574a7dcc88e522c8b56746d130f8d45a52184045f96793e0100000007acac6a526a6553fffffffffe05f14cdef7d12a9169ec0fd37524b5fcd3295f73f48ca35a36e671da4a2f560000000007006a526a635163ffffffffdfbd869ac9e472640a84caf28bdd82e8c6797f42d03b99817a705a24fde2736600000000010090a090a503db956b04000000000752ac536a536a63358390010000000009656a5200525153ac65353ee204000000000663530052526aa6ad83fb", + "5351516300", + 2, + 222014018, + "57a34ddeb1bf36d28c7294dda0432e9228a9c9e5cc5c692db98b6ed2e218d825" + ], + [ + "8df1cd19027db4240718dcaf70cdee33b26ea3dece49ae6917331a028c85c5a1fb7ee3e4750200000007656a00510063636157988bc84d8d55a8ba93cdea001b9bf9d0fa65b5db42be6084b5b1e1556f3602f65d4d0100000004ac000052206c852902b2fb54030000000008ac5252536aacac5378c4a5050000000006acac535163532784439e", + "ac6a", + 0, + 1105620132, + 
"0ff3d774137adecc3c0cf90888944050ca9d9e7940ca65f3c3b3fd93bdcc153b" + ], + [ + "0e803682024f79337b25c98f276d412bc27e56a300aa422c42994004790cee213008ff1b8303000000080051ac65ac655165f421a331892b19a44c9f88413d057fea03c3c4a6c7de4911fe6fe79cf2e9b3b10184b1910200000005525163630096cb1c670398277204000000000253acf7d5d502000000000863536a6a636a5363381092020000000002ac6a911ccf32", + "6565", + 1, + -1492094009, + "13e7129bfe4915b7aac2f46970b42a22864128b29985c527bce333de6490252d" + ], + [ + "7d71669d03022f9dd90edac323cde9e56354c6804c6b8e687e9ae699f46805aafb8bcaa636000000000153ffffffff698a5fdd3d7f2b8b000c68333e4dd58fa8045b3e2f689b889beeb3156cecdb4903000000065253530051acc53f0aa821cdd69b473ec6e6cf45cf9b38996e1c8f52c27878a01ec8bb02e8cb31ad24e5000000000453530052ffffffff0447a23401000000000365535133aaa0030000000006515163656563057d110300000000056a6aacac52cf13b5000000000003526a5100000000", + "6a6a51", + 1, + -1349253507, + "a86f24204949bed01f885373e045b2b6419a2cd78606025a296b29076d3f072b" + ], + [ + "9ff618e60136f8e6bb7eabaaac7d6e2535f5fba95854be6d2726f986eaa9537cb283c701ff02000000026a65ffffffff012d1c090500000000076500ac6a516a652f9ad240", + "51515253635351ac", + 0, + 1571304387, + "6083b26d56c3bcb1f7e297d8ecf25aa9a344be79731fabac28fb9e70dc1d0af7" + ], + [ + "9fbd43ac025e1462ecd10b1a9182a8e0c542f6d1089322a41822ab94361e214ed7e1dfdd8a020000000263519d0437581538e8e0b6aea765beff5b4f3a4a202fca6e5d19b34c141078c6688f71ba5b8e0100000003ac6552ffffffff02077774050000000008655153655263ac6a0ae4e10100000000035152524c97136b", + "635152", + 0, + 1969622955, + "9401df7836c71e2b66bf7b9d185eb019a6d4e14a6dfcde3f4e2561a69b0a5a4f" + ], + [ + 
"0117c92004314b84ed228fc11e2999e657f953b6de3b233331b5f0d0cf40d5cc149b93c7b30300000005515263516a083e8af1bd540e54bf5b309d36ba80ed361d77bbf4a1805c7aa73667ad9df4f97e2da4100200000004006351524d04f2179455e794b2fcb3d214670001c885f0802e4b5e015ed13a917514a7618f5f332203000000076a536a51000063ecf029e65a4a009a5d67796c9f1eb358b0d4bd2620c8ad7330fb98f5a802ab92d0038b1002000000036a6551a184a88804b044900000000000086a5152535165526a33d1ab020000000001518e92320000000000002913df04000000000852ac6353525353ac8b19bfdf", + "0000510000", + 0, + 489433059, + "676ee365c8d71b7cc0fee1c6cddbcea8b3d4801907967892b7cd540f51dbdbc7" + ], + [ + "e7f5482903f98f0299e0984b361efb2fddcd9979869102281e705d3001a9d283fe9f3f3a1e02000000025365ffffffffcc5c7fe82feebad32a22715fc30bc584efc9cd9cadd57e5bc4b6a265547e676e0000000000579d21235bc2281e08bf5e7f8f64d3afb552839b9aa5c77cf762ba2366fffd7ebb74e4940000000004526363633df82cf40100982e05000000000453ac535300000000", + "acac", + 2, + -1362931214, + "046de666545330e50d53083eb78c9336416902f9b96c77cc8d8e543da6dfc7e4" + ], + [ + "09adb2e90175ca0e816326ae2dce7750c1b27941b16f6278023dbc294632ab97977852a09d030000000365006affffffff027739cf010000000005515163ac658a5bb601000000000653ac5151520011313cdc", + "ac", + 0, + -76831756, + "0adbc09696f17422eada9c7a29968dc3675e45e2ce3eca3db3d2a397a7b91d1e" + ], + [ + "f973867602e30f857855cd0364b5bbb894c049f44abbfd661d7ae5dbfeaafca89fac8959c2010000000452536a51ffffffffbeceb68a4715f99ba50e131884d8d20f4a179313691150adf0ebf29d05f877030300000005635200ac63ffffffff021fddb90000000000036a656322a177000000000008526500ac5100acac84839083", + "52ac53ac", + 0, + 1407879325, + "db0329439490efc64b7104d6d009b03fbc6fac597cf54fd786fbbb5fd73b92b4" + ], + [ + 
"fd22ebaa03bd588ad16795bea7d4aa7f7d48df163d75ea3afebe7017ce2f350f6a0c1cb0bb00000000076aac5153526363ffffffff488e0bb22e26a565d77ba07178d17d8f85702630ee665ec35d152fa05af3bda10200000003515163ffffffffeb21035849e85ad84b2805e1069a91bb36c425dc9c212d9bae50a95b6bfde12003000000005df262fd02b698480400000000076363636a6363ace23bf2010000000007655263635253534348c1da", + "006353526563516a00", + 0, + -1491036196, + "7810715dcb7dddf5a7b3d07d600ab6b783f5ffe9b798de359e30318ed219b0b9" + ], + [ + "130b462d01dd49fac019dc4442d0fb54eaa6b1c2d1ad0197590b7df26969a67abd7f3fbb4f0100000006ac65ac536563ffffffff0345f825000000000004ac53acac9d5816020000000000eff8e90500000000076a006552ac6a53a892dc55", + "0065ac530052", + 0, + 944483412, + "42e985a31427d6d955e3d1a6fd0b90bfc9813123184a0c52870be30b1514249b" + ], + [ + "f8e50c2604609be2a95f6d0f31553081f4e1a49a0a30777fe51eb1c596c1a9a92c053cf28c0300000009656a51ac5252630052fffffffff792ed0132ae2bd2f11d4a2aab9d0c4fbdf9a66d9ae2dc4108afccdc14d2b17001000000066a6563ac636a7bfb2fa116122b539dd6a2ab089f88f3bc5923e5050c8262c112ff9ce0a3cd51c6e3e84f02000000066551ac5352650d5e687ddf4cc9a497087cabecf74d236aa4fc3081c3f67b6d323cba795e10e7a171b725000000000752635351635100ffffffff02df5409020000000007ac6a53ac5151004156990200000000045163655200000000", + "ac53ac65005300", + 0, + -173065000, + "3b2e2a9eda4604ceaf2f624c4337e5e129bef26fa7d57ae3dced4d489b5558bc" + ], + [ + "18020dd1017f149eec65b2ec23300d8df0a7dd64fc8558b36907723c03cd1ba672bbb0f51d0300000003656a63ffffffff037cd7ae000000000008516a65005352ac65f1e4360400000000056353530053f118f0040000000007536363006500ac00000000", + "6351ac52ac", + 0, + -550412404, + "9cf6941ac66b36b66470096e1531c2ee48f52a989eab3113ff6603fb21d2c65d" + ], + [ + 
"b04154610363fdade55ceb6942d5e5a723323863b48a0cb04fdcf56210717955763f56b08d0300000009ac526a525151635151ffffffff93a176e76151a9eabdd7af00ef2af72f9e7af5ecb0aa4d45d00618f394cdd03c030000000074d818b332ebe05dc24c44d776cf9d275c61f471cc01efce12fd5a16464157f1842c65cb00000000066a0000ac6352d3c4134f01d8a1c0030000000005520000005200000000", + "5200656a656351", + 2, + -9757957, + "6e3e5ba77f760b6b5b5557b13043f1262418f3dd2ce7f0298b012811fc8ad5bc" + ], + [ + "9794b3ce033df7b1e32db62d2f0906b589eacdacf5743963dc2255b6b9a6cba211fadd0d4102000000050000650065ffffffffaae00687a6a4131152bbcaafedfaed461c86754b0bde39e2bef720e6d1860a0302000000070065516aac6552ffffffff50e4ef784d6230df7486e972e8918d919f005025bc2d9aacba130f58bed705670300000006526552656a52ffffffff02c6f1a9000000000006005251006363cf450c04000000000563510053ac00000000", + "ac0063515353", + 1, + 2063905082, + "e2f8dafd539773d4777e616143bcbccd472ce098b4a101cd49683425e5c3f93c" + ], + [ + "94533db7015e70e8df715066efa69dbb9c3a42ff733367c18c22ff070392f988f3b93920820000000006535363636300ce4dac3e03169af80300000000080065ac6a53ac65ac39c050020000000004ac6aacac708a02050000000005ac5251520000000000", + "6553", + 0, + -360458507, + "08fe618910ff2d1c270d96f9d1cb1c68e8b1ab1f5d26ad9388fbf8b1f58dfc2d" + ], + [ + "c8597ada04f59836f06c224a2640b79f3a8a7b41ef3efa2602592ddda38e7597da6c639fee0300000008005251635351acacffffffff4c518f347ee694884b9d4072c9e916b1a1f0a7fc74a1c90c63fdf8e5a185b6ae02000000007113af55afb41af7518ea6146786c7c726641c68c8829a52925e8d4afd07d8945f68e72303000000050065650063ffffffffc28e46d7598312c420e11dfaae12add68b4d85adb182ae5b28f8340185394b63000000000165ffffffff04dbabb7010000000000ee2f600000000000065265006a51acb62a27000000000009ac53515300ac006a6345fb7505000000000752516a0051636a00000000", + "", + 3, + 15199787, + "cd4f3268df778f8fb132366883781b308f267771d118024b629cec0a0685c7f8" + ], + [ + 
"1a28c4f702c8efaad96d879b38ec65c5283b5c084b819ad7db1c086e85e32446c7818dc7a90300000008656351536a525165fa78cef86c982f1aac9c5eb8b707aee8366f74574c8f42ef240599c955ef4401cf578be30200000001518893292204c430eb0100000000016503138a0300000000030053ac60e0eb01000000000452520063567c2d03000000000252006cf81e85", + "51525152", + 1, + 2118315905, + "23f71f7ecb680b47b6d6bd60112a2508b984ed08264369b80adefa8bcec2371c" + ], + [ + "c6c7a87003f772bcae9f3a0ac5e499000b68703e1804b9ddc3e73099663564d53ddc4e1c6e01000000076a536a6aac63636e3102122f4c30056ef8711a6bf11f641ddfa6984c25ac38c3b3e286e74e839198a80a34010000000165867195cd425821dfa2f279cb1390029834c06f018b1e6af73823c867bf3a0524d1d6923b0300000003ac5365ffffffff02fa4c49010000000007656a0052650053e001100400000000008836d972", + "ac526351ac", + 1, + 978122815, + "04a7e4200829e5191788b8fdbda836c7bd3882108a5928e9c392b12bf63a63c6" + ], + [ + "0ea580ac04c9495ab6af3b8d59108bb4194fcb9af90b3511c83f7bb046d87aedbf8423218e02000000075152acac0063639063d7dc25704e0caa5edde1c6f2dd137ded379ff597e055b2977b9c559b07a7134fcef2000000000200aca89e50181f86e9854ae3b453f239e2847cf67300fff802707c8e3867ae421df69274449402000000026365ffffffff47a4760c881a4d7e51c69b69977707bd2fb3bcdc300f0efc61f5840e1ac72cee0000000000ffffffff0460179a0200000000025352a5250c0500000000076565acac6365526c281e02000000000952635100ac006563654e55070400000000046552526500000000", + "526563acac53", + 2, + 1426964167, + "01d5b9ace8a53b2dbe73087b6d3e036f3c63d68e1c1ffccfeee38343d1a9ebc0" + ], + [ + "c33028b301d5093e1e8397270d75a0b009b2a6509a01861061ab022ca122a6ba935b8513320200000000ffffffff013bcf5a0500000000015200000000", + "", + 0, + -513413204, + "6b1459536f51482f5dbf42d7e561896557461e1e3b6bf67871e2b51faae2832c" + ], + [ + "43b2727901a7dd06dd2abf690a1ccedc0b0739cb551200796669d9a25f24f71d8d101379f50300000000ffffffff0418e031040000000000863d770000000000085352ac526563ac5174929e040000000004ac65ac00ec31ac0100000000036a515300000000", + "65", + 0, + -492874289, + 
"80a102bb4b5936218b262722a40f3df2b092689d36b54641a8a07a0629559d04" + ], + [ + "4763ed4401c3e6ab204bed280528e84d5288f9cac5fb8a2e7bd699c7b98d4df4ac0c40e55303000000056a6aac5165ffffffff015b57f80400000000046a63535100000000", + "ac5153", + 0, + -592611747, + "849033a2321b5755e56ef4527ae6f51e30e3bca50149d5707368479723d744f8" + ], + [ + "d24f647b02f71708a880e6819a1dc929c1a50b16447e158f8ff62f9ccd644e0ca3c592593702000000050053536a00ffffffff67868cd5414b6ca792030b18d649de5450a456407242b296d936bcf3db79e07b02000000005af6319c016022f50100000000036a516300000000", + "6a526353516a6a", + 0, + 1350782301, + "8556fe52d1d0782361dc28baaf8774b13f3ce5ed486ae0f124b665111e08e3e3" + ], + [ + "fe6ddf3a02657e42a7496ef170b4a8caf245b925b91c7840fd28e4a22c03cb459cb498b8d603000000065263656a650071ce6bf8d905106f9f1faf6488164f3decac65bf3c5afe1dcee20e6bc3cb6d052561985a030000000163295b117601343dbb0000000000026563dba521df", + "", + 1, + -1696179931, + "d9684685c99ce48f398fb467a91a1a59629a850c429046fb3071f1fa9a5fe816" + ], + [ + "c61523ef0129bb3952533cbf22ed797fa2088f307837dd0be1849f20decf709cf98c6f032f03000000026563c0f1d378044338310400000000066363516a5165a14fcb0400000000085163536a6a0053657271d60200000000001d953f0500000000010000000000", + "53516353005153", + 0, + 1141615707, + "c9baf4ad5cb4d9487325e0560015ade3aaa5862caad30a578bf1b1e1201c5a15" + ], + [ + "ba3dac6c0182562b0a26d475fe1e36315f0913b6869bdad0ecf21f1339a5fcbccd32056c840200000000ffffffff04300351050000000000220ed405000000000751ac636565ac53dbbd19020000000007636363ac6a52acbb005a0500000000016abd0c78a8", + "63006a635151005352", + 0, + 1359658828, + "32ebab9ecff27e8eb1cd55b2f99c5a1358ab2770b967bb3b204cabf4f694dc06" + ], + [ + "ac27e7f5025fc877d1d99f7fc18dd4cadbafa50e34e1676748cc89c202f93abf36ed46362101000000026300ffffffff958cd5381962b765e14d87fc9524d751e4752dd66471f973ed38b9d562e525620100000003006500ffffffff02b67120050000000004ac51516adc330c0300000000015200000000", + "656352", + 1, + 15049991, + 
"f3374253d64ac264055bdbcc32e27426416bd595b7c7915936c70f839e504010" + ], + [ + "edb30140029182b80c8c3255b888f7c7f061c4174d1db45879dca98c9aab8c8fed647a6ffc03000000076a535100526300ffffffff82f65f261db62d517362c886c429c8fbbea250bcaad93356be6f86ba573e9d930100000000ffffffff04daaf150400000000016a86d1300100000000096a6353535252ac5165d4ddaf0000000000005f1c6201000000000000000000", + "6a6a00ac", + 0, + -2058017816, + "8c92ba3a1c12176fde460f5c6e64a8051ac9a7cf3227205f4f12abf455792743" + ], + [ + "7e50207303146d1f7ad62843ae8017737a698498d4b9118c7a89bb02e8370307fa4fada41d000000000753006300005152b7afefc85674b1104ba33ef2bf37c6ed26316badbc0b4aa6cb8b00722da4f82ff3555a6c020000000900ac656363ac51ac52ffffffff93fab89973bd322c5d7ad7e2b929315453e5f7ada3072a36d8e33ca8bebee6e0020000000200ac930da52b04384b04000000000004650052ac435e380200000000066a6a5152636aa94947050000000005006a525252af8ba90100000000086565ac526353536a279b17ad", + "acac005263536aac63", + 1, + -34754133, + "2ca4ef0aa28999eda6806954e067800103f8afd4d441bca5de983d90ef25a4be" + ], + [ + "c05764f40244fb4ebe4c54f2c5298c7c798aa90e62c29709acca0b4c2c6ec08430b26167440100000007ac6a6565005253ffffffffc02c2418f398318e7f34a3cf669d034eef2111ea95b9f0978b01493293293a870100000000e563e2e00238ee8d040000000001ac03fb060200000000076500ac656a516aa37f5534", + "526a0065", + 1, + -2033176648, + "60f9b69157ab40d2cc46ed69a66f5d0092f3d66e60518cd8f66b5a5410e35b93" + ], + [ + "5a59e0b9040654a3596d6dab8146462363cd6549898c26e2476b1f6ae42915f73fd9aedfda00000000026363ffffffff9ac9e9ca90be0187be2214251ff08ba118e6bf5e2fd1ba55229d24e50a510d53010000000165ffffffff41d42d799ac4104644969937522873c0834cc2fcdab7cdbecd84d213c0e96fd60000000000ffffffffd838db2c1a4f30e2eaa7876ef778470f8729fcf258ad228b388df2488709f8410300000000fdf2ace002ceb6d903000000000265654c1310040000000003ac00657e91c0ec", + "536a63ac", + 0, + 82144555, + "98ccde2dc14d14f5d8b1eeea5364bd18fc84560fec2fcea8de4d88b49c00695e" + ], + [ + 
"156ebc8202065d0b114984ee98c097600c75c859bfee13af75dc93f57c313a877efb09f230010000000463536a51ffffffff81114e8a697be3ead948b43b5005770dd87ffb1d5ccd4089fa6c8b33d3029e9c03000000066a5251656351ffffffff01a87f140000000000050000ac51ac00000000", + "00", + 0, + -362221092, + "a903c84d8c5e71134d1ab6dc1e21ac307c4c1a32c90c90f556f257b8a0ec1bf5" + ], + [ + "15e37793023c7cbf46e073428908fce0331e49550f2a42b92468827852693f0532a01c29f70200000007005353636351acffffffff38426d9cec036f00eb56ec1dcd193647e56a7577278417b8a86a78ac53199bc403000000056353006a53ffffffff04a25ce1030000000008005365656a526a63c8eff7030000000004526353537ab6db0200000000016a11a3fa02000000000551acac526500000000", + "53ac6a6a6551", + 0, + 1117532791, + "9e317a57a2e60095fcee7a1ce0e5cbb0246890ab08e768146f4c887696bc5612" + ], + [ + "f7a09f10027250fc1b70398fb5c6bffd2be9718d3da727e841a73596fdd63810c9e4520a6a010000000963ac516a636a65acac1d2e2c57ab28d311edc4f858c1663972eebc3bbc93ed774801227fda65020a7ec1965f780200000005ac5252516a8299fddc01dcbf7200000000000463ac6551960fda03", + "65ac51", + 1, + 2017321737, + "9c5fa02abfd34d0f9dec32bf3edb1089fca70016debdb41f4f54affcb13a2a2a" + ], + [ + "6d97a9a5029220e04f4ccc342d8394c751282c328bf1c132167fc05551d4ca4da4795f6d4e02000000066a0052525165ffffffff9516a205e555fa2a16b73e6db6c223a9e759a7e09c9a149a8f376c0a7233fa1b0100000005ac5163ac6affffffff04868aed04000000000652ac65ac536a396edf01000000000044386c0000000000066a5363655200894d480100000000008ebefc23", + "6351526aac51", + 1, + 1943666485, + "ac9b46f03e0858377fff5f395acf3f08702c8a41484d84a31a088ff3c19a982a" + ], + [ + "8e3fddfb028d9e566dfdda251cd874cd3ce72e9dde837f95343e90bd2a93fe21c5daeb5eed01000000045151525140517dc818181f1e7564b8b1013fd68a2f9a56bd89469686367a0e72c06be435cf99db750000000003635251ffffffff01c051780300000000066552ac6a65ac099766eb", + "51636a5251", + 1, + 1296295812, + "370f619d5471ed61c9839befe350bd7cb313cbb5306cab8d388ceb9010194061" + ], + [ + 
"a603f37b02a35e5f25aae73d0adc0b4b479e68a734cf722723fd4e0267a26644c36faefdab0200000000ffffffff43374ad26838bf733f8302585b0f9c22e5b8179888030de9bdda180160d770650200000001004c7309ce01379099040000000005526552536500000000", + "005153", + 0, + 1409936559, + "4ca73da4fcd5f1b10da07998706ffe16408aa5dff7cec40b52081a6514e3827e" + ], + [ + "9eeedaa8034471a3a0e3165620d1743237986f060c4434f095c226114dcb4b4ec78274729f03000000086a5365510052ac6afb505af3736e347e3f299a58b1b968fce0d78f7457f4eab69240cbc40872fd61b5bf8b120200000002ac52df8247cf979b95a4c97ecb8edf26b3833f967020cd2fb25146a70e60f82c9ee4b14e88b103000000008459e2fa0125cbcd05000000000000000000", + "525352006353516a", + 0, + -1832576682, + "fb018ae54206fdd20c83ae5873ec82b8e320a27ed0d0662db09cda8a071f9852" + ], + [ + "05921d7c048cf26f76c1219d0237c226454c2a713c18bf152acc83c8b0647a94b13477c07f0300000003ac526afffffffff2f494453afa0cabffd1ba0a626c56f90681087a5c1bd81d6adeb89184b27b7402000000036a6352ffffffff0ad10e2d3ce355481d1b215030820da411d3f571c3f15e8daf22fe15342fed04000000000095f29f7b93ff814a9836f54dc6852ec414e9c4e16a506636715f569151559100ccfec1d100000000055263656a53ffffffff04f4ffef010000000005ac6a6aac6a0e6689040000000004536a5352e364d00500000000086553636365525153807e00010000000003526a63f18003e3", + "6363ac51", + 3, + -375891099, + "bfeb34159406429091ebe4d68ec673f65e6a54798a594569964495783011645f" + ], + [ + "b9b44d9f04b9f15e787d7704e6797d51bc46382190c36d8845ec68dfd63ee64cf7a467b21e00000000086aac00530052636aba1bcb110a80c5cbe073f12c739e3b20836aa217a4507648d133a8eedd3f02cb55c132b203000000076a000063526352b1c288e3a9ff1f2da603f230b32ef7c0d402bdcf652545e2322ac01d725d75f5024048ad0100000000ffffffffffd882d963be559569c94febc0ef241801d09dc69527c9490210f098ed8203c700000000046a0063009109298d01719d9a0300000000056a52006365d7894c5b", + "ac6351650063636a", + 3, + -622355349, + "4b44026001afd9caac7b2e830fea359eec537d7fe5e4fc06d21d982f783cfca3" + ], + [ + 
"ff60473b02574f46d3e49814c484081d1adb9b15367ba8487291fc6714fd6e3383d5b335f001000000026a6ae0b82da3dc77e5030db23d77b58c3c20fa0b70aa7d341a0f95f3f72912165d751afd57230300000008ac536563516a6363ffffffff04f86c0200000000000453ac636ab13111000000000003510065f0d3f305000000000651516a65516a730a3a010000000002515200000000", + "ac6a", + 1, + 1895032314, + "93c76c0ef11fe28a4f36546d4281a5b26185f7ea00395dd72630d461f285370d" + ], + [ + "f218026204f4f4fc3d3bd0eada07c57b88570d544a0436ae9f8b753792c0c239810bb30fbc0200000002536affffffff8a468928d6ec4cc10aa0f73047697970e99fa64ae8a3b4dca7551deb0b639149010000000751520052650051ffffffffa98dc5df357289c9f6873d0f5afcb5b030d629e8f23aa082cf06ec9a95f3b0cf0000000000ffffffffea2c2850c5107705fd380d6f29b03f533482fd036db88739122aac9eff04e0aa010000000365536a03bd37db034ac4c4020000000007515152655200ac33b27705000000000151efb71e0000000000007b65425b", + "515151", + 3, + -1772252043, + "de35c84a58f2458c33f564b9e58bc57c3e028d629f961ad1b3c10ee020166e5a" + ], + [ + "48e7d42103b260b27577b70530d1ac2fed2551e9dd607cbcf66dca34bb8c03862cf8f5fd5401000000065151526aac00ffffffff1e3d3b841552f7c6a83ee379d9d66636836673ce0b0eda95af8f2d2523c91813030000000665acac006365ffffffff388b3c386cd8c9ef67c83f3eaddc79f1ff910342602c9152ffe8003bce51b28b0100000008636363006a636a52ffffffff04b8f67703000000000852005353ac6552520cef7202000000000551516352005096d6030000000005516a005100662582020000000001ac6c137280", + "6a65", + 1, + 1513618429, + "0f4d20fe1b05fd48b8e7f9b6b5be8b308e320356da0559e6a80e4a810ec189b6" + ], + [ + "91ebc4cf01bc1e068d958d72ee6e954b196f1d85b3faf75a521b88a78021c543a06e0562790000000001657c12df0503832121030000000000cc41a60100000000045263516540a9510500000000046365acac00000000", + "526a0065636a6a6aac", + 0, + -614046478, + "cb6e63ffc060ffe615a29ee1049f213eaebe89ab016fba01ca17cdd84275928d" + ], + [ + "3cd4474201be7a6c25403bf00ca62e2aa8f8f4f700154e1bb4d18c66f7bb7f9b975649f0dc0100000006535151535153ffffffff01febbeb000000000006005151006aac00000000", + "", + 0, + -1674687131, + 
"6b77ca70cc452cc89acb83b69857cda98efbfc221688fe816ef4cb4faf152f86" + ], + [ + "92fc95f00307a6b3e2572e228011b9c9ed41e58ddbaefe3b139343dbfb3b34182e9fcdc3f50200000001ac847bf1935fde8bcfe41c7dd99683289292770e7f163ad09deff0e0665ed473cd2b56b0f403000000055165516351294dab312dd87b9327ce2e95eb44b712cfae0e50fda15b07816c8282e8365b643390eaab01000000026aacffffffff016e0b6b040000000001ac00000000", + "650065acac005300", + 2, + -1885164012, + "bd7d26bb3a98fc8c90c972500618bf894cb1b4fe37bf5481ff60eef439d3b970" + ], + [ + "4db591ab018adcef5f4f3f2060e41f7829ce3a07ea41d681e8cb70a0e37685561e4767ac3b0000000004000052acd280e63601ae6ef20000000000036a636326c908f7", + "ac6a51526300630052", + 0, + 862877446, + "355ccaf30697c9c5b966e619a554d3323d7494c3ea280a9b0dfb73f953f5c1cb" + ], + [ + "503fd5ef029e1beb7b242d10032ac2768f9a1aca0b0faffe51cec24770664ec707ef7ede4f01000000045253ac53375e350cc77741b8e96eb1ce2d3ca91858c052e5f5830a0193200ae2a45b413dda31541f0000000003516553ffffffff0175a5ba0500000000015200000000", + "6a6551005365", + 1, + 1603081205, + "353ca9619ccb0210ae18b24d0e57efa7abf8e58fa6f7102738e51e8e72c9f0c4" + ], + [ + "c80abebd042cfec3f5c1958ee6970d2b4586e0abec8305e1d99eb9ee69ecc6c2cbd76374380000000007ac53006300ac510acee933b44817db79320df8094af039fd82111c7726da3b33269d3820123694d849ee5001000000046a65526562699bea8530dc916f5d61f0babea709dac578774e8a4dcd9c640ec3aceb6cb2443f24f302000000020063ea780e9e57d1e4245c1e5df19b4582f1bf704049c5654f426d783069bcc039f2d8fa659f03000000075153635200006a8d00de0b03654e8500000000000363635178ebbb0400000000045100636a239f1d030000000005006300536500000000", + "6565ac515100", + 3, + 1460851377, + "a28742f90c8b9590b7f0c7f747abb6092b4256e3d62cb67fd21884655d86a502" + ], + [ + 
"0337b2d5043eb6949a76d6632b8bb393efc7fe26130d7409ef248576708e2d7f9d0ced9d3102000000075352636a5163007034384dfa200f52160690fea6ce6c82a475c0ef1caf5c9e5a39f8f9ddc1c8297a5aa0eb02000000026a51ffffffff38e536298799631550f793357795d432fb2d4231f4effa183c4e2f61a816bcf0030000000463ac5300706f1cd3454344e521fde05b59b96e875c8295294da5d81d6cc7efcfe8128f150aa54d6503000000008f4a98c704c1561600000000000072cfa6000000000000e43def01000000000100cf31cc0500000000066365526a6500cbaa8e2e", + "", + 3, + 2029506437, + "7615b4a7b3be865633a31e346bc3db0bcc410502c8358a65b8127089d81b01f8" + ], + [ + "59f6cffd034733f4616a20fe19ea6aaf6abddb30b408a3a6bd86cd343ab6fe90dc58300cc90200000000ffffffffc835430a04c3882066abe7deeb0fa1fdaef035d3233460c67d9eabdb05e95e5a02000000070065ac53535300ffffffff4b9a043e89ad1b4a129c8777b0e8d87a014a0ab6a3d03e131c27337bbdcb43b402000000056a5100ac6ad9e9bf62014bb118010000000001526cbe484f", + "52635265", + 0, + 2103515652, + "4f2ccf981598639bec57f885b4c3d8ea8db445ea6e61cfd45789c69374862e5e" + ], + [ + "cbc79b10020b15d605680a24ee11d8098ad94ae5203cb6b0589e432832e20c27b72a926af2030000000565516a53acbb854f3146e55c508ece25fa3d99dbfde641a58ed88c051a8a51f3dacdffb1afb827814b02000000026352c43e6ef30302410a020000000000ff4bd9010000000005510063000008aa8e0400000000085265526565ac5365c52c8a77", + "53526aac0051", + 0, + 202662340, + "cc48e8f7f8908dfb6fd0180127067dbc8d86fb4c26462f6e8821d3d32b59eefa" + ], + [ + "7c07419202fa756d29288c57b5c2b83f3c847a807f4a9a651a3f6cd6c46034ae0aa3a7446b02000000036a6365ffffffff9da83cf4219bb96c76f2d77d5df31c1411a421171d9b59ec02e5c1218f29935403000000008c13879002f8b1ac0400000000086a63536a636553653c584f02000000000000000000", + "ac53656363", + 1, + -1038419525, + "4a74f365a161bc6c9bddd249cbd70f5dadbe3de70ef4bd745dcb6ee1cd299fbd" + ], + [ + 
"351cbb57021346e076d2a2889d491e9bfa28c54388c91b46ee8695874ad9aa576f1241874d02000000076563525300516affffffffe13e61b8880b8cd52be4a59e00f9723a4722ea58013ec579f5b3693b9e115b1100000000086363ac5252635351ffffffff027fee020400000000066a5200006a65b85f130200000000066a526300535200000000", + "6a65", + 1, + 586415826, + "69009f5b17ae2a8ad2b8b5fad4e3355bb3c77b93bd5c768f1782e5c95945d0c4" + ], + [ + "a8252ea903f1e8ff953adb16c1d1455a5036222c6ea98207fc21818f0ece2e1fac310f9a0100000000095163ac635363ac0000be6619e9fffcde50a0413078821283ce3340b3993ad00b59950bae7a9f931a9b0a3a035f010000000463005300b8b0583fbd6049a1715e7adacf770162811989f2be20af33f5f60f26eba653dc26b024a00000000006525351636552ffffffff046d2acc030000000002636a9a2d43050000000006006500516553ecf63204000000000052b9ed050000000007acacac5365656500000000", + "6553635253636a51", + 2, + 1442639059, + "4cbb84b6b3b1c388c2494dcc035eb308908aab1e25e0bb7b157e209c02d9d7e1" + ], + [ + "2f1a425c0471a5239068c4f38f9df135b1d24bf52d730d4461144b97ea637504495aec360801000000055300515365c71801dd1f49f376dd134a9f523e0b4ae611a4bb122d8b26de66d95203f181d09037974300000000025152ffffffff9bdcea7bc72b6e5262e242c94851e3a5bf8f314b3e5de0e389fc9e5b3eadac030000000009525265655151005153ffffffffdbb53ce99b5a2320a4e6e2d13b01e88ed885a0957d222e508e9ec8e4f83496cb0200000006635200ac63ac04c96237020cc5490100000000080000516a51ac6553074a360200000000025152225520ca", + "655165ac65516a", + 1, + -489869549, + "9bc5bb772c553831fb40abe466074e59a469154679c7dee042b8ea3001c20393" + ], + [ + "ef3acfd4024defb48def411b8f8ba2dc408dc9ee97a4e8bde4d6cb8e10280f29c98a6e8e9103000000035100513d5389e3d67e075469dfd9f204a7d16175653a149bd7851619610d7ca6eece85a516b2df0300000005516aac6552ca678bdf02f477f003000000000057e45b0300000000055252525252af35c20a", + "5165ac53", + 1, + -1900839569, + "78eb6b24365ac1edc386aa4ffd15772f601059581c8776c34f92f8a7763c9ccf" + ], + [ + 
"ff4468dc0108475fc8d4959a9562879ce4ab4867a419664bf6e065f17ae25043e6016c70480100000000ffffffff02133c6f0400000000000bd0a8020000000004006a520035afa4f6", + "51ac65", + 0, + -537664660, + "f6da59b9deac63e83728850ac791de61f5dfcaeed384ebcbb20e44afcd8c8910" + ], + [ + "4e8594d803b1d0a26911a2bcdd46d7cbc987b7095a763885b1a97ca9cbb747d32c5ab9aa91030000000353ac53a0cc4b215e07f1d648b6eeb5cdbe9fa32b07400aa773b9696f582cebfd9930ade067b2b2000000000400656500fc99833216b8e27a02defd9be47fafae4e4a97f52a9d2a210d08148d2a4e5d02730bcd460100000004516351ac37ce3ae1033baa55040000000006006a636a63acc63c990400000000025265eb1919030000000005656a6a516a00000000", + "", + 1, + -75217178, + "04c5ee48514cd033b82a28e336c4d051074f477ef2675ce0ce4bafe565ee9049" + ], + [ + "a88830a7023f13ed19ab14fd757358eb6af10d6520f9a54923a6d613ac4f2c11e249cda8aa030000000551630065acffffffff8f5fe0bc04a33504c4b47e3991d25118947a0261a9fa520356731eeabd561dd3020000000163ffffffff038404bd0100000000065153516a6a63d33a5601000000000263004642dc020000000009655152acac636352004be6f3af", + "5253536565006a6a", + 0, + 1174417836, + "9d5c816ab81a761d2f573c323d3ab55c9401bb001eb4ef769eed69516e4f7e6c" + ], + [ + "44e1a2b4010762af23d2027864c784e34ef322b6e24c70308a28c8f2157d90d17b99cd94a401000000085163656565006300ffffffff0198233d020000000002000000000000", + "52525153656365", + 0, + 1119696980, + "d9096de94d70c6337da6202e6e588166f31bff5d51bb5adc9468594559d65695" + ], + [ + "44ca65b901259245abd50a745037b17eb51d9ce1f41aa7056b4888285f48c6f26cb97b7a25020000000452636363ffffffff047820350400000000030053ac14f3e603000000000552635100630ce66c03000000000001bdc704000000000765650065ac51ac3e886381", + "51", + 0, + -263340864, + "42b52a07a3c088ce7ec410b81144738aa546b9d61ec6ba38372fc455ec5d623d" + ], + [ + "cfa147d2017fe84122122b4dda2f0d6318e59e60a7207a2d00737b5d89694d480a2c26324b0000000006006351526552ffffffff0456b5b804000000000600516a525363166633000000000003655363254c0e020000000007526a6a00525151097c1b020000000009656a52ac6300530065ad0d6e50", + "6a535165ac6a536500", + 
0, + -574683184, + "e9ada180722bbd10682062eda7ab2e8631c6ae9e72044a958c1d28d482bd5816" + ], + [ + "91c5d5f6022fea6f230cc4ae446ce040d8313071c5ac1749c82982cc1988c94cb1738aa48503000000016a19e204f30cb45dd29e68ff4ae160da037e5fc93538e21a11b92d9dd51cf0b5efacba4dd70000000005656a6aac51ffffffff03db126905000000000853006a536563636a36a273030000000006656a52656552b03ede00000000000352516500000000", + "530052526a00", + 1, + 1437328441, + "b99434064834b3027a2c60b7cdbc92797d4d030cc43293eff99462d2807f68b6" + ], + [ + "03f20dc202c886907b607e278731ebc5d7373c348c8c66cac167560f19b341b782dfb634cb03000000056a51ac6a63ea3e8de7adb9f599c9caba95aa3fa852e947fc88ed97ee50e0a0ec0d14d164f44c0115c101000000035153516fdd679e0414edbd000000000005ac636a53512021f2040000000007006a0051536a52c73db2050000000005525265ac5369046e000000000002006a1ef7bd1e", + "52656a", + 0, + 1360223035, + "c8673f91e4ef5861be4d2f0b8cfe2bd1fcc1d5332273f95ce37b0abf70bd99c1" + ], + [ + "d9611140036881b61e01627078512bc3378386e1d4761f959d480fdb9d9710bebddba2079d020000000563536a5153819271b41e228f5b04daa1d4e72c8e1955230accd790640b81783cfc165116a9f535a74c000000000163ffffffffa2e7bb9a28e810624c251ff5ba6b0f07a356ac082048cf9f39ec036bba3d431a02000000076a000000ac65acffffffff01678a820000000000085363515153ac635100000000", + "535353", + 2, + -82213851, + "52b9e0778206af68998cbc4ebdaad5a9469e04d0a0a6cef251abfdbb74e2f031" + ], + [ + "98b3a0bf034233afdcf0df9d46ac65be84ef839e58ee9fa59f32daaa7d684b6bdac30081c60200000004636351acffffffffc71cf82ded4d1593e5825618dc1d5752ae30560ecfaa07f192731d68ea768d0f0100000006650052636563f3a2888deb5ddd161430177ce298242c1a86844619bc60ca2590d98243b5385bc52a5b8f00000000085365acac520052ac50d4722801c3b8a60300000000035165517e563b65", + "51", + 1, + -168940690, + "b6b684e2d2ecec8a8dce4ed3fc1147f8b2e45732444222aa8f52d860c2a27a9d" + ], + [ + 
"97be4f7702dc20b087a1fdd533c7de762a3f2867a8f439bddf0dcec9a374dfd0276f9c55cc0300000000cdfb1dbe6582499569127bda6ca4aaff02c132dc73e15dcd91d73da77e92a32a13d1a0ba020000000151ffffffff048cfbe202000000000900516351515363ac535128ce0100000000056aac53656ac84e8302000000000763536a536a6552f051230500000000066aac535153510848d813", + "ac51", + 0, + 229541474, + "e5da9a416ea883be1f8b8b2d178463633f19de3fa82ae25d44ffb531e35bdbc8" + ], + [ + "085b6e04040b5bff81e29b646f0ed4a45e05890a8d32780c49d09643e69cdccb5bd81357670100000000ffffffffa5c981fe758307648e783217e3b4349e31a557602225e237f62b636ec26df1a803000000036500524792e1da2930cc90822a8d2a0a91ea343317bce5356b6aa8aae6c3956076aa33a5351a9c0300000003ac5265e27ddbcd472a2f13325cc6be40049d53f3e266ac082172f17f6df817db1936d9ff48c02b000000000152ffffffff021aa767050000000007535363516351ac14d584000000000001aca4d136cc", + "6a525300536352536a", + 0, + -1398925877, + "d94006873aaff72211399e719839923ed37469a9560e1925684de25e21d467d4" + ], + [ + "eec32fff03c6a18b12cd7b60b7bdc2dd74a08977e53fdd756000af221228fe736bd9c42d870100000007005353ac515265ffffffff037929791a188e9980e8b9cc154ad1b0d05fb322932501698195ab5b219488fc020000000600635100656a0bfc176aa7e84f771ea3d45a6b9c24887ceea715a0ff10ede63db8f089e97d927075b4f100000000025163ffffffff02eb933c000000000000262c420000000000036563632549c2b6", + "6352", + 2, + 1480445874, + "ff8a4016dfdd918f53a45d3a1f62b12c407cd147d68ca5c92b7520e12c353ff5" + ], + [ + "98ea7eac0313d9fb03573fb2b8e718180c70ce647bebcf49b97a8403837a2556cb8c9377f30000000004ac53ac65ffffffff8caac77a5e52f0d8213ef6ce998bedbb50cfdf108954771031c0e0cd2a78423900000000010066e99a44937ebb37015be3693761078ad5c73aa73ec623ac7300b45375cc8eef36087eb80000000007515352acac5100ffffffff0114a51b02000000000000000000", + "6aac", + 0, + 243527074, + "bad77967f98941af4dd52a8517d5ad1e32307c0d511e15461e86465e1b8b5273" + ], + [ + 
"3ab70f4604e8fc7f9de395ec3e4c3de0d560212e84a63f8d75333b604237aa52a10da17196000000000763526a6553ac63a25de6fd66563d71471716fe59087be0dde98e969e2b359282cf11f82f14b00f1c0ac70f02000000050052516aacdffed6bb6889a13e46956f4b8af20752f10185838fd4654e3191bf49579c961f5597c36c0100000004ac636363c3a1785bae5b8a1b4be5d0cbfadc240b4f7acaa7dfed6a66e852835df5eb9ac3c553766801000000036a65630733b7530218569602000000000852006a6a6a51ac52777f06030000000006ac0063530052c08267c9", + "000000536aac0000", + 1, + 1919096509, + "3b532797b54394c9e750f26c50be78d6a87adf6de9d6f9c14cc8039436ba3c10" + ], + [ + "bdb6b4d704af0b7234ced671c04ba57421aba7ead0a117d925d7ebd6ca078ec6e7b93eea6600000000026565ffffffff3270f5ad8f46495d69b9d71d4ab0238cbf86cc4908927fbb70a71fa3043108e6010000000700516a65655152ffffffff6085a0fdc03ae8567d0562c584e8bfe13a1bd1094c518690ebcb2b7c6ce5f04502000000085251530052536a53a576a37f2c516aad9911f687fe83d0ae7983686b6269b4dd54701cb5ce9ec91f0e6828390300000000ffffffff04cc76cc020000000002656a01ffb702000000000153534610040000000008ac006565516a00521f55f5040000000000389dfee9", + "6a525165", + 0, + 1336204763, + "c54f81901c4ed1c32d9779f3a8caa26ece5fb432ecd8fc354bf1615dfa9a8f7f" + ], + [ + "54258edd017d22b274fbf0317555aaf11318affef5a5f0ae45a43d9ca4aa652c6e85f8a040010000000853ac655251656500ffffffff03321d450000000000085265526a51526a529ede8b030000000003635151ce6065020000000001534c56ec1b", + "acac", + 0, + 2094130012, + "110d90fea9470dfe6c5048f45c3af5e8cc0cb77dd58fd13d338268e1c24b1ccc" + ], + [ + "ce0d322e04f0ffc7774218b251530a7b64ebefca55c90db3d0624c0ff4b3f03f918e8cf6f60300000003656500ffffffff9cce943872da8d8af29022d0b6321af5fefc004a281d07b598b95f6dcc07b1830200000004515351ac8d926410e69d76b7e584aad1470a97b14b9c879c8b43f9a9238e52a2c2fefc2001c56af80100000003005253cd2cd1fe192ce3a93b5478af82fa250c27064df82ba416dfb0debf4f0eb307a746b6928901000000086500acac6a0063514214524502947efc0200000000035251652c40340100000000086a6a52000052656a5231c54c", + "51", + 2, + -2090320538, + 
"90e78966cbb949a82119d6224b7c8e01351e6a254c2754bb652474f78dbf31e8" + ], + [ + "47ac54940313430712ebb32004679d3a512242c2b33d549bf5bbc8420ec1fd0850ed50eb6d0300000008536aac6a65acac51ffffffffb843e44266ce2462f92e6bff54316661048c8c17ecb092cb493b39bfca9117850000000001519ab348c05e74ebc3f67423724a3371dd99e3bceb4f098f8860148f48ad70000313c4c223000000000653006565656512c2d8dc033f3c97010000000002636aa993aa010000000005526365526ab7cf560300000000076a0065ac6a526500000000", + "0053525353006a", + 2, + 59531991, + "dac6f5660bb1e47992c421de295c569136dd6336c0df27979854a8d5c5b253db" + ], + [ + "233cd90b043916fc41eb870c64543f0111fb31f3c486dc72457689dea58f75c16ae59e9eb2000000000500536a6a6affffffff9ae30de76be7cd57fb81220fce78d74a13b2dbcad4d023f3cadb3c9a0e45a3ce000000000965ac6353ac5165515130834512dfb293f87cb1879d8d1b20ebad9d7d3d5c3e399a291ce86a3b4d30e4e32368a9020000000453005165ffffffff26d84ae93eb58c81158c9b3c3cbc24a84614d731094f38d0eea8686dec02824d0300000004636a65acf02c784001a0bd5d0300000000080065535165ac516a416ef503", + "", + 1, + -295106477, + "a4d897e990b5815939e2e5feb548879a13fbfe1c00b4bdade5eee79aeee2e1d7" + ], + [ + "9200e26b03ff36bc4bf908143de5f97d4d02358db642bd5a8541e6ff709c420d1482d471b7000000000665536a636553ffffffff61ba6d15f5453b5079fb494af4c48de713a0c3e7f6454d7450074a2a80cb6d880300000006ac6a005165515dfb7574fbce822892c2acb5d978188b1d65f969e4fe874b08db4c791d176113272a5cc10100000000ffffffff0420958d000000000009ac63516a0063516353dd885505000000000465ac00007b79e901000000000066d8bf010000000005525252006a00000000", + "ac5152", + 0, + 2089531339, + "89ec7fab7cfe7d8d7d96956613c49dc48bf295269cfb4ea44f7333d88c170e62" + ], + [ + "45f335ba01ce2073a8b0273884eb5b48f56df474fc3dff310d9706a8ac7202cf5ac188272103000000025363ffffffff049d8595020000000002656a8e98b1030000000002ac51f3a80603000000000752535151ac00000306e30300000000020051b58b2b3a", + "", + 0, + 1899564574, + "f2ba7ad9407ff28fd063c5c8cf3dbe2053380973c9b1128092af14585456d73f" + ], + [ + 
"d8f652a6043b4faeada05e14b81756cd6920cfcf332e97f4086961d49232ad6ffb6bc6c097000000000453526563ffffffff1ea4d60e5e91193fbbc1a476c8785a79a4c11ec5e5d6c9950c668ceacfe07a1502000000025251fffffffffe029a374595c4edd382875a8dd3f20b9820abb3e93f877b622598d11d0b09e503000000095351000052ac515152ffffffff9d65fea491b979699ceb13caf2479cd42a354bd674ded3925e760758e85a756803000000036365acffffffff0169001d00000000000651636a65656300000000", + "0063630000ac", + 3, + 1050965951, + "4cc85cbc2863ee7dbce15490d8ca2c5ded61998257b9eeaff968fe38e9f009ae" + ], + [ + "718662be026e1dcf672869ac658fd0c87d6835cfbb34bd854c44e577d5708a7faecda96e260300000004526a636a489493073353b678549adc7640281b9cbcb225037f84007c57e55b874366bb7b0fa03bdc00000000075165ac65ac00008ab7f2a802eaa53d000000000007acac516aac526ae92f380100000000056aac00536500000000", + "00", + 1, + 43296088, + "2d642ceee910abff0af2116af75b2e117ffb7469b2f19ad8fef08f558416d8f7" + ], + [ + "94083c840288d40a6983faca876d452f7c52a07de9268ad892e70a81e150d602a773c175ad03000000007ec3637d7e1103e2e7e0c61896cbbf8d7e205b2ecc93dd0d6d7527d39cdbf6d335789f660300000000ffffffff019e1f7b03000000000800ac0051acac0053539cb363", + "", + 1, + -183614058, + "a17b66d6bb427f42653d08207a22b02353dd19ccf2c7de6a9a3a2bdb7c49c9e7" + ], + [ + "30e0d4d20493d0cd0e640b757c9c47a823120e012b3b64c9c1890f9a087ae4f2001ca22a61010000000152f8f05468303b8fcfaad1fb60534a08fe90daa79bff51675472528ebe1438b6f60e7f60c10100000008526a6551ac510053ffffffffaaab73957ea2133e32329795221ed44548a0d3a54d1cf9c96827e7cffd1706df020000000800526a005265526affffffffd19a6fe54352015bf170119742821696f64083b5f14fb5c7d1b5a721a3d7786801000000045265ac53ffffffff020f39bd0300000000036aac52049f6c05000000000352516aba5b4c60", + "6a6365516a6a655253", + 0, + -624256405, + "ab09ea40daca420b5ab385d5f120b26a8d7aa2f36831bab61dfbb90b795a04c9" + ], + [ + 
"f9c69d940276ec00f65f9fe08120fc89385d7350388508fd80f4a6ba2b5d4597a9e21c884f0100000002636315473ae6d82c744c07fc876ecd53bd0f3018b2dbedad77d757d5bdf3811b23d294e8c0170000000000afababe00157ede2050000000006ac6a5263635300000000", + "53", + 1, + 606547088, + "714d8b14699835b26b2f94c58b6ea4c53da3f7adf0c62ea9966b1e1758272c47" + ], + [ + "5c0ac112032d6885b7a9071d3c5f493aa16c610a4a57228b2491258c38de8302014276e8be0300000002006a17468315215262ad5c7393bb5e0c5a6429fd1911f78f6f72dafbbbb78f3149a5073e24740300000003ac5100ffffffff33c7a14a062bdea1be3c9c8e973f54ade53fe4a69dcb5ab019df5f3345050be00100000007ac63655163526a428defc0033ec36203000000000765516365536a00ae55b200000000000153f4c0080400000000095265516a536563536a00000000", + "6a005151006a", + 2, + 272749594, + "14b6f7b0c8232bd9b8b6f9e108156a0bd112324d05f8ffc2ba4bf42258700727" + ], + [ + "e3683329026720010b08d4bec0faa244f159ae10aa582252dd0f3f80046a4e145207d54d31000000000852acac52656aacac3aaf2a5017438ad6adfa3f9d05f53ebed9ceb1b10d809d507bcf75e0604254a8259fc29c02000000055352655251f926e52c04b44918030000000000f7679c0100000000090000525152005365539e3f48050000000007516500635363008396c905000000000253650591024f", + "6a6365", + 0, + 908746924, + "4613548115961a791c9d846df2c034cbd8926dd54b110590c28ae97642272e5c" + ], + [ + "48c4afb204204209e1df6805f0697edaa42c0450bbbd767941fe125b9bc40614d63d757e2203000000066a5363005152dc8b6a605a6d1088e631af3c94b8164e36e61445e2c60130292d81dabd30d15f54b355a802000000036a6353ffffffff1d05dcec4f3dedcfd02c042ce5d230587ee92cb22b52b1e59863f3717df2362f0300000005536552ac52ffffffffd4d71c4f0a7d53ba47bb0289ca79b1e33d4c569c1e951dd611fc9c9c1ca8bc6c030000000665536a6551acffffffff042f9aa905000000000753655153656351ab93d8010000000002655337440e0300000000005d4c690000000000015278587acb", + "006565526a51", + 0, + 1502064227, + "bbed77ff0f808aa8abd946ba9e7ec1ddb003a969fa223dee0af779643cb841a9" + ], + [ + 
"00b20fd104dd59705b84d67441019fa26c4c3dec5fd3b50eca1aa549e750ef9ddb774dcabe000000000651ac656aac65ffffffff52d4246f2db568fc9eea143e4d260c698a319f0d0670f84c9c83341204fde48b0200000000ffffffffb8aeabb85d3bcbc67b132f1fd815b451ea12dcf7fc169c1bc2e2cf433eb6777a03000000076a51ac6a6563acd510d209f413da2cf036a31b0def1e4dcd8115abf2e511afbcccb5ddf41d9702f28c52900100000005ac526a0065ffffffff039c827600000000000753655200656a52401561010000000002ac0082b716010000000002510000000000", + "535265", + 1, + -947367579, + "6c7f57a4e9c03c95bf22236d912ac8cba66c6dbebdc61fea5a736fd8d3150b34" + ], + [ + "455131860220abbaa72015519090a666faf137a0febce7edd49da1eada41feab1505a0028b02000000026365453ead4225724eb69beb590f2ec56a7693a608871e0ab0c34f5e96157f90e0a96148f3c50200000007525151535163acffffffff022d1249040000000008ac00acac6565630088b310040000000000e3920e59", + "51526a52ac5152", + 0, + 294375737, + "8036634b1978c37820ea253bac2bde46c68fc6f699c7d796b2cc74ffc68f4048" + ], + [ + "624d28cb02c8747915e9af2b13c79b417eb34d2fa2a73547897770ace08c6dd9de528848d303000000035163533c69d3f9b75b6ef8ed2df50c2210fd0bf4e889c42477d58682f711cbaece1a626194bb85030000000665ac53ac5353ffffffff018cc280040000000007acac52636352ac6859409e", + "ac51ac", + 1, + 1005144875, + "4bf229cf09d584f00136a80f077426a5c9e02ab4c4e93c637a602e7c04e18422" + ], + [ + "8f28471d02f7d41b2e70e9b4c804f2d90d23fb24d53426fa746bcdcfffea864925bdeabe3e0200000001acffffffff76d1d35d04db0e64d65810c808fe40168f8d1f2143902a1cc551034fd193be0e0000000001acffffffff048a5565000000000005005151516afafb610400000000045263ac53648bb30500000000086363516a6a5165513245de01000000000000000000", + "6a0053510053", + 1, + -1525137460, + "305fc8ff5dc04ebd9b6448b03c9a3d945a11567206c8d5214666b30ec6d0d6cc" + ], + [ + 
"10ec50d7046b8b40e4222a3c6449490ebe41513aad2eca7848284a08f3069f3352c2a9954f0000000009526aac656352acac53ffffffff0d979f236155aa972472d43ee6f8ce22a2d052c740f10b59211454ff22cb7fd00200000005acacac6353ffffffffbbf97ebde8969b35725b2e240092a986a2cbfd58de48c4475fe077bdd493a20c0100000003635365ffffffff4600722d33b8dba300d3ad037bcfc6038b1db8abfe8008a15a1de2da2264007302000000035351ac6dbdafaf020d0ccf040000000004636a516ae06e5e0200000000016a00000000", + "", + 0, + -1658960232, + "31ac6b02930185c2b025b67605227023f52e7d196e000ce4c3d3f288eb058736" + ], + [ + "fef98b7101bf99277b08a6eff17d08f3fcb862e20e13138a77d66fba55d54f26304143e536010000000451536500ffffffff04265965030000000004655252ace2c775010000000001002b23b4040000000006516a515353ac456a7a00000000000653525251acacba521291", + "526aacac0053", + 0, + -1614097109, + "85a6b1c783a3abd470a79967d90684d50a88d87255203b4ecdb431cd63ebb59e" + ], + [ + "34a2b8830253661b373b519546552a2c3bff7414ea0060df183b1052683d78d8f54e842442000000000152ffffffffd961a8e34cf374151058dfcddc86509b33832bc57267c63489f69ff01199697c0300000001acba856cfb01b17c2f050000000007515365ac53000000000000", + "5263656a", + 1, + -2104480987, + "fcb05b2af9c06cfb378f999fd7a82f96fd352e9146b9b65ec513ef1ce658b5de" + ], + [ + "43559290038f32fda86580dd8a4bc4422db88dd22a626b8bd4f10f1c9dd325c8dc49bf479f01000000026351ffffffff401339530e1ed3ffe996578a17c3ec9d6fccb0723dd63e7b3f39e2c44b976b7b03000000056a65656a51ffffffff6fb9ba041c96b886482009f56c09c22e7b0d33091f2ac5418d05708951816ce7000000000551ac525100ffffffff020921e40500000000035365533986f40500000000016a00000000", + "52ac51", + 0, + 1769771809, + "02040283ef2291d8e1f79bb71bdabe7c1546c40d7ed615c375643000a8b9600d" + ], + [ + "6878a6bd02e7e1c8082d5e3ee1b746cfebfac9e8b97e61caa9e0759d8a8ecb3743e36a30de0100000001532a911b0f12b73e0071f5d50b6bdaf783f4b9a6ce90ec0cad9eecca27d5abae188241ddec0200000001651c7758d803f7457b0500000000036551515f4e90000000000001007022080200000000035365acc86b6946", + "6351", + 0, + -1929374995, + 
"f24be499c58295f3a07f5f1c6e5084496ae160450bd61fdb2934e615289448f1" + ], + [ + "35b6fc06047ebad04783a5167ab5fc9878a00c4eb5e7d70ef297c33d5abd5137a2dea9912402000000036aacacffffffff21dc291763419a584bdb3ed4f6f8c60b218aaa5b99784e4ba8acfec04993e50c03000000046a00ac6affffffff69e04d77e4b662a82db71a68dd72ef0af48ca5bebdcb40f5edf0caf591bb41020200000000b5db78a16d93f5f24d7d932f93a29bb4b784febd0cbb1943f90216dc80bba15a0567684b000000000653525100006a1be2208a02f6bdc1030000000001658550ea04000000000365636a00000000", + "", + 0, + -1114114836, + "b653c54899c1d7497161b934958797b83611a717c18e7076c1e0a486f99000a3" + ], + [ + "bebb90c302bf91fd4501d33555a5fc5f2e1be281d9b7743680979b65c3c919108cc2f51751010000000100ffffffff969c30053f1276550532d0aa33cfe80ca63758cd215b740448a9c08a84826f33030000000465655153ffffffff04bf6f2a040000000003655265903e760100000000026a6a7103fa020000000006526553525365b05b2c000000000005000000535300000000", + "5151005363635153", + 1, + 1081291172, + "1e3c0518129698cd9542227f0a141d180187a468da6d575584b9dfe34feb4a59" + ], + [ + "af48319f031b4eeb4319714a285f44244f283cbff30dcb9275b06f2348ccd0d7f015b54f8500000000066363ac65ac6affffffff2560a9817ebbc738ad01d0c9b9cf657b8f9179b1a7f073eb0b67517409d108180200000004ac636552ffffffff0bdd67cd4ecae96249a2e2a96db1490ee645f042fd9d5579de945e22b799f4d00300000006655251515300cf187c8202e51abf0300000000056552006a00adf37d000000000004ac6a535100000000", + "6365", + 1, + -1855554446, + "acc4b39abbf3bd4b380ecb631c19d96cc8f35909b1ea39940a489b3f7345dfa5" + ], + [ + "f35befbc03faf8c25cc4bc0b92f6239f477e663b44b83065c9cb7cf231243032cf367ce313000000000465526a517c4c334149a9c9edc39e29276a4b3ffbbab337de7908ea6f88af331228bd90086a6900ba020000000151279d19950d2fe81979b72ce3a33c6d82ebb92f9a2e164b6471ac857f3bbd3c0ea213b54201000000085351635363520065052657c20300a9ba04000000000452636a6a0516ea020000000006535253656365cfdd3f01000000000865ac516aac00530000000000", + "", + 2, + -99793521, + "836e9ba82c036ca806ca9834d4655b5251580678942db8e1cc6f55d783dd16bd" + ], + [ + 
"d3da18520216601acf885414538ce2fb4d910997eeb91582cac42eb6982c9381589587794f0300000000fffffffff1b1c9880356852e10cf41c02e928748dd8fae2e988be4e1c4cb32d0bfaea6f70000000002656affffffff02fb0d69050000000000eda8580500000000085163526565ac52522b913c95", + "ac", + 1, + -1247973017, + "90bb0762c3c7bf0f1b42531960d66aa6b25e0728201644208cd7ba0436e9cd21" + ], + [ + "8218eb740229c695c252e3630fc6257c42624f974bc856b7af8208df643a6c520ef681bfd00000000002510066f30f270a09b2b420e274c14d07430008e7886ec621ba45665057120afce58befca9601030000000352515384c380a9015d96100000000000076a5300acac526500000000", + "ac005263", + 0, + -1855679695, + "5071f8acf96aea41c7518bd1b5b6bbe16258b529df0c03f9e374b83c66b742c6" + ], + [ + "1123e7010240310013c74e5def60d8e14dd67aedff5a57d07a24abc84d933483431b8cf8ea0300000003530051fc6775ff1a23c627a2e605dd2560e84e27f4208300071e90f4589e762ad9c9fe8d0da95e020000000465655200ffffffff0425159803000000000265639d28d90400000000096563636aacac525153474df801000000000851525165ac51006a75e23b040000000000e5bd3a4a", + "6363636565", + 0, + -467124448, + "3c45916684a1c22866830167fc842ba0fbae2c392b5bae051bbb9701dcd6991a" + ], + [ + "fd92fe1003083c5179f97e77bf7d71975788138147adbdb283306802e261c0aee080fa22630200000000860c643ba9a1816b9badf36077b4554d11720e284e395a1121bc45279e148b2064c65e490200000005516a53636a2c713088d20f4bc4001264d972cce05b9fe004dc33376ad24d0d013e417b91a5f1b6734e000000000100ffffffff02e3064c0500000000066552006a5165b86e87050000000004656553522052eadb", + "0053525265", + 0, + 776203277, + "8dd13496ffc9c6e1e1c45e57fbfa8db9c2677c06bf2879596a9bcd59f3e689c4" + ], + [ + "d1b6a703038f14d41fcc5cc45455faa135a5322be4bf0f5cbcd526578fc270a236cacb853f0200000000ffffffff135aeff902fa38f202ccf5bd34437ff89c9dc57a028b62447a0a38579383e8ef0000000000ffffffffadf398d2c818d0b90bc474f540c3618a4a643482eeab73d36101987e2ec0335900000000004bd3323504e69fc10000000000055151535251790ada020000000003636a521337a704000000000863ac63acac52656a1e9862010000000006656500ac516a8f4ee672", + "5251656565ac63", + 2, + 82008394, + 
"e272326c1171e26a32634c315442f72b827fbf54e3f0d7ca538345fa56433001" + ], + [ + "81dadaa7011556683db3fe95262f4fdb20391b7e75b7ffcee51b176af64d83c06f85545d620200000003515152ffffffff044805ef0300000000065353516352639702c8020000000008005163515152525270db08040000000007ac516a526553ac4aabc905000000000863650052636a525100000000", + "65656a5152", + 0, + -2126294159, + "f95803b90493138c22df24e9ef1e25213a6670bdb10952a7e5c9400c63a6fd7f" + ], + [ + "3b937e05032b8895d2f4945cb7e3679be2fbd15311e2414f4184706dbfc0558cf7de7b4d000000000001638b91a12668a3c3ce349788c961c26aa893c862f1e630f18d80e7843686b6e1e6fc39631000000000075263535365ac51eeb09dd1c9605391258ee6f74b9ae17b5e8c2ef010dc721c5433dcdc6e93a1593e3b6d1700000000085365ac6553526351ffffffff0308b18e04000000000253acb6dd00040000000008536aac5153ac516ab0a88201000000000500ac006500804e3ff2", + "", + 0, + 416167343, + "595a3c02254564634e8085283ec4ea7c23808da97ce9c5da7aecd7b553e7fd7f" + ], + [ + "a48f27ca047997470da74c8ee086ddad82f36d9c22e790bd6f8603ee6e27ad4d3174ea875403000000075153ac636a6aacffffffffefc936294e468d2c9a99e09909ba599978a8c0891ad47dc00ba424761627cef202000000056a51630053ffffffff304cae7ed2d3dbb4f2fbd679da442aed06221ffda9aee460a28ceec5a9399f4e0200000000f5bddf82c9c25fc29c5729274c1ff0b43934303e5f595ce86316fc66ad263b96ca46ab8d0100000003536500d7cf226b0146b00c04000000000200ac5c2014ce", + "515100636563", + 0, + 1991799059, + "9c051a7092fe17fa62b1720bc2c4cb2ffc1527d9fb0b006d2e142bb8fe07bf3c" + ], + [ + "180cd53101c5074cf0b7f089d139e837fe49932791f73fa2342bd823c6df6a2f72fe6dba1303000000066a6a63ac53acffffffff03853bc1020000000007ac526a6a6a6a003c4a8903000000000453515163a0fbbd030000000004656a5253253d64cf", + "ac65", + 0, + -1548453970, + "1a7d5d2203376199149277a670ea07038886f5e93bc51971504299cf52f3beeb" + ], + [ + 
"c21ec8b60376c47e057f2c71caa90269888d0ffd5c46a471649144a920d0b409e56f190b700000000008acac6a526a536365ffffffff5d315d9da8bf643a9ba11299450b1f87272e6030fdb0c8adc04e6c1bfc87de9a0000000000ea43a9a142e5830c96b0ce827663af36b23b0277244658f8f606e95384574b91750b8e940000000007516a63ac0063acffffffff023c61be04000000000451655263313cc8020000000006006a53526551ed8c3d56", + "6a", + 1, + 1160627414, + "3c09d5f69578e89bf1a43f3cf1d6fae5546b7610f0e2b92a2ffd51c80c643651" + ], + [ + "128cd90f04b66a4cbc78bf48748f6eec0f08d5193ee8d0a6f2e8d3e5f138ed12c2c87d01a3010000000652006aac0000ffffffff09fc88bb1851e3dfb3d30179c38e15aeb1b39929c7c74f6acd071994ed4806490300000000e7fc5ea12ec56f56c0d758ecf4bb88aa95f3b08176b336db3b9bec2f6e27336dce28adbe030000000400530051fffffffffd6ff1adcf1fbe0d883451ee46904f1b7e8820243d395559b2d4ee8190a6e891000000000080fb1ae702f85b4000000000000252008d96510100000000046a52536a00000000", + "", + 1, + 1667598199, + "508b078076335e06265e101fb2ae0342b50d0a5e4ef83a36f2275e671f4b7861" + ], + [ + "da9695a403493d3511c10e1fe1286f954db0366b7667c91ef18ae4578056c1bf752114ac5901000000035351519788d91dd1f9c62dc005d80ea54eb13f7131ca5aace3d5d29f9b58ccc5fbc9a27e779950010000000453ac6a00ffffffffe2556ff29ebe83eb42a32c7a8d93bc598043578f491b5935805a33608538845a03000000015265d21b3b018f26c4030000000005ac51535352e1cbcb10", + "00656552", + 2, + -1550927794, + "9739b2f481f15e49de50adbcfea6eb0a375f0463b80243f701ad1ecbcb7097cc" + ], + [ + "b240517501334021240427adb0b413433641555424f6d24647211e3e6bfbb22a8045cbda2f000000000071bac8630112717802000000000000000000", + "6a5165ac52656551", + 0, + 1790414254, + "2c8be597620d95abd88f9c1cf4967c1ae3ca2309f3afec8928058c9598660e9e" + ], + [ + "96bac43903044a199b4b3efeeec5d196ee23fb05495541fa2cd6fb6405a9432d1723363660010000000151ffffffffe6ce2b66ce1488918a3e880bebb0e750123f007c7bcbac8fcd67ce75cb6fbae80300000000ffffffff9c0955aa07f506455834895c0c56be5a095398f47c62a3d431fe125b161d666a0200000004520000ac7ffdbc540216f2f004000000000165a26dce01000000000000000000", + 
"5151656a656a6a63", + 0, + -707123065, + "ae31ab7a63ae73db083c6bcfa34c9950dd808d3bc2d16520a4890bb912d68b20" + ], + [ + "b8fd394001ed255f49ad491fecc990b7f38688e9c837ccbc7714ddbbf5404f42524e68c18f00000000056353535363081e15ee02706f7d050000000008515200535351526364c7ec040000000005636a53acac9206cbe1", + "655352ac", + 0, + -1251578838, + "8e0697d8cd8a9ccea837fd798cc6c5ed29f6fbd1892ee9bcb6c944772778af19" + ], + [ + "e42a76740264677829e30ed610864160c7f97232c16528fe5610fc08814b21c34eefcea69d010000000653006a6a0052ffffffff647046cf44f217d040e6a8ff3f295312ab4dd5a0df231c66968ad1c6d8f4428000000000025352ffffffff0199a7f900000000000000000000", + "655263006a005163", + 1, + 1122505713, + "7cda43f1ff9191c646c56a4e29b1a8c6cb3f7b331da6883ef2f0480a515d0861" + ], + [ + "0f034f32027a8e094119443aa9cfe11737c6d7dda9a52b839bc073dcc0235b847b28e0fab60200000006ac53ac536a63eee63447dfdad80476994b68706e916df1bd9d7cb4f3a4f6b14369de84564bea2e8688bd030000000565636a65acf8434663020b35fe01000000000500655163acb3d6a103000000000253ac345eeda0", + "526a51ac6351", + 1, + 66020215, + "73d7cf5a5b0449e84d7e615f41e9b6bc6103501e4ddc5a30567210e5db7dcc2e" + ], + [ + "a2dfa4690214c1ab25331815a5128f143219de51a47abdc7ce2d367e683eeb93960a31af9f010000000363636affffffff8be0628abb1861b078fcc19c236bc4cc726fa49068b88ad170adb2a97862e7460200000004ac655363ffffffff0441f11103000000000153dbab0c00000000000753ac5365526a63abbb9505000000000352516a29a029040000000003ac526a00000000", + "6a52ac63", + 1, + -1302210567, + "7c8fb6b3d954f0c45ec0b69eb0d4ba4d99cf7ffcf5fceebbd8cde447277fc2d1" + ], + [ + "9dbc591f04521670af83fb3bb591c5d4da99206f5d38e020289f7db95414390dddbbeb56680100000004ac5100acffffffffb6a40b5e29d5e459f8e72d39f800089529f0889006cad3d734011991da8ef09d0100000008526a5100ac536a515fc427436df97cc51dc8497642ffc868857ee245314d28b356bd70adba671bd6071301fc0000000000ffffffff487efde2f620566a9b017b2e6e6d42525e4070f73a602f85c6dfd58304518db30000000005516353006a8d8090180244904a0200000000046a65656ab1e9c20300000000025163a06a5449", + "", + 0, + 
-1414953913, + "a9b488fdd04166a822a49cbaaacc9f77a1547d59b7be98b00c58869fa2a68950" + ], + [ + "1345fb2c04bb21a35ae33a3f9f295bece34650308a9d8984a989dfe4c977790b0c21ff9a7f0000000006ac52ac6a0053ffffffff7baee9e8717d81d375a43b691e91579be53875350dfe23ba0058ea950029fcb7020000000453536352ffffffff684b6b3828dfb4c8a92043b49b8cb15dd3a7c98b978da1d314dce5b9570dadd2020000000763536a5200ac63d1a8647bf667ceb2eae7ec75569ca249fbfd5d1b582acfbd7e1fcf5886121fca699c011d0100000003ac006affffffff049b1eb00300000000001e46dc01000000000700656a6a630065ca95b40300000000030051520c84990100000000056aac526a6500000000", + "53526aac636300", + 2, + 1809978100, + "e3f19b8a9d68424900b66628b381b44553bbc96d1ea2efe5df99f1b350585c14" + ], + [ + "7d75dc8f011e5f9f7313ba6aedef8dbe10d0a471aca88bbfc0c4a448ce424a2c5580cda15603000000025152ffffffff01997f8e0200000000096552ac6a65656563530d93bbcc", + "00656a6563", + 0, + 1414485913, + "ec91eda1149f75bffb97612569a78855498c5d5386d473752a2c81454f297fa7" + ], + [ + "1459179504b69f01c066e8ade5e124c748ae5652566b34ed673eea38568c483a5a4c4836ca0100000008ac5352006563656affffffff5d4e037880ab1975ce95ea378d2874dcd49d5e01e1cdbfae3343a01f383fa35800000000095251ac52ac6aac6500ffffffff7de3ae7d97373b7f2aeb4c55137b5e947b2d5fb325e892530cb589bc4f92abd503000000076563ac53520052ffffffffb4db36a32d6e543ef49f4bafde46053cb85b2a6c4f0e19fa0860d9083901a119030000000251531bbcfe5504a6dbda040000000007536a5365ac6500d660c803000000000765656a53536a6a54e84e010000000003acac52df2ccf0500000000025351220c857e", + "", + 2, + 1879181631, + "41542fb175286b4b6afeedfadc4769744936e8d1db843d89ffd3a34007ae0288" + ], + [ + 
"d98b777f04b1b3f4de16b07a05c31d79965579d0edda05600c118908d7cf642c9cd670093f020000000853005351ac655363a268caad6733b7d1718008997f249e1375eb3ab9fe68ab0fe170d8e745ea24f54ce67f9b00000000066500516a5151ffffffff7ef8040dfcc86a0651f5907e8bfd1017c940f51cf8d57e3d3fe78d57e40b1e610200000003535263ffffffff39846cfed4babc098ff465256ba3820c30d710581316afcb67cd31c623b703360300000001acffffffff03d405120100000000056300006a5201a73d050000000003636a6a294c8c000000000006ac65536553ac00000000", + "63525351ac", + 1, + 2018694761, + "852549bc8f55c4ece6a503d89a17298debf8cd783759e44a1be5a6171763e745" + ], + [ + "cabb1b06045a895e6dcfc0c1e971e94130c46feace286759f69a16d298c8b0f6fd0afef8f20300000004ac006352ffffffffa299f5edac903072bfb7d29b663c1dd1345c2a33546a508ba5cf17aab911234602000000056a65515365ffffffff89a20dc2ee0524b361231092a070ace03343b162e7162479c96b757739c8394a030000000092ec524daf73fabee63f95c1b79fa8b84e92d0e8bac57295e1d0adc55dc7af5534ebea410200000001534d70e79b04674f6f00000000000400ac53517d60cc020000000002526596c51d040000000004ac6300ac62a787050000000007006a51656363639e2e7ff7", + "6551ac6351ac", + 3, + 1942663262, + "4ddabfc3da0e90138887c2801ac7ec9cbdef5ed9816f29887523bf020d4bfbb7" + ], + [ + "8b96d7a30132f6005b5bd33ea82aa325e2bcb441f46f63b5fca159ac7094499f380f6b7e2e00000000066aacac6300acffffffff0158056700000000000465005100c319e6d0", + "52006a", + 0, + -1100733473, + "fb4bd26a91b5cf225dd3f170eb09bad0eac314bc1e74503cc2a3f376833f183e" + ], + [ + "112191b7013cfbe18a175eaf09af7a43cbac2c396f3695bbe050e1e5f4250603056d60910e02000000001c8a5bba03738a22010000000005525352656a77a149010000000002510003b52302000000000351ac52722be8e6", + "65ac6565", + 0, + -1847972737, + "8e795aeef18f510d117dfa2b9f4a2bd2e2847a343205276cedd2ba14548fd63f" + ], + [ + 
"ce6e1a9e04b4c746318424705ea69517e5e0343357d131ad55d071562d0b6ebfedafd6cb840100000003656553ffffffff67bd2fa78e2f52d9f8900c58b84c27ef9d7679f67a0a6f78645ce61b883fb8de000000000100d699a56b9861d99be2838e8504884af4d30b909b1911639dd0c5ad47c557a0773155d4d303000000036a5151ffffffff9fdb84b77c326921a8266854f7bbd5a71305b54385e747fe41af8a397e78b7fa010000000763acac6a5100ac0d2e9b9d049b8173010000000007ac53526a650063ba9b7e010000000008526a00525263acac0ab3fd030000000000ea8a0303000000000200aca61a97b9", + "", + 1, + -1276952681, + "b6ed4a3721be3c3c7305a5128c9d418efa58e419580cec0d83f133a93e3a22c5" + ], + [ + "a7721d94021652d90c79aaf5022d98219337d50f836382403ed313adb1116ba507ac28b0b0010000000451ac630089e6d64a7aa81fb9595368f04d1b36d7020e7adf5807535c80d015f994cce29554fe869b01000000055353636500ffffffff024944c90100000000046300635369df9f01000000000000000000", + "656a536551", + 0, + -1740151687, + "935892c6f02948f3b08bcd463b6acb769b02c1912be4450126768b055e8f183a" + ], + [ + "2f7353dd02e395b0a4d16da0f7472db618857cd3de5b9e2789232952a9b154d249102245fd030000000151617fd88f103280b85b0a198198e438e7cab1a4c92ba58409709997cc7a65a619eb9eec3c0200000002636affffffff0397481c0200000000045300636a0dc97803000000000009d389030000000003ac6a53134007bb", + "0000536552526a", + 0, + -1912746174, + "30c4cd4bd6b291f7e9489cc4b4440a083f93a7664ea1f93e77a9597dab8ded9c" + ], + [ + "7d95473604fd5267d0e1bb8c9b8be06d7e83ff18ad597e7a568a0aa033fa5b4e1e2b6f1007020000000465006a6affffffffaee008503bfc5708bd557c7e78d2eab4878216a9f19daa87555f175490c40aaf000000000163ffffffffabd74f0cff6e7ceb9acc2ee25e65af1abcebb50c08306e6c78fa8171c37613dd010000000352acacffffffff54a3069393f7930fa1b331cdff0cb945ec21c11d4605d8eedba1d3e094c6ae1f01000000026300ffffffff0182edeb0500000000085263535153530065a247e8cd", + "51516a00", + 2, + -426210430, + "2707ca714af09494bb4cf0794abe33c6cba5f29891d619e76070269d1fa8e690" + ], + [ + 
"221d4718023d9ca9fe1af178dbfce02b2b369bf823ea3f43f00891b7fef98e215c06b94fdd000000000851005153000051acffffffffb1c7ad1c64b7441bf5e70cd0f6eb4ec96821d67fc4997d9e6dfdceadecd36dde01000000070051536a635153ffffffff04e883cd000000000006515365530052bbb2f70400000000002f1b2e03000000000165259fcb00000000000010dbde99", + "", + 1, + 665721280, + "ab45dd7ce573d4a55464be1c324c5485ecf7c817480fb3ee3d976b19876a0f2f" + ], + [ + "6f66c0b3013e6ae6aabae9382a4326df31c981eac169b6bc4f746edaa7fc1f8c796ef4e3740000000004656aac6affffffff0191c8d6030000000002525300000000", + "6a5352516a635352", + 0, + -1299629906, + "48411efeb133c6b7fec4e7bdbe613f827093cb06ea0dbcc2ffcfde3a9ac4356c" + ], + [ + "89e7928c04363cb520eff4465251fd8e41550cbd0d2cdf18c456a0be3d634382abcfd4a2130200000006ac516a6a656355042a796061ed72db52ae47d1607b1ceef6ca6aea3b7eea48e7e02429f382b378c4e5190100000006535163525252ffffffff53631cbda79b40183000d6ede011c778f70147dc6fa1aed3395d4ce9f7a8e69701000000076a655352516a52ad0de418d80afe059aab5da73237e0beb60af4ac490c3394c12d66665d1bac13bdf29aa8000000000153f2b59ab6027a33eb040000000007005351ac5100ac88b9410300000000020052e1e8a143", + "63656a", + 0, + 1258533326, + "b89f9cf9604e0521dd869431abdf535560a869a08b826cedf1828253d47d42cb" + ], + [ + "ca356e2004bea08ec2dd2df203dc275765dc3f6073f55c46513a588a7abcc4cbde2ff011c7020000000553525100003aefec4860ef5d6c1c6be93e13bd2d2a40c6fb7361694136a7620b020ecbaca9413bcd2a030000000965ac00536352535100ace4289e00e97caaea741f2b89c1143060011a1f93090dc230bee3f05e34fbd8d8b6c399010000000365526affffffff48fc444238bda7a757cb6a98cb89fb44338829d3e24e46a60a36d4e24ba05d9002000000026a53ffffffff03d70b440200000000056a6a526aac853c97010000000002515335552202000000000351635300000000", + "0052", + 3, + -528192467, + "fc93cc056c70d5e033933d730965f36ad81ef64f1762e57f0bc5506c5b507e24" + ], + [ + 
"82d4fa65017958d53e562fac073df233ab154bd0cf6e5a18f57f4badea8200b217975e31030200000003636a51ac0891a204227cc9050000000006635200655365bfef8802000000000865650051635252acfc2d0905000000000565ac51516380195e030000000007ac52525352510063d50572", + "53", + 0, + -713567171, + "e0ee0c224184730c9cc8ee7ccb0399ff8809c28fd9d7050bccb1a32509a3f9e7" + ], + [ + "75f6949503e0e47dd70426ef32002d6cdb564a45abedc1575425a18a8828bf385fa8e808e600000000016a82f9fd14e9647d7a1b5284e6c55169c8bd228a7ea335987cef0195841e83da45ec28aa2e0300000002516350dc6fe239d150efdb1b51aa288fe85f9b9f741c72956c11d9dcd176889963d699abd63f0000000000429a63f502777d20010000000006ac52ac516a53d081d9020000000003acac630c3cc3a8", + "535152516551510000", + 1, + 973814968, + "c8e60a5984b57ddc3f0ed4d767e3e9d0e6c37143de7501da29ad8c3d4dee83d8" + ], + [ + "24f24cd90132b2162f938f1c22d3ca5e7daa83515883f31a61a5177aebf99d7db6bdfc398c010000000163ffffffff01d5562d0100000000016300000000", + "5265ac5165ac5252", + 0, + 1055129103, + "5eeb03e03806cd7bfd44bbba69c30f84c2c5120df9e68cd8facc605fcfbc9693" + ], + [ + "5ff2cac201423064a4d87a96b88f1669b33adddc6fa9acdc840c0d8a243671e0e6de49a5b00300000005ac6353655353b91db50180db5a03000000000663535151006a047a3aff", + "52515365005163", + 0, + -1336626596, + "b8db8d57fe40ab3a99cf2f8ed57da7a65050fcc1d34d4280e25faf10108d3110" + ], + [ + "10011f150220ad76a50ccc7bb1a015eda0ff987e64cd447f84b0afb8dc3060bdae5b36a6900200000000ffffffff1e92dd814dfafa830187bc8e5b9258de2445ec07b02c420ee5181d0b203bb334000000000465536a65ffffffff0124e6540100000000060063655353ac00000000", + "530051", + 0, + 440222748, + "4614dcf172fd2db5bc0d1d0fe4d87a15d9b2928cc43480e58c6b802cf7a07b35" + ], + [ + 
"8b95ec900456648d820a9b8df1d8f816db647df8a8dc9f6e7151ebf6079d90ee3f6861352a0200000007520000ac535151ffffffff039b10b845f961225ac0bcaac4f5fe1991029a051aa3d06a3811b5762977a67403000000025252ffffffff8559d65f40d5e261f45aec8aad3d2c56c6114b22b26f7ee54a06f0881be3a7f5010000000765635252536363ffffffff38f8b003b50f6412feb2322b06b270197f81ad69c36af02ca5008b94eee5f650020000000165ffffffff01ae2b00010000000001638eb153a2", + "00535300ac53", + 2, + 1266056769, + "205f3653f0142b35ce3ef39625442efebae98cde8cbf0516b97b51073bb0479f" + ], + [ + "babbb7ea01ab5d584727cb44393b17cf66521606dc81e25d85273be0d57bad43e8f6b6d43501000000036a656aba83a68803fb0f4a000000000004536353633fcfe4020000000008ac00ac6351006a65182a0c03000000000453ac5363bee74f44", + "536a6a6a6365ac51", + 0, + -799187625, + "a33a95d143903529e1cc345b93fe150e0f1f1d4f3fcc6ea7a0252ddbf145920b" + ], + [ + "e86a24bc03e4fae784cdf81b24d120348cb5e52d937cd9055402fdba7e43281e482e77a1c100000000046363006affffffffa5447e9bdcdab22bd20d88b19795d4c8fb263fbbf7ce8f4f9a85f865953a6325020000000663ac53535253ffffffff9f8b693bc84e0101fc73748e0513a8cecdc264270d8a4ee1a1b6717607ee1eaa00000000026a513417bf980158d82c020000000009005253005351acac5200000000", + "6353516365536a6a", + 2, + -563792735, + "508129278ef07b43112ac32faf00170ad38a500eed97615a860fd58baaad174b" + ], + [ + "53bd749603798ed78798ef0f1861b498fc61dcee2ee0f2b37cddb115b118e73bc6a5a47a0201000000086a63656a6a6a000007ff674a0d74f8b4be9d2e8e654840e99d533263adbdd0cf083fa1d5dd38e44d2d163d9001000000055251ac6a51c8b6b63f744a9b9273ccfdd47ceb05d3be6400c1ed0f7283d32b34a7f4f0889cccf06be30000000008516a52636551516a9ac1fe63030c677e05000000000027bc610000000000086565636a635100526e2dc60200000000015300000000", + "6552536a515351", + 1, + -1617066878, + "fe516df92299e995b8e6489be824c6839543071ec5e9286060b2600935bf1f20" + ], + [ + 
"691bf9fc028ca3099020b79184e70039cf53b3c7b3fe695d661fd62d7b433e65feda2150610000000002ac63ffffffff2c814c15b142bc944192bddccb90a392cd05b968b599c1d8cd99a55a28a243fd01000000075300526a5200ac98516a5803dfd3540500000000046552ac5228381201000000000300536a4409a903000000000665636a5300658759621b", + "65ac5165", + 0, + -359941441, + "2b6be40583884c063bfc2a1689ad08ee203b17d4bdeddbd730d456647e004b4b" + ], + [ + "536bc5e60232eb60954587667d6bcdd19a49048d67a027383cc0c2a29a48b960dc38c5a0370300000004ac636300ffffffff8f1cfc102f39b1c9348a2195d496e602c77d9f57e0769dabde7eaaedf9c69e250100000004ac6a6351ffffffff0432f56f0400000000046a5365517fd54b0400000000035265539484e4050000000003536a5376dc25020000000006ac536a6a536ab978e686", + "ac0051006a006a006a", + 0, + -273074082, + "3c6eaba993d523ffc97ed430692390b8c12ad830feed85073d67462b7e749e09" + ], + [ + "74606eba01c2f98b86c29ba5a32dc7a7807c2abe6ed8d89435b3da875d87c12ae05329e6070200000003510052ffffffff02a1e2c4020000000006516563526a63c68bae04000000000752636300006363fe19ae4f", + "63acac5365", + 0, + 112323400, + "dd5092e9ba0db3db222a3f7fed4c4bd13fe4d191f9c082d5807f2cdef0d2ba08" + ], + [ + "2ed805e20399e52b5bcc9dc075dad5cf19049ff5d7f3de1a77aee9288e59c5f4986751483f020000000165ffffffff967531a5726e7a653a9db75bd3d5208fa3e2c5e6cd5970c4d3aba84eb644c72c0300000000ffffffffd79030d20c65e5f8d3c55b5692e5bdaa2ae78cfa1935a0282efb97515feac43f030000000400006365261ab88c02bdf66a0000000000026351d6ad8b000000000004525152ac00000000", + "6300535265", + 0, + 2072814938, + "9e6495debcf5c6d165c2dc247c895528f87de18c82ff6c40c4d020605620b960" + ], + [ + "fab796ee03f737f07669160d1f1c8bf0800041157e3ac7961fea33a293f976d79ce49c02ab0200000003ac5252eb097ea1a6d1a7ae9dace338505ba559e579a1ee98a2e9ad96f30696d6337adcda5a85f403000000076500656a6a656396d5d41a9b11f571d91e4242ddc0cf2420eca796ad4882ef1251e84e42b930398ec69dd80100000005526551ac6a8e5d0de804f763bb0400000000015288271a010000000001acf2bf290500000000020051c9641500000000000952655363636365ac5100000000", + "00ac536552", + 0, + -1854521113, + 
"a7af3152547de519e3178358f7a083154b0df0ac59bc69a66df8b66cc929fc6c" + ], + [ + "f2b539a401e4e8402869d5e1502dbc3156dbce93583f516a4947b333260d5af1a34810c6a00200000003525363ffffffff01d305e2000000000004ac535200a265fe77", + "", + 0, + -1435650456, + "a8a1db0372c037eeb26dc03c9b297e7f03afabcfa12b0d5413db7a25226802df" + ], + [ + "9f10b1d8033aee81ac04d84ceee0c03416a784d1017a2af8f8a34d2f56b767aea28ff88c8f02000000025352ffffffff748cb29843bea8e9c44ed5ff258df1faf55fbb9146870b8d76454786c4549de100000000016a5ba089417305424d05112c0ca445bc7107339083e7da15e430050d578f034ec0c589223b0200000005ac53ac6565ffffffff025a4ecd0100000000046365636540d2700000000000056a6553526333fa296c", + "", + 0, + -395044364, + "c3de9cacd3644752ca5a3a51b0356904abebc17e3a3d86c25103c90aff6cb71f" + ], + [ + "ab81755f02b325cbd2377acd416374806aa51482f9cc5c3b72991e64f459a25d0ddb52e66703000000026a008727056d48c00cc6e6222be6608c721bc2b1e69d0ffbadd51d131f05ec54bcd83003aac5000000000003f2cdb60454630e020000000007526aac63000000e9e25c040000000003516a0088c97e0000000000076a535265655263771b580500000000075100ac6565515100000000", + "515100ac", + 0, + -230931127, + "a65047626d3beefdf68bc0cd7ddd42332ff27d87f772c24e2b4c823162420e4a" + ], + [ + "7a17e0ef0378dab4c601240639139335da3b7d684600fa682f59b7346ef39386fe9abd69350000000003ac5252807f26fb3249326813e18260a603b9ad66f41f05eaa8146f66bcca452162a502aac4aa8b02000000026a534ea460faa7e3d7854ec6c70d7e797025697b547ec500b2c09c873b4d5517767d3f3720660300000000ffffffff01b12e7a02000000000700006a65656a63991c03e2", + "6a6a", + 1, + -1577994103, + "cff17feca457abd837a3b4355071e627731eeee16216c76b03891d88c17ea9bc" + ], + [ + 
"ff2ecc09041b4cf5abb7b760e910b775268abee2792c7f21cc5301dd3fecc1b4233ee70a2c0200000009acac5300006a51526affffffffeb39c195a5426afff38379fc85369771e4933587218ef4968f3f05c51d6b7c92000000000165453a5f039b8dbef7c1ffdc70ac383b481f72f99f52b0b3a5903c825c45cfa5d2c0642cd50200000001654b5038e6c49daea8c0a9ac8611cfe904fc206dad03a41fb4e5b1d6d85b1ecad73ecd4c0102000000086a51000053656565bdb5548302cc719200000000000452655265214a36030000000002006a00000000", + "52516a006a63", + 1, + -2113289251, + "3cbb0d5a6fce6b49cd3e82d8235e521a2720becc8485f66554250cba6def30a1" + ], + [ + "70a8577804e553e462a859375957db68cfdf724d68caeacf08995e80d7fa93db7ebc04519d0200000003535253619f4f2a428109c5fcf9fee634a2ab92f4a09dc01a5015e8ecb3fc0d9279c4a77fb27e9000000000056a51006a6affffffff3ed1a0a0d03f25c5e8d279bb5d931b7eb7e99c8203306a6c310db113419a69ad010000000465516300ffffffff6bf668d4ff5005ef73a1b0c51f32e8235e67ab31fe019bf131e1382050b39a630000000004536a6563ffffffff02faf0bb00000000000163cf2b4b05000000000752ac635363acac15ab369f", + "ac", + 0, + -1175809030, + "1c9d6816c20865849078f9777544b5ddf37c8620fe7bd1618e4b72fb72dddca1" + ], + [ + "a3604e5304caa5a6ba3c257c20b45dcd468f2c732a8ca59016e77b6476ac741ce8b16ca8360200000004acac6553ffffffff695e7006495517e0b79bd4770f955040610e74d35f01e41c9932ab8ccfa3b55d0300000007ac5253515365acffffffff6153120efc5d73cd959d72566fc829a4eb00b3ef1a5bd3559677fb5aae116e3800000000020052c29e7abd06ff98372a3a06227386609adc7665a602e511cadcb06377cc6ac0b8f63d4fdb03000000045100acacffffffff042090730500000000075163ac5252536514462e05000000000752ac636300656a20672c0400000000025153b2769900000000000465656a5300000000", + "5351", + 0, + 1460890590, + "0c1d9e9befee2f0aec339d9ad4cc7b5fb51f814a890a2b4274ce07c38c9807de" + ], + [ + 
"c6a72ed403313b7d027f6864e705ec6b5fa52eb99169f8ea7cd884f5cdb830a150cebade870100000007ac635165656a51ffffffff398d5838735ff43c390ca418593dbe43f3445ba69394a6d665b5dc3b4769b5d700000000065265ac515365ffffffff7ee5616a1ee105fd18189806a477300e2a9cf836bf8035464e8192a0d785eea3030000000700ac6a51516a52ffffffff018075fd0000000000015100000000", + "005251acac5252", + 2, + -656067295, + "2cc1c7514fdc512fd45ca7ba4f7be8a9fe6d3318328bc1a61ae6e7675047e654" + ], + [ + "93c12cc30270fc4370c960665b8f774e07942a627c83e58e860e38bd6b0aa2cb7a2c1e060901000000026300ffffffff4d9b618035f9175f564837f733a2b108c0f462f28818093372eec070d9f0a5440300000001acffffffff039c2137020000000001525500990100000000045265636a07980e0300000000005ba0e9d1", + "656a5100", + 1, + 18954182, + "ee8ff2eabaf8cfd532d9b932109a00b91c60b1d484d23ec7426fb306bfdbe2ff" + ], + [ + "97bddc63015f1767619d56598ad0eb5c7e9f880b24a928fea1e040e95429c930c1dc653bdb0100000008ac53acac00005152aaa94eb90235ed10040000000000287bdd0400000000016a8077673a", + "acac6a536352655252", + 0, + -813649781, + "5990b139451847343c9bb89cdba0e6daee6850b60e5b7ea505b04efba15f5d92" + ], + [ + "cc3c9dd303637839fb727270261d8e9ddb8a21b7f6cbdcf07015ba1e5cf01dc3c3a327745d0300000000d2d7804fe20a9fca9659a0e49f258800304580499e8753046276062f69dbbde85d17cd2201000000086352536a520000acffffffffbc75dfa9b5f81f3552e4143e08f485dfb97ae6187330e6cd6752de6c21bdfd2103000000050053650063ffffffff0313d0140400000000096565515253526aacac167f0a040000000007ac00535263536a9a52f8030000000003515163f75b66f2", + "6a635353636a65ac65", + 1, + 377286607, + "b0dd80fc8d5300efbc5ccade2b87ad8c2f1830c2a9fcd0719122ba14595b736b" + ], + [ + "236f91b702b8ffea3b890700b6f91af713480769dda5a085ae219c8737ebae90ff25915a3203000000056300ac6300811a6a10230f12c9faa28dae5be2ebe93f37c06a79e76214feba49bb017fb25305ff84eb020000000100ffffffff041e351703000000000351ac004ff53e05000000000253636c1460010000000000cb55f7010000000005515200510000000000", + "acac636a6aac5300", + 0, + 406448919, + 
"daea1bf312f48d7d2e5bc35e13855a18dec32035a3f726ac7649579fc49a098e" + ], + [ + "22e10d2003ab4ea9849a2801921113583b7c35c3710ff49a6003489395789a7cfb1e6051900100000006526a65535151ffffffff82f21e249ec60db33831d33b9ead0d56f6496db64337dcb7f1c3327c47729c4a020000000153ffffffff138f098f0e6a4cf51dc3e7a3b749f487d1ebde71b73b731d1d02ad1180ac7b8c02000000036563acda215011027a9484020000000007635165530000ac4bf6cb0400000000036aac653ce3f32c", + "0052", + 2, + 1136359457, + "aaf5af7efa7b63d4f4e017ee99d5f389977b7a724c359e0038994344f86af090" + ], + [ + "c47d5ad60485cb2f7a825587b95ea665a593769191382852f3514a486d7a7a11d220b62c54000000000563655253ac8c3cf32b0285b040e50dcf6987ddf7c385b3665048ad2f9317b9e0c5ba0405d8fde4129b0000000008525100ac65635300ffffffff549fe963ee410d6435bb2ed3042a7c294d0c7382a83edefba8582a2064af3265000000000152fffffffff7737a85e0e94c2d19cd1cde47328ece04b3e33cd60f24a8a345da7f2a96a6d00000000006656a0051656a28ff30d5049613ea020000000005ac51000063f06df1050000000007ac63516aac5153afef5901000000000700656500655253688bc00000000000076a5352526a53521ff1d5ff", + "51ac52", + 2, + -1296011911, + "89dbcd8060fbab09d417d99ab857c86e5ec5958d08ef11895fdd62dee97dcafc" + ], + [ + "0b43f122032f182366541e7ee18562eb5f39bc7a8e5e0d3c398f7e306e551cdef773941918030000000763006351ac51acffffffffae586660c8ff43355b685dfa8676a370799865fbc4b641c5a962f0849a13d825010000000263acffffffff0b2b6b800d8e77807cf130de6286b237717957658443674df047a2ab18e4138601000000066aac65520063ffffffff04f1dbca03000000000700635253656a52a6eefd0300000000036365655d8ca90200000000005a0d530400000000015300000000", + "65ac65acac", + 0, + 351448685, + "f28f61918bfe72b90a70c7b4c9fcb68cef50c7d6acac097acdb0b3af673ebc29" + ], + [ + 
"4b0ecc0c03ba35700d2a30a71f28e432ff6ac7e357533b49f4e97cf28f1071119ad6b97f3e0300000007ac516363ac63acffffffffcd6a2019d99b5c2d639ddca0b1aa5ea7c1326a071255ea226960bd88f45ca57d00000000085253655363005353ffffffffba257635191c9f216de3277be548cb5a2313114cb1a4c563b03b4ef6c0f4f7040300000000da542edf0495cdc40100000000026353c049e903000000000652516a5365512b0f9304000000000863516aac65516552fa9ece050000000008ac6500005152530000000000", + "6551525352510052", + 1, + -1355414590, + "3cd85f84aae6d702436f3f9b8980adcc1f8f202e957759540a27da0a32fc6c87" + ], + [ + "adaac0a803f66811346271c733036d6e0d45e15a9b602092e2e04ad93564f196e7f020b088000000000600526a636a00700ec3f9db07a3a6ce910bf318c7ec87a876e1f2a3366cc69f20cde09203b99c1cb9d15800000000050000ac636a4d0de554ebe95c6cc14faf5ff6361d1deba9474b8b0fd3b93c011cd96aec783abb3f3683020000000465005251ffffffff0464eb100500000000055200006a651beaa80300000000005a2f31050000000005526a65ac52ba7db100000000000352516a0cfb46e7", + "0051ac52636a", + 1, + -184733716, + "0ca3afec5af28a1f1d15b7b89ec7706735689532178532d5750e302b99d5ce58" + ], + [ + "af1c4ab301ec462f76ee69ba419b1b2557b7ded639f3442a3522d4f9170b2d6859765c3df402000000016affffffff01a5ca6c00000000000652536a00005300000000", + "6a6351", + 0, + 110304602, + "25595a96fbe7eca4053970f9d5091926823017bfa3f6a9ccea5fada24e68be1e" + ], + [ + "0bfd34210451c92cdfa02125a62ba365448e11ff1db3fb8bc84f1c7e5615da40233a8cd368010000000252ac9a070cd88dec5cf9aed1eab10d19529720e12c52d3a21b92c6fdb589d056908e43ea910e0200000009ac516a52656a6a5165ffffffffc3edcca8d2f61f34a5296c405c5f6bc58276416c720c956ff277f1fb81541ddd00000000020063ffffffff811247905cdfc973d179c03014c01e37d44e78f087233444dfdce1d1389d97c3020000000551630000631724a26e02ca37c90200000000075153525352ac529012a90100000000085200525253535353fa32575b", + "5352ac6351", + 1, + -1087700448, + "969f104745a675fd6ca5fec692be9ec178eaefdd9a3354885bdc5e67a106b807" + ], + [ + 
"2c84c0640487a4a695751d3e4be48019dbaea85a6e854f796881697383ea455347d2b2769001000000055265526500ffffffff6aac176d8aa00778d496a7231eeb7d3334f20c512d3db1683276402100d98de5030000000700536a5263526ac1ee9ceb171c0c984ebaf12c234fd1487fbf3b3d73aa0756907f26837efba78d1bed332003000000004d9e8ec0bed837cb929bbed76ee848959cec59de44bd7667b7631a744f880d5c71a20cfd0100000006005363515300ffffffff023753fb0000000000036565532d38730500000000070051526a63ac5200000000", + "650053", + 0, + -877941183, + "3538e501b9b2b660b86aa1d1ff1b0c6c96d4f888f7bf4cda61ef8f6305f6606a" + ], + [ + "1f7e4b1b045d3efa6cd7a11d7873a8bab886c19bd11fcb6712f0948f2db3a7be76ff76c8f1000000000852656a0065ac5363ffffffffdaafcfa6029336c997680a541725190f09a6f6da21e54560eca4b5b8ae987da1000000000952ac52acac52515165ffffffff825a38d3b1e5bb4d10f33653ab3ab6882c7abdaec74460257d1528ce7be3f98e0100000007526a006a656a63c14adc8f04953a5d3d3f89237f38b857dd357713896d36215f7e8b77b11d98ea3cdc93df02000000015212484f6104bfafae0300000000025263a2b012000000000004656300516c4d2605000000000653ac6500655301cc93030000000001ac14643b1f", + "63acac53", + 0, + 333824258, + "18da6ceb011cd36f15ad7dd6c55ef07e6f6ed48881ce3bb31416d3c290d9a0e9" + ], + [ + "467a3e7602e6d1a7a531106791845ec3908a29b833598e41f610ef83d02a7da3a1900bf29600000000046a636353ffffffff031db6dac6f0bafafe723b9199420217ad2c94221b6880654f2b35114f44b1df01000000086552636a63ac6352ffffffff02b3b95c01000000000263007032160300000000003261c0aa", + "6a", + 0, + 2110869267, + "bec81678ef196011c242f24da12b52eaa5ef56c9cd72c73cb2123b0e65bf8c1b" + ], + [ + "8713bc4f01b411149d575ebae575f5dd7e456198d61d238695df459dd9b86c4e3b2734b62e0300000003ac6363ffffffff03b58049050000000002ac653c714c04000000000953656a005151526a527b5a9e03000000000652ac5100525300000000", + "52", + 0, + -647281251, + "0e0bed1bf2ff255aef6e5c587f879ae0be6222ab33bd75ee365ec6fbb8acbe38" + ], + [ + 
"f2ba8a8701b9c401efe3dd0695d655e20532b90ac0142768cee4a3bb0a89646758f544aa8102000000036a52527899f4e4040c6f0b03000000000663656553005152b60c000000000008515200630053ac53a49c5f0400000000055351630063fa27340300000000015100000000", + "ac635251", + 0, + -1328936437, + "87584c7ad7b43b0a85619d327f8663349cc261c65dcd51aad22319cb4e57b618" + ], + [ + "b5a7df6102107beded33ae7f1dec0531d4829dff7477260925aa2cba54119b7a07d92d5a1d02000000046a516a52803b625c334c1d2107a326538a3db92c6c6ae3f7c3516cd90a09b619ec6f58d10e77bd6703000000056563006a63ffffffff0117484b03000000000653ac52526a65c1b548a1", + "ac006a525100", + 0, + 2074359913, + "708fcbe48b6921601571c5624b905bde73f5c02bf363f2f99210db0ad522546b" + ], + [ + "278cb16204b9dadf400266106392c4aa9df01ba03af988c8139dae4c1818ac009f13fc5f1a00000000065200ac656a52ffffffffd006bbebd8cbd7bdead24cddc9badfcc6bc0c2e63c037e5c29aa858f5d0f3e7d01000000046a0051acffffffffbc62a5f57e58da0b67956003ae81ac97cb4cbd1d694c914fc41515c008c4d8fd020000000165e329c844bcc16164be64b64a81cbf4ffd41ed2934e0daa0040ccb8365bab0b2a9e401c18030000000152ffffffff02588460030000000000a25a12030000000005535100005300000000", + "65536a5300ac51", + 3, + 989407546, + "1c29f110576f4a3b257f67454d99dfc0dee62ef5517ca702848ce4bd2ea1a1d7" + ], + [ + "49eb2178020a04fca08612c34959fd41447319c190fb7ffed9f71c235aa77bec28703aa1820200000003ac6353abaff326071f07ec6b77fb651af06e8e8bd171068ec96b52ed584de1d71437fed186aecf0300000001acffffffff03da3dbe02000000000552ac63ac6a8f3b680400000000096a536a65636a53516a5175470100000000016500000000", + "6a536365", + 0, + 1283691249, + "340ae3ff3e8548ff8212003e194c8022bfb6cccc5561b1fdef7194f17063a41e" + ], + [ + "0f96cea9019b4b3233c0485d5b1bad770c246fe8d4a58fb24c3b7dfdb3b0fd90ea4e8e947f0300000006006a5163515303571e1e01906956030000000003635353adc0fbbe", + "acac", + 0, + -1491469027, + "b0a8e0803b176c7e4f53dcefe22250f473758f5b607d1bbcd9108d69ba178b18" + ], + [ + 
"9a7d858604577171f5fe3f3fd3e5e039c4b0a06717a5381e9977d80e9f53e025e0f16d2877020000000752636565536353ffffffff5862bd028e8276e63f044be1dddcbb8d0c3fa097678308abf2b0f45104a93dbd0100000001531200667ba8fdd3b28e98a35da73d3ddfe51e210303d8eb580f923de988ee632d77793892030000000752526363526563ffffffffe9744eb44db2658f120847c77f47786d268c302120d269e6004455aa3ea5f5e202000000076300636a656551ffffffff03c61a3c020000000005516a6a6a53737f1a05000000000553ac65536592a4a00400000000016367edf6c8", + "535352", + 3, + 659348595, + "44f468a39e90f950895d7b1798bf5d41133eb9f61ec67a7a17efc9fb733d3438" + ], + [ + "148e68480196eb52529af8e83e14127cbfdbd4a174e60a86ac2d86eac9665f46f4447cf7aa01000000045200ac538f8f871401cf240c030000000005525252656a5266cf61", + "", + 0, + -344314825, + "a43a505f93bedd8358f9ff2ac4afc9b5306e712535968ceba2e4200433f27fbc" + ], + [ + "e2bc29d4013660631ba14ecf75c60ec5e9bed7237524d8c10f66d0675daa66d1492cb834530200000004ac510065e42d0c9e04f2b26c01000000000751525152acac65a35b7504000000000853ac6aac0065005394688c0400000000056365526553a1bced0300000000016a00000000", + "650063655353", + 0, + -888431789, + "43ee16b706611adf52cdf5af06efd16ad75dc7e18ad067d9d6e62ead2985032e" + ], + [ + "0c8a70d70494dca6ab05b2bc941b5b431c43a292bd8f2f02eab5e240a408ca73a676044a4103000000046a51006affffffff84496004e54836c035821f14439149f22e1db834f315b24588ba2f031511926c0100000000ffffffffbbc5e70ed1c3060ba1bfe99c1656a3158a7307c3ce8eb362ec32c668596d2bd3000000000763656363535100b039344c6fc4f9bec24322e45407af271b2d3dfec5f259ee2fc7227bc5285e22b3be85b40100000007ac0053ac6a5352e5ddfcff02d50231020000000005006a51536ab086d9020000000004ac51ac6a00000000", + "636565acac6a", + 3, + 241546088, + "5c11451d99e0679995bafd552facad1ffb742158905843483370caac4c69360c" + ], + [ + 
"f98f79cf0274b745e1d6f36da7cbe205a79132a7ad462bdc434cfb1dcd62a6977c3d2a5dbc010000000553516a5365ffffffff4f89f485b53cdad7fb80cc1b7e314b9735b9383bc92c1248bb0e5c6173a55c0d010000000353655293f9b014045ad96d02000000000963ac526a53ac636365f4c27904000000000952536563635152526a2788f0030000000002516aff5add01000000000763530051655351d04716ba", + "6552536a53", + 1, + -2128899945, + "95c24251cff87b2e50211ee272d0527d13ecdeac9bf8c7345fb09fb728dec251" + ], + [ + "6c7913f902aa3f5f939dd1615114ce961beda7c1e0dd195be36a2f0d9d047c28ac62738c3a020000000353ac00ffffffff477bf2c5b5c6733881447ac1ecaff3a6f80d7016eee3513f382ad7f554015b9701000000056563ac5152ffffffff04e58fe104000000000600526a526553e59790010000000001525a834b03000000000035fdaf0200000000076551ac6551520000000000", + "63ac53", + 1, + 1285478169, + "669fa8a8f4b97cc8e1816ae6adb4413a175042809e30bcf1d0a0397b2c1f6583" + ], + [ + "4624aa9204584f06a8a325c84e3b108cafb97a387af62dc9eab9afd85ae5e2c71e593a3b690200000003636a005eb2b44eabbaeca6257c442fea00107c80e32e8715a1293cc164a42e62ce14fea146220c020000000090b9ee38106e3310037bfc519fd209bdbd21c588522a0e96df5fba4e979392bc993bfe9f01000000076363636a6353536f1907d218ef6f3c729d9200e23c1dbff2df58b8b1282c6717b26cf760ee4c880d23f4d100000000086a516a536a525163ffffffff01d6f162050000000000ebbab208", + "5253650053", + 1, + -1515409325, + "6cf9cd409b7185b1f118171f0a34217af5b612ea54195ea186505b667c19337f" + ], + [ + "16562fc503f1cf9113987040c408bfd4523f1512da699a2ca6ba122dc65677a4c9bf7763830000000003636552ffffffff1ec1fab5ff099d1c8e6b068156f4e39b5543286bab53c6d61e2582d1e07c96cf02000000045163656affffffffd0ef40003524d54c08cb4d13a5ee61c84fbb28cde9eca7a6d11ba3a9335d8c620100000007635153536a6300fbb84fc2012003a6010000000002636a00000000", + "63636a006a6a", + 0, + -1310262675, + "4c154533157b2f889b7751e83426ba1a4e124b9ea3088486866fb5433b82d1d6" + ], + [ + 
"531665d701f86bacbdb881c317ef60d9cd1baeffb2475e57d3b282cd9225e2a3bf9cbe0ded01000000076300ac515263acffffffff0453a8500100000000076353ac516a6565e5e9200500000000026a52a44caa00000000000453ac000065e41b0500000000076500ac0065526ab4476f4d", + "006563006a00636a", + 0, + 1770013777, + "df3ed95fa9c904a0ecc41206d313ac22157d20b93dc02cb18db6cbd430aad0af" + ], + [ + "0f1227a20140655a3da36e413b9b5d108a866f6f147eb4940f032f5a89854eae6d7c3a91600100000009525363515153515253e37a79480161ab6102000000000000000000", + "65005200", + 0, + -1996383599, + "fef5ce63369124769b79343e66e29c436973d7d3f8cd0e0ab3318dc070b6976b" + ], + [ + "063ff6eb01aff98d0d2a6db224475010edb634c2f3b46257084676adeb84165a4ff8558d7601000000066353006a5165deb3262c042d109c000000000006636352ac005200b9c4050000000007516300ac510063cfffc800000000000200639e815501000000000700526a52ac6365ac7b07b8", + "656552ac6500", + 0, + -1559847112, + "51448b68451d6012d768a9453ff753868bdc4d95a9f08d8bcdc3cbedcbc71ede" + ], + [ + "3320f6730132f830c4681d0cae542188e4177cad5d526fae84565c60ceb5c0118e844f90bd030000000163ffffffff0257ec5a040000000005525251ac6538344d000000000002515200000000", + "5352656a53ac516a65", + 0, + 788050308, + "3afacaca0ef6be9d39e71d7b1b118994f99e4ea5973c9107ca687d28d8eba485" + ], + [ + "c13aa4b702eedd7cde09d0416e649a890d40e675aa9b5b6d6912686e20e9b9e10dbd40abb10000000007636353515351ac11d24dc4cc22ded7cdbc13edd3f87bd4b226eda3e4408853a57bcd1becf2df2a1671fd1600000000045165516affffffff01baea300100000000056a5253005300000000", + "0065", + 0, + -1195908377, + "610729ed2a518717bf30c32a6ffaa7aba8963225dce2ec3897805e35821d9b8b" + ], + [ + "d9a6f20e019dd1b5fae897fb472843903f9c3c2293a0ffb59cff2b413bae6eceab574aaf9d030000000563006a515102f54939032df5100100000000046a5165530ec28f010000000004ac5100007e874905000000000651005265ac6a00000000", + "ac63acac", + 0, + 271463254, + "2df4cc2686d6e168552f5443c218a086e4c41e01ba91e2c5365a7f63436e7ff6" + ], + [ + 
"157c81bf0490432b3fcb3f9a5b79e5f91f67f05efb89fa1c8740a3fe7e9bdc18d7cb6acd2203000000026351ffffffff912e48e72bbcf8a540b693cf8b028e532a950e6e63a28801f6eaad1afcc52ad00000000000b1a4b170a2b9e60e0cad88a0085137309f6807d25d5afb5c1e1d32aa10ba1cdf7df596dd0000000007525165656a51653674fba32a76fe09b273618d5f14124465933f4190ba4e0fd09d838daafc6223b31642ac00000000086a53536551ac6565ffffffff01fe9fb603000000000751656a5165636a00000000", + "006a6551", + 3, + -64357617, + "aef164bce218201307d98efbed396dd7a18bf2df7f3e5e8c768ce732499c1c7e" + ], + [ + "a2692fff03b2387f5bacd5640c86ba7df574a0ee9ed7f66f22c73cccaef3907eae791cbd230200000003536363ffffffff4d9fe7e5b375de88ba48925d9b2005447a69ea2e00495a96eafb2f144ad475b40000000008000053000052636537259bee3cedd3dcc07c8f423739690c590dc195274a7d398fa196af37f3e9b4a1413f810000000005ac63acac52ffffffff04c65fe60200000000065151536365657236fc02000000000800526300656a6a5195b8b6030000000007ac5165636aac6a7d7b66010000000001ac00000000", + "51", + 2, + -826546582, + "6b3fcf04dd294362d2464c8516896043ba807fe901641472d23e89a50f6113a5" + ], + [ + "2c5b003201b88654ac2d02ff6762446cb5a4af77586f05e65ee5d54680cea13291efcf930d0100000004536a006a37423d2504100367000000000004536a515335149800000000000152166aeb03000000000452510063226c8e03000000000000000000", + "635251", + 0, + 1060344799, + "7e058ca5dd07640e4aae7dea731cfb7d7fef1bfd0d6d7b6ce109d041f4ca2a31" + ], + [ + "f981b9e104acb93b9a7e2375080f3ea0e7a94ce54cd8fb25c57992fa8042bdf4378572859f0100000002630008604febba7e4837da77084d5d1b81965e0ea0deb6d61278b6be8627b0d9a2ecd7aeb06a0300000005ac5353536a42af3ef15ce7a2cd60482fc0d191c4236e66b4b48c9018d7dbe4db820f5925aad0e8b52a03000000070063510052516301863715efc8608bf69c0343f18fb81a8b0c720898a3563eca8fe630736c0440a179129d03000000086aac6a52ac6a63ac44fec4c00408320a03000000000062c21c030000000007ac6a655263006553835f0100000000015303cd60000000000005535263536558b596e0", + "00", + 0, + -2140385880, + "49870a961263354c9baf108c6979b28261f99b374e97605baa532d9fa3848797" + ], + [ + 
"e7416df901269b7af14a13d9d0507709b3cd751f586ce9d5da8d16a121e1bd481f5a086e1103000000046a005200ffffffff01aa269c040000000006acac6a6a5263ee718de6", + "525363", + 0, + 1309186551, + "eea7d2212bda2d408fff146f9ae5e85e6b640a93b9362622bb9d5e6e36798389" + ], + [ + "402a815902193073625ab13d876190d1bbb72aecb0ea733c3330f2a4c2fe6146f322d8843a0300000007656a0000535363fffffffff9dccdec5d8509d9297d26dfcb1e789cf02236c77dc4b90ebccbf94d1b5821150300000001510bf1f96a03c5c145000000000002ac6ae11b1c0100000000055163516a5239c8a600000000000365636300000000", + "63536aac", + 0, + -1811424955, + "0090803a20102a778ab967a74532faee13e03b702083b090b1497bc2267ee2fe" + ], + [ + "c4b702e502f1a54f235224f0e6de961d2e53b506ab45b9a40805d1dacd35148f0acf24ca5e00000000075200ac65ac53acf34ba6099135658460de9d9b433b84a8562032723635baf21ca1db561dce1c13a06f4407000000000751ac006a63516affffffff02a853a603000000000163d17a6703000000000463006a5200000000", + "ac5363515153", + 1, + 480734903, + "6ac2296565d013ac4f9dc737821b534cfd42eea3dace80bee4c4669662bb6ffd" + ], + [ + "9b83f78704f492b9b353a3faad8d93f688e885030c274856e4037818848b99e490afef27770200000000ffffffff36b60675a5888c0ef4d9e11744ecd90d9fe9e6d8abb4cff5666c898fdce98d9e00000000046a656352596370fca7a7c139752971e169a1af3e67d7656fc4fc7fd3b98408e607c2f2c836c9f27c030000000553ac516300a0761de7e158947f401b3595b7dc0fe7b75fa9c833d13f1af57b9206e4012de0c41b8124030000000853656a5353510052242e5f5601bf83b301000000000465516a6300000000", + "63515200ac656365", + 3, + -150879312, + "9cf05990421ea853782e4a2c67118e03434629e7d52ab3f1d55c37cf7d72cdc4" + ], + [ + 
"f492a9da04f80b679708c01224f68203d5ea2668b1f442ebba16b1aa4301d2fe5b4e2568f301000000085300535152526365ffffffff93b34c3f37d4a66df255b514419105b56d7d60c24bf395415eda3d3d8aa5cd0101000000020065ffffffff9dba34dabdc4f1643b372b6b77fdf2b482b33ed425914bb4b1a61e4fad33cf39000000000152ffffffffbbf3dc82f397ef3ee902c5146c8a80d9a1344fa6e38b7abce0f157be7adaefae0000000009515351005365006a51ffffffff021359ba010000000000403fea0200000000085200ac6353ac635300000000", + "00ac51acacac", + 0, + -2115078404, + "bcf2cd25bbe87547620e87088e51f633500d68e9d4f31f07d7b659750461cee9" + ], + [ + "2f73e0b304f154d3a00fde2fdd40e791295e28d6cb76af9c0fd8547acf3771a02e3a92ba37030000000752ac63516565639aa95467b065cec61b6e7dc4d6192b5536a7c569315fb43f470078b31ed22a55dab8265f02000000070065636a6a6a53ffffffff9e3addbff52b2aaf9fe49c67017395198a9b71f0aa668c5cb354d06c295a691a0100000000ffffffff45c2b4019abaf05c5e484df982a4a07459204d1343a6ee5badade358141f8f990300000007ac516a6aacac6308655cd601f3bc2f0000000000015200000000", + "", + 0, + -2082053939, + "9a95e692e1f78efd3e46bb98f178a1e3a0ef60bd0301d9f064c0e5703dc879c2" + ], + [ + "5a60b9b503553f3c099f775db56af3456330f1e44e67355c4ab290d22764b9144a7b5f959003000000030052acbd63e0564decc8659aa53868be48c1bfcda0a8c9857b0db32a217bc8b46d9e7323fe9649020000000453ac6551d0ecf806211db989bead96c09c7f3ec5f73c1411d3329d47d12f9e46678f09bac0dc383e0200000000ffffffff01494bb202000000000500516551ac00000000", + "ac", + 0, + 1169947809, + "62a36c6e8da037202fa8aeae03e533665376d5a4e0a854fc4624a75ec52e4eb1" + ], + [ + 
"7e98d353045569c52347ca0ff2fdba608829e744f61eb779ffdb5830aae0e6d6857ab2690e03000000065365ac656352ffffffffa890dd37818776d12da8dca53d02d243ef23b4535c67016f4c58103eed85360f030000000093dbacdc25ca65d2951e047d6102c4a7da5e37f3d5e3c8b87c29b489360725dcd117ee2003000000056a6300ac53c7e99fa1dc2b8b51733034e6555f6d6de47dbbf1026effac7db80cb2080678687380dc1e02000000075352005263516affffffff04423272040000000006635365510051e0f53b0500000000086300516552635152f74a5f04000000000553ac0053520e8e5f00000000000751ac5363516a6a00000000", + "6a516352", + 3, + 890006103, + "697cd75ff0a4aef1886e8f93a8159c6a068d806d73cb534321ac2c3918826364" + ], + [ + "e3649aa40405e6ffe377dbb1bbbb672a40d8424c430fa6512c6165273a2b9b6afa9949ec430200000006630052655153a365f62f2792fa90c784efe3f0981134d72aac0b1e1578097132c7f0406671457c332b840200000002536ad780f40cf51be22bb4ff755434779c7f1def4999e4f289d2bd23d142f36b66fbe5cfbb4b01000000056a5252ac521430ffdc67127c9c0fc97dcd4b578dab64f4fb9550d2b59d599773962077a563e8b6732c02000000016affffffff04cb2687000000000001636e320904000000000252acf70e9401000000000100dc33930500000000050063536aacbc231765", + "65520053", + 3, + -2016196547, + "2b5c35cb390a72b8b02629d48c5a7487c2790a0952f8422a6f23ab9ecfb80e89" + ], + [ + "1d033569040700441686672832b531ab55db89b50dc1f9fc00fb72218b652da9dcfbc83be901000000066551ac526a632b390f9ad068e5fdee6563e88e2a8e4e09763c861072713dc069893dc6bbc9db3f00e26502000000096a5363526565525252ffffffff8a36bdd0aaf38f6707592d203e14476ca9f259021e487135c7e8324244057ed90300000000ed3fb2a3dfd4d46b5f3603fe0148653911988457bd0ed7f742b07c452f5476c228ff9f600200000007526aac00525152ffffffff04b88e48030000000000c753d602000000000853510000006553518fda2603000000000853ac52acac5263534839f1030000000006ac006aacac5300000000", + "5165536353000052", + 1, + 2075958316, + "c2cefaec2293134acbcf6d2a8bf2b3eb42e4ec04ee8f8bf30ff23e65680677c1" + ], + [ + 
"4c4be7540344050e3044f0f1d628039a334a7c1f7b4573469cfea46101d6888bb6161fe9710200000000ffffffffac85a4fdad641d8e28523f78cf5b0f4dc74e6c5d903c10b358dd13a5a1fd8a06000000000163e0ae75d05616b72467b691dc207fe2e65ea35e2eadb7e06ea442b2adb9715f212c0924f10200000000ffffffff0194ddfe02000000000265ac00000000", + "00006500", + 1, + -479922562, + "d66924d49f03a6960d3ca479f3415d638c45889ce9ab05e25b65ac260b51d634" + ], + [ + "202c18eb012bc0a987e69e205aea63f0f0c089f96dd8f0e9fcde199f2f37892b1d4e6da90302000000055352ac6565ffffffff0257e5450100000000025300ad257203000000000000000000", + "520052ac6a005265", + 0, + 168054797, + "502967a6f999f7ee25610a443caf8653dda288e6d644a77537bcc115a8a29894" + ], + [ + "32fa0b0804e6ea101e137665a041cc2350b794e59bf42d9b09088b01cde806ec1bbea077df0200000008515153650000006506a11c55904258fa418e57b88b12724b81153260d3f4c9f080439789a391ab147aabb0fa0000000006000052ac51510986f2a15c0d5e05d20dc876dd2dafa435276d53da7b47c393f20900e55f163b97ce0b800000000007526a520065636a8087df7d4d9c985fb42308fb09dce704650719140aa6050e8955fa5d2ea46b464a333f870000000009636300636a6565006affffffff01994a0d040000000002536500000000", + "516563530065", + 2, + -163068286, + "f58637277d2bc42e18358dc55f7e87e7043f5e33f4ce1fc974e715ef0d3d1c2a" + ], + [ + "ae23424d040cd884ebfb9a815d8f17176980ab8015285e03fdde899449f4ae71e04275e9a80100000006006553530053ffffffff018e06db6af519dadc5280c07791c0fd33251500955e43fe4ac747a4df5c54df020000000251ac330e977c0fec6149a1768e0d312fdb53ed9953a3737d7b5d06aad4d86e9970346a4feeb503000000075151ac6563526a67cabc431ee3d8111224d5ecdbb7d717aa8fe82ce4a63842c9bd1aa848f111910e5ae1eb0100000004ac515300bfb7e0d7048acddc030000000009636a5253636a655363a3428e040000000001525b99c6050000000003655265717e6e020000000000d99011eb", + "ac6a6a516565", + 1, + -716251549, + "b098eb9aff1bbd375c70a0cbb9497882ab51f3abfebbf4e1f8d74c0739dc7717" + ], + [ + 
"030f44fc01b4a9267335a95677bd190c1c12655e64df74addc53b753641259af1a54146baa020000000152e004b56c04ba11780300000000026a53f125f001000000000251acd2cc7c03000000000763536563655363c9b9e50500000000015200000000", + "ac", + 0, + -1351818298, + "19dd32190ed2a37be22f0224a9b55b91e37290577c6c346d36d32774db0219a3" + ], + [ + "c05f448f02817740b30652c5681a3b128322f9dc97d166bd4402d39c37c0b14506d8adb5890300000003536353ffffffffa188b430357055ba291c648f951cd2f9b28a2e76353bef391b71a889ba68d5fc02000000056565526a6affffffff02745f730100000000003ec34c0400000000036aac5200000000", + "516551510053", + 0, + -267877178, + "bd1beb1f9867c22627f7c4db637ba3c73231bfc931760c2af322da90b1cf0300" + ], + [ + "163ba45703dd8c2c5a1c1f8b806afdc710a2a8fc40c0138e2d83e329e0e02a9b6c837ff6b80000000006006551516a522b48b8f134eb1a7e6f5a6fa319ce9d11b36327ba427b7d65ead3b4a6a69f85cda8bbcd22030000000563656552acffffffffdbcf4955232bd11eef0cc6954f3f6279675b2956b9bcc24f08c360894027a60201000000056500006500ffffffff04d0ce9d0200000000008380650000000000015233f360040000000002006aedcf0801000000000000000000", + "000065006500ac", + 0, + 216965323, + "853251875a6a8b3d9cdb5bcbe19256e5eefa1a56883af28e0b6e8804455ecc27" + ], + [ + "07f7f5530453a12ad0c7eb8fbc3f140c7ab6818144d67d2d8752600ca5d9a9358e2dff87d4000000000563526a526a9e599c379d455e2da36d0cde88d931a863a3e97e01e93b9edb65856f3d958dc08b92b720000000000165bbc8d66dae3b1b170a6e2457f5b161465cb8706e0e6ffc6af55deb918365f14c5f40d4890100000000a7bd77c069ee4b48638e2363fcf2a86b02bea022047bd9fcb16d2b94ad068308d19b31cb00000000046a5300529672aa8f01dbd8a205000000000663536353006a02e99901", + "ac006351006a6363", + 1, + 119789359, + "6629a1e75c6ae8f4f9d5f734246b6a71682a5ea57246040ef0584f6b97916175" + ], + [ + 
"fe647f950311bf8f3a4d90afd7517df306e04a344d2b2a2fea368935faf11fa6882505890d00000000045100516affffffff43c140947d9778718919c49c0535667fc6cc727f5876851cb8f7b6460710c7f60100000000ffffffffce4aa5d90d7ab93cbec2e9626a435afcf2a68dd693c15b0e1ece81a9fcbe025e0300000000ffffffff02f34806020000000002515262e54403000000000965635151ac655363636de5ce24", + "6a005100ac516351", + 2, + 989643518, + "818a7ceaf963f52b5c48a7f01681ac6653c26b63a9f491856f090d9d60f2ffe3" + ], + [ + "a1050f8604d0f9d2feefcdb5051ae0052f38e21bf39daf583fd0c3900faa3eab5d431c0bbe030000000653536a005151683d27e5c6e0da8f22125823f32d5d98477d8098ef36263b9694d61d4d85d3f2ac02b7570200000006000052005165ffffffff0cad981542bcb54a87d9400aa63e514c7c6fab7158c2b1fb37821ea755eb162a0200000000b94feb5100e5ef3bf8ed8d43356c8a8d5ac6c7e80d7ff6040f4f0aa19abbe783f4f461240200000007636500000052655686fd70042be3ad02000000000365636a15680b000000000004acac53511277c705000000000452635252d27a0102000000000000000000", + "6a6aac65655251", + 1, + -982144648, + "e5fa92d182456e8dca1545376e62e54254106cb39566ca17932652385b57177c" + ], + [ + "cef7316804c3e77fe67fc6207a1ea6ae6eb06b3bf1b3a4010a45ae5c7ad677bb8a4ebd16d90200000009ac536a5152ac5263005301ab8a0da2b3e0654d31a30264f9356ba1851c820a403be2948d35cafc7f9fe67a06960300000006526a63636a53ffffffffbada0d85465199fa4232c6e4222df790470c5b7afd54704595a48eedd7a4916b03000000076563ac006a006ab28dba4ad55e58b5375053f78b8cdf4879f723ea4068aed3dd4138766cb4d80aab0aff3d0300000003ac6a00ffffffff010f5dd6010000000003006a5100000000", + "", + 1, + 889284257, + "31729d511760f21d9f00c70018ef391ac6e9d18d9d7660e1e1e7a8c1a1d97263" + ], + [ + 
"7b3ff28004ba3c7590ed6e36f45453ebb3f16636fe716acb2418bb2963df596a50ed954d2e03000000055251515265ffffffff706ee16e32e22179400c9841013971645dabf63a3a6d2d5feb42f83aa468983e030000000653ac51ac5152ffffffffa03a16e5e5de65dfa848b9a64ee8bf8656cc1f96b06a15d35bd5f3d32629876e020000000043c1a3965448b3b46f0f0689f1368f3b2981208a368ec5c30defb35595ef9cf95ffd10e902000000036aac65253a5bbe042e907204000000000800006565656352634203b4020000000002656336b3b70100000000007a063f0100000000026500a233cb76", + "006551636a53ac5251", + 1, + -1144216171, + "b4fb45bad7afba3e35275336fd09b787897e280f66b630c5c5fbd4a552e75b09" + ], + [ + "d5c1b16f0248c60a3ddccf7ebd1b3f260360bbdf2230577d1c236891a1993725e262e1b6cb000000000363636affffffff0a32362cfe68d25b243a015fc9aa172ea9c6b087c9e231474bb01824fd6bd8bc030000000352516affffffff0420d9a70200000000045152656a45765d0000000000055252536a5277bad1000000000001523f3f3803000000000463acac5200000000", + "52636a5265", + 1, + 1305123906, + "978dc178ecd03d403b048213d904653979d11c51730381c96c4208e3ea24243a" + ], + [ + "1be8ee5604a9937ebecffc832155d9ba7860d0ca451eaced58ca3688945a31d93420c27c460100000005ac5300535288b65458af2f17cbbf7c5fbcdcfb334ffd84c1510d5500dc7d25a43c36679b702e850f7c0200000003005300ffffffff7c237281cb859653eb5bb0a66dbb7aeb2ac11d99ba9ed0f12c766a8ae2a2157203000000066aac526365acfffffffff09d3d6639849f442a6a52ad10a5d0e4cb1f4a6b22a98a8f442f60280c9e5be8020000000400656552ffffffff0398fe83030000000003526aacbdd6ec0100000000045352526a82c1e6040000000001652b71c40c", + "6563526353656351", + 2, + -853634888, + "d7462d6fa13f37e2720ca416f770a1574fcf53d7d048283c0cdc0671ade9811e" + ], + [ + 
"9e0f99c504fbca858c209c6d9371ddd78985be1ab52845db0720af9ae5e2664d352f5037d4010000000552ac53636affffffff0e0ce866bc3f5b0a49748f597c18fa47a2483b8a94cef1d7295d9a5d36d31ae7030000000663515263ac635bb5d1698325164cdd3f7f3f7831635a3588f26d47cc30bf0fefd56cd87dc4e84f162ab702000000036a6365ffffffff85c2b1a61de4bcbd1d5332d5f59f338dd5e8accbc466fd860f96eef1f54c28ec030000000165ffffffff04f5cabd010000000007000052ac526563c18f1502000000000465510051dc9157050000000008655363ac525253ac506bb600000000000765656a5363006a00000000", + "006a6a0052", + 0, + 1186324483, + "2f9b7348600336512686e7271c53015d1cb096ab1a5e0bce49acd35bceb42bc8" + ], + [ + "11ce51f90164b4b54b9278f0337d95c50d16f6828fcb641df9c7a041a2b274aa70b1250f2b00000000076a6a65006551524c9fe7f604af44be050000000005525365006521f79a0300000000015306bb4e04000000000265ac99611a05000000000665ac656500006dc866d0", + "", + 0, + -1710478768, + "e0b6dfd00056068d3cb13fab177002404b6286b12f247eec0a3ee6dcf7723a39" + ], + [ + "86bc233e02ba3c647e356558e7252481a7769491fb46e883dd547a4ce9898fc9a1ca1b77790000000003535151f0c1d09c37696d5c7c257788f5dff5583f4700687bcb7d4acfb48521dc953659e325fa390300000003acac5280f29523027225af03000000000663ac006565ac7e59d90400000000016549dac846", + "53006aac52acac", + 0, + 711159875, + "178241b9244fb157a6e49870b83dd8d2c80db05b742cc2769b2ef101cbae1094" + ], + [ + "beac155d03a853bf18cd5c490bb2a245b3b2a501a3ce5967945b0bf388fec2ba9f04c03d68030000000012fe96283aec4d3aafed8f888b0f1534bd903f9cd1af86a7e64006a2fa0d2d30711af770010000000163ffffffffd963a19d19a292104b9021c535d3e302925543fb3b5ed39fb2124ee23a9db00302000000056500ac63acffffffff01ad67f503000000000300ac5189f78db2", + "53536a636500", + 2, + 748992863, + "bde3dd0575164d7ece3b5783ce0783ffddb7df98f178fe6468683230314f285a" + ], + [ + 
"81dab34a039c9e225ba8ef421ec8e0e9d46b5172e892058a9ade579fe0eb239f7d9c97d45b0300000008ac65655351526363ffffffff10c0faaf7f597fc8b00bbc67c3fd4c6b70ca6b22718d15946bf6b032e62dae570000000004536a006a02cddec3acf985bbe62c96fccf17012a87026ed63fc6756fa39e286eb4c2dd79b59d37400300000002516affffffff04f18b8d030000000005535152636564411c020000000003006300e965750300000000001bd2cf02000000000365526a00000000", + "006551", + 0, + -1488174485, + "837f15a277e25f47bc2bdd17e99eed396ac3b88c740459a43a404040c237a127" + ], + [ + "489ebbf10478e260ba88c0168bd7509a651b36aaee983e400c7063da39c93bf28100011f280100000001632fc856f05f59b257a4445253e0d91b6dffe32302d520ac8e7f6f2467f7f6b4b65f2f59e903000000076353ac6351656affffffff0122d9480db6c45a2c6fd68b7bc57246edffbf6330c39ccd36aa3aa45ec108fc0300000001659a7e78a69aadd6b030b12602dff0739bbc346b466c7c0129b34f50ae1f61e634e11e9f3d0000000006516a53525100ffffffff0112710700000000000765636353536352c4dd0e2c", + "", + 0, + -293358504, + "3bb56b25b9647f37f88d5617bc931f822433ae66ce0ea000e50c26fcfa813f8a" + ], + [ + "6911195d04f449e8eade3bc49fd09b6fb4b7b7ec86529918b8593a9f6c34c2f2d301ec378b00000000016349162266af054643505b572c24ff6f8e4c920e601b23b3c42095881857d00caf56b28acd030000000565525200ac3ac4d24cb59ee8cfec0950312dcdcc14d1b360ab343e834004a5628d629642422f3c5acc02000000035100accf99b663e3c74787aba1272129a34130668a877cc6516bfb7574af9fa6d07f9b41973034000000000753515152635252ffffffff042b3c95000000000000ff92330200000000036a5252884a2402000000000853530065520063000d78be030000000005535253ac65a72cb34b", + "6a", + 2, + -637739405, + "2015e8cca32dd47a352e02604b08eb564039d9b51d85d0ec225a0bc53e47a2c6" + ], + [ + "746347cf03faa548f4c0b9d2bd96504d2e780292730f690bf0475b188493fb67ca58dcca4f0000000002005336e3521bfb94c254058e852a32fc4cf50d99f9cc7215f7c632b251922104f638aa0b9d080100000008656aac5351635251ffffffff4da22a678bb5bb3ad1a29f97f6f7e5b5de11bb80bcf2f7bb96b67b9f1ac44d09030000000165ffffffff036f02b300000000000663536aac63ac50b72a050000000001aca8abf804000000000663006a6a6353797eb999", + 
"acac5100", + 1, + -1484493812, + "64e32985faa16652e0aac3d180dbd96ce9e79c97c279c1398e8e2abb8eacff8a" + ], + [ + "e17149010239dd33f847bf1f57896db60e955117d8cf013e7553fae6baa9acd3d0f1412ad90200000006516500516500cb7b32a8a67d58dddfb6ceb5897e75ef1c1ff812d8cd73875856487826dec4a4e2d2422a0100000004ac525365196dbb69039229270400000000070000535351636a8b759602000000000551ac52655131e99d040000000003516551ee437f5c", + "ac656a53", + 1, + 1102662601, + "a70d0643c50979ab645417feb0e44aa258e30ac02074d62dcc07fafb38e674aa" + ], + [ + "144971940223597a2d1dec49c7d4ec557e4f4bd207428618bafa3c96c411752d494249e1fb0100000004526a5151ffffffff340a545b1080d4f7e2225ff1c9831f283a7d4ca4d3d0a29d12e07d86d6826f7f0200000003006553ffffffff03c36965000000000000dfa9af00000000000451636aac7f7d140300000000016300000000", + "", + 1, + -108117779, + "c84fcaf9d779df736a26cc3cabd04d0e61150d4d5472dd5358d6626e610be57f" + ], + [ + "b11b6752044e650b9c4744fb9c930819227d2ac4040d8c91a133080e090b042a142e93906e0000000003650053ffffffff6b9ce7e29550d3c1676b702e5e1537567354b002c8b7bb3d3535e63ad03b50ea01000000055100516300fffffffffcf7b252fea3ad5a108af3640a9bc2cd724a7a3ce22a760fba95496e88e2f2e801000000036a00ac7c58df5efba193d33d9549547f6ca839f93e14fa0e111f780c28c60cc938f785b363941b00000000076351516552ac5265e51fcd0308e9830400000000026a00ab72190300000000016a63d07100000000000400516a6300000000", + "53005165ac5165", + 0, + 229563932, + "697786b0b18e62ce643820fe413370ccf6e65c05da239adeb7ca293f2dc2d72d" + ], + [ + "2aee6b9a02172a8288e02fac654520c9dd9ab93cf514d73163701f4788b4caeeb9297d2e2503000000036363008fb36695528d7482710ea2926412f877a3b20acae31e9d3091406bfa6b62ebf9d9d2a6470100000009535165536a63520065ffffffff03f7b560050000000002ac6a9a8338050000000000206ce90000000000056552516a5100000000", + "5252", + 1, + -1102319963, + "77acca2c85f6661df917569fcf9bcf47f7182e06fd3b293ab65a9016fa6d4d50" + ], + [ + 
"9554595203ad5d687f34474685425c1919e3d2cd05cf2dac89d5f33cd3963e5bb43f8706480100000000ffffffff9de2539c2fe3000d59afbd376cb46cefa8bd01dbc43938ff6089b63d68acdc2b02000000096553655251536a6500fffffffff9695e4016cd4dfeb5f7dadf00968e6a409ef048f81922cec231efed4ac78f5d0100000005636a5365006caaf0070162cc6402000000000351635100000000", + "", + 0, + -1105256289, + "8d86b7118d8e329f7d51394b29481071c3a51c7679f8034c7311916fa388646a" + ], + [ + "04f51f2a0484cba53d63de1cb0efdcb222999cdf2dd9d19b3542a896ca96e23a643dfc45f00200000007acac53510063002b091fd0bfc0cfb386edf7b9e694f1927d7a3cf4e1d2ce937c1e01610313729ef6419ae7030000000165a3372a913c59b8b3da458335dc1714805c0db98992fd0d93f16a7f28c55dc747fe66a5b5030000000753516552536351ffffffff5650b318b3e236802a4e41ed9bc0a19c32b7aa3f9b2cda1178f84499963a0cde000000000165ffffffff0383954f04000000000553ac536363a8fc90030000000000a2e315000000000003ac005100000000", + "0053", + 2, + -1424653648, + "b09c5550ca8eab1d602f1bf74e30a5ff23cb391928ca665b4edc55650f6d2786" + ], + [ + "5e4fab42024a27f0544fe11abc781f46596f75086730be9d16ce948b04cc36f86db7ad50fd01000000026a00613330f4916285b5305cc2d3de6f0293946aa6362fc087727e5203e558c676b314ef8dd401000000001af590d202ba496f040000000001009e3c9604000000000351ac51943d64d3", + "51ac510052", + 1, + -129301207, + "556c3f90aa81f9b4df5b92a23399fe6432cf8fecf7bba66fd8fdb0246440036c" + ], + [ + "a115284704b88b45a5f060af429a3a8eab10b26b7c15ed421258f5320fa22f4882817d6c2b0300000003005300ffffffff4162f4d738e973e5d26991452769b2e1be4b2b5b7e8cbeab79b9cf9df2882c040000000006636aac63ac5194abc8aa22f8ddc8a7ab102a58e39671683d1891799d19bd1308d24ea6d365e571172f1e030000000700515352515153ffffffff4da7ad75ce6d8541acbb0226e9818a1784e9c97c54b7d1ff82f791df1c6578f60000000000ffffffff01b1f2650400000000080051ac656a516a5300000000", + "516352535265", + 0, + -1269106800, + "063a8b77e343f75d9b2613f7cc76fc125bba68647417cf403b15eec0e80cabac" + ], + [ + 
"f3f771ae02939752bfe309d6c652c0d271b7cab14107e98032f269d92b2a8c8853ab057da80100000004636a6365670c305c38f458e30a7c0ab45ee9abd9a8dc03bae1860f965ffced879cb2e5d0bb156821020000000153ffffffff025dc619050000000002ac51ec0d250100000000076a5200636a6363333aecd8", + "650053ac515100", + 1, + 1812404608, + "a7aa34bf8a5644f03c6dd8801f9b15ba2e07e07256dbf1e02dad59f0d3e17ea9" + ], + [ + "fd3e267203ae7d6d3975e738ca84f12540229bb237dd228d5f688e9d5ba53fce4302b0334d01000000026353ffffffff602a3ab75af7aa951d93093e345ef0037a2863f3f580a9b1a575fffe68e677450300000000239e476d1e8f81e8b6313880d8a49b27c1b00af467f29756e76f675f084a5676539636ab0300000006656351acac52d9217747044d773204000000000752ac51526353acc33e45050000000005516500005115d8890400000000035163510cbbbd0200000000016500000000", + "65ac526aac6a5352", + 2, + -886179388, + "8763fc77056569a497af457b5e809a453eb8da68b41c33c1df94d4a90fb88f37" + ], + [ + "f380ae23033646af5dfc186f6599098015139e961919aea28502ea2d69474413d94a555ea2000000000753635265acac5314da394b99b07733341ddba9e86022637be3b76492992fb0f58f23c915098979250a966203000000026300ffffffff4bb6d1c0a0d84eac7f770d3ad0fdc5369ae42a21bbe4c06e0b5060d5990776220300000000ffffffff0486fd70020000000007ac6500635252acf3fd72010000000005656a6a6551212de90500000000096365006a63635153000fa33100000000000600535151656300000000", + "52", + 2, + -740890152, + "f804fc4d81f039009ed1f2cccb5c91da797543f235ac71b214c20e763a6d86d7" + ], + [ + "5c45d09801bb4d8e7679d857b86b97697472d514f8b76d862460e7421e8617b15a2df217c6010000000763acac6565006affffffff01156dbc03000000000952ac63516551ac6aac00000000", + "6aac", + 0, + 1310125891, + "270445ab77258ced2e5e22a6d0d8c36ac7c30fff9beefa4b3e981867b03fa0ad" + ], + [ + 
"4ecc6bde030ca0f83c0ed3d4b777f94c0c88708c6c933fe1df6874f296d425cac95355c23d0000000006ac6a51536a52f286a0969d6170e20f2a8000193807f5bc556770e9d82341ef8e17b0035eace89c76edd50200000007ac65525100656affffffff5bade6e462fac1927f078d69d3a981f5b4c1e59311a38efcb9a910aa436afaa80000000006ac6a00635252ffffffff0331e58902000000000663ac53636352b8b3ca000000000001637a1d26040000000008535263ac6a5352655ae34a39", + "6a65", + 2, + 2142728517, + "303c8dd6fefd68fef70b0248077adda35feeabfbe747e6c5b93d6fc260ccef93" + ], + [ + "a59484b501eb50114be0fc79e72ab9bc9f4a5f7acdf274a56d6b68684eb68cf8b07ec5d1c20000000004650000639e09aa940141e3530200000000046500ac6500000000", + "00516565", + 0, + -1561622405, + "d60bbadd2cc0674100baa08d0e0493ee4248f0304b3eb778da942041f503a896" + ], + [ + "53dc1a88046531c7b57a35f4d9adf101d068bf8d63fbbedaf4741dba8bc5e92c8725def571030000000453655251fcdf116a226b3ec240739c4c7493800e4edfe67275234e371a227721eac43d3d9ecaf1b50300000003ac0052ffffffff2c9279ffeea4718d167e9499bd067600715c14484e373ef93ae4a31d2f5671ab0000000009516553ac636a6a65001977752eeba95a8f16b88c571a459c2f2a204e23d48cc7090e4f4cc35846ca7fc0a455ce00000000055165ac0063188143f80205972902000000000765ac63ac516353c7b6a50000000000036a510000000000", + "655351536a", + 0, + 103806788, + "b276584d3514e5b4e058167c41dc02915b9d97f6795936a51f40e894ed8508bc" + ], + [ + "53f8959f01ddb36afdcd20167edcbb75a63d18654fdcf10bc0004c761ab450fe236d79cb2702000000065151650063653435003a033a5e34050000000009ac52516a630000516ab86db3030000000002006344ac09050000000003636300f3644537", + "526363ac656353", + 0, + -218513553, + "bd6a5101606e537e04162097a0f3b41064c169e69a7b7dd8d55fa83c3c5a882d" + ], + [ + "5a06cb4602dcfc85f49b8d14513f33c48f67146f2ee44959bbca092788e6823b2719f3160b02000000003c013f2518035b9ea635f9a1c74ec1a3fb7496a160f46aae2e09bfc5cd5111a0f20969e003000000015158c89ab7049f20d6010000000007ac6a52ac53515349765e000000000002006382926301000000000353510086da09010000000006656a6365525300000000", + "526a63", + 1, + 1502936586, + 
"b4e5e9952c80304d55168d73865f628e2b3e5b3edc1fe2bfa1170add4648e544" + ], + [ + "ca9d84fa0129011e1bf27d7cb71819650b59fb292b053d625c6f02b0339249b498ff7fd4b601000000025352ffffffff032173a00400000000065252535152639473bb030000000009005153526a53535151d085bd0000000000076a53655165655300000000", + "005152ac51", + 0, + 580353445, + "b7cfd728f53e2aaf77008ae898df3fcd49955298a7f69a50b1532b0db98031ad" + ], + [ + "e3cdbfb4014d90ae6a4401e85f7ac717adc2c035858bf6ff48979dd399d155bce1f150daea0300000002ac51a67a0d39017f6c71040000000005535200535200000000", + "", + 0, + -1899950911, + "c1c7df8206e661d593f6455db1d61a364a249407f88e99ecad05346e495b38d7" + ], + [ + "b2b6b9ab0283d9d73eeae3d847f41439cd88279c166aa805e44f8243adeb3b09e584efb1df00000000026300ffffffff7dfe653bd67ca094f8dab51007c6adaced09de2af745e175b9714ca1f5c68d050000000003ac6500aa8e596903fd3f3204000000000553ac6a6a533a2e210500000000055253ac526392d0ee0200000000075200656352005200000000", + "65acacac65005365", + 0, + 28298553, + "3e9fab68740cb0d44d2b4aebffeb03df92b3aa1f0302b6f045071b2f34a362f8" + ], + [ + "f30c5c3d01a6edb9e10fafaf7e85db14e7fec558b9dca4a80b05d7c3a2944d282c5018f4680200000003005263ffffffff04aac3530300000000026551bc2419010000000008005163ac6a5100658e7085050000000000c5e4ec050000000006656a6a6353652d8e8882", + "ac53005251ac52", + 0, + -490287546, + "bb73343cf104b71c3b830f6099a197af9703ccf6b3bdee5608dde7821cc14a72" + ], + [ + "4314339e01de40faabcb1b970245a7f19eedbc17c507dac86cf986c2973715035cf95736ae020000000165bde67b900151510b04000000000853ac00655200535300000000", + "52", + 0, + 399070095, + "47585dc25469d04ff3a60939d0a03779e3e81a411bf0ca18b91bb925ebd30718" + ], + [ + "2d4cf4e9031b3e175b2ff18cd933151379d9cfac4713d8bd0e63b70bd4a92277aa7af901ab000000000465515353ffffffff557666c7f3be9cdecdad44c3df206eb63a2da4ed1f159d21193882a9f0340081020000000663535252ac63ffffffff8a8c897bdb87e93886aad5ded9d82a13101d5476554386373646ca5e23612e450300000007006a5265526a635ac03fc00198bb02040000000009525100526a6563636a1d052834", + "52ac00acac6a", + 0, + 
-1469882480, + "09ed6563a454814ab7e3b4c28d56d8751162b77df1825b37ba66c6147750b2a3" + ], + [ + "f063171b03e1830fdc1d685a30a377537363ccafdc68b42bf2e3acb908dac61ee24b37595c020000000665ac51006aacf447bc8e037b89d6cadd62d960cc442d5ced901d188867b5122b42a862929ce45e7b628d010000000153a009a1ba42b00f1490b0b857052820976c675f335491cda838fb7934d5eea0257684a2a202000000001e83cf2401a7f7770300000000076553526a53526a00000000", + "", + 2, + 1984790332, + "581d480ec4e2a459808f85e76f1242c14daba43eaff63fe305ffd1b86407faba" + ], + [ + "cf7bdc250249e22cbe23baf6b648328d31773ea0e771b3b76a48b4748d7fbd390e88a004d30000000003ac536a4ab8cce0e097136c90b2037f231b7fde2063017facd40ed4e5896da7ad00e9c71dd70ae600000000096a0063516352525365ffffffff01b71e3e00000000000300536a00000000", + "", + 1, + 546970113, + "6a815ba155270af102322c882f26d22da11c5330a751f520807936b320b9af5d" + ], + [ + "ac7a125a0269d35f5dbdab9948c48674616e7507413cd10e1acebeaf85b369cd8c88301b7c030000000863656aac6a530053ffffffffed94c39a582e1a46ce4c6bffda2ccdb16cda485f3a0d94b06206066da12aecfe01000000055263536363ef71dcfb02ee07fa0400000000016a6908c802000000000651656a6551ac688c2c2d", + "6a6351526551", + 0, + 858400684, + "f8268f4a29450b932e62c73b294f7159d69face8c0d772a874071fbe1fa7668e" + ], + [ + "3a1f454a03a4591e46cf1f7605a3a130b631bf4dfd81bd2443dc4fac1e0a224e74112884fe0000000005516aac6a53a87e78b55548601ffc941f91d75eab263aa79cd498c88c37fdf275a64feff89fc1710efe03000000016a39d7ef6f2a52c00378b4f8f8301853b61c54792c0f1c4e2cd18a08cb97a7668caa008d970200000002656affffffff017642b20100000000086a63535253ac6a6528271998", + "51", + 2, + 1459585400, + "94ebe4e031f4ab2b13a16fe952be55051aa07311b400b54c0aceab84eb17dcd1" + ], + [ + "f59366cc0114c2a18e6bd1347ed9470f2522284e9e835dd5c5f7ef243639ebea95d9b232b6020000000153474b62eb045c001705000000000763525163525200038a520400000000076a5253656a63005b968904000000000963536353ac0053635387106002000000000000000000", + "5252630051", + 0, + 1834116153, + "460fa39c72197ab17b10c1a1314f0e332045614c9b4485e94cae6084cabafced" + ], + [ 
+ "6269e0fa0173e76e89657ca495913f1b86af5b8f1c1586bcd6c960aede9bc759718dfd5044000000000352ac530e2c7bd90219849b000000000005006a53006319f28100000000000600515165ac5200000000", + "6a", + 0, + -2039568300, + "d85c366955acbeefef093d85487bfc41a6611942a6e596154fd064b6afc8343a" + ], + [ + "eb2bc00604815b9ced1c604960d54beea4a3a74b5c0035d4a8b6bfec5d0c9108f143c0e99a0000000000ffffffff22645b6e8da5f11d90e5130fd0a0df8cf79829b2647957471d881c2372c527d8010000000263acffffffff1179dbaf17404109f706ae27ad7ba61e860346f63f0c81cb235d2b05d14f2c1003000000025300264cb23aaffdc4d6fa8ec0bb94eff3a2e50a83418a8e9473a16aaa4ef8b855625ed77ef40100000003ac51acf8414ad404dd328901000000000652526500006ab6261c000000000002526a72a4c9020000000006ac526500656586d2e7000000000006656aac00ac5279cd8908", + "51", + 1, + -399279379, + "d37532e7b2b8e7db5c7c534197600397ebcc15a750e3af07a3e2d2e4f84b024f" + ], + [ + "dc9fe6a8038b84209bbdae5d848e8c040433237f415437592907aa798bf30d9dbbddf0ff85010000000153ffffffff23269a7ea29fcf788db483b8d4c4b35669e582608644259e950ce152b0fa6e050000000001acffffffff65de94857897ae9ea3aa0b938ba6e5adf374d48469922d2b36dbb83d3b8c8261010000000452ac5200ffffffff02856e9b0300000000026a51980c8e0200000000026563d2648db4", + "000051ac526565", + 2, + 1562581941, + "19afe3555f84c947b5569ca320278734ce9a2ebd612c53ac4f253d1a4d2f3e52" + ], + [ + "eba8b0de04ac276293c272d0d3636e81400b1aaa60db5f11561480592f99e6f6fa13ad387002000000060053ac536563bebb23d66fd17d98271b182019864a90e60a54f5a615e40b643a54f8408fa8512cfac927030000000663ac6a6aac65ffffffff890a72192bc01255058314f376bab1dc72b5fea104c154a15d6faee75dfa5dba020000000100592b3559b0085387ac7575c05b29b1f35d9a2c26a0c27903cc0f43e7e6e37d5a60d8305a030000000152ffffffff0126518f05000000000000000000", + "005300635252635351", + 1, + 664344756, + "26dc2cba4bd5334e5c0b3a520b44cc1640c6b923d10e576062f1197171724097" + ], + [ + 
"91bd040802c92f6fe97411b159df2cd60fb9571764b001f31657f2d616964637605875c2a901000000055263006a65ffffffff3651df372645f50cf4e32fdf6e61c766e912e16335db2b40c5d52fe89eefe7cd0000000003006565ffffffff03ca862503000000000751ac6353005252c6bf140200000000040052005167d27000000000000653525351636a00000000", + "515163005252ac", + 1, + 1983087664, + "88b96f7714e48321f4d0e4956ff46cf936ee3b14996e89bc946084a9acb60dc1" + ], + [ + "185cda1a01ecf7a8a8c28466725b60431545fc7a3367ab68e34d486e8ea85ee3128e0d8384000000000365ac63ec88b7bb031c56eb04000000000965636a51005252006a7c78d5040000000006acac63ac51ac3024a40500000000076300526a51ac51464c0e8c", + "0065535265515352", + 0, + 1594558917, + "506c9f2b971f1649c8646a197afc2bfd1980ae798c02241232c6e447097e9415" + ], + [ + "a9531f07034091668b65fea8b1a79700d586ac9e2f42ca0455a26abe41f9e1805d009a0f5702000000086365516365ac52633619bac643a9e28ee47855118cf80c3a74531cdf198835d206d0fe41804e325a4f9f105e03000000016a58e3ab0d46375d98994daf0fa7c600d2bb4669e726fca0e3a3f21ea0d9e777396740328f0100000007636a5363526a538d3ea7700304cb6603000000000551516352510184030500000000085353636565ac0051d9cff402000000000451525352f0e36254", + "5353ac5365ac", + 2, + 1633101834, + "35afb21a41f5ad9544c63c29ebdd68a8cfac03ac245145ca32b753ab3a694de3" + ], + [ + "6b5ecc7903fe0ba37ea551df92a59e12bad0a3065846ba69179a8f4a741a2b4fcf679aac810200000004535263529a3d343293b99ab425e7ef8529549d84f480bcd92472bab972ea380a302128ae14dfcd0200000000025163ffffffff24636e4545cab9bf87009119b7fc3ec4d5ee9e206b90f35d1df8a563b6cd097a010000000652ac53005153c64467860406e832020000000009526300006a53ac6352ac1395010000000002ac53b117f300000000000763655351ac00651edf0203000000000751ac6353535252628ef71d", + "636a52ac526563", + 2, + -1559697626, + "4f27b1102f3f8633a693225835fbdca8681563b5c8b64bbc6e0df559caa3bbbf" + ], + [ + 
"92c9fb780138abc472e589d5b59489303f234acc838ca66ffcdf0164517a8679bb622a4267020000000153468e373d04de03fa020000000008ac006a52655163006af649050000000007515153006a00658ceb59030000000001ac36afa00200000000075300635151000000000000", + "6a", + 0, + 2059357502, + "6904a629cc6b1a2b724c64d95361a4621b49de81a1333a88438700cbce0bff8b" + ], + [ + "6f62138301436f33a00b84a26a0457ccbfc0f82403288b9cbae39986b34357cb2ff9b889b302000000045253655335a7ff6701bac9960400000000076552656352635200000000", + "6aac51", + 0, + 1444414211, + "65eed736c4dc78083dcdca7fe92d9a24a4390efc1abb835f88c16d702f48849a" + ], + [ + "9981143a040a88c2484ac3abe053849e72d04862120f424f373753161997dd40505dcb4783030000000700536365536565a2e10da3f4b1c1ad049d97b33f0ae0ea48c5d7c30cc8810e144ad93be97789706a5ead180100000003636a00ffffffffbdcbac84c4bcc87f03d0ad83fbe13b369d7e42ddb3aecf40870a37e814ad8bb5010000000863536a5100636a53ffffffff883609905a80e34202101544f69b58a0b4576fb7391e12a769f890eef90ffb72020000000651656352526affffffff042436600000000000035352534a9ce0010000000007636563636a53652df19d030000000003ac65acedc51700000000000000000000", + "ac6300acac", + 2, + 293672388, + "a9544ab8326a6361ccde6e11a052bff5196da8915168f22b39a05d71925e5307" + ], + [ + "a2bb630b01989bc5d643f2da4fb9b55c0cdf846ba06d1dbe372893024dbbe5b9b8a1900af802000000055265ac63aca7a68d2f04916c74010000000002ac007077f0040000000001007d4127010000000005ac516aac000f31e8030000000000571079c9", + "650051ac", + 0, + -1103627693, + "9489487125feeb56a6df08aa2aba65f204d36af8f41773a584dd0ed03fd4c06e" + ], + [ + "49f7d0b6037bba276e910ad3cd74966c7b3bc197ffbcfefd6108d6587006947e97789835ea0300000008526a52006a650053ffffffff8d7b6c07cd10f4c4010eac7946f61aff7fb5f3920bdf3467e939e58a1d4100ab03000000076aac63ac535351ffffffff8f48c3ba2d52ad67fbcdc90d8778f3c8a3894e3c35b9730562d7176b81af23c801000000025265ffffffff0301e3ef0300000000046a525353e899ac05000000000551536a65ac259bea0400000000007b739972", + "53516aacac6aac", + 1, + 955403557, + 
"47434a0456d729f4bcd49b17705e0501f637030c540a6ba11bf96b351ebdc2e7" + ], + [ + "58a4fed801fbd8d92db9dfcb2e26b6ff10b120204243fee954d7dcb3b4b9b53380e7bb8fb60100000003006351ffffffff02a0795b050000000006536351ac6aac2718d00200000000065151acac515354d21ba1", + "005363515351", + 0, + -1322430665, + "44b83f4912c1f88d89f746acb5cac17b353106dc34255a32fb624f679db565c8" + ], + [ + "32765a0b02e455793d9ce530e9f6a44bcbc612e893a875b5da61d822dc56d8245166c398b403000000075353ac6300006a6bdee2a78d0d0b6a5ea666eed70b9bfea99d1d612ba3878f615c4da10d4a521cba27155002000000025363ffffffff043cd42401000000000551656a53653685320100000000030000511881bc0500000000045165636a20169f010000000006ac656aac63acdb0706a8", + "65ac5353", + 0, + 1936499176, + "89f352d307c9e980e4d5b04f8c08f165ead050345cb732b934a8dcd11023fd26" + ], + [ + "17fad0d303da0d764fedf9f2887a91ea625331b28704940f41e39adf3903d8e75683ef6d46020000000151ffffffffff376eea4e880bcf0f03d33999104aafed2b3daf4907950bb06496af6b51720a020000000900636a63525253525196521684f3b08497bad2c660b00b43a6a517edc58217876eb5e478aa3b5fda0f29ee1bea00000000036aac6affffffff03dde8e2050000000007ac5365ac51516a14772e000000000004630000acbbb3600100000000045251656a50f180f0", + "0053", + 0, + -1043701251, + "dbd390e8f1bd34f78ea2ee2bcc975a94f00a94bf8a5704af9585b40f95f8aa84" + ], + [ + "236c32850300045e292c84ede2b9ab5733ba08315a2bb09ab234c4b4e8894808edbdac0d3b020000000553635363acffffffffd3f696bb31fdd18a72f3fc2bb9ae54b416a253fc37c1a0f0180b52d35bad49440100000003650053ffffffffa85c75a2406d82a93b12e555b66641c1896a4e83ae41ef1038218311e38ace060200000004006a51ac104b5e6701e2842c04000000000700630051ac000000000000", + "63ac6a516a", + 1, + -1709887524, + "02fdf4ccd26d4f2b3dc85a096d0975791ffe58590906589f6c3e004fe7e58d59" + ], + [ + "b78d5fd601345f3100af494cdf447e7d4076179f940035b0ebe8962587d4d0c9c6c9fc34ee0300000003516a6affffffff03dc5c890100000000085353ac53ac6a52534ac941040000000006ac63656a5151d4266b0100000000036aacac70731f2d", + "0053510053", + 0, + -1789071265, + 
"3eb5554f372e9d3a8f3bf326b1b901b68caa1f99a55009b69f83fcfadd13e520" + ], + [ + "5a2257df03554550b774e677f348939b37f8e765a212e566ce6b60b4ea8fed4c9504b7f7d10000000005536552655258b67bb931df15b041177cf9599b0604160b79e30f3d7a594e7826bae2c29700f6d8f8f40300000005515300ac6a159cf8808a41f504eb5c2e0e8a9279f3801a5b5d7bc6a70515fbf1c5edc875bb4c9ffac500000000050063510052ffffffff0422a90105000000000965006a650000516a006417d202000000000552636300524d969d0100000000035153acc4f077040000000005ac5200636500000000", + "6a52", + 1, + -1482463464, + "fe2c31bf623e4271b5c95a5b55b537adea161cdf2588778452ff6d5ac00cfd54" + ], + [ + "e0032ad601269154b3fa72d3888a3151da0aed32fb2e1a15b3ae7bee57c3ddcffff76a1321010000000100110d93ae03f5bd080100000000075263516a6551002871e60100000000046a005252eaa7530400000000026a526e325c71", + "630052", + 0, + -1857873018, + "1124ce2eef893700a2903b96908a37a0df0b979c818f0e93d4a78b138337fa56" + ], + [ + "014b2a5304d46764817aca180dca50f5ab25f2e0d5749f21bb74a2f8bf6b8b7b3fa8189cb7030000000865ac51656a51ac6360ecd91e8abc7e700a4c36c1a708a494c94bb20cbe695c408543146566ab22be43beae910300000003516300ffffffffffa48066012829629a9ec06ccd4905a05df0e2b745b966f6a269c9c8e13451fc00000000026565ffffffffc40ccadc21e65fe8a4b1e072f4994738ccaf4881ae6fede2a2844d7da4d199ab02000000045152536affffffff01b6e0540300000000035153523e063432", + "", + 0, + 1056459916, + "ec6c440ffff776852f6c49bdf50492ff9a6c8ef4fc8784a6b5a7542f4c985e9c" + ], + [ + "c4ef04c103c5dde65410fced19bf6a569549ecf01ceb0db4867db11f2a3a3eef0320c9e8e001000000075100536a53516affffffff2a0354fa5bd96f1e28835ffe30f52e19bd7d5150c687d255021a6bec03cf4cfd03000000056a006300514900c5b01d3d4ae1b97370ff1155b9dd0510e198d266c356d6168109c54c11b4c283dca00300000000ffffffff02e19e3003000000000451655351fa5c0003000000000163ef1fc64b", + "51636a51630065", + 1, + -1754709177, + "0a281172d306b6a32e166e6fb2a2cc52c505c5d60ea448e9ba7029aa0a2211e1" + ], + [ + 
"29083fe00398bd2bb76ceb178f22c51b49b5c029336a51357442ed1bac35b67e1ae6fdf13100000000056a6500ac51ffffffffe4ca45c9dc84fd2c9c47c7281575c2ba4bf33b0b45c7eca8a2a483f9e3ebe4b3010000000100ffffffffdf47ad2b8c263fafb1e3908158b18146357c3a6e0832f718cd464518a219d18303000000096352ac656351ac0052daddfb3b0231c36f00000000000400526a5275c7e002000000000000000000", + "ac536aac52", + 2, + 300802386, + "56a1cd52ca286e172228097197cf07d76032f4f4acca3579e2f2e999b1dd710b" + ], + [ + "1201ab5d04f89f07c0077abd009762e59db4bb0d86048383ba9e1dad2c9c2ad96ef660e6d002000000066a65ac5200652466fa5143ab13d55886b6cdc3d0f226f47ec1c3020c1c6e32602cd3428aceab544ef43e00000000086a6a6a526a6a5263ffffffffd5be0b0be13ab75001243749c839d779716f46687e2e9978bd6c9e2fe457ee480200000001651e1bac0f72005cf638f71a3df2e3bbc0fa35bf00f32d9c7dc9c39a5e8909f7d53170c8ae02000000076a51516363516affffffff02f0a6210500000000036300ac867356010000000008ac65ac6353536a659356d367", + "ac53535252", + 0, + 917543338, + "7f0248e34c12668b8355c50d21c8dbb20c217bd331d1f800b9950cadaf32375a" + ], + [ + "344fa11e01c19c4dd232c77742f0dd0aeb3695f18f76da627628741d0ee362b0ea1fb3a2180200000007635151005100529bab25af01937c1f050000000004515353656e7630af", + "6351005163ac51", + 0, + -629732125, + "9e2512e57162703e8658db721dddba7badc4561d7d673a13ad64b7b0f1e621ec" + ], + [ + "b2fda1950191358a2b855f5626a0ebc830ab625bea7480f09f9cd3b388102e35c0f303124c030000000465ac6553ffffffff03f9c5ec0400000000066551516551650e2b9f0500000000045365525284e8f6040000000001ac00000000", + "ac51655253", + 0, + 1433027632, + "f960e775db95cfa932146db23f878b7f06b0c25882d3345530dbfffaaba05edb" + ], + [ + "a4a6bbd201aa5d882957ac94f2c74d4747ae32d69fdc765add4acc2b68abd1bdb8ee333d6e0300000007516a6552515152ffffffff02c353cb040000000006ac635151536588bd320500000000066552525253ac00000000", + "", + 0, + 1702060459, + "9ec678b38b1a7a4745426e48f41d86fe63bb45b043b2b0303aac98e1eab8d3a1" + ], + [ + 
"584e8d6c035a6b2f9dac2791b980a485994bf38e876d9dda9b77ad156eee02fa39e19224a60300000002636529db326cc8686a339b79ab6b6e82794a18e0aabc19d9ad13f31dee9d7aad8eff38288588020000000452530052ffffffff09a41f07755c16cea1c7e193c765807d18cadddca6ec1c2ed7f5dcdca99e90e80000000001acffffffff01cba62305000000000451ac63acccdf1f67", + "536a6363", + 2, + -27393461, + "1125645b49202dca2df2d76dae51877387903a096a9d3f66b5ac80e042c95788" + ], + [ + "83a583d204d926f2ee587a83dd526cf1e25a44bb668e45370798f91a2907d184f7cddcbbc70300000006006565536a539f71d3776300dffdfa0cdd1c3784c9a1f773e34041ca400193612341a9c42df64e3f550e01000000050052515251ffffffff52dab2034ab0648553a1bb8fc4e924b2c89ed97c18dfc8a63e248b454035564b01000000015139ab54708c7d4d2c2886290f08a5221cf69592a810fd1979d7b63d35c271961e710424fd0300000005ac65ac5251ffffffff01168f7c030000000000a85e5fb0", + "6a536353656a00", + 0, + 179595345, + "5350a31ac954a0b49931239d0ecafbf34d035a537fd0c545816b8fdc355e9961" + ], + [ + "ffd35d51042f290108fcb6ea49a560ba0a6560f9181da7453a55dfdbdfe672dc800b39e7320200000006630065516a65f2166db2e3827f44457e86dddfd27a8af3a19074e216348daa0204717d61825f198ec00301000000025100ffffffffdf41807adb7dff7db9f14d95fd6dc4e65f8402c002d009a3f1ddedf6f4895fc8030000000400006a65a5a848345052f860620abd5fcd074195548ce3bd0839fa9ad8642ed80627bf43a0d47dbd010000000665006a656a53b38cdd6502a186da0500000000056500006a53527c0e010000000007536551acacac52534bd1b1", + "6a635253ac0000", + 0, + 1095082149, + "9b0ea712da894a5f42871402d7326598daa56a42834acc42176360eda79ded43" + ], + [ + "6c9a4b98013c8f1cae1b1df9f0f2de518d0c50206a0ab871603ac682155504c0e0ce946f460100000000ffffffff04e9266305000000000753535100ac6aacded39e04000000000365ac6ab93ccd010000000002515397bf3d050000000002636300000000", + "63520052ac656353", + 0, + -352633155, + "ab8a6224bf7cd47dd82e382e18440a9041dda6bd57729ffed39ddcb9ab2274ed" + ], + [ + 
"e01dc7f0021dc07928906b2946ca3e9ac95f14ad4026887101e2d722c26982c27dc2b59fdb0000000005ac5200516ab5a31ffadcbe74957a5a3f97d7f1475cc6423fc6dbc4f96471bd44c70cc736e7dec0d1ea020000000851636a526a52ac53ffffffff04bc2edd05000000000152528c7b02000000000952ac51526500525353324820040000000002005380c71300000000000863006500ac525252451bbb48", + "5365ac", + 0, + -552384418, + "7578c44d2ab1f5cbd8a509b56a6ea64192e76390dab45659edeb23621ce55bdb" + ], + [ + "009046a1023f266d0113556d604931374d7932b4d6a7952d08fbd9c9b87cbd83f4f4c178b4030000000452ac526346e73b438c4516c60edd5488023131f07acb5f9ea1540b3e84de92f4e3c432289781ea4900000000046500655357dfd6da02baef910100000000026a007d101703000000000700516500acac5100000000", + "6a6553ac", + 0, + -802456605, + "f8757fbb4448ca34e0cd41b997685b37238d331e70316659a9cc9087d116169d" + ], + [ + "df76ec0801a3fcf3d18862c5f686b878266dd5083f16cf655facab888b4cb3123b3ce5db7e01000000010010e7ac6a0233c83803000000000365ac51faf14a040000000004ac51655100000000", + "6353ac", + 0, + 15705861, + "e7d873aa079a19ec712b269a37d2670f60d8cb334c4f97e2e3fd10eeb8ee5f5e" + ], + [ + "828fd3e0031084051ccef9cfdd97fae4d9cc50c0dae36bd22a3ff332881f17e9756c3e288e0200000003535363961a2ccccaf0218ec6a16ba0c1d8b5e93cfd025c95b6e72bc629ec0a3f47da7a4c396dad01000000025353ffffffff19ad28747fb32b4caf7b5dbd9b2da5a264bedb6c86d3a4805cd294ae53a86ac40200000006535353516551ffffffff04a41650030000000003656a6a8331a304000000000700516365ac516a0d2a47010000000005ac516353acdebc190400000000055300636a6300000000", + "515253ac52", + 0, + 1866105980, + "c269646f2b9b1a570c1e7f718dd9ce004b73cf57a0d65ef746fb3b16a4b7d611" + ], + [ + "c4b80f850323022205b3e1582f1ed097911a81be593471a8dce93d5c3a7bded92ef6c7c1260100000002006affffffff70294d62f37c3da7c5eae5d67dce6e1b28fedd7316d03f4f48e1829f78a88ae801000000096a5200530000516351f6b7b544f7c39189d3a2106ca58ce4130605328ce7795204be592a90acd81bef517d6f170200000000ffffffff012ab8080000000000075100006365006335454c1e", + "53ac6a536aacac", + 0, + -1124103895, + 
"06277201504e6bf8b8c94136fad81b6e3dadacb9d4a2c21a8e10017bfa929e0e" + ], + [ + "8ab69ed50351b47b6e04ac05e12320984a63801716739ed7a940b3429c9c9fed44d3398ad40300000006536a516a52638171ef3a46a2adb8025a4884b453889bc457d63499971307a7e834b0e76eec69c943038a0300000000ffffffff566bb96f94904ed8d43d9d44a4a6301073cef2c011bf5a12a89bedbaa03e4724030000000265acb606affd01edea38050000000008515252516aacac6300000000", + "65000000006365ac53", + 0, + -1338942849, + "7912573937824058103cb921a59a7f910a854bf2682f4116a393a2045045a8c3" + ], + [ + "2484991e047f1cf3cfe38eab071f915fe86ebd45d111463b315217bf9481daf0e0d10902a402000000006e71a424eb1347ffa638363604c0d5eccbc90447ff371e000bf52fc743ec832851bb564a0100000000ffffffffef7d014fad3ae7927948edbbb3afe247c1bcbe7c4c8f5d6cf97c799696412612020000000851536a5353006a001dfee0d7a0dd46ada63b925709e141863f7338f34f7aebde85d39268ae21b77c3068c01d000000000753515100636563ffffffff018478070200000000085200635365ac525341b08cd3", + "", + 3, + 265623923, + "b733d44ed509d9eb51f93197cfd3cf20301b2aef237ccf3deba781c98fc500f7" + ], + [ + "54839ef9026f65db30fc9cfcb71f5f84d7bb3c48731ab9d63351a1b3c7bc1e7da22bbd508e0300000000442ad138f170e446d427d1f64040016032f36d8325c3b2f7a4078766bdd8fb106e52e8d20000000003656500ffffffff02219aa101000000000551ac5200659646bd02000000000452acacac24c394a5", + "ac", + 0, + 906807497, + "0106127dd7901786b645a7feaaa2e49d9bfed8da4afd514475fc7ac5a42420b4" + ], + [ + "5036d7080434eb4eef93efda86b9131b0b4c6a0c421e1e5feb099a28ff9dd8477728639f77030000000751516a5351525391429be9cce85d9f3d358c5605cf8c3666f034af42740e94d495e28b9aaa1001ba0c8758030000000600655200006affffffffd838978e10c0c78f1cd0a0830d6815f38cdcc631408649c32a25170099669daa0000000001ac8984227e804ad268b5b367285edcdf102d382d027789250a2c0641892b480c21bf84e3fb0100000000b518041e023d8653010000000001004040fb0100000000080051ac5200636a6300000000", + "52ac", + 0, + 366357656, + "bd0e88829afa6bdc1e192bb8b2d9d14db69298a4d81d464cbd34df0302c634c6" + ], + [ + 
"9ad5ccf503fa4facf6a27b538bc910cce83c118d6dfd82f3fb1b8ae364a1aff4dcefabd38f03000000096365655263ac655300807c48130c5937190a996105a69a8eba585e0bd32fadfc57d24029cbed6446d30ebc1f100100000004000053650f0ccfca1356768df7f9210cbf078a53c72e0712736d9a7a238e0115faac0ca383f219d0010000000500536552002799982b0221b8280000000000000c41320000000000086552ac6365636a6595f233a3", + "6a5152", + 2, + 553208588, + "f99c29a79f1d73d2a69c59abbb5798e987639e36d4c44125d8dc78a94ddcfb13" + ], + [ + "669538a204047214ce058aed6a07ca5ad4866c821c41ac1642c7d63ed0054f84677077a84f030000000653ac6a655353ffffffff70c2a071c115282924e3cb678b13800c1d29b6a028b3c989a598c491bc7c76c5030000000752ac52ac5163ac80420e8a6e43d39af0163271580df6b936237f15de998e9589ec39fe717553d415ac02a4030000000463635153184ad8a5a4e69a8969f71288c331aff3c2b7d1b677d2ebafad47234840454b624bf7ac1d03000000036a6363df38c24a02fbc63a040000000001535ec3dc050000000002536500000000", + "635153", + 3, + -190399351, + "663677df67bf45f3358c34fb5296274117ea392c021e6ecd6916d7c2158e19c8" + ], + [ + "a7f139e502af5894be88158853b7cbea49ba08417fbbca876ca6614b5a41432be34499987b000000000665635165ac63ffffffff8b8d70e96c7f54eb70da0229b548ced438e1ca2ba5ddd648a027f72277ee1efc0100000000ffffffff044f2c4204000000000165e93f550100000000050000526a6a94550304000000000365536aadc21c0300000000016300000000", + "6aacac63635265ac", + 1, + 2143189425, + "6e3f97955490d93d6a107c18d7fe402f1cada79993bb0ff0d096357261b3a724" + ], + [ + "3b94438f0366f9f53579a9989b86a95d134256ce271da63ca7cd16f7dd5e4bffa17d35133f010000000100ffffffff1aaad0c721e06ec00d07e61a84fb6dc840b9a968002ce7e142f943f06fd143a10100000007535151ac510053b68b8e9c672daf66041332163e04db3f6048534bd718e1940b3fc3811c4eef5b7a56888b01000000001d58e38c012e38e70000000000075253ac6365536a00000000", + "655352", + 1, + -935223304, + "8b289a2649047eecc49da07c69351f67d5b87d60fd1e5f74cdd525833fbcac9b" + ], + [ + 
"e5dca8a20456de0a67e185fa6ea94085ceae478d2c15c73cb931a500db3a1b6735dd1649ec0200000002536a32d11bbdcb81361202681df06a6b824b12b5cb40bb1a672cf9af8f2a836e4d95b783932703000000065100536565acb345085932939eef0c724adef8a57f9e1bf5813852d957c039b6a12d9c2f201ea520fb030000000009ac5352005165acac6a5efc6072f1a421dc7dc714fc6368f6d763a5d76d0278b95fc0503b9268ccfadb48213a2500000000026a53ffffffff039ee1c4020000000008ac53536353535163184018000000000005655265526a9a4a8a050000000001ac00000000", + "65536a006553", + 2, + 1902561212, + "83bcf3f2ddb147f75759a0f2a3a80f2ee1b01b7efc25ba83b9802fbd1c3dfa41" + ], + [ + "972128b904e7b673517e96e98d80c0c8ceceae76e2f5c126d63da77ffd7893fb53308bb2da0300000005ac655252acffffffff4cac767c797d297c079a93d06dc8569f016b4bf7a7d79b605c526e1d36a40e2202000000085365636aac6a6a6a69928d2eddc836133a690cfb72ec2d3115bf50fb3b0d10708fa5d2ebb09b4810c426a1db01000000060052526300001e8e89585da7e77b2dd2e30625887f0660accdf29e53a614d23cf698e6fc8ab03310e87700000000076a520051acac6555231ddb0330ec2d03000000000100faf4570400000000036a6352bdc42400000000000153d6dd2f04", + "", + 0, + 209234698, + "d252eddb74ce116ac306969573d49f4318ea7ad6acd54299a64fc40fb34c0764" + ], + [ + "1fb4085b022c6cfb848f8af7ba3ba8d21bd23ffa9f0bfd181cb68bcaaf2074e66d4974a31602000000080000006a6a6500ac6c12c07d9f3dbd2d93295c3a49e3757119767097e7fd5371f7d1ba9ba32f1a67a5a426f00000000000ffffffff018fd2fc04000000000363ac5100000000", + "65006a6a526a", + 0, + 1431502299, + "8b7dd0ff12ca0d8f4dbf9abf0abba00e897c2f6fd3b92c79f5f6a534e0b33b32" + ], + [ + "5374f0c603d727f63006078bd6c3dce48bd5d0a4b6ea00a47e5832292d86af258ea0825c260000000009655353636352526a6af2221067297d42a9f8933dfe07f61a574048ff9d3a44a3535cd8eb7de79fb7c45b6f47320200000003ac006affffffff153d917c447d367e75693c5591e0abf4c94bbdd88a98ab8ad7f75bfe69a08c470200000005ac65516365ffffffff037b5b7b000000000001515dc4d904000000000004bb26010000000004536a6aac00000000", + "516552516352ac", + 2, + 328538756, + "8bb7a0129eaf4b8fc23e911c531b9b7637a21ab11a246352c6c053ff6e93fcb6" + ], + [ + 
"c441132102cc82101b6f31c1025066ab089f28108c95f18fa67db179610247086350c163bd01000000055152526300ffffffff9b8d56b1f16746f075249b215bdb3516cbbe190fef6292c75b1ad8a8988897c3000000000451655300ffffffff02f9078b0000000000070053ac51ac0051c0422105000000000651006563525200000000", + "ac51", + 0, + -197051790, + "0d675c36ed969b27c390235835a12d6ff040f6d3d9acd6f1e508d4e5d93dc403" + ], + [ + "ab82ad3b04545bd86b3bb937eb1af304d3ef1a6d1343ed809b4346cafb79b7297c09e1648202000000086351ac5200535353ffffffff95d32795bbaaf5977a81c2128a9ec0b3c7551b9b1c3d952876fcb423b2dfb9e80000000005515363acac47a7d050ec1a603627ce6cd606b3af314fa7964abcc579d92e19c7aba00cf6c3090d6d4601000000056a516551633e794768bfe39277ebc0db18b5afb5f0c8117dde9b4dfd5697e9027210eca76a9be20d630000000006005200636aacffffffff01ec2ddc050000000008ac52ac65ac65ac5100000000", + "536300", + 1, + -2070209841, + "b362da5634f20be7267de78b545d81773d711b82fe9310f23cd0414a8280801d" + ], + [ + "8bff9d170419fa6d556c65fa227a185fe066efc1decf8a1c490bc5cbb9f742d68da2ab7f320100000006000053525365a7a43a80ab9593b9e8b6130a7849603b14b5c9397a190008d89d362250c3a2257504eb810200000005acacac0051ee141be418f003e75b127fd3883dbf4e8c3f6cd05ca4afcaac52edd25dd3027ae70a62a00000000008ac52526a5200536affffffffb8058f4e1d7f220a1d1fa17e96d81dfb9a304a2de4e004250c9a576963a586ae0300000004acac5363b9bc856c039c01d804000000000951656aac53005365acb0724e0000000000036563acea7c7a0000000000036a00ac00000000", + "6565", + 1, + -1349282084, + "91e30f9537e6db6647dc14005471ad52c72a408898ce94a20e7335b59d6efddd" + ], + [ + "0e1633b4041c50f656e882a53fde964e7f0c853b0ada0964fc89ae124a2b7ffc5bc97ea6230100000005ac6aacacacffffffff2e35f4dfcad2d53ea1c8ada8041d13ea6c65880860d96a14835b025f76b1fbd9000000000351515121270867ef6bf63a91adbaf790a43465c61a096acc5a776b8e5215d4e5cd1492e611f761000000000500ac6a5265ffffffff63b5fc39bcac83ca80ac36124abafc5caee608f9f63a12479b68473bd4bae769000000000865ac52acac5263acffffffff0163153e02000000000600516565515300000000", + "6a6aac00", + 0, + -968477862, + 
"2895286e20c64b3f440f63f31e23c495e96ceeac48e54d858bf6522c07c3cdd2" + ], + [ + "2b052c24022369e956a8d318e38780ef73b487ba6a8f674a56bdb80a9a63634c6110fb5154010000000251acffffffff48fe138fb7fdaa014d67044bc05940f4127e70c113c6744fbd13f8d51d45143e01000000005710db3804e01aa9030000000007acac6a516a5152fd55aa01000000000651510000ac636d6026010000000000b97da9000000000000fddf3b53", + "006552", + 0, + 595461670, + "bdc139efefac1cea236855c47152424e47b89f7dd79717962da1c8b9009c79fa" + ], + [ + "073bc856015245f03b2ea2da62ccedc44ecb99e4250c7042f596bcb23b294c9dc92cfceb6b0200000005516352636afe292fb303b7c3f001000000000352636af3c49502000000000400ac6a535851850100000000056aac65536500000000", + "6a53006a52", + 0, + 247114317, + "6d047c82c51fe13a363ebfa6be10eeaaff3d6fe6209fd092b2b56255fc38f9da" + ], + [ + "7888b71403f6d522e414d4ca2e12786247acf3e78f1918f6d727d081a79813d129ee8befce0100000006516a63536365ffffffff4a882791bf6400fda7a8209fb2c83c6eef51831bdf0f5dacde648859090797ec030000000153ffffffffbb08957d59fa15303b681bad19ccf670d7d913697a2f4f51584bf85fcf91f1f30200000008526565ac52ac63acffffffff0227c0e8050000000001ac361dc801000000000600515165000000000000", + "656a", + 2, + 1869281295, + "9533d9296a82da8c657d102b3d6dc63b721f7833d74f2b15ef727348e3f8165a" + ], + [ + "cc4dda57047bd0ca6806243a6a4b108f7ced43d8042a1acaa28083c9160911cf47eab910c40200000006526a00006a63e4154e581fcf52567836c9a455e8b41b162a78c85906ccc1c2b2b300b4c69caaaa2ba02303000000065152ac510065ffffffff69696b523ed4bd41ecd4d65b4af73c9cf77edf0e066138712a8e60a04614ea1c03000000036a000016c9045c7df7836e05ac4b2e397e2dd72a5708f4a8bf6d2bc36adc5af3cacefcf074b8b403000000065352ac5252acffffffff01d7e380050000000000cf4e699a", + "525163656351", + 1, + -776533694, + "ff18c5bffd086e00917c2234f880034d24e7ea2d1e1933a28973d134ca9e35d2" + ], + [ + "b7877f82019c832707a60cf14fba44cfa254d787501fdd676bd58c744f6e951dbba0b3b77f0200000009ac515263ac53525300a5a36e500148f89c0500000000075265ac6a6a65ac00000000", + "6563", + 0, + -1785108415, + 
"5f1917641e3c3a887026c455b25fc2ee58b1f01d79dec20b4f763cc6e9d9f7e3" + ], + [ + "aeb14046045a28cc59f244c2347134d3434faaf980961019a084f7547218785a2bd03916f3000000000165f852e6104304955bda5fa0b75826ee176211acc4a78209816bbb4419feff984377b2352200000000003a94a5032df1e0d60390715b4b188c330e4bb7b995f07cdef11ced9d17ee0f60bb7ffc8e0100000002516513e343a5c1dc1c80cd4561e9dddad22391a2dbf9c8d2b6048e519343ca1925a9c6f0800a020000000665516365ac513180144a0290db270000000000046551515138b1870100000000055363ac516a9e5cd98a", + "53ac", + 0, + 478591320, + "4a2ffc462c302c55bb020ea074fb1064b574bff3d9bc9742807b68055c0d97e5" + ], + [ + "c9270fe004c7911b791a00999d108ce42f9f1b19ec59143f7b7b04a67400888808487bd59103000000066a0052ac6565b905e76687be2dd7723b22c5e8269bc0f2000a332a289cfc40bc0d617cfe3214a61a85a30300000007ac63ac00635251560871209f21eb0268f175b8b4a06edd0b04162a974cf8b5dada43e499a1f22380d35ede0300000000792213fc58b6342cc8100079f9f5f046fb89f2d92cf0a2cb6d07304d32d9da858757037c000000000651636565516affffffff02c72a8b03000000000452acac530dfb9f05000000000096f94307", + "5253536351", + 3, + 543688436, + "0278adbcc476d135493ae9bdcd7b3c2002df17f2d81c17d631c50c73e546c264" + ], + [ + "57a5a04c0278c8c8e243d2df4bb716f81d41ac41e2df153e7096f5682380c4f441888d9d260300000002636afdbe4203525dff42a7b1e628fe22bccaa5edbb34d8ab02faff198e085580ea5fcdb0c61b0000000002ac6affffffff03375e6c05000000000563516a6a513cb6260400000000007ca328020000000005516a636a5294701cc7", + "0053ac5152", + 0, + -550925626, + "45485ce2e926bf8ee366ac0b0903540562ac05f3c5cdc881baf9dce1e142d1e7" + ], + [ + "072b75a504ad2550c2e9a02614bc9b2a2f50b5b553af7b87c0ef07c64ddc8d8934c96d216401000000026aaca1387242a5bcd21099b016ad6045bed7dce603472757d9822cc5f602caa4ae20414d378b02000000026a63e4ac816734acdc969538d6f70b8ab43a2589f55e0177a4dc471bdd0eb61d59f0f46f6bb801000000055351526a52d9f2977be76a492c3a7617b7a16dc29a3b0a7618f328c2f7d4fd9bafe760dc427a5066ef000000000465635165ffffffff02c5793600000000000165296820050000000002ac6300000000", + "53006a6aac0052", + 2, + 66084636, 
+ "437e89bb6f70fd2ed2feef33350b6f6483b891305e574da03e580b3efd81ae13" + ], + [ + "7e27c42d0279c1a05eeb9b9faedcc9be0cab6303bde351a19e5cbb26dd0d594b9d74f40d2b020000000200518c8689a08a01e862d5c4dcb294a2331912ff11c13785be7dce3092f154a005624970f84e0200000000500cf5a601e74c1f0000000000066a52636a6a5200000000", + "6500006a5351", + 0, + 449533391, + "c31419c6afa744a7c065dd62e90f1e96c6cbcceb7304c559fed7d81909679d26" + ], + [ + "11414de403d7f6c0135a9df01cb108c1359b8d4e105be50a3dcba5e6be595c8817217490b20000000003005263ffffffff0c6becb9c3ad301c8dcd92f5cbc07c8bed7973573806d1489316fc77a829da03030000000700005253535352ffffffff2346d74ff9e12e5111aa8779a2025981850d4bf788a48de72baa2e321e4bc9ca00000000046352ac63cc585b64045e03850500000000075253516aacac00efa9cf0300000000065200635151acbe80330400000000060063635100000be159050000000007525300655300ac00000000", + "51656a0051", + 0, + 683137826, + "d4737f3b58f3e5081b35f36f91acde89dda00a6a09d447e516b523e7a99264d5" + ], + [ + "1c6b5f29033fc139338658237a42456123727c8430019ca25bd71c6168a9e35a2bf54538d80100000008536aac52ac6a6a52ffffffff3fb36be74036ff0c940a0247c451d923c65f826793d0ac2bb3f01ecbec80332901000000050000516363ffffffff5d9eca0cf711685105bd060bf7a67321eaef95367acffab36ce8dedddd632ee2000000000652ac6a63ac517167319e032d26de040000000003516363dc38fb010000000000b37b00000000000005520051ac534baba51f", + "636300ac6563", + 0, + -2049129935, + "a8ed152739f1c8ac0c96f2c2f7ef5f4bbf4a5a6e6c0ee26a915bebfc385ab27d" + ], + [ + "978b9dad0214cfc7ce392d74d9dcc507350dc34007d72e4125861c63071ebf2cc0a6fd4856020000000551ac6a6a52ffffffff47f20734e3370e733f87a6edab95a7a268ae44db7a8974e255614836b22938720200000008635265ac51516553ffffffff0137b2560100000000035252ac2f3363e9", + "006a6352", + 1, + 2014249801, + "55611a5fb1483bce4c14c33ed15198130e788b72cd8929b2ceef4dd68b1806bf" + ], + [ + 
"442f1c8703ab39876153c241ab3d69f432ba6db4732bea5002be45c8ca10c3a2356fe0e9590300000001accb2b679cab7c58a660cb6d4b3452c21cd7251a1b77a52c300f655f5baeb6fa27ff5b79880300000003005252e5ccf55712bc8ed6179f6726f8a78f3018a7a0391594b7e286ef5ee99efdcde302a102cc0200000009006352526351536a63ffffffff04443f63030000000005536a6363651405fb020000000008ac5353515253006a9f172b000000000003535263ad5c50050000000007656a65630000ac00000000", + "65636a006552", + 2, + 2125838294, + "ec3f58de85a13baae2760020b1776676b18432d43792b3e5ebd8eb43a97994d0" + ], + [ + "2b3470dd028083910117f86614cdcfb459ee56d876572510be4df24c72e8f58c70d5f5948b03000000056a65635265da2c3aac9d42c9baafd4b655c2f3efc181784d8cba5418e053482132ee798408ba43ccf90300000000ffffffff047dda4703000000000765516a52ac53009384a603000000000551636a636a8cf57a030000000002526a8cf6a405000000000952636a6a6565525100661e09cb", + "ac520063ac6a6a52", + 1, + 1405647183, + "5694adec37b52c6562305644b7790c0115e48f8297e0c60b80656b95513b8da3" + ], + [ + "d74282b501be95d3c19a5d9da3d49c8a88a7049c573f3788f2c42fc6fa594f59715560b9b00000000009655353525265ac52ac9772121f028f8303030000000003510065af5f47040000000007ac516a6551630000000000", + "ac53006363ac", + 0, + -1113209770, + "2f482b97178f17286f693796a756f4d7bd2dfcdbecd4142528eec1c7a3e5101a" + ], + [ + "3a5644a9010f199f253f858d65782d3caec0ac64c3262b56893022b9796086275c9d4d097b02000000009d168f7603a67b30050000000007ac51536a0053acd9d88a0500000000066553635352633cf1f403000000000352ac6a00000000", + "005363536565acac6a", + 0, + -1383947195, + "a72af2143af372e06cd7b13b85558a98ffbd937894d99247b6b8aba11a773e4b" + ], + [ + "67b3cc43049d13007485a8133b90d94648bcf30e83ba174f5486ab42c9107c69c5530c5e1f0000000003005100ffffffff9870ebb65c14263282ea8d41e4f4f40df16b565c2cf86f1d22a9494cad03a67f01000000016a5a121bee5e359da548e808ae1ad6dfccae7c67cbb8898d811638a1f455a671e822f228ef030000000151c1fcc9f9825f27c0dde27ea709da62a80a2ff9f6b1b86a5874c50d6c37d39ae31fb6c8a0030000000163553b8786020ca74a000000000005656351535275c0760000000000020052e659b05d", + 
"636a6a6a", + 0, + -342795451, + "28524792262542aff915fcbdc306ca1fff8b5d9ea54103eb4693f85a6c779fa0" + ], + [ + "bda1ff6804a3c228b7a12799a4c20917301dd501c67847d35da497533a606701ad31bf9d5e0300000001ac16a6c5d03cf516cd7364e4cbbf5aeccd62f8fd03cb6675883a0636a7daeb650423cb1291010000000500656553ac4a63c30b6a835606909c9efbae1b2597e9db020c5ecfc0642da6dc583fba4e84167539a8020000000865525353515200acffffffff990807720a5803c305b7da08a9f24b92abe343c42ac9e917a84e1f335aad785d00000000026a52ffffffff04981f200300000000008c762200000000000153690b9605000000000151ce88b301000000000753526a6a51006500000000", + "000052ac52530000", + 1, + -1809193140, + "0396ec42c37e898cb246dcdff66e2a2acfb3640b7a2dd1f603eb786ef59f5bee" + ], + [ + "2ead28ff0243b3ab285e5d1067f0ec8724224402b21b9cef9be962a8b0d153d401be99bbee0000000004ac635153ffffffff6985987b7c1360c9fa8406dd6e0a61141709f0d5195f946da55ed83be4e3895301000000020053ffffffff016503d20500000000085251ac6a65656a6a00000000", + "51", + 1, + 1723793403, + "67483ee62516be17a2431a163e96fd88a08ff2ce8634a52e42c1bc04e30f3f8a" + ], + [ + "db4904e6026b6dd8d898f278c6428a176410d1ffbde75a4fa37cda12263108ccd4ca6137440100000007656a0000515263ffffffff1db7d5005c1c40da0ed17b74cf6b2a6ee2c33c9e0bacda76c0da2017dcac2fc702000000026a53ffffffff0454cf2103000000000153463aef0000000000076a63006552636387e0ed050000000000e8d16f05000000000352ac63e4521b22", + "", + 1, + 1027042424, + "12b7f4dd4ac7ae8a65cc28f353305763122f1f2957ea73446fff00ef6c2d3723" + ], + [ + "dca31ad10461ead74751e83d9a81dcee08db778d3d79ad9a6d079cfdb93919ac1b0b6187110200000007650052536551ac7f7e9aed78e1ef8d213d40a1c50145403d196019985c837ffe83836222fe3e5955e177e70100000006525152525300ffffffff5e98482883cc08a6fe946f674cca479822f0576a43bf4113de9cbf414ca628060100000006ac53516a5253ffffffff07490b0b898198ec16c23b75d606e14fa16aa3107ef9818594f72d5776805ec502000000036a0052ffffffff01932a28030000000007656551ac6a516a2687aa06", + "635300ac", + 2, + -1880362326, + "71d1c5106f3fc844c32f5d75a48b99253efbdf8ef7c08c4ee15335e4aee3ec9d" + ], + [ + 
"e14e1a9f0442ab44dfc5f6d945ad1ff8a376bc966aad5515421e96ddbe49e529614995cafc03000000055165515165fffffffff97582b8290e5a5cfeb2b0f018882dbe1b43f60b7f45e4dd21dbd3a8b0cfca3b0200000000daa267726fe075db282d694b9fee7d6216d17a8c1f00b2229085495c5dc5b260c8f8cd5d000000000363ac6affffffffaab083d22d0465471c896a438c6ac3abf4d383ae79420617a8e0ba8b9baa872b010000000763526563ac5363d948b5ce022113440200000000076a636552006a53229017040000000000e6f62ac8", + "526353636a65", + 3, + -485265025, + "1bc8ad76f9b7c366c5d052dc479d6a8a2015566d3a42e93ab12f727692c89d65" + ], + [ + "720d4693025ca3d347360e219e9bc746ef8f7bc88e8795162e5e2f0b0fc99dc17116fc937100000000046353520045cb1fd79824a100d30b6946eab9b219daea2b0cdca6c86367c0c36af98f19ac64f3575002000000008a1c881003ed16f3050000000007536a63630000ac45e0e704000000000151f6551a0500000000076353656551536300000000", + "65536a6a510000", + 1, + 1249091393, + "107b7fea2a89f1716c836489cb9b794cc56b5003888e650069f0ab919f8bff2c" + ], + [ + "69df842a04c1410bfca10896467ce664cfa31c681a5dac10106b34d4b9d4d6d0dc1eac01c1000000000551536a5165269835ca4ad7268667b16d0a2df154ec81e304290d5ed69e0069b43f8c89e673328005e200000000066a5153006aacffffffffc9314bd80b176488f3d634360fcba90c3a659e74a52e100ac91d3897072e3509010000000665ac51636363ffffffff0e0768b13f10f0fbd2fa3f68e4b4841809b3b5ba0e53987c3aaffcf09eee12bf0300000008ac535263526a53ac514f4c2402da8fab0400000000001ef15201000000000451526a52d0ec9aca", + "525365ac52", + 1, + 313967049, + "a72a760b361af41832d2c667c7488dc9702091918d11e344afc234a4aea3ec44" + ], + [ + "adf2340d03af5c589cb5d28c06635ac07dd0757b884d4777ba85a6a7c410408ad5efa8b1900100000003510000ffffffff808dc0231c96e6667c04786865727013922bcb7db20739b686f0c17f5ba70e8f0300000000fd2332a654b580881a5e2bfec8313f5aa878ae94312f37441bf2d226e7fc953dcf0c77ab000000000163aa73dc580412f8c2050000000005636aacac63da02d502000000000153e74b52020000000001536b293d030000000006636552ac526500000000", + "00005252", + 0, + -568651175, + "2ba54d0067af47a7e8902abf4ee382268e78e5ca424047817f408c394b3e8bca" + ], + [ + 
"e4fec9f10378a95199c1dd23c6228732c9de0d7997bf1c83918a5cfd36012476c0c3cba24002000000085165536500ac0000ad08ab93fb49d77d12a7ccdbb596bc5110876451b53a79fdce43104ff1c316ad63501de801000000036a635276af9908463444aeecd32516a04dd5803e02680ed7f16307242a794024d93287595250f4000000000089807279041a82e603000000000200521429100200000000055253636a63f20b940400000000004049ed04000000000300526543dfaf7d", + "6563526aac", + 2, + -1923470368, + "a946cd4885bc5e6160ee87e8ab443327ad43ee02ae095859248ababe0c449b4e" + ], + [ + "4000d3600100b7a3ff5b41ec8d6ccdc8b2775ad034765bad505192f05d1f55d2bc39d0cbe101000000065165ac6a5163ffffffff034949150100000000026a6a92c9f600000000000565536a635200e697040000000007636a5353525365237ae7d2", + "52000063", + 0, + -880046683, + "6174e28e376d36e5cac48ffc38e90f0c425519eb39032142fc7a57baf11c89eb" + ], + [ + "eabc0aa701fe489c0e4e6222d72b52f083166b49d63ad1410fb98caed027b6a71c02ab830c0300000006525363530065ffffffff01a5dc0b05000000000253533e820177", + "", + 0, + 954499283, + "1d849b92eedb9bf26bd4ced52ce9cb0595164295b0526842ab1096001fcd31b1" + ], + [ + "d48d55d304aad0139783b44789a771539d052db565379f668def5084daba0dfd348f7dcf6b00000000006826f59e5ffba0dd0ccbac89c1e2d69a346531d7f995dea2ca6d7e6d9225d81aec257c6003000000096a655200ac656552acffffffffa188ffbd5365cae844c8e0dea6213c4d1b2407274ae287b769ab0bf293e049eb0300000004ac6a6a51ad1c407c5b116ca8f65ed496b476183f85f072c5f8a0193a4273e2015b1cc288bf03e9e2030000000152ffffffff04076f4404000000000465535353be6500050000000003ac65ac3c15040500000000085100536353516a52ed3aba04000000000700ac5353636aac00000000", + "5253526563acac", + 2, + -1506108646, + "569e37ae36eff78c39b021a8d2c862ac190488703f4981614d880d4bf19b5544" + ], + [ + "9746f45b039bfe723258fdb6be77eb85917af808211eb9d43b15475ee0b01253d33fc3bfc502000000065163006a655312b12562dc9c54e11299210266428632a7d0ee31d04dfc7375dcad2da6e9c11947ced0e000000000009074095a5ac4df057554566dd04740c61490e1d3826000ad9d8f777a93373c8dddc4918a00000000025351ffffffff01287564030000000003636a0000000000", + "52", + 2, + 
-1380411075, + "e091b57aa044402b515b958aa22a05304df52818ce469f15f592c0a6ebbb9d02" + ], + [ + "8731b64903d735ba16da64af537eaf487b57d73977f390baac57c7b567cb2770dfa2ef65870100000001635aedd990c42645482340eacb0bfa4a0a9e888057389c728b5b6a8691cdeb1a6a67b45e140200000008ac53526a52516551ffffffff45c4f567c47b8d999916fd49642cbc5d10d43c304b99e32d044d35091679cb860100000003006a51ffffffff0176d6c200000000000000000000", + "6a6553", + 2, + -1221546710, + "ccfdba36d9445f4451fb7cbf0752cc89c23d4fc6fff0f3930d20e116f9db0b95" + ], + [ + "f5cfc52f016209ab1385e890c2865a74e93076595d1ca77cbe8fbf2022a2f2061a90fb0f3e010000000253acffffffff027de73f0200000000085252ac510052acac49cd6a020000000000e6c2cb56", + "51655253530063", + 0, + -1195302704, + "5532717402a2da01a1da912d824964024185ca7e8d4ad1748659dc393a14182b" + ], + [ + "df0a32ae01c4672fd1abd0b2623aae0a1a8256028df57e532f9a472d1a9ceb194267b6ee190200000009536a6a51516a525251b545f9e803469a2302000000000465526500810631040000000000441f5b050000000006530051006aaceb183c76", + "536a635252ac6a", + 0, + 1601138113, + "9a0435996cc58bdba09643927fe48c1fc908d491a050abbef8daec87f323c58f" + ], + [ + "d102d10c028b9c721abb259fe70bc68962f6cae384dabd77477c59cbeb1fb26266e091ba3e0100000002516affffffffe8d7305a74f43e30c772109849f4cd6fb867c7216e6d92e27605e69a0818899700000000026a65ecf82d58027db4620500000000026552c28ed301000000000000000000", + "0051515365", + 1, + -131815460, + "e719bbfc9df2b77cb1303161106fc13b6b0709fc82a067b715ebe1cf7d47d75b" + ], + [ + "cef930ed01c36fcb1d62ceef931bef57098f27a77a4299904cc0cbb44504802d535fb11557010000000153ffffffff02c8657403000000000863ac655253520063d593380400000000036a536a00000000", + "656a0051636553", + 0, + -351313308, + "8d15e39940afbecf3c96d8b8ebf3b7075fb69615a4669edb282be97b3b27108c" + ], + [ + 
"b1c0b71804dff30812b92eefb533ac77c4b9fdb9ab2f77120a76128d7da43ad70c20bbfb990200000002536392693e6001bc59411aebf15a3dc62a6566ec71a302141b0c730a3ecc8de5d76538b30f55010000000665535252ac514b740c6271fb9fe69fdf82bf98b459a7faa8a3b62f3af34943ad55df4881e0d93d3ce0ac0200000000c4158866eb9fb73da252102d1e64a3ce611b52e873533be43e6883137d0aaa0f63966f060000000000ffffffff04a605b604000000000851006a656a630052f49a0300000000000252515a94e1050000000005ac65005200fd8dd002000000000651535163526a2566852d", + "ac5363", + 0, + -1718831517, + "b0dc030661783dd9939e4bf1a6dfcba809da2017e1b315a6312e5942d714cf05" + ], + [ + "6a270ee404ebc8d137cfd4bb6b92aa3702213a3139a579c1fc6f56fbc7edd9574ef17b13f3010000000500656565acffffffffaa65b1ab6c6d87260d9e27a472edceb7dd212483e72d90f08857abf1dbfd46d10100000000fffffffff93c4c9c84c4dbbe8a912b99a2830cfe3401aebc919041de063d660e585fc9f002000000076aac52ac6a53acfa6dcef3f28355a8d98eee53839455445eeee83eecd2c854e784efa53cee699dbfecaebd01000000026a51ffffffff04f7d71b050000000009ac6a536aac6a6365513c376505000000000452656a53fa742002000000000039ed82030000000007516aac635165512fdabd17", + "535252526563", + 1, + -1326210506, + "6395a22dd3378726fa2b92402bdae26f15e9dc41b2b38c39ba1c7e79fbfa4438" + ], + [ + "3657e4260304ccdc19936e47bdf058d36167ee3d4eb145c52b224eff04c9eb5d1b4e434dfc000000000058aefe57707c66328d3cceef2e6f56ab6b7465e587410c5f73555a513ace2b232793a74400000000036a006522e69d3a785b61ad41a635d59b3a06b2780a92173f85f8ed428491d0aaa436619baa9c45010000000263512609629902eb7793050000000000a1b967040000000003525353a34d6192", + "516a", + 0, + -1761874713, + "0a2ff41f6d155d8d0e37cd9438f3b270df9f9214cda8e95c76d5a239ca189df2" + ], + [ + "a0eb6dc402994e493c787b45d1f946d267b09c596c5edde043e620ce3d59e95b2b5b93d43002000000086a5252526aac636555694287a279e29ee491c177a801cd685b8744a2eab83824255a3bcd08fc0e3ea13fb88200000000056365520063ffffffff029e424a040000000006ac53516a636a23830f0400000000016adf49c1f9", + "ac0065ac6500005252", + 1, + 669294500, + 
"f398f44f5ead1c19b708fec28bdba33c0f6f9f8f3fa2e1697d76ee7f9738f924" + ], + [ + "6e67c0d3027701ef71082204c85ed63c700ef1400c65efb62ce3580d187fb348376a23e9710200000001655b91369d3155ba916a0bc6fe4f5d94cad461d899bb8aaac3699a755838bfc229d6828920010000000765536353526a52ffffffff04c0c792000000000005650052535372f79e000000000001527fc0ee010000000004ac530065d1b3e902000000000151a942b278", + "6a5151", + 0, + 1741407676, + "f4eb16dbd61db1fe8064856e67d5a4ca6a7735406699a6603d96aba53a734e68" + ], + [ + "8f53639901f1d643e01fc631f632b7a16e831d846a0184cdcda289b8fa7767f0c292eb221a00000000036a53acffffffff037a2daa01000000000553ac6a6a51eac349020000000005ac526552638421b3040000000007006a005100ac63048a1492", + "ac65", + 0, + 1033685559, + "da86c260d42a692358f46893d6f91563985d86eeb9ea9e21cd38c2d8ffcfcc4d" + ], + [ + "491f99cb01bdfba1aa235e5538dac081fae9ce55f9622de483afe7e65105c2b0db75d360d200000000045251636340b60f0f041421330300000000096351ac000051636553ce2822040000000005516a00ac5180c8e40300000000025100caa8570400000000020000cfdc8da6", + "6a5100516a655365", + 0, + -953727341, + "397c68803b7ce953666830b0221a5e2bcf897aa2ded8e36a6b76c497dcb1a2e1" + ], + [ + "b3cad3a7041c2c17d90a2cd994f6c37307753fa3635e9ef05ab8b1ff121ca11239a0902e700300000008635300006aac5163ffffffffcec91722c7468156dce4664f3c783afef147f0e6f80739c83b5f09d5a09a57040200000004516a6552ffffffff969d1c6daf8ef53a70b7cdf1b4102fb3240055a8eaeaed2489617cd84cfd56cf02000000025253ffffffff46598b6579494a77b593681c33422a99559b9993d77ca2fa97833508b0c169f80200000009655300655365516351ffffffff04d7ddf800000000000753536a65ac635109f3420300000000036a65ac33589d04000000000952656a65655151acac944d6f0400000000006a8004ba", + "005165", + 1, + 1035865506, + "fe1dc9e8554deecf8f50c417c670b839cc9d650722ebaaf36572418756075d58" + ], + [ + "e1cfd73b0125add9e9d699f5a45dca458355af175a7bd4486ebef28f1928d87864384d02df02000000036a0051ffffffff0357df030100000000036a5365777e2d040000000006636a00005265f434a601000000000351655100000000", + "53", + 0, + -1936500914, + 
"5ff796ec362f811a8acebab73fe51acb423391f0570bc32070c13bfc362a4104" + ], + [ + "cf781855040a755f5ba85eef93837236b34a5d3daeb2dbbdcf58bb811828d806ed05754ab8010000000351ac53ffffffffda1e264727cf55c67f06ebcc56dfe7fa12ac2a994fecd0180ce09ee15c480f7d00000000086351516a51acac0053dd49ff9f334befd6d6f87f1a832cddfd826a90b78fd8cf19a52cb8287788af94e939d6020000000700525251ac526310d54a7e8900ed633f0f6f0841145aae7ee0cbbb1e2a0cae724ee4558dbabfdc58ba6855010000000452536a53fd1b101102c51f910500000000096300656a525252656a300bee010000000008ac52005263635151e19235c9", + "53005365", + 2, + 1422854188, + "47375ce8a6890e96241eff5952901b6eddd904e299f72942170871853777f82f" + ], + [ + "fea256ce01272d125e577c0a09570a71366898280dda279b021000db1325f27edda41a5346010000000153c752c21c013c2b3a01000000000000000000", + "65", + 0, + 1145543262, + "076b9f844f6ae429de228a2c337c704df1652c292b6c6494882190638dad9efd" + ] +] diff --git a/domain/consensus/utils/txscript/data/tx_invalid.json b/domain/consensus/utils/txscript/data/tx_invalid.json new file mode 100644 index 0000000..6c08031 --- /dev/null +++ b/domain/consensus/utils/txscript/data/tx_invalid.json @@ -0,0 +1,714 @@ +[ + [ + "The following are deserialized transactions which are invalid." 
+ ], + [ + "They are in the form" + ], + [ + "[[[prevout hash, prevout index, prevout scriptPubKey, amount?], [input 2], ...]," + ], + [ + "serializedTransaction, verifyFlags]" + ], + [ + "Objects that are only a single string (like this one) are ignored" + ], + [ + "0e1b5688cf179cd9f7cbda1fac0090f6e684bbf8cd946660120197c3f3681809 but with extra junk appended to the end of the scriptPubKey" + ], + [ + [ + [ + "6ca7ec7b1847f6bdbd737176050e6a08d66ccd55bb94ad24f4018024107a5827", + 0, + "0x41 0x043b640e983c9690a14c039a2037ecc3467b27a0dcd58f19d76c7bc118d09fec45adc5370a1c5bf8067ca9f5557a4cf885fdb0fe0dcc9c3a7137226106fbc779a5 CHECKSIG VERIFY 1" + ] + ], + "010000000127587a10248001f424ad94bb55cd6cd6086a0e05767173bdbdf647187beca76c000000004948304502201b822ad10d6adc1a341ae8835be3f70a25201bbff31f59cbb9c5353a5f0eca18022100ea7b2f7074e9aa9cf70aa8d0ffee13e6b45dddabf1ab961bda378bcdb778fa4701ffffffff0100f2052a010000001976a914fc50c5907d86fed474ba5ce8b12a66e0a4c139d888ac00000000", + "P2SH" + ], + [ + "Same as above, but with the sig in the scriptSig also pushed with the same non-standard OP_PUSHDATA" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "DUP HASH160 0x14 0x5b6462475454710f3c22f5fdf0b40704c92f25c3 EQUALVERIFY CHECKSIGVERIFY 1 0x4c 0x47 0x3044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a01" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006b4c473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "An invalid P2SH Transaction" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x7a052c840ba73af26755de42cf01cc9e0a49fef0 EQUAL" + 
] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000009085768617420697320ffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "Tests for CheckTransaction()" + ], + [ + "No inputs" + ], + [ + "Skipped because this is not checked by btcscript, this is a problem for chain." + ], + [ + "No outputs" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x05ab9e14d983742513f0f451e105ffb4198d1dd4 EQUAL" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022100f16703104aab4e4088317c862daec83440242411b039d14280e03dd33b487ab802201318a7be236672c5c56083eb7a5a195bc57a40af7923ff8545016cd3b571e2a601232103c40e5d339df3f30bf753e7e04450ae4ef76c9e45587d1d993bdc4cd06f0651c7acffffffff0000000000", + "P2SH" + ], + [ + "Negative output" + ], + [ + "Removed because btcscript doesn't do tx sanity checking." + ], + [ + "MAX_MONEY + 1 output" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x32afac281462b822adbec5094b8d4d337dd5bd6a EQUAL" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100e1eadba00d9296c743cb6ecc703fd9ddc9b3cd12906176a226ae4c18d6b00796022100a71aef7d2874deff681ba6080f1b278bac7bb99c61b08a85f4311970ffe7f63f012321030c0588dc44d92bdcbf8e72093466766fdc265ead8db64517b0c542275b70fffbacffffffff010140075af0750700015100000000", + "P2SH" + ], + [ + "MAX_MONEY output + 1 output" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0xb558cbf4930954aa6a344363a15668d7477ae716 EQUAL" + ] + ], + 
"01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022027deccc14aa6668e78a8c9da3484fbcd4f9dcc9bb7d1b85146314b21b9ae4d86022100d0b43dece8cfb07348de0ca8bc5b86276fa88f7f2138381128b7c36ab2e42264012321029bb13463ddd5d2cc05da6e84e37536cb9525703cfd8f43afdb414988987a92f6acffffffff020040075af075070001510001000000000000015100000000", + "P2SH" + ], + [ + "Duplicate inputs" + ], + [ + "Removed because btcscript doesn't check input duplication, btcchain does" + ], + [ + "Coinbase of size 1" + ], + [ + "Note the input is just required to make the tester happy" + ], + [ + "Removed because btcscript doesn't handle coinbase checking, btcchain does" + ], + [ + "Coinbase of size 101" + ], + [ + "Note the input is just required to make the tester happy" + ], + [ + "Removed because btcscript doesn't handle coinbase checking, btcchain does" + ], + [ + "Null txin" + ], + [ + "Removed because btcscript doesn't do tx sanity checking." + ], + [ + "Same as the transactions in valid with one input SIGHASH_ALL and one SIGHASH_ANYONECANPAY, but we set the _ANYONECANPAY sequence number, invalidating the SIGHASH_ALL signature" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG" + ], + [ + "0000000000000000000000000000000000000000000000000000000000000200", + 0, + "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG" + ] + ], + 
"01000000020001000000000000000000000000000000000000000000000000000000000000000000004948304502203a0f5f0e1f2bdbcd04db3061d18f3af70e07f4f467cbc1b8116f267025f5360b022100c792b6e215afc5afc721a351ec413e714305cb749aae3d7fee76621313418df10101000000000200000000000000000000000000000000000000000000000000000000000000000000484730440220201dc2d030e380e8f9cfb41b442d930fa5a685bb2c8db5906671f865507d0670022018d9e7a8d4c8d86a73c2a724ee38ef983ec249827e0e464841735955c707ece98101000000010100000000000000015100000000", + "P2SH" + ], + [ + "CHECKMULTISIG with incorrect signature order" + ], + [ + "Note the input is just required to make the tester happy" + ], + [ + [ + [ + "b3da01dd4aae683c7aee4d5d8b52a540a508e1115f77cd7fa9a291243f501223", + 0, + "HASH160 0x14 0xb1ce99298d5f07364b57b1e5c9cc00be0b04a954 EQUAL" + ] + ], + "01000000012312503f2491a2a97fcd775f11e108a540a5528b5d4dee7a3c68ae4add01dab300000000fdfd0048304502207aacee820e08b0b174e248abd8d7a34ed63b5da3abedb99934df9fddd65c05c4022100dfe87896ab5ee3df476c2655f9fbe5bd089dccbef3e4ea05b5d121169fe7f5f401483045022100f6649b0eddfdfd4ad55426663385090d51ee86c3481bdc6b0c18ea6c0ece2c0b0220561c315b07cffa6f7dd9df96dbae9200c2dee09bf93cc35ca05e6cdf613340aa014c695221031d11db38972b712a9fe1fc023577c7ae3ddb4a3004187d41c45121eecfdbb5b7210207ec36911b6ad2382860d32989c7b8728e9489d7bbc94a6b5509ef0029be128821024ea9fac06f666a4adc3fc1357b7bec1fd0bdece2b9d08579226a8ebde53058e453aeffffffff0180380100000000001976a914c9b99cddf847d10685a4fabaa0baf505f7c3dfab88ac00000000", + "P2SH" + ], + [ + "Empty stack when we try to run CHECKSIG" + ], + [ + [ + [ + "ad503f72c18df5801ee64d76090afe4c607fb2b822e9b7b63c5826c50e22fc3b", + 0, + "0x21 0x027c3a97665bf283a102a587a62a30a0c102d4d3b141015e2cae6f64e2543113e5 CHECKSIG NOT" + ] + ], + "01000000013bfc220ec526583cb6b7e922b8b27f604cfe0a09764de61e80f58dc1723f50ad0000000000ffffffff0101000000000000002321027c3a97665bf283a102a587a62a30a0c102d4d3b141015e2cae6f64e2543113e5ac00000000", + "P2SH" + ], + [ + "CHECKLOCKTIMEVERIFY tests" + ], + [ + 
"By-height locks, with argument just beyond tx nLockTime" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "499999999 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000fe64cd1d", + "P2SH" + ], + [ + "By-time locks, with argument just beyond tx nLockTime (but within numerical boundaries)" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "500000001 CHECKLOCKTIMEVERIFY 1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000feffffff", + "P2SH" + ], + [ + "Argument missing" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000001b1010000000100000000000000000000000000", + "P2SH" + ], + [ + "Argument negative with by-DAAScore nLockTime=0" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "-1 CHECKLOCKTIMEVERIFY 1" + ] + ], + 
"010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "Argument negative with by-blocktime nLockTime=500,000,000" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "-1 CHECKLOCKTIMEVERIFY 1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000004005194b1010000000100000000000000000002000000", + "P2SH" + ], + [ + "Input locked" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b1ffffffff0100000000000000000002000000", + "P2SH" + ], + [ + "Another input being unlocked isn't sufficient; the CHECKLOCKTIMEVERIFY-using input must be unlocked" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKLOCKTIMEVERIFY 1" + ], + [ + "0000000000000000000000000000000000000000000000000000000000000200", + 1, + "1" + ] + ], + "010000000200010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00020000000000000000000000000000000000000000000000000000000000000100000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "Argument/tx height/time mismatch, both versions" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKLOCKTIMEVERIFY 1" + ] + ], + 
"01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "499999999 CHECKLOCKTIMEVERIFY 1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "500000000 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "500000000 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", + "P2SH" + ], + [ + "Argument 2^32 with nLockTime=2^32-1" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967296 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", + "P2SH" + ], + [ + "Same, but with nLockTime=2^31-1" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffff7f", + "P2SH" + ], + [ + "6 byte non-minimally-encoded arguments are invalid even if their contents are valid" + ], + [ + [ + [ + 
"0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x06 0x000000000000 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "Failure due to failing CHECKLOCKTIMEVERIFY in scriptSig" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b0000000000100000000000000000000000000", + "P2SH" + ], + [ + "Failure due to failing CHECKLOCKTIMEVERIFY in redeemScript" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x198821d0c372b25f4d25d71171164ac5a3a0f20d EQUAL" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000030251b0000000000100000000000000000000000000", + "P2SH" + ], + [ + "A transaction with a non-standard DER signature." 
+ ], + [ + [ + [ + "b1dbc81696c8a9c0fccd0693ab66d7c368dbc38c0def4e800685560ddd1b2132", + 0, + "DUP HASH160 0x14 0x4b3bd7eba3bc0284fd3007be7f3be275e94f5826 EQUALVERIFY CHECKSIG" + ] + ], + "010000000132211bdd0d568506804eef0d8cc3db68c3d766ab9306cdfcc0a9c89616c8dbb1000000006c493045022100c7bb0faea0522e74ff220c20c022d2cb6033f8d167fb89e75a50e237a35fd6d202203064713491b1f8ad5f79e623d0219ad32510bfaa1009ab30cbee77b59317d6e30001210237af13eb2d84e4545af287b919c2282019c9691cc509e78e196a9d8274ed1be0ffffffff0100000000000000001976a914f1b3ed2eda9a2ebe5a9374f692877cdf87c0f95b88ac00000000", + "P2SH,DERSIG" + ], + [ + "CHECKSEQUENCEVERIFY tests" + ], + [ + "By-height locks, with argument just beyond txin.nSequence" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4259839 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000feff40000100000000000000000000000000", + "P2SH" + ], + [ + "By-time locks, with argument just beyond txin.nSequence (but within numerical boundries)" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4194305 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4259839 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000feff40000100000000000000000000000000", + "P2SH" + ], + [ + "Argument missing" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 
0, + "CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "Argument negative with by-DAAScore txin.nSequence=0" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "-1 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "Argument negative with by-blocktime txin.nSequence=CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "-1 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], + [ + "Argument/tx height/time mismatch, both versions" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "65535 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4194304 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4259839 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" 
+ ], + [ + "6 byte non-minimally-encoded arguments are invalid even if their contents are valid" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x06 0x000000000000 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffff00000100000000000000000000000000", + "P2SH" + ], + [ + "Failure due to failing CHECKSEQUENCEVERIFY in scriptSig" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1" + ] + ], + "02000000010001000000000000000000000000000000000000000000000000000000000000000000000251b0000000000100000000000000000000000000", + "P2SH" + ], + [ + "Failure due to failing CHECKSEQUENCEVERIFY in redeemScript" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x7c17aff532f22beb54069942f9bf567a66133eaf EQUAL" + ] + ], + "0200000001000100000000000000000000000000000000000000000000000000000000000000000000030251b1000000000100000000000000000000000000", + "P2SH" + ], + [ + "The following tests for the fix of a btc bug in the handling of SIGHASH_SINGLE" + ], + [ + "It results in signing the constant 1, instead of something generated based on the transaction," + ], + [ + "when the input doing the signing has an index greater than the maximum output index" + ], + [ + "We have fixed this bug and it should no longer be part of the concensus rules" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000200", + 0, + "1" + ], + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "DUP HASH160 0x14 0xe52b482f2faa8ecbf0db344f93c84ac908557f33 EQUALVERIFY CHECKSIG" + ] + ], + 
"01000000020002000000000000000000000000000000000000000000000000000000000000000000000151ffffffff0001000000000000000000000000000000000000000000000000000000000000000000006b483045022100c9cdd08798a28af9d1baf44a6c77bcc7e279f47dc487c8c899911bc48feaffcc0220503c5c50ae3998a733263c5c0f7061b483e2b56c4c41b456e7d2f5a78a74c077032102d5c25adb51b61339d2b05315791e21bbe80ea470a49db0135720983c905aace0ffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "Make diffs cleaner by leaving a comment here without comma at the end" + ] +] diff --git a/domain/consensus/utils/txscript/data/tx_valid.json b/domain/consensus/utils/txscript/data/tx_valid.json new file mode 100644 index 0000000..272d591 --- /dev/null +++ b/domain/consensus/utils/txscript/data/tx_valid.json @@ -0,0 +1,938 @@ +[ + [ + "The following are deserialized transactions which are valid." + ], + [ + "They are in the form" + ], + [ + "[[[prevout hash, prevout index, prevout scriptPubKey, amount?], [input 2], ...]," + ], + [ + "serializedTransaction, verifyFlags]" + ], + [ + "Objects that are only a single string (like this one) are ignored" + ], + [ + "The following is 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63" + ], + [ + "It is of particular interest because it contains an invalidly-encoded signature which OpenSSL accepts" + ], + [ + "See http://r6.ca/blog/20111119T211504Z.html" + ], + [ + "It is also the first OP_CHECKMULTISIG transaction in standard form" + ], + [ + [ + [ + "60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", + 0, + "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG" + ] + ], + 
"0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004847304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", + "P2SH" + ], + [ + "The following is a tweaked form of 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63" + ], + [ + "It is an OP_CHECKMULTISIG with an arbitrary extra byte stuffed into the signature at pos length - 2" + ], + [ + [ + [ + "60a20bd93aa49ab4b28d514ec10b06e1829ce6818ec06cd3aabd013ebcdc4bb1", + 0, + "1 0x41 0x04cc71eb30d653c0c3163990c47b976f3fb3f37cccdcbedb169a1dfef58bbfbfaff7d8a473e7e2e6d317b87bafe8bde97e3cf8f065dec022b51d11fcdd0d348ac4 0x41 0x0461cbdcc5409fb4b4d42b51d33381354d80e550078cb532a34bfa2fcfdeb7d76519aecc62770f5b0e4ef8551946d8a540911abe3e7854a26f39f58b25c15342af 2 OP_CHECKMULTISIG" + ] + ], + "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b49aa43ad90ba260000000004948304402203f16c6f40162ab686621ef3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2bab01ffffffff0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271ad504b88ac00000000", + "P2SH" + ], + [ + "The following is c99c49da4c38af669dea436d3e73780dfdb6c1ecf9958baa52960e8baee30e73" + ], + [ + "It is of interest because it contains a 0-sequence as well as a signature of SIGHASH type 0 (which is not a real type)" + ], + [ + [ + [ + "406b2b06bcd34d3c8733e6b79f7a394c8a431fbf4ff5ac705c93f4076bb77602", + 0, + "DUP HASH160 0x14 0xdc44b1164188067c3a32d4780f5996fa14a4f2d9 EQUALVERIFY CHECKSIG" + ] + ], + 
"01000000010276b76b07f4935c70acf54fbf1f438a4c397a9fb7e633873c4dd3bc062b6b40000000008c493046022100d23459d03ed7e9511a47d13292d3430a04627de6235b6e51a40f9cd386f2abe3022100e7d25b080f0bb8d8d5f878bba7d54ad2fda650ea8d158a33ee3cbd11768191fd004104b0e2c879e4daf7b9ab68350228c159766676a14f5815084ba166432aab46198d4cca98fa3e9981d0a90b2effc514b76279476550ba3663fdcaff94c38420e9d5000000000100093d00000000001976a9149a7b0f3b80c6baaeedce0a0842553800f832ba1f88ac00000000", + "P2SH" + ], + [ + "A nearly-standard transaction with CHECKSIGVERIFY 1 instead of CHECKSIG" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "DUP HASH160 0x14 0x5b6462475454710f3c22f5fdf0b40704c92f25c3 EQUALVERIFY CHECKSIGVERIFY 1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006a473044022067288ea50aa799543a536ff9306f8e1cba05b9c6b10951175b924f96732555ed022026d7b5265f38d21541519e4a1e55044d5b9e17e15cdbaf29ae3792e99e883e7a012103ba8c8b86dea131c22ab967e6dd99bdae8eff7a1f75a2c35f1f944109e3fe5e22ffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "The following is f7fdd091fa6d8f5e7a8c2458f5c38faffff2d3f1406b6e4fe2c99dcc0d2d1cbb" + ], + [ + "It caught a bug in the workaround for 23b397edccd3740a74adb603c9756370fafcde9bcc4483eb271ecad09a94dd63 in an overly simple implementation" + ], + [ + [ + [ + "b464e85df2a238416f8bdae11d120add610380ea07f4ef19c5f9dfd472f96c3d", + 0, + "DUP HASH160 0x14 0xbef80ecf3a44500fda1bc92176e442891662aed2 EQUALVERIFY CHECKSIG" + ], + [ + "b7978cc96e59a8b13e0865d3f95657561a7f725be952438637475920bac9eb21", + 1, + "DUP HASH160 0x14 0xbef80ecf3a44500fda1bc92176e442891662aed2 EQUALVERIFY CHECKSIG" + ] + ], + 
"01000000023d6cf972d4dff9c519eff407ea800361dd0a121de1da8b6f4138a2f25de864b4000000008a4730440220ffda47bfc776bcd269da4832626ac332adfca6dd835e8ecd83cd1ebe7d709b0e022049cffa1cdc102a0b56e0e04913606c70af702a1149dc3b305ab9439288fee090014104266abb36d66eb4218a6dd31f09bb92cf3cfa803c7ea72c1fc80a50f919273e613f895b855fb7465ccbc8919ad1bd4a306c783f22cd3227327694c4fa4c1c439affffffff21ebc9ba20594737864352e95b727f1a565756f9d365083eb1a8596ec98c97b7010000008a4730440220503ff10e9f1e0de731407a4a245531c9ff17676eda461f8ceeb8c06049fa2c810220c008ac34694510298fa60b3f000df01caa244f165b727d4896eb84f81e46bcc4014104266abb36d66eb4218a6dd31f09bb92cf3cfa803c7ea72c1fc80a50f919273e613f895b855fb7465ccbc8919ad1bd4a306c783f22cd3227327694c4fa4c1c439affffffff01f0da5200000000001976a914857ccd42dded6df32949d4646dfa10a92458cfaa88ac00000000", + "P2SH" + ], + [ + "An invalid P2SH Transaction" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x7a052c840ba73af26755de42cf01cc9e0a49fef0 EQUAL" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000009085768617420697320ffffffff010000000000000000015100000000", + "NONE" + ], + [ + "A valid P2SH Transaction using the standard transaction type put forth in BIP 16" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x8febbed40483661de6958d957412f82deed8e2f7 EQUAL" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100c66c9cdf4c43609586d15424c54707156e316d88b0a1534c9e6b0d4f311406310221009c0fe51dbc9c4ab7cc25d3fdbeccf6679fe6827f08edf2b4a9f16ee3eb0e438a0123210338e8034509af564c62644c07691942e0c056752008a173c89f60ab2a88ac2ebfacffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "Tests for CheckTransaction()" + ], + [ + "MAX_MONEY output" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 
0x32afac281462b822adbec5094b8d4d337dd5bd6a EQUAL" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006e493046022100e1eadba00d9296c743cb6ecc703fd9ddc9b3cd12906176a226ae4c18d6b00796022100a71aef7d2874deff681ba6080f1b278bac7bb99c61b08a85f4311970ffe7f63f012321030c0588dc44d92bdcbf8e72093466766fdc265ead8db64517b0c542275b70fffbacffffffff010040075af0750700015100000000", + "P2SH" + ], + [ + "MAX_MONEY output + 0 output" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0xb558cbf4930954aa6a344363a15668d7477ae716 EQUAL" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000006d483045022027deccc14aa6668e78a8c9da3484fbcd4f9dcc9bb7d1b85146314b21b9ae4d86022100d0b43dece8cfb07348de0ca8bc5b86276fa88f7f2138381128b7c36ab2e42264012321029bb13463ddd5d2cc05da6e84e37536cb9525703cfd8f43afdb414988987a92f6acffffffff020040075af075070001510000000000000000015100000000", + "P2SH" + ], + [ + "Coinbase of size 2" + ], + [ + "Note the input is just required to make the tester happy" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000000", + -1, + "1" + ] + ], + "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff025151ffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "Coinbase of size 100" + ], + [ + "Note the input is just required to make the tester happy" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000000", + -1, + "1" + ] + ], + "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff6451515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151515151ffffffff010000000000000000015100000000", + "P2SH" + ], + [ + "Simple transaction with first input is signed with SIGHASH_ALL, second with 
SIGHASH_ANYONECANPAY" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG" + ], + [ + "0000000000000000000000000000000000000000000000000000000000000200", + 0, + "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG" + ] + ], + "010000000200010000000000000000000000000000000000000000000000000000000000000000000049483045022100d180fd2eb9140aeb4210c9204d3f358766eb53842b2a9473db687fa24b12a3cc022079781799cd4f038b85135bbe49ec2b57f306b2bb17101b17f71f000fcab2b6fb01ffffffff0002000000000000000000000000000000000000000000000000000000000000000000004847304402205f7530653eea9b38699e476320ab135b74771e1c48b81a5d041e2ca84b9be7a802200ac8d1f40fb026674fe5a5edd3dea715c27baa9baca51ed45ea750ac9dc0a55e81ffffffff010100000000000000015100000000", + "P2SH" + ], + [ + "Same as above, but we change the sequence number of the first input to check that SIGHASH_ANYONECANPAY is being followed" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG" + ], + [ + "0000000000000000000000000000000000000000000000000000000000000200", + 0, + "0x21 0x035e7f0d4d0841bcd56c39337ed086b1a633ee770c1ffdd94ac552a95ac2ce0efc CHECKSIG" + ] + ], + "01000000020001000000000000000000000000000000000000000000000000000000000000000000004948304502203a0f5f0e1f2bdbcd04db3061d18f3af70e07f4f467cbc1b8116f267025f5360b022100c792b6e215afc5afc721a351ec413e714305cb749aae3d7fee76621313418df101010000000002000000000000000000000000000000000000000000000000000000000000000000004847304402205f7530653eea9b38699e476320ab135b74771e1c48b81a5d041e2ca84b9be7a802200ac8d1f40fb026674fe5a5edd3dea715c27baa9baca51ed45ea750ac9dc0a55e81ffffffff010100000000000000015100000000", + "P2SH" + ], + [ + "afd9c17f8913577ec3509520bd6e5d63e9c0fd2a5f70c787993b097ba6ca9fae which has several 
SIGHASH_SINGLE signatures" + ], + [ + [ + [ + "63cfa5a09dc540bf63e53713b82d9ea3692ca97cd608c384f2aa88e51a0aac70", + 0, + "DUP HASH160 0x14 0xdcf72c4fd02f5a987cf9b02f2fabfcac3341a87d EQUALVERIFY CHECKSIG" + ], + [ + "04e8d0fcf3846c6734477b98f0f3d4badfb78f020ee097a0be5fe347645b817d", + 1, + "DUP HASH160 0x14 0xdcf72c4fd02f5a987cf9b02f2fabfcac3341a87d EQUALVERIFY CHECKSIG" + ], + [ + "ee1377aff5d0579909e11782e1d2f5f7b84d26537be7f5516dd4e43373091f3f", + 1, + "DUP HASH160 0x14 0xdcf72c4fd02f5a987cf9b02f2fabfcac3341a87d EQUALVERIFY CHECKSIG" + ] + ], + "010000000370ac0a1ae588aaf284c308d67ca92c69a39e2db81337e563bf40c59da0a5cf63000000006a4730440220360d20baff382059040ba9be98947fd678fb08aab2bb0c172efa996fd8ece9b702201b4fb0de67f015c90e7ac8a193aeab486a1f587e0f54d0fb9552ef7f5ce6caec032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff7d815b6447e35fbea097e00e028fb7dfbad4f3f0987b4734676c84f3fcd0e804010000006b483045022100c714310be1e3a9ff1c5f7cacc65c2d8e781fc3a88ceb063c6153bf950650802102200b2d0979c76e12bb480da635f192cc8dc6f905380dd4ac1ff35a4f68f462fffd032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff3f1f097333e4d46d51f5e77b53264db8f7f5d2e18217e1099957d0f5af7713ee010000006c493046022100b663499ef73273a3788dea342717c2640ac43c5a1cf862c9e09b206fcb3f6bb8022100b09972e75972d9148f2bdd462e5cb69b57c1214b88fc55ca638676c07cfc10d8032103579ca2e6d107522f012cd00b52b9a65fb46f0c57b9b8b6e377c48f526a44741affffffff0380841e00000000001976a914bfb282c70c4191f45b5a6665cad1682f2c9cfdfb88ac80841e00000000001976a9149857cc07bed33a5cf12b9c5e0500b675d500c81188ace0fd1c00000000001976a91443c52850606c872403c0601e69fa34b26f62db4a88ac00000000", + "P2SH" + ], + [ + "ddc454a1c0c35c188c98976b17670f69e586d9c0f3593ea879928332f0a069e7, which spends an input that pushes using a PUSHDATA1 that is negative when read as signed" + ], + [ + [ + [ + "c5510a5dd97a25f43175af1fe649b707b1df8e1a41489bac33a23087027a2f48", + 0, + "0x4c 0xae 
0x606563686f2022553246736447566b58312b5a536e587574356542793066794778625456415675534a6c376a6a334878416945325364667657734f53474f36633338584d7439435c6e543249584967306a486956304f376e775236644546673d3d22203e20743b206f70656e73736c20656e63202d7061737320706173733a5b314a564d7751432d707269766b65792d6865785d202d64202d6165732d3235362d636263202d61202d696e207460 DROP DUP HASH160 0x14 0xbfd7436b6265aa9de506f8a994f881ff08cc2872 EQUALVERIFY CHECKSIG" + ] + ], + "0100000001482f7a028730a233ac9b48411a8edfb107b749e61faf7531f4257ad95d0a51c5000000008b483045022100bf0bbae9bde51ad2b222e87fbf67530fbafc25c903519a1e5dcc52a32ff5844e022028c4d9ad49b006dd59974372a54291d5764be541574bb0c4dc208ec51f80b7190141049dd4aad62741dc27d5f267f7b70682eee22e7e9c1923b9c0957bdae0b96374569b460eb8d5b40d972e8c7c0ad441de3d94c4a29864b212d56050acb980b72b2bffffffff0180969800000000001976a914e336d0017a9d28de99d16472f6ca6d5a3a8ebc9988ac00000000", + "P2SH" + ], + [ + "Correct signature order" + ], + [ + "Note the input is just required to make the tester happy" + ], + [ + [ + [ + "b3da01dd4aae683c7aee4d5d8b52a540a508e1115f77cd7fa9a291243f501223", + 0, + "HASH160 0x14 0xb1ce99298d5f07364b57b1e5c9cc00be0b04a954 EQUAL" + ] + ], + "01000000012312503f2491a2a97fcd775f11e108a540a5528b5d4dee7a3c68ae4add01dab300000000fdfe0000483045022100f6649b0eddfdfd4ad55426663385090d51ee86c3481bdc6b0c18ea6c0ece2c0b0220561c315b07cffa6f7dd9df96dbae9200c2dee09bf93cc35ca05e6cdf613340aa0148304502207aacee820e08b0b174e248abd8d7a34ed63b5da3abedb99934df9fddd65c05c4022100dfe87896ab5ee3df476c2655f9fbe5bd089dccbef3e4ea05b5d121169fe7f5f4014c695221031d11db38972b712a9fe1fc023577c7ae3ddb4a3004187d41c45121eecfdbb5b7210207ec36911b6ad2382860d32989c7b8728e9489d7bbc94a6b5509ef0029be128821024ea9fac06f666a4adc3fc1357b7bec1fd0bdece2b9d08579226a8ebde53058e453aeffffffff0180380100000000001976a914c9b99cddf847d10685a4fabaa0baf505f7c3dfab88ac00000000", + "P2SH" + ], + [ + "cc60b1f899ec0a69b7c3f25ddf32c4524096a9c5b01cbd84c6d0312a0c478984, which is a fairly strange transaction 
which relies on OP_CHECKSIG returning 0 when checking a completely invalid sig of length 0" + ], + [ + [ + [ + "cbebc4da731e8995fe97f6fadcd731b36ad40e5ecb31e38e904f6e5982fa09f7", + 0, + "0x2102085c6600657566acc2d6382a47bc3f324008d2aa10940dd7705a48aa2a5a5e33ac7c2103f5d0fb955f95dd6be6115ce85661db412ec6a08abcbfce7da0ba8297c6cc0ec4ac7c5379a820d68df9e32a147cffa36193c6f7c43a1c8c69cda530e1c6db354bfabdcfefaf3c875379a820f531f3041d3136701ea09067c53e7159c8f9b2746a56c3d82966c54bbc553226879a5479827701200122a59a5379827701200122a59a6353798277537982778779679a68" + ] + ], + "0100000001f709fa82596e4f908ee331cb5e0ed46ab331d7dcfaf697fe95891e73dac4ebcb000000008c20ca42095840735e89283fec298e62ac2ddea9b5f34a8cbb7097ad965b87568100201b1b01dc829177da4a14551d2fc96a9db00c6501edfa12f22cd9cefd335c227f483045022100a9df60536df5733dd0de6bc921fab0b3eee6426501b43a228afa2c90072eb5ca02201c78b74266fac7d1db5deff080d8a403743203f109fbcabf6d5a760bf87386d20100ffffffff01c075790000000000232103611f9a45c18f28f06f19076ad571c344c82ce8fcfe34464cf8085217a2d294a6ac00000000", + "P2SH" + ], + [ + "Empty pubkey" + ], + [ + [ + [ + "229257c295e7f555421c1bfec8538dd30a4b5c37c1c8810bbe83cafa7811652c", + 0, + "0x00 CHECKSIG NOT" + ] + ], + "01000000012c651178faca83be0b81c8c1375c4b0ad38d53c8fe1b1c4255f5e795c25792220000000049483045022100d6044562284ac76c985018fc4a90127847708c9edb280996c507b28babdc4b2a02203d74eca3f1a4d1eea7ff77b528fde6d5dc324ec2dbfdb964ba885f643b9704cd01ffffffff010100000000000000232102c2410f8891ae918cab4ffc4bb4a3b0881be67c7a1e7faa8b5acf9ab8932ec30cac00000000", + "P2SH" + ], + [ + "Empty signature" + ], + [ + [ + [ + "9ca93cfd8e3806b9d9e2ba1cf64e3cc6946ee0119670b1796a09928d14ea25f7", + 0, + "0x21 0x028a1d66975dbdf97897e3a4aef450ebeb5b5293e4a0b4a6d3a2daaa0b2b110e02 CHECKSIG NOT" + ] + ], + "0100000001f725ea148d92096a79b1709611e06e94c63c4ef61cbae2d9b906388efd3ca99c000000000100ffffffff0101000000000000002321028a1d66975dbdf97897e3a4aef450ebeb5b5293e4a0b4a6d3a2daaa0b2b110e02ac00000000", + "P2SH" + ], + [ + [ + [ + 
"444e00ed7840d41f20ecd9c11d3f91982326c731a02f3c05748414a4fa9e59be", + 0, + "1 0x00 0x21 0x02136b04758b0b6e363e7a6fbe83aaf527a153db2b060d36cc29f7f8309ba6e458 2 CHECKMULTISIG" + ] + ], + "0100000001be599efaa4148474053c2fa031c7262398913f1dc1d9ec201fd44078ed004e440000000048473044022022b29706cb2ed9ef0cb3c97b72677ca2dfd7b4160f7b4beb3ba806aa856c401502202d1e52582412eba2ed474f1f437a427640306fd3838725fab173ade7fe4eae4a01ffffffff010100000000000000232103ac4bba7e7ca3e873eea49e08132ad30c7f03640b6539e9b59903cf14fd016bbbac00000000", + "P2SH" + ], + [ + [ + [ + "e16abbe80bf30c080f63830c8dbf669deaef08957446e95940227d8c5e6db612", + 0, + "1 0x21 0x03905380c7013e36e6e19d305311c1b81fce6581f5ee1c86ef0627c68c9362fc9f 0x00 2 CHECKMULTISIG" + ] + ], + "010000000112b66d5e8c7d224059e946749508efea9d66bf8d0c83630f080cf30be8bb6ae1000000004847304402206ffe3f14caf38ad5c1544428e99da76ffa5455675ec8d9780fac215ca17953520220779502985e194d84baa36b9bd40a0dbd981163fa191eb884ae83fc5bd1c86b1101ffffffff010100000000000000232103905380c7013e36e6e19d305311c1b81fce6581f5ee1c86ef0627c68c9362fc9fac00000000", + "P2SH" + ], + [ + [ + [ + "ebbcf4bfce13292bd791d6a65a2a858d59adbf737e387e40370d4e64cc70efb0", + 0, + "2 0x21 0x033bcaa0a602f0d44cc9d5637c6e515b0471db514c020883830b7cefd73af04194 0x21 0x03a88b326f8767f4f192ce252afe33c94d25ab1d24f27f159b3cb3aa691ffe1423 2 CHECKMULTISIG NOT" + ] + ], + "0100000001b0ef70cc644e0d37407e387e73bfad598d852a5aa6d691d72b2913cebff4bceb0000000049473044022068cd4851fc7f9a892ab910df7a24e616f293bcb5c5fbdfbc304a194b26b60fba022078e6da13d8cb881a22939b952c24f88b97afd06b4c47a47d7f804c9a352a6d6d0100ffffffff0101000000000000002321033bcaa0a602f0d44cc9d5637c6e515b0471db514c020883830b7cefd73af04194ac00000000", + "P2SH" + ], + [ + [ + [ + "ba4cd7ae2ad4d4d13ebfc8ab1d93a63e4a6563f25089a18bf0fc68f282aa88c1", + 0, + "2 0x21 0x037c615d761e71d38903609bf4f46847266edc2fb37532047d747ba47eaae5ffe1 0x21 0x02edc823cd634f2c4033d94f5755207cb6b60c4b1f1f056ad7471c47de5f2e4d50 2 CHECKMULTISIG NOT" + ] + ], + 
"0100000001c188aa82f268fcf08ba18950f263654a3ea6931dabc8bf3ed1d4d42aaed74cba000000004a00483045022100940378576e069aca261a6b26fb38344e4497ca6751bb10905c76bb689f4222b002204833806b014c26fd801727b792b1260003c55710f87c5adbd7a9cb57446dbc9801ffffffff0101000000000000002321037c615d761e71d38903609bf4f46847266edc2fb37532047d747ba47eaae5ffe1ac00000000", + "P2SH" + ], + [ + "CHECKSIG is legal in scriptSigs" + ], + [ + [ + [ + "ccf7f4053a02e653c36ac75c891b7496d0dc5ce5214f6c913d9cf8f1329ebee0", + 0, + "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG" + ] + ], + "0100000001e0be9e32f1f89c3d916c4f21e55cdcd096741b895cc76ac353e6023a05f4f7cc00000000d86149304602210086e5f736a2c3622ebb62bd9d93d8e5d76508b98be922b97160edc3dcca6d8c47022100b23c312ac232a4473f19d2aeb95ab7bdf2b65518911a0d72d50e38b5dd31dc820121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac4730440220508fa761865c8abd81244a168392876ee1d94e8ed83897066b5e2df2400dad24022043f5ee7538e87e9c6aef7ef55133d3e51da7cc522830a9c4d736977a76ef755c0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", + "P2SH" + ], + [ + "CHECKLOCKTIMEVERIFY tests" + ], + [ + "By-height locks, with argument == 0 and == tx nLockTime" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "499999999 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKLOCKTIMEVERIFY 1" + ] + ], + 
"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ff64cd1d", + "P2SH" + ], + [ + "By-time locks, with argument just beyond tx nLockTime (but within numerical boundaries)" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "500000000 CHECKLOCKTIMEVERIFY 1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "500000000 CHECKLOCKTIMEVERIFY 1" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000ffffffff", + "P2SH" + ], + [ + "Any non-maxint nSequence is fine" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000feffffff0100000000000000000000000000", + "P2SH" + ], + [ + "The argument can be calculated rather than created directly by a PUSHDATA" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "499999999 1ADD CHECKLOCKTIMEVERIFY 1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000065cd1d", + "P2SH" + ], + [ + "Perhaps even by an ADD producing a 5-byte result that is out of bounds for other opcodes" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483647 2147483647 ADD CHECKLOCKTIMEVERIFY 1" + ] + ], + 
"0100000001000100000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000feffffff", + "P2SH" + ], + [ + "5 byte non-minimally-encoded arguments are valid" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x05 0x0000000000 CHECKLOCKTIMEVERIFY 1" + ] + ], + "010000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "Valid CHECKLOCKTIMEVERIFY in scriptSig" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1" + ] + ], + "01000000010001000000000000000000000000000000000000000000000000000000000000000000000251b0000000000100000000000000000001000000", + "P2SH" + ], + [ + "Valid CHECKLOCKTIMEVERIFY in redeemScript" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x105469277736a89dc89ce794f6db2ff926ce3b EQUAL" + ] + ], + "0100000001000100000000000000000000000000000000000000000000000000000000000000000000040351b051000000000100000000000000000001000000", + "P2SH" + ], + [ + "A transaction with a non-standard DER signature." 
+ ], + [ + [ + [ + "b1dbc81696c8a9c0fccd0693ab66d7c368dbc38c0def4e800685560ddd1b2132", + 0, + "DUP HASH160 0x14 0x4b3bd7eba3bc0284fd3007be7f3be275e94f5826 EQUALVERIFY CHECKSIG" + ] + ], + "010000000132211bdd0d568506804eef0d8cc3db68c3d766ab9306cdfcc0a9c89616c8dbb1000000006c493045022100c7bb0faea0522e74ff220c20c022d2cb6033f8d167fb89e75a50e237a35fd6d202203064713491b1f8ad5f79e623d0219ad32510bfaa1009ab30cbee77b59317d6e30001210237af13eb2d84e4545af287b919c2282019c9691cc509e78e196a9d8274ed1be0ffffffff0100000000000000001976a914f1b3ed2eda9a2ebe5a9374f692877cdf87c0f95b88ac00000000", + "P2SH" + ], + [ + "CHECKSEQUENCEVERIFY tests" + ], + [ + "By-height locks, with argument == 0 and == txin.nSequence" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "65535 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffff00000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "65535 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000", + "P2SH" + ], + [ + "By-time locks, with argument == 0 and == txin.nSequence" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4194304 CHECKSEQUENCEVERIFY 1" + ] + ], + 
"020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4259839 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffff40000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4259839 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4194304 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000", + "P2SH" + ], + [ + "Upper sequence with upper sequence is fine" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000800100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000800100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000feffffff0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKSEQUENCEVERIFY 1" + ] + ], + 
"020000000100010000000000000000000000000000000000000000000000000000000000000000000000feffffff0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", + "P2SH" + ], + [ + "Argument 2^31 with various nSequence" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483648 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", + "P2SH" + ], + [ + "Argument 2^32-1 with various nSequence" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKSEQUENCEVERIFY 
1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4294967295 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", + "P2SH" + ], + [ + "Argument 3<<31 with various nSequence" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "6442450944 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffbf7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "6442450944 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffff7f0100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "6442450944 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff0100000000000000000000000000", + "P2SH" + ], + [ + "5 byte non-minimally-encoded operandss are valid" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "0x05 0x0000000000 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + "The argument can be calculated rather than created directly by a PUSHDATA" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4194303 1ADD CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], 
+ [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "4194304 1SUB CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000ffff00000100000000000000000000000000", + "P2SH" + ], + [ + "An ADD producing a 5-byte result that sets CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483647 65536 CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000", + "P2SH" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "2147483647 4259840 ADD CHECKSEQUENCEVERIFY 1" + ] + ], + "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", + "P2SH" + ], + [ + "Valid CHECKSEQUENCEVERIFY in scriptSig" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "1" + ] + ], + "02000000010001000000000000000000000000000000000000000000000000000000000000000000000251b2010000000100000000000000000000000000", + "P2SH" + ], + [ + "Valid CHECKSEQUENCEVERIFY in redeemScript" + ], + [ + [ + [ + "0000000000000000000000000000000000000000000000000000000000000100", + 0, + "HASH160 0x14 0x7c17aff532f22beb54069942f9bf567a66133eaf EQUAL" + ] + ], + "0200000001000100000000000000000000000000000000000000000000000000000000000000000000030251b2010000000100000000000000000000000000", + "P2SH" + ], + [ + "Make diffs cleaner by leaving a comment here without comma at the end" + ] +] diff --git a/domain/consensus/utils/txscript/doc.go b/domain/consensus/utils/txscript/doc.go new file mode 100644 index 0000000..41718ca --- /dev/null +++ b/domain/consensus/utils/txscript/doc.go @@ -0,0 +1,34 @@ +/* +Package txscript implements the spectre transaction script language. 
+ +This package provides data structures and functions to parse and execute +spectre transaction scripts. + +# Script Overview + +Spectre transaction scripts are written in a stack-based, FORTH-like language. + +The spectre script language consists of a number of opcodes which fall into +several categories such as pushing and popping data to and from the stack, +performing basic and bitwise arithmetic, conditional branching, comparing +hashes, and checking cryptographic signatures. Scripts are processed from left +to right and intentionally do not provide loops. + +Typical spectre scripts at the time of this writing are of several standard +forms which consist of a spender providing a public key and a signature +which proves the spender owns the associated private key. This information +is used to prove that the spender is authorized to perform the transaction. + +One benefit of using a scripting language is added flexibility in specifying +what conditions must be met in order to spend spectre. + +# Errors + +Errors returned by this package are of type txscript.Error. This allows the +caller to programmatically determine the specific error by examining the +ErrorCode field of the type asserted txscript.Error while still providing rich +error messages with contextual information. A convenience function named +IsErrorCode is also provided to allow callers to easily check for a specific +error code. See ErrorCode in the package documentation for a full list. +*/ +package txscript diff --git a/domain/consensus/utils/txscript/engine.go b/domain/consensus/utils/txscript/engine.go new file mode 100644 index 0000000..ad760c9 --- /dev/null +++ b/domain/consensus/utils/txscript/engine.go @@ -0,0 +1,515 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file.
+ +package txscript + +import ( + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// ScriptFlags is a bitmask defining additional operations or tests that will be +// done when executing a script pair. +type ScriptFlags uint32 + +const ( + // ScriptNoFlags is used when you want to use ScriptFlags without raising any flags + ScriptNoFlags ScriptFlags = 0 +) + +const ( + // MaxStackSize is the maximum combined height of stack and alt stack + // during execution. + MaxStackSize = 244 + + // MaxScriptSize is the maximum allowed length of a raw script. + MaxScriptSize = 10000 +) + +// Engine is the virtual machine that executes scripts. +type Engine struct { + scriptVersion uint16 + scripts [][]parsedOpcode + scriptIdx int + scriptOff int + dstack stack // data stack + astack stack // alt stack + tx externalapi.DomainTransaction + txIdx int + condStack []int + numOps int + flags ScriptFlags + sigCache *SigCache + sigCacheECDSA *SigCacheECDSA + sigHashReusedValues *consensushashing.SighashReusedValues + isP2SH bool // treat execution as pay-to-script-hash + savedFirstStack [][]byte // stack from first script for p2sh scripts +} + +// hasFlag returns whether the script engine instance has the passed flag set. +func (vm *Engine) hasFlag(flag ScriptFlags) bool { + return vm.flags&flag == flag +} + +// isBranchExecuting returns whether or not the current conditional branch is +// actively executing. For example, when the data stack has an OP_FALSE on it +// and an OP_IF is encountered, the branch is inactive until an OP_ELSE or +// OP_ENDIF is encountered. It properly handles nested conditionals.
+func (vm *Engine) isBranchExecuting() bool { + if len(vm.condStack) == 0 { + return true + } + return vm.condStack[len(vm.condStack)-1] == OpCondTrue +} + +// executeOpcode performs execution on the passed opcode. It takes into account +// whether or not it is hidden by conditionals, but some rules still must be +// tested in this case. +func (vm *Engine) executeOpcode(pop *parsedOpcode) error { + // Disabled opcodes are fail on program counter. + if pop.isDisabled() { + str := fmt.Sprintf("attempt to execute disabled opcode %s", + pop.opcode.name) + return scriptError(ErrDisabledOpcode, str) + } + + // Always-illegal opcodes are fail on program counter. + if pop.alwaysIllegal() { + str := fmt.Sprintf("attempt to execute reserved opcode %s", + pop.opcode.name) + return scriptError(ErrReservedOpcode, str) + } + + // Note that this includes OP_RESERVED which counts as a push operation. + if pop.opcode.value > Op16 { + vm.numOps++ + if vm.numOps > MaxOpsPerScript { + str := fmt.Sprintf("exceeded max operation limit of %d", + MaxOpsPerScript) + return scriptError(ErrTooManyOperations, str) + } + + } else if len(pop.data) > MaxScriptElementSize { + str := fmt.Sprintf("element size %d exceeds max allowed size %d", + len(pop.data), MaxScriptElementSize) + return scriptError(ErrElementTooBig, str) + } + + // Nothing left to do when this is not a conditional opcode and it is + // not in an executing branch. + if !vm.isBranchExecuting() && !pop.isConditional() { + return nil + } + + // Ensure all executed data push opcodes use the minimal encoding when + // the minimal data verification flag is set. + if vm.isBranchExecuting() && + pop.opcode.value != 0 && pop.opcode.value <= OpPushData4 { + + if err := pop.checkMinimalDataPush(); err != nil { + return err + } + } + + return pop.opcode.opfunc(pop, vm) +} + +// disasm is a helper function to produce the output for DisasmPC and +// DisasmScript.
It produces the opcode prefixed by the program counter at the +// provided position in the script. It does no error checking and leaves that +// to the caller to provide a valid offset. +func (vm *Engine) disasm(scriptIdx int, scriptOff int) string { + return fmt.Sprintf("%02x:%04x: %s", scriptIdx, scriptOff, + vm.scripts[scriptIdx][scriptOff].print(false)) +} + +// validPC returns an error if the current script position is valid for +// execution, nil otherwise. +func (vm *Engine) validPC() error { + if vm.scriptIdx >= len(vm.scripts) { + str := fmt.Sprintf("past input scripts %d:%d %d:xxxx", + vm.scriptIdx, vm.scriptOff, len(vm.scripts)) + return scriptError(ErrInvalidProgramCounter, str) + } + if vm.scriptOff >= len(vm.scripts[vm.scriptIdx]) { + str := fmt.Sprintf("past input scripts %d:%d %d:%04d", + vm.scriptIdx, vm.scriptOff, vm.scriptIdx, + len(vm.scripts[vm.scriptIdx])) + return scriptError(ErrInvalidProgramCounter, str) + } + return nil +} + +// curPC returns either the current script and offset, or an error if the +// position isn't valid. +func (vm *Engine) curPC() (script int, off int, err error) { + err = vm.validPC() + if err != nil { + return 0, 0, err + } + return vm.scriptIdx, vm.scriptOff, nil +} + +// DisasmPC returns the string for the disassembly of the opcode that will be +// next to execute when Step() is called. +func (vm *Engine) DisasmPC() (string, error) { + scriptIdx, scriptOff, err := vm.curPC() + if err != nil { + return "", err + } + return vm.disasm(scriptIdx, scriptOff), nil +} + +// DisasmScript returns the disassembly string for the script at the requested +// offset index. Index 0 is the signature script and 1 is the public key +// script. 
+func (vm *Engine) DisasmScript(idx int) (string, error) { + if idx < 0 { + str := fmt.Sprintf("script index %d < 0", idx) + return "", scriptError(ErrInvalidIndex, str) + } + if idx >= len(vm.scripts) { + str := fmt.Sprintf("script index %d >= total scripts %d", idx, + len(vm.scripts)) + return "", scriptError(ErrInvalidIndex, str) + } + + var disstr string + for i := range vm.scripts[idx] { + disstr = disstr + vm.disasm(idx, i) + "\n" + } + return disstr, nil +} + +// CheckErrorCondition returns nil if the running script has ended and was +// successful, leaving a true boolean on the stack. An error otherwise, +// including if the script has not finished. +func (vm *Engine) CheckErrorCondition(finalScript bool) error { + // Check execution is actually done. When pc is past the end of script + // array there are no more scripts to run. + if vm.scriptIdx < len(vm.scripts) { + return scriptError(ErrScriptUnfinished, + "error check when script unfinished") + } + + if finalScript { + if vm.dstack.Depth() > 1 { + str := fmt.Sprintf("stack contains %d unexpected items", + vm.dstack.Depth()-1) + return scriptError(ErrCleanStack, str) + } else if vm.dstack.Depth() < 1 { + return scriptError(ErrEmptyStack, + "stack empty at end of script execution") + } + } + + v, err := vm.dstack.PopBool() + if err != nil { + return err + } + if !v { + // Log interesting data. + log.Tracef("%s", logger.NewLogClosure(func() string { + dis0, _ := vm.DisasmScript(0) + dis1, _ := vm.DisasmScript(1) + return fmt.Sprintf("scripts failed: script0: %s\n"+ + "script1: %s", dis0, dis1) + })) + return scriptError(ErrEvalFalse, + "false stack entry at end of script execution") + } + return nil +} + +// Step will execute the next instruction and move the program counter to the +// next opcode in the script, or the next script if the current has ended. Step +// will return true in the case that the last opcode was successfully executed.
+// +// The result of calling Step or any other method is undefined if an error is +// returned. +func (vm *Engine) Step() (done bool, err error) { + // Verify that it is pointing to a valid script address. + err = vm.validPC() + if err != nil { + return true, err + } + opcode := &vm.scripts[vm.scriptIdx][vm.scriptOff] + vm.scriptOff++ + + // Execute the opcode while taking into account several things such as + // disabled opcodes, illegal opcodes, maximum allowed operations per + // script, maximum script element sizes, and conditionals. + err = vm.executeOpcode(opcode) + if err != nil { + return true, err + } + + // The number of elements in the combination of the data and alt stacks + // must not exceed the maximum number of stack elements allowed. + combinedStackSize := vm.dstack.Depth() + vm.astack.Depth() + if combinedStackSize > MaxStackSize { + str := fmt.Sprintf("combined stack size %d > max allowed %d", + combinedStackSize, MaxStackSize) + return false, scriptError(ErrStackOverflow, str) + } + + // Prepare for next instruction. + if vm.scriptOff >= len(vm.scripts[vm.scriptIdx]) { + // Illegal to have an `if' that straddles two scripts. + if err == nil && len(vm.condStack) != 0 { + return false, scriptError(ErrUnbalancedConditional, + "end of script reached in conditional execution") + } + + // Alt stack doesn't persist. + _ = vm.astack.DropN(vm.astack.Depth()) + + vm.numOps = 0 // number of ops is per script. + vm.scriptOff = 0 + if vm.scriptIdx == 0 && vm.isP2SH { + vm.scriptIdx++ + vm.savedFirstStack = vm.GetStack() + } else if vm.scriptIdx == 1 && vm.isP2SH { + // Put us past the end for CheckErrorCondition() + vm.scriptIdx++ + // Check script ran successfully and pull the script + // out of the first stack and execute that. 
+ err := vm.CheckErrorCondition(false) + if err != nil { + return false, err + } + + script := vm.savedFirstStack[len(vm.savedFirstStack)-1] + pops, err := parseScript(script) + if err != nil { + return false, err + } + vm.scripts = append(vm.scripts, pops) + + // Set stack to be the stack from first script minus the + // script itself + vm.SetStack(vm.savedFirstStack[:len(vm.savedFirstStack)-1]) + } else { + vm.scriptIdx++ + } + // there are zero length scripts in the wild + if vm.scriptIdx < len(vm.scripts) && vm.scriptOff >= len(vm.scripts[vm.scriptIdx]) { + vm.scriptIdx++ + } + if vm.scriptIdx >= len(vm.scripts) { + return true, nil + } + } + return false, nil +} + +// Execute will execute all scripts in the script engine and return either nil +// for successful validation or an error if one occurred. +func (vm *Engine) Execute() (err error) { + if vm.scriptVersion > constants.MaxScriptPublicKeyVersion { + log.Tracef("The version of the scriptPublicKey is higher than the known version - the Execute function returns true.") + return nil + } + done := false + for !done { + log.Tracef("%s", logger.NewLogClosure(func() string { + dis, err := vm.DisasmPC() + if err != nil { + return fmt.Sprintf("stepping (%s)", err) + } + return fmt.Sprintf("stepping %s", dis) + })) + + done, err = vm.Step() + if err != nil { + return err + } + log.Tracef("%s", logger.NewLogClosure(func() string { + var dstr, astr string + + // if we're tracing, dump the stacks. + if vm.dstack.Depth() != 0 { + dstr = "Stack:\n" + vm.dstack.String() + } + if vm.astack.Depth() != 0 { + astr = "AltStack:\n" + vm.astack.String() + } + + return dstr + astr + })) + } + + return vm.CheckErrorCondition(true) +} + +// currentScript returns the script currently being processed. +func (vm *Engine) currentScript() []parsedOpcode { + return vm.scripts[vm.scriptIdx] +} + +// checkPubKeyEncoding returns whether or not the passed public key adheres to +// the strict encoding requirements if enabled. 
+func (vm *Engine) checkPubKeyEncoding(pubKey []byte) error {
+	// Schnorr public keys are exactly 32 bytes (x-only); anything else
+	// is rejected.
+	if len(pubKey) == 32 {
+		return nil
+	}
+
+	return scriptError(ErrPubKeyFormat, "unsupported public key type")
+}
+
+// checkPubKeyEncodingECDSA returns whether or not the passed public key
+// adheres to the ECDSA encoding requirements: a 33-byte compressed key.
+func (vm *Engine) checkPubKeyEncodingECDSA(pubKey []byte) error {
+	if len(pubKey) == 33 {
+		return nil
+	}
+
+	return scriptError(ErrPubKeyFormat, "unsupported public key type")
+}
+
+// checkSignatureLength returns whether or not the passed signature is
+// in the correct Schnorr format.
+func (vm *Engine) checkSignatureLength(sig []byte) error {
+	if len(sig) != 64 {
+		message := fmt.Sprintf("invalid signature length %d", len(sig))
+		return scriptError(ErrSigLength, message)
+	}
+	return nil
+}
+
+// checkSignatureLengthECDSA returns whether or not the passed signature is
+// a 64-byte compact ECDSA signature.
+func (vm *Engine) checkSignatureLengthECDSA(sig []byte) error {
+	if len(sig) != 64 {
+		message := fmt.Sprintf("invalid signature length %d", len(sig))
+		return scriptError(ErrSigLength, message)
+	}
+	return nil
+}
+
+// getStack returns the contents of stack as a byte array bottom up
+func getStack(stack *stack) [][]byte {
+	array := make([][]byte, stack.Depth())
+	for i := range array {
+		// PeekByteArray can't fail due to overflow, already checked
+		array[len(array)-i-1], _ = stack.PeekByteArray(int32(i))
+	}
+	return array
+}
+
+// setStack sets the stack to the contents of the array where the last item in
+// the array is the top item in the stack.
+func setStack(stack *stack, data [][]byte) {
+	// This can not error. Only errors are for invalid arguments.
+	_ = stack.DropN(stack.Depth())
+
+	for i := range data {
+		stack.PushByteArray(data[i])
+	}
+}
+
+// GetStack returns the contents of the primary stack as an array. where the
+// last item in the array is the top of the stack.
+func (vm *Engine) GetStack() [][]byte {
+	return getStack(&vm.dstack)
+}
+
+// SetStack sets the contents of the primary stack to the contents of the
+// provided array where the last item in the array will be the top of the stack.
+func (vm *Engine) SetStack(data [][]byte) { + setStack(&vm.dstack, data) +} + +// GetAltStack returns the contents of the alternate stack as an array where the +// last item in the array is the top of the stack. +func (vm *Engine) GetAltStack() [][]byte { + return getStack(&vm.astack) +} + +// SetAltStack sets the contents of the alternate stack to the contents of the +// provided array where the last item in the array will be the top of the stack. +func (vm *Engine) SetAltStack(data [][]byte) { + setStack(&vm.astack, data) +} + +// NewEngine returns a new script engine for the provided public key script, +// transaction, and input index. The flags modify the behavior of the script +// engine according to the description provided by each flag. +func NewEngine(scriptPubKey *externalapi.ScriptPublicKey, tx *externalapi.DomainTransaction, txIdx int, flags ScriptFlags, + sigCache *SigCache, sigCacheECDSA *SigCacheECDSA, sighashReusedValues *consensushashing.SighashReusedValues) (*Engine, error) { + + // The provided transaction input index must refer to a valid input. + if txIdx < 0 || txIdx >= len(tx.Inputs) { + str := fmt.Sprintf("transaction input index %d is negative or "+ + ">= %d", txIdx, len(tx.Inputs)) + return nil, scriptError(ErrInvalidIndex, str) + } + scriptSig := tx.Inputs[txIdx].SignatureScript + + // When both the signature script and public key script are empty the + // result is necessarily an error since the stack would end up being + // empty which is equivalent to a false top element. Thus, just return + // the relevant error now as an optimization. 
+ if len(scriptSig) == 0 && len(scriptPubKey.Script) == 0 { + return nil, scriptError(ErrEvalFalse, + "false stack entry at end of script execution") + } + vm := Engine{scriptVersion: scriptPubKey.Version, flags: flags, sigCache: sigCache, sigCacheECDSA: sigCacheECDSA} + + if vm.scriptVersion > constants.MaxScriptPublicKeyVersion { + return &vm, nil + } + parsedScriptSig, err := parseScriptAndVerifySize(scriptSig) + if err != nil { + return nil, err + } + // The signature script must only contain data pushes + if !isPushOnly(parsedScriptSig) { + return nil, scriptError(ErrNotPushOnly, + "signature script is not push only") + } + + parsedScriptPubKey, err := parseScriptAndVerifySize(scriptPubKey.Script) + if err != nil { + return nil, err + } + + // The engine stores the scripts in parsed form using a slice. This + // allows multiple scripts to be executed in sequence. For example, + // with a pay-to-script-hash transaction, there will be ultimately be + // a third script to execute. + vm.scripts = [][]parsedOpcode{parsedScriptSig, parsedScriptPubKey} + + // Advance the program counter to the public key script if the signature + // script is empty since there is nothing to execute for it in that + // case. + if len(scriptSig) == 0 { + vm.scriptIdx++ + } + + if isScriptHash(vm.scripts[1]) { + // Only accept input scripts that push data for P2SH. 
+ if !isPushOnly(vm.scripts[0]) { + return nil, scriptError(ErrNotPushOnly, + "pay to script hash is not push only") + } + vm.isP2SH = true + } + + vm.tx = *tx + vm.txIdx = txIdx + + vm.sigHashReusedValues = sighashReusedValues + + return &vm, nil +} + +func parseScriptAndVerifySize(script []byte) ([]parsedOpcode, error) { + if len(script) > MaxScriptSize { + str := fmt.Sprintf("script size %d is larger than max "+ + "allowed size %d", len(script), MaxScriptSize) + return nil, scriptError(ErrScriptTooBig, str) + } + return parseScript(script) +} diff --git a/domain/consensus/utils/txscript/engine_test.go b/domain/consensus/utils/txscript/engine_test.go new file mode 100644 index 0000000..b2c28cf --- /dev/null +++ b/domain/consensus/utils/txscript/engine_test.go @@ -0,0 +1,344 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestBadPC sets the pc to a deliberately bad result then confirms that Step() +// and Disasm fail correctly. +func TestBadPC(t *testing.T) { + t.Parallel() + + tests := []struct { + script, off int + }{ + {script: 2, off: 0}, + {script: 0, off: 2}, + } + + // tx with almost empty scripts. 
+ inputs := []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc9, 0x97, 0xa5, 0xe5, + 0x6e, 0x10, 0x41, 0x02, + 0xfa, 0x20, 0x9c, 0x6a, + 0x85, 0x2d, 0xd9, 0x06, + 0x60, 0xa2, 0x0b, 0x2d, + 0x9c, 0x35, 0x24, 0x23, + 0xed, 0xce, 0x25, 0x85, + 0x7f, 0xcd, 0x37, 0x04, + }), + Index: 0, + }, + SignatureScript: mustParseShortForm("", 0), + Sequence: 4294967295, + }, + } + outputs := []*externalapi.DomainTransactionOutput{{ + Value: 1000000000, + ScriptPublicKey: nil, + }} + tx := &externalapi.DomainTransaction{ + Version: 1, + Inputs: inputs, + Outputs: outputs, + } + scriptPubKey := &externalapi.ScriptPublicKey{Script: mustParseShortForm("NOP", 0), Version: 0} + + for _, test := range tests { + vm, err := NewEngine(scriptPubKey, tx, 0, 0, nil, nil, &consensushashing.SighashReusedValues{}) + if err != nil { + t.Errorf("Failed to create script: %v", err) + } + + // set to after all scripts + vm.scriptIdx = test.script + vm.scriptOff = test.off + + _, err = vm.Step() + if err == nil { + t.Errorf("Step with invalid pc (%v) succeeds!", test) + continue + } + + _, err = vm.DisasmPC() + if err == nil { + t.Errorf("DisasmPC with invalid pc (%v) succeeds!", + test) + } + } +} + +func TestCheckErrorCondition(t *testing.T) { + tests := []struct { + script string + finalScript bool + stepCount int + expectedErr error + }{ + {"OP_1", true, 1, nil}, + {"NOP", true, 0, scriptError(ErrScriptUnfinished, "")}, + {"NOP", true, 1, scriptError(ErrEmptyStack, "")}, + {"OP_1 OP_1", true, 2, scriptError(ErrCleanStack, "")}, + {"OP_0", true, 1, scriptError(ErrEvalFalse, "")}, + } + + for i, test := range tests { + func() { + inputs := []*externalapi.DomainTransactionInput{{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc9, 0x97, 0xa5, 0xe5, + 
0x6e, 0x10, 0x41, 0x02, + 0xfa, 0x20, 0x9c, 0x6a, + 0x85, 0x2d, 0xd9, 0x06, + 0x60, 0xa2, 0x0b, 0x2d, + 0x9c, 0x35, 0x24, 0x23, + 0xed, 0xce, 0x25, 0x85, + 0x7f, 0xcd, 0x37, 0x04, + }), + Index: 0, + }, + SignatureScript: nil, + Sequence: 4294967295, + }} + outputs := []*externalapi.DomainTransactionOutput{{ + Value: 1000000000, + ScriptPublicKey: nil, + }} + tx := &externalapi.DomainTransaction{ + Version: 1, + Inputs: inputs, + Outputs: outputs, + } + + scriptPubKey := &externalapi.ScriptPublicKey{Script: mustParseShortForm(test.script, 0), Version: 0} + + vm, err := NewEngine(scriptPubKey, tx, 0, 0, nil, nil, &consensushashing.SighashReusedValues{}) + if err != nil { + t.Errorf("TestCheckErrorCondition: %d: failed to create script: %v", i, err) + } + + for j := 0; j < test.stepCount; j++ { + _, err = vm.Step() + if err != nil { + t.Errorf("TestCheckErrorCondition: %d: failed to execute step No. %d: %v", i, j+1, err) + return + } + + if j != test.stepCount-1 { + err = vm.CheckErrorCondition(false) + if !IsErrorCode(err, ErrScriptUnfinished) { + t.Fatalf("TestCheckErrorCondition: %d: got unexepected error %v on %dth iteration", + i, err, j) + return + } + } + } + + err = vm.CheckErrorCondition(test.finalScript) + if e := checkScriptError(err, test.expectedErr); e != nil { + t.Errorf("TestCheckErrorCondition: %d: %s", i, e) + } + }() + } +} + +// TestCheckPubKeyEncoding ensures the internal checkPubKeyEncoding function +// works as expected. 
+// TestCheckPubKeyEncoding exercises Engine.checkPubKeyEncoding with
+// uncompressed, compressed, hybrid, 32-byte (valid) and empty keys, checking
+// that only the 32-byte form is accepted.
+func TestCheckPubKeyEncoding(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name    string
+		key     []byte
+		isValid bool
+	}{
+		{
+			name: "uncompressed - invalid",
+			key: hexToBytes("0411db93e1dcdb8a016b49840f8c53bc1eb68" +
+				"a382e97b1482ecad7b148a6909a5cb2e0eaddfb84ccf" +
+				"9744464f82e160bfa9b8b64f9d4c03f999b8643f656b" +
+				"412a3"),
+			isValid: false,
+		},
+		{
+			name: "compressed - invalid",
+			key: hexToBytes("02ce0b14fb842b1ba549fdd675c98075f12e9" +
+				"c510f8ef52bd021a9a1f4809d3b4d"),
+			isValid: false,
+		},
+		{
+			name: "compressed - invalid",
+			key: hexToBytes("032689c7c2dab13309fb143e0e8fe39634252" +
+				"1887e976690b6b47f5b2a4b7d448e"),
+			isValid: false,
+		},
+		{
+			name: "hybrid - invalid",
+			key: hexToBytes("0679be667ef9dcbbac55a06295ce870b07029" +
+				"bfcdb2dce28d959f2815b16f81798483ada7726a3c46" +
+				"55da4fbfc0e1108a8fd17b448a68554199c47d08ffb1" +
+				"0d4b8"),
+			isValid: false,
+		},
+		{
+			name:    "32 bytes pubkey - Ok",
+			key:     hexToBytes("2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e"),
+			isValid: true,
+		},
+		{
+			name:    "empty",
+			key:     nil,
+			isValid: false,
+		},
+	}
+
+	vm := Engine{}
+	for _, test := range tests {
+		err := vm.checkPubKeyEncoding(test.key)
+		if err != nil && test.isValid {
+			// Fixed: the failure messages previously referenced
+			// checkSignatureLength / "checkSignatureEncooding"
+			// (copy-paste from another test); this test exercises
+			// checkPubKeyEncoding.
+			t.Errorf("checkPubKeyEncoding test '%s' failed "+
+				"when it should have succeeded: %v", test.name,
+				err)
+		} else if err == nil && !test.isValid {
+			t.Errorf("checkPubKeyEncoding test '%s' succeeded "+
+				"when it should have failed", test.name)
+		}
+	}
+
+}
+
+func TestDisasmPC(t *testing.T) {
+	t.Parallel()
+
+	// tx with almost empty scripts.
+ inputs := []*externalapi.DomainTransactionInput{{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc9, 0x97, 0xa5, 0xe5, + 0x6e, 0x10, 0x41, 0x02, + 0xfa, 0x20, 0x9c, 0x6a, + 0x85, 0x2d, 0xd9, 0x06, + 0x60, 0xa2, 0x0b, 0x2d, + 0x9c, 0x35, 0x24, 0x23, + 0xed, 0xce, 0x25, 0x85, + 0x7f, 0xcd, 0x37, 0x04, + }), + Index: 0, + }, + SignatureScript: mustParseShortForm("OP_2", 0), + Sequence: 4294967295, + }} + outputs := []*externalapi.DomainTransactionOutput{{ + Value: 1000000000, + ScriptPublicKey: nil, + }} + tx := &externalapi.DomainTransaction{ + Version: 1, + Inputs: inputs, + Outputs: outputs, + } + + scriptPubKey := &externalapi.ScriptPublicKey{Script: mustParseShortForm("OP_DROP NOP TRUE", 0), Version: 0} + + vm, err := NewEngine(scriptPubKey, tx, 0, 0, nil, nil, &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("failed to create script: %v", err) + } + + tests := []struct { + expected string + expectedErr error + }{ + {"00:0000: OP_2", nil}, + {"01:0000: OP_DROP", nil}, + {"01:0001: OP_NOP", nil}, + {"01:0002: OP_1", nil}, + {"", scriptError(ErrInvalidProgramCounter, "")}, + } + + for i, test := range tests { + actual, err := vm.DisasmPC() + if e := checkScriptError(err, test.expectedErr); e != nil { + t.Errorf("TestDisasmPC: %d: %s", i, e) + } + + if actual != test.expected { + t.Errorf("TestDisasmPC: %d: expected: '%s'. Got: '%s'", i, test.expected, actual) + } + + // ignore results from vm.Step() to keep going even when no opcodes left, to hit error case + _, _ = vm.Step() + } +} + +func TestDisasmScript(t *testing.T) { + t.Parallel() + + // tx with almost empty scripts. 
+ inputs := []*externalapi.DomainTransactionInput{{ + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xc9, 0x97, 0xa5, 0xe5, + 0x6e, 0x10, 0x41, 0x02, + 0xfa, 0x20, 0x9c, 0x6a, + 0x85, 0x2d, 0xd9, 0x06, + 0x60, 0xa2, 0x0b, 0x2d, + 0x9c, 0x35, 0x24, 0x23, + 0xed, 0xce, 0x25, 0x85, + 0x7f, 0xcd, 0x37, 0x04, + }), + Index: 0, + }, + SignatureScript: mustParseShortForm("OP_2", 0), + Sequence: 4294967295, + }} + outputs := []*externalapi.DomainTransactionOutput{{ + Value: 1000000000, + ScriptPublicKey: nil, + }} + tx := &externalapi.DomainTransaction{ + Version: 1, + Inputs: inputs, + Outputs: outputs, + } + + scriptPubKey := &externalapi.ScriptPublicKey{Script: mustParseShortForm("OP_DROP NOP TRUE", 0), Version: 0} + vm, err := NewEngine(scriptPubKey, tx, 0, 0, nil, nil, &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("failed to create script: %v", err) + } + + tests := []struct { + index int + expected string + expectedErr error + }{ + {-1, "", scriptError(ErrInvalidIndex, "")}, + {0, "00:0000: OP_2\n", nil}, + {1, "01:0000: OP_DROP\n01:0001: OP_NOP\n01:0002: OP_1\n", nil}, + {2, "", scriptError(ErrInvalidIndex, "")}, + } + + for _, test := range tests { + actual, err := vm.DisasmScript(test.index) + if e := checkScriptError(err, test.expectedErr); e != nil { + t.Errorf("TestDisasmScript: %d: %s", test.index, e) + } + + if actual != test.expected { + t.Errorf("TestDisasmScript: %d: expected: '%s'. Got: '%s'", test.index, test.expected, actual) + } + } +} diff --git a/domain/consensus/utils/txscript/error.go b/domain/consensus/utils/txscript/error.go new file mode 100644 index 0000000..a884fc3 --- /dev/null +++ b/domain/consensus/utils/txscript/error.go @@ -0,0 +1,308 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package txscript + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// ErrorCode identifies a kind of script error. +type ErrorCode int + +// These constants are used to identify a specific Error. +const ( + // ErrInternal is returned if internal consistency checks fail. In + // practice this error should never be seen as it would mean there is an + // error in the engine logic. + ErrInternal ErrorCode = iota + + // --------------------------------------- + // Failures related to improper API usage. + // --------------------------------------- + + // ErrInvalidFlags is returned when the passed flags to NewEngine + // contain an invalid combination. + ErrInvalidFlags + + // ErrInvalidIndex is returned when an out-of-bounds index is passed to + // a function. + ErrInvalidIndex + + // ErrUnsupportedAddress is returned when a concrete type that + // implements a util.Address is not a supported type. + ErrUnsupportedAddress + + // ErrNotMultisigScript is returned from CalcMultiSigStats when the + // provided script is not a multisig script. + ErrNotMultisigScript + + // ErrTooManyRequiredSigs is returned from MultiSigScript when the + // specified number of required signatures is larger than the number of + // provided public keys. + ErrTooManyRequiredSigs + + // ------------------------------------------ + // Failures related to final execution state. + // ------------------------------------------ + + // ErrEarlyReturn is returned when OP_RETURN is executed in the script. + ErrEarlyReturn + + // ErrEmptyStack is returned when the script evaluated without error, + // but terminated with an empty top stack element. + ErrEmptyStack + + // ErrEvalFalse is returned when the script evaluated without error but + // terminated with a false top stack element. + ErrEvalFalse + + // ErrScriptUnfinished is returned when CheckErrorCondition is called on + // a script that has not finished executing. 
+	ErrScriptUnfinished
+
+	// ErrInvalidProgramCounter is returned when an attempt to execute an
+	// opcode is made once all of them have already been executed. This can
+	// happen due to things such as a second call to Execute or calling Step
+	// after all opcodes have already been executed.
+	ErrInvalidProgramCounter
+
+	// -----------------------------------------------------
+	// Failures related to exceeding maximum allowed limits.
+	// -----------------------------------------------------
+
+	// ErrScriptTooBig is returned if a script is larger than MaxScriptSize.
+	ErrScriptTooBig
+
+	// ErrElementTooBig is returned if the size of an element to be pushed
+	// to the stack is over MaxScriptElementSize.
+	ErrElementTooBig
+
+	// ErrTooManyOperations is returned if a script has more than
+	// MaxOpsPerScript opcodes that do not push data.
+	ErrTooManyOperations
+
+	// ErrStackOverflow is returned when stack and altstack combined depth
+	// is over the limit.
+	ErrStackOverflow
+
+	// ErrInvalidPubKeyCount is returned when the number of public keys
+	// specified for a multisig is either negative or greater than
+	// MaxPubKeysPerMultiSig.
+	ErrInvalidPubKeyCount
+
+	// ErrInvalidSignatureCount is returned when the number of signatures
+	// specified for a multisig is either negative or greater than the
+	// number of public keys.
+	ErrInvalidSignatureCount
+
+	// ErrNumberTooBig is returned when the argument for an opcode that
+	// expects numeric input is larger than the expected maximum number of
+	// bytes. For the most part, opcodes that deal with stack manipulation
+	// via offsets, arithmetic, numeric comparison, and boolean logic are
+	// those that this applies to. However, any opcode that expects numeric
+	// input may fail with this code.
+	ErrNumberTooBig
+
+	// --------------------------------------------
+	// Failures related to verification operations.
+	// --------------------------------------------
+
+	// ErrVerify is returned when OP_VERIFY is encountered in a script and
+	// the top item on the data stack does not evaluate to true.
+	ErrVerify
+
+	// ErrEqualVerify is returned when OP_EQUALVERIFY is encountered in a
+	// script and the top item on the data stack does not evaluate to true.
+	ErrEqualVerify
+
+	// ErrNumEqualVerify is returned when OP_NUMEQUALVERIFY is encountered
+	// in a script and the top item on the data stack does not evaluate to
+	// true.
+	ErrNumEqualVerify
+
+	// ErrCheckSigVerify is returned when OP_CHECKSIGVERIFY is encountered
+	// in a script and the top item on the data stack does not evaluate to
+	// true.
+	ErrCheckSigVerify
+
+	// ErrCheckMultiSigVerify is returned when OP_CHECKMULTISIGVERIFY is
+	// encountered in a script and the top item on the data stack does not
+	// evaluate to true.
+	ErrCheckMultiSigVerify
+
+	// --------------------------------------------
+	// Failures related to improper use of opcodes.
+	// --------------------------------------------
+
+	// ErrDisabledOpcode is returned when a disabled opcode is encountered
+	// in a script.
+	ErrDisabledOpcode
+
+	// ErrReservedOpcode is returned when an opcode marked as reserved
+	// is encountered in a script.
+	ErrReservedOpcode
+
+	// ErrMalformedPush is returned when a data push opcode tries to push
+	// more bytes than are left in the script.
+	ErrMalformedPush
+
+	// ErrInvalidStackOperation is returned when a stack operation is
+	// attempted with a number that is invalid for the current stack size.
+	ErrInvalidStackOperation
+
+	// ErrUnbalancedConditional is returned when an OP_ELSE or OP_ENDIF is
+	// encountered in a script without first having an OP_IF or OP_NOTIF or
+	// the end of script is reached without encountering an OP_ENDIF when
+	// an OP_IF or OP_NOTIF was previously encountered.
+	ErrUnbalancedConditional
+
+	// ---------------------------------
+	// Failures related to malleability.
+ // --------------------------------- + + // ErrMinimalData is returned when the script contains + // push operations that do not use the minimal opcode required. + ErrMinimalData + + // ErrInvalidSigHashType is returned when a signature hash type is not + // one of the supported types. + ErrInvalidSigHashType + + // ErrSigLength is returned when Schnorr signature is of incorrect length + ErrSigLength + + // ErrSigHighS is returned when the ScriptVerifyLowS flag is set and the + // script contains any signatures whose S values are higher than the + // half order. + ErrSigHighS + + // ErrNotPushOnly is returned when a script that is required to only + // push data to the stack performs other operations. + ErrNotPushOnly + + // ErrPubKeyFormat is returned when the script contains invalid public keys. + // A valid pubkey should be in uncompressed format as a 64 byte string prefixed with 0x04, + // or to be in compressed format as a 32 byte string prefixed with 0x02 or 0x03 to signal oddness. + ErrPubKeyFormat + + // ErrCleanStack is returned when after evaluation, the stack + // contains more than one element. + ErrCleanStack + + // ErrNullFail is returned when signatures are not empty + // on failed checksig or checkmultisig operations. + ErrNullFail + + // ------------------------------- + // Failures related to soft forks. + // ------------------------------- + + // ErrNegativeLockTime is returned when a script contains an opcode that + // interprets a negative lock time. + ErrNegativeLockTime + + // ErrUnsatisfiedLockTime is returned when a script contains an opcode + // that involves a lock time and the required lock time has not been + // reached. + ErrUnsatisfiedLockTime + + // ErrMinimalIf is returned if the operand of an OP_IF/OP_NOTIF + // is not either an empty vector or [0x01]. + ErrMinimalIf + + // numErrorCodes is the maximum error code number used in tests. This + // entry MUST be the last entry in the enum. 
+ numErrorCodes +) + +// Map of ErrorCode values back to their constant names for pretty printing. +var errorCodeStrings = map[ErrorCode]string{ + ErrInternal: "ErrInternal", + ErrInvalidFlags: "ErrInvalidFlags", + ErrInvalidIndex: "ErrInvalidIndex", + ErrUnsupportedAddress: "ErrUnsupportedAddress", + ErrNotMultisigScript: "ErrNotMultisigScript", + ErrTooManyRequiredSigs: "ErrTooManyRequiredSigs", + ErrEarlyReturn: "ErrEarlyReturn", + ErrEmptyStack: "ErrEmptyStack", + ErrEvalFalse: "ErrEvalFalse", + ErrScriptUnfinished: "ErrScriptUnfinished", + ErrInvalidProgramCounter: "ErrInvalidProgramCounter", + ErrScriptTooBig: "ErrScriptTooBig", + ErrElementTooBig: "ErrElementTooBig", + ErrTooManyOperations: "ErrTooManyOperations", + ErrStackOverflow: "ErrStackOverflow", + ErrInvalidPubKeyCount: "ErrInvalidPubKeyCount", + ErrInvalidSignatureCount: "ErrInvalidSignatureCount", + ErrNumberTooBig: "ErrNumberTooBig", + ErrVerify: "ErrVerify", + ErrEqualVerify: "ErrEqualVerify", + ErrNumEqualVerify: "ErrNumEqualVerify", + ErrCheckSigVerify: "ErrCheckSigVerify", + ErrCheckMultiSigVerify: "ErrCheckMultiSigVerify", + ErrDisabledOpcode: "ErrDisabledOpcode", + ErrReservedOpcode: "ErrReservedOpcode", + ErrMalformedPush: "ErrMalformedPush", + ErrInvalidStackOperation: "ErrInvalidStackOperation", + ErrUnbalancedConditional: "ErrUnbalancedConditional", + ErrMinimalData: "ErrMinimalData", + ErrInvalidSigHashType: "ErrInvalidSigHashType", + ErrSigLength: "ErrSigLength", + ErrSigHighS: "ErrSigHighS", + ErrNotPushOnly: "ErrNotPushOnly", + ErrPubKeyFormat: "ErrPubKeyFormat", + ErrCleanStack: "ErrCleanStack", + ErrNullFail: "ErrNullFail", + ErrNegativeLockTime: "ErrNegativeLockTime", + ErrUnsatisfiedLockTime: "ErrUnsatisfiedLockTime", + ErrMinimalIf: "ErrMinimalIf", +} + +// String returns the ErrorCode as a human-readable name. 
+func (e ErrorCode) String() string { + if s := errorCodeStrings[e]; s != "" { + return s + } + return fmt.Sprintf("Unknown ErrorCode (%d)", int(e)) +} + +// Error identifies a script-related error. It is used to indicate three +// classes of errors: +// 1. Script execution failures due to violating one of the many requirements +// imposed by the script engine or evaluating to false +// 2. Improper API usage by callers +// 3. Internal consistency check failures +// +// The caller can use type assertions on the returned errors to access the +// ErrorCode field to ascertain the specific reason for the error. As an +// additional convenience, the caller may make use of the IsErrorCode function +// to check for a specific error code. +type Error struct { + ErrorCode ErrorCode + Description string +} + +// Error satisfies the error interface and prints human-readable errors. +func (e Error) Error() string { + return e.Description +} + +// scriptError creates an Error given a set of arguments. +func scriptError(c ErrorCode, desc string) Error { + return Error{ErrorCode: c, Description: desc} +} + +// IsErrorCode returns whether or not the provided error is a script error with +// the provided error code. +func IsErrorCode(err error, c ErrorCode) bool { + var errError Error + if ok := errors.As(err, &errError); ok { + return errError.ErrorCode == c + } + + return false +} diff --git a/domain/consensus/utils/txscript/error_test.go b/domain/consensus/utils/txscript/error_test.go new file mode 100644 index 0000000..d69158f --- /dev/null +++ b/domain/consensus/utils/txscript/error_test.go @@ -0,0 +1,105 @@ +// Copyright (c) 2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "testing" +) + +// TestErrorCodeStringer tests the stringized output for the ErrorCode type. 
+func TestErrorCodeStringer(t *testing.T) { + t.Parallel() + + tests := []struct { + in ErrorCode + want string + }{ + {ErrInternal, "ErrInternal"}, + {ErrInvalidFlags, "ErrInvalidFlags"}, + {ErrInvalidIndex, "ErrInvalidIndex"}, + {ErrUnsupportedAddress, "ErrUnsupportedAddress"}, + {ErrTooManyRequiredSigs, "ErrTooManyRequiredSigs"}, + {ErrNotMultisigScript, "ErrNotMultisigScript"}, + {ErrEarlyReturn, "ErrEarlyReturn"}, + {ErrEmptyStack, "ErrEmptyStack"}, + {ErrEvalFalse, "ErrEvalFalse"}, + {ErrScriptUnfinished, "ErrScriptUnfinished"}, + {ErrInvalidProgramCounter, "ErrInvalidProgramCounter"}, + {ErrScriptTooBig, "ErrScriptTooBig"}, + {ErrElementTooBig, "ErrElementTooBig"}, + {ErrTooManyOperations, "ErrTooManyOperations"}, + {ErrStackOverflow, "ErrStackOverflow"}, + {ErrInvalidPubKeyCount, "ErrInvalidPubKeyCount"}, + {ErrInvalidSignatureCount, "ErrInvalidSignatureCount"}, + {ErrNumberTooBig, "ErrNumberTooBig"}, + {ErrVerify, "ErrVerify"}, + {ErrEqualVerify, "ErrEqualVerify"}, + {ErrNumEqualVerify, "ErrNumEqualVerify"}, + {ErrCheckSigVerify, "ErrCheckSigVerify"}, + {ErrCheckMultiSigVerify, "ErrCheckMultiSigVerify"}, + {ErrDisabledOpcode, "ErrDisabledOpcode"}, + {ErrReservedOpcode, "ErrReservedOpcode"}, + {ErrMalformedPush, "ErrMalformedPush"}, + {ErrInvalidStackOperation, "ErrInvalidStackOperation"}, + {ErrUnbalancedConditional, "ErrUnbalancedConditional"}, + {ErrMinimalData, "ErrMinimalData"}, + {ErrInvalidSigHashType, "ErrInvalidSigHashType"}, + {ErrSigLength, "ErrSigLength"}, + {ErrSigHighS, "ErrSigHighS"}, + {ErrNotPushOnly, "ErrNotPushOnly"}, + {ErrPubKeyFormat, "ErrPubKeyFormat"}, + {ErrCleanStack, "ErrCleanStack"}, + {ErrNullFail, "ErrNullFail"}, + {ErrNegativeLockTime, "ErrNegativeLockTime"}, + {ErrUnsatisfiedLockTime, "ErrUnsatisfiedLockTime"}, + {ErrMinimalIf, "ErrMinimalIf"}, + {0xffff, "Unknown ErrorCode (65535)"}, + } + + // Detect additional error codes that don't have the stringer added. 
+ if len(tests)-1 != int(numErrorCodes) { + t.Errorf("It appears an error code was added without adding an " + + "associated stringer test") + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.String() + if result != test.want { + t.Errorf("String #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} + +// TestError tests the error output for the Error type. +func TestError(t *testing.T) { + t.Parallel() + + tests := []struct { + in Error + want string + }{ + { + Error{Description: "some error"}, + "some error", + }, + { + Error{Description: "human-readable error"}, + "human-readable error", + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.Error() + if result != test.want { + t.Errorf("Error #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} diff --git a/domain/consensus/utils/txscript/example_test.go b/domain/consensus/utils/txscript/example_test.go new file mode 100644 index 0000000..42b3c46 --- /dev/null +++ b/domain/consensus/utils/txscript/example_test.go @@ -0,0 +1,80 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript_test + +import ( + "encoding/hex" + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" +) + +// This example demonstrates creating a script which pays to a spectre address. +// It also prints the created script hex and uses the DisasmString function to +// display the disassembled script. +func ExamplePayToAddrScript() { + // Parse the address to send the coins to into a util.Address + // which is useful to ensure the accuracy of the address and determine + // the address type. 
It is also required for the upcoming call to + // PayToAddrScript. + addressStr := "spectre:qqj9fg59mptxkr9j0y53j5mwurcmda5mtza9n6v9pm9uj8h0wgk6udg8a3p4a" + address, err := util.DecodeAddress(addressStr, util.Bech32PrefixSpectre) + if err != nil { + fmt.Println(err) + return + } + + // Create a public key script that pays to the address. + script, err := txscript.PayToAddrScript(address) + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("Script Hex: %x\n", script.Script) + + disasm, err := txscript.DisasmString(script.Version, script.Script) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("Script Disassembly:", disasm) + + // Output: + // Script Hex: 202454a285d8566b0cb2792919536ee0f1b6f69b58ba59e9850ecbc91eef722daeac + // Script Disassembly: 2454a285d8566b0cb2792919536ee0f1b6f69b58ba59e9850ecbc91eef722dae OP_CHECKSIG +} + +// This example demonstrates extracting information from a standard public key +// script. +func ExampleExtractScriptPubKeyAddress() { + // Start with a standard pay-to-pubkey script. + scriptHex := "2089ac24ea10bb751af4939623ccc5e550d96842b64e8fca0f63e94b4373fd555eac" + script, err := hex.DecodeString(scriptHex) + if err != nil { + fmt.Println(err) + return + } + + // Extract and print details from the script. 
+ scriptClass, address, err := txscript.ExtractScriptPubKeyAddress( + &externalapi.ScriptPublicKey{ + Script: script, + Version: 0, + }, &dagconfig.MainnetParams) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("Script Class:", scriptClass) + fmt.Println("Address:", address) + + // Output: + // Script Class: pubkey + // Address: spectre:qzy6cf82zzah2xh5jwtz8nx9u4gdj6zzke8gljs0v055ksmnl424uvulzq0dw +} diff --git a/domain/consensus/utils/txscript/log.go b/domain/consensus/utils/txscript/log.go new file mode 100644 index 0000000..e19e4ab --- /dev/null +++ b/domain/consensus/utils/txscript/log.go @@ -0,0 +1,11 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("SCRP") diff --git a/domain/consensus/utils/txscript/main_test.go b/domain/consensus/utils/txscript/main_test.go new file mode 100644 index 0000000..986385b --- /dev/null +++ b/domain/consensus/utils/txscript/main_test.go @@ -0,0 +1,16 @@ +package txscript + +import ( + "os" + "testing" + + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func TestMain(m *testing.M) { + // set log level to trace, so that logClosures passed to log.Tracef are covered + log.SetLevel(logger.LevelTrace) + logger.InitLogStdout(logger.LevelTrace) + + os.Exit(m.Run()) +} diff --git a/domain/consensus/utils/txscript/opcode.go b/domain/consensus/utils/txscript/opcode.go new file mode 100644 index 0000000..fb2d937 --- /dev/null +++ b/domain/consensus/utils/txscript/opcode.go @@ -0,0 +1,2495 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 

package txscript

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"hash"

	"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing"

	"golang.org/x/crypto/blake2b"

	"github.com/spectre-project/spectred/domain/consensus/utils/constants"

	"github.com/spectre-project/go-secp256k1"
)

// An opcode defines the information related to a txscript opcode. opfunc, if
// present, is the function to call to perform the opcode on the script. The
// current script is passed in as a slice with the first member being the opcode
// itself.
type opcode struct {
	value byte   // raw byte value of the opcode
	name  string // human-readable name used in disassembly
	// length > 0 is the total encoded size including the opcode byte;
	// -1/-2/-4 mark OP_PUSHDATA{1,2,4}, which carry a 1/2/4-byte
	// little-endian length prefix (see parsedOpcode.bytes and print).
	length int
	opfunc func(*parsedOpcode, *Engine) error
}

// These constants are the values of the spectre script opcodes.
const (
	Op0     = 0x00 // 0
	OpFalse = 0x00 // 0 - AKA Op0
	OpData1 = 0x01 // 1
	OpData2 = 0x02 // 2
	OpData3 = 0x03 // 3
	OpData4 = 0x04 // 4
	OpData5 = 0x05 // 5
	OpData6 = 0x06 // 6
	OpData7 = 0x07 // 7
	OpData8 = 0x08 // 8
	OpData9 = 0x09 // 9
	OpData10 = 0x0a // 10
	OpData11 = 0x0b // 11
	OpData12 = 0x0c // 12
	OpData13 = 0x0d // 13
	OpData14 = 0x0e // 14
	OpData15 = 0x0f // 15
	OpData16 = 0x10 // 16
	OpData17 = 0x11 // 17
	OpData18 = 0x12 // 18
	OpData19 = 0x13 // 19
	OpData20 = 0x14 // 20
	OpData21 = 0x15 // 21
	OpData22 = 0x16 // 22
	OpData23 = 0x17 // 23
	OpData24 = 0x18 // 24
	OpData25 = 0x19 // 25
	OpData26 = 0x1a // 26
	OpData27 = 0x1b // 27
	OpData28 = 0x1c // 28
	OpData29 = 0x1d // 29
	OpData30 = 0x1e // 30
	OpData31 = 0x1f // 31
	OpData32 = 0x20 // 32
	OpData33 = 0x21 // 33
	OpData34 = 0x22 // 34
	OpData35 = 0x23 // 35
	OpData36 = 0x24 // 36
	OpData37 = 0x25 // 37
	OpData38 = 0x26 // 38
	OpData39 = 0x27 // 39
	OpData40 = 0x28 // 40
	OpData41 = 0x29 // 41
	OpData42 = 0x2a // 42
	OpData43 = 0x2b // 43
	OpData44 = 0x2c // 44
	OpData45 = 0x2d // 45
	OpData46 = 0x2e // 46
	OpData47 = 0x2f // 47
	OpData48 = 0x30 // 48
	OpData49 = 0x31 // 49
	OpData50 = 0x32 // 50
	OpData51 = 0x33 // 51
	OpData52 = 0x34 // 52
	OpData53 = 0x35 // 53
	OpData54 = 0x36 // 54
	OpData55 = 0x37 // 55
	OpData56 = 0x38 // 56
	OpData57 = 0x39 // 57
	OpData58 = 0x3a // 58
	OpData59 = 0x3b // 59
	OpData60 = 0x3c // 60
	OpData61 = 0x3d // 61
	OpData62 = 0x3e // 62
	OpData63 = 0x3f // 63
	OpData64 = 0x40 // 64
	OpData65 = 0x41 // 65
	OpData66 = 0x42 // 66
	OpData67 = 0x43 // 67
	OpData68 = 0x44 // 68
	OpData69 = 0x45 // 69
	OpData70 = 0x46 // 70
	OpData71 = 0x47 // 71
	OpData72 = 0x48 // 72
	OpData73 = 0x49 // 73
	OpData74 = 0x4a // 74
	OpData75 = 0x4b // 75
	OpPushData1 = 0x4c // 76
	OpPushData2 = 0x4d // 77
	OpPushData4 = 0x4e // 78
	Op1Negate   = 0x4f // 79
	OpReserved  = 0x50 // 80
	Op1         = 0x51 // 81 - AKA OpTrue
	OpTrue      = 0x51 // 81
	Op2         = 0x52 // 82
	Op3         = 0x53 // 83
	Op4         = 0x54 // 84
	Op5         = 0x55 // 85
	Op6         = 0x56 // 86
	Op7         = 0x57 // 87
	Op8         = 0x58 // 88
	Op9         = 0x59 // 89
	Op10        = 0x5a // 90
	Op11        = 0x5b // 91
	Op12        = 0x5c // 92
	Op13        = 0x5d // 93
	Op14        = 0x5e // 94
	Op15        = 0x5f // 95
	Op16        = 0x60 // 96
	OpNop       = 0x61 // 97
	OpVer       = 0x62 // 98
	OpIf        = 0x63 // 99
	OpNotIf     = 0x64 // 100
	OpVerIf     = 0x65 // 101
	OpVerNotIf  = 0x66 // 102
	OpElse      = 0x67 // 103
	OpEndIf     = 0x68 // 104
	OpVerify    = 0x69 // 105
	OpReturn    = 0x6a // 106
	OpToAltStack   = 0x6b // 107
	OpFromAltStack = 0x6c // 108
	Op2Drop        = 0x6d // 109
	Op2Dup         = 0x6e // 110
	Op3Dup         = 0x6f // 111
	Op2Over        = 0x70 // 112
	Op2Rot         = 0x71 // 113
	Op2Swap        = 0x72 // 114
	OpIfDup        = 0x73 // 115
	OpDepth        = 0x74 // 116
	OpDrop         = 0x75 // 117
	OpDup          = 0x76 // 118
	OpNip          = 0x77 // 119
	OpOver         = 0x78 // 120
	OpPick         = 0x79 // 121
	OpRoll         = 0x7a // 122
	OpRot          = 0x7b // 123
	OpSwap         = 0x7c // 124
	OpTuck         = 0x7d // 125
	OpCat          = 0x7e // 126
	OpSubStr       = 0x7f // 127
	OpLeft         = 0x80 // 128
	OpRight        = 0x81 // 129
	OpSize         = 0x82 // 130
	OpInvert       = 0x83 // 131
	OpAnd          = 0x84 // 132
	OpOr           = 0x85 // 133
	OpXor          = 0x86 // 134
	OpEqual        = 0x87 // 135
	OpEqualVerify  = 0x88 // 136
	OpReserved1    = 0x89 // 137
	OpReserved2    = 0x8a // 138
	Op1Add         = 0x8b // 139
	Op1Sub         = 0x8c // 140
	Op2Mul         = 0x8d // 141
	Op2Div         = 0x8e // 142
	OpNegate       = 0x8f // 143
	OpAbs          = 0x90 // 144
	OpNot          = 0x91 // 145
	Op0NotEqual    = 0x92 // 146
	OpAdd          = 0x93 // 147
	OpSub          = 0x94 // 148
	OpMul          = 0x95 // 149
	OpDiv          = 0x96 // 150
	OpMod          = 0x97 // 151
	OpLShift       = 0x98 // 152
	OpRShift       = 0x99 // 153
	OpBoolAnd      = 0x9a // 154
	OpBoolOr       = 0x9b // 155
	OpNumEqual     = 0x9c // 156
	OpNumEqualVerify = 0x9d // 157
	OpNumNotEqual    = 0x9e // 158
	OpLessThan       = 0x9f // 159
	OpGreaterThan    = 0xa0 // 160
	OpLessThanOrEqual    = 0xa1 // 161
	OpGreaterThanOrEqual = 0xa2 // 162
	OpMin    = 0xa3 // 163
	OpMax    = 0xa4 // 164
	OpWithin = 0xa5 // 165
	OpUnknown166 = 0xa6 // 166
	OpUnknown167 = 0xa7 // 167
	OpSHA256             = 0xa8 // 168
	OpCheckMultiSigECDSA = 0xa9 // 169
	OpBlake2b            = 0xaa // 170
	OpCheckSigECDSA      = 0xab // 171
	OpCheckSig           = 0xac // 172
	OpCheckSigVerify     = 0xad // 173
	OpCheckMultiSig       = 0xae // 174
	OpCheckMultiSigVerify = 0xaf // 175
	OpCheckLockTimeVerify = 0xb0 // 176
	OpCheckSequenceVerify = 0xb1 // 177
	OpUnknown178 = 0xb2 // 178
	OpUnknown179 = 0xb3 // 179
	OpUnknown180 = 0xb4 // 180
	OpUnknown181 = 0xb5 // 181
	OpUnknown182 = 0xb6 // 182
	OpUnknown183 = 0xb7 // 183
	OpUnknown184 = 0xb8 // 184
	OpUnknown185 = 0xb9 // 185
	OpUnknown186 = 0xba // 186
	OpUnknown187 = 0xbb // 187
	OpUnknown188 = 0xbc // 188
	OpUnknown189 = 0xbd // 189
	OpUnknown190 = 0xbe // 190
	OpUnknown191 = 0xbf // 191
	OpUnknown192 = 0xc0 // 192
	OpUnknown193 = 0xc1 // 193
	OpUnknown194 = 0xc2 // 194
	OpUnknown195 = 0xc3 // 195
	OpUnknown196 = 0xc4 // 196
	OpUnknown197 = 0xc5 // 197
	OpUnknown198 = 0xc6 // 198
	OpUnknown199 = 0xc7 // 199
	OpUnknown200 = 0xc8 // 200
	OpUnknown201 = 0xc9 // 201
	OpUnknown202 = 0xca // 202
	OpUnknown203 = 0xcb // 203
	OpUnknown204 = 0xcc // 204
	OpUnknown205 = 0xcd // 205
	OpUnknown206 = 0xce // 206
	OpUnknown207 = 0xcf // 207
	OpUnknown208 = 0xd0 // 208
	OpUnknown209 = 0xd1 // 209
	OpUnknown210 = 0xd2 // 210
	OpUnknown211 = 0xd3 // 211
	OpUnknown212 = 0xd4 // 212
	OpUnknown213 = 0xd5 // 213
	OpUnknown214 = 0xd6 // 214
	OpUnknown215 = 0xd7 // 215
	OpUnknown216 = 0xd8 // 216
	OpUnknown217 = 0xd9 // 217
	OpUnknown218 = 0xda // 218
	OpUnknown219 = 0xdb // 219
	OpUnknown220 = 0xdc // 220
	OpUnknown221 = 0xdd // 221
	OpUnknown222 = 0xde // 222
	OpUnknown223 = 0xdf // 223
	OpUnknown224 = 0xe0 // 224
	OpUnknown225 = 0xe1 // 225
	OpUnknown226 = 0xe2 // 226
	OpUnknown227 = 0xe3 // 227
	OpUnknown228 = 0xe4 // 228
	OpUnknown229 = 0xe5 // 229
	OpUnknown230 = 0xe6 // 230
	OpUnknown231 = 0xe7 // 231
	OpUnknown232 = 0xe8 // 232
	OpUnknown233 = 0xe9 // 233
	OpUnknown234 = 0xea // 234
	OpUnknown235 = 0xeb // 235
	OpUnknown236 = 0xec // 236
	OpUnknown237 = 0xed // 237
	OpUnknown238 = 0xee // 238
	OpUnknown239 = 0xef // 239
	OpUnknown240 = 0xf0 // 240
	OpUnknown241 = 0xf1 // 241
	OpUnknown242 = 0xf2 // 242
	OpUnknown243 = 0xf3 // 243
	OpUnknown244 = 0xf4 // 244
	OpUnknown245 = 0xf5 // 245
	OpUnknown246 = 0xf6 // 246
	OpUnknown247 = 0xf7 // 247
	OpUnknown248 = 0xf8 // 248
	OpUnknown249 = 0xf9 // 249
	OpSmallInteger  = 0xfa // 250
	OpPubKeys       = 0xfb // 251
	OpUnknown252    = 0xfc // 252
	OpPubKeyHash    = 0xfd // 253
	OpPubKey        = 0xfe // 254
	OpInvalidOpCode = 0xff // 255
)

// Conditional execution constants. These are the values pushed onto the
// engine's condition stack by OP_IF/OP_NOTIF (see opcodeIf/opcodeNotIf).
const (
	OpCondFalse = 0
	OpCondTrue  = 1
	OpCondSkip  = 2
)

// opcodeArray holds details about all possible opcodes such as how many bytes
// the opcode and any associated data should take, its human-readable name, and
// the handler function. The array is indexed by the opcode's byte value.
var opcodeArray = [256]opcode{
	// Data push opcodes.
	OpFalse: {OpFalse, "OP_0", 1, opcodeFalse},
	// Each OP_DATA_n entry has length n+1: the opcode byte plus n data bytes.
	OpData1:  {OpData1, "OP_DATA_1", 2, opcodePushData},
	OpData2:  {OpData2, "OP_DATA_2", 3, opcodePushData},
	OpData3:  {OpData3, "OP_DATA_3", 4, opcodePushData},
	OpData4:  {OpData4, "OP_DATA_4", 5, opcodePushData},
	OpData5:  {OpData5, "OP_DATA_5", 6, opcodePushData},
	OpData6:  {OpData6, "OP_DATA_6", 7, opcodePushData},
	OpData7:  {OpData7, "OP_DATA_7", 8, opcodePushData},
	OpData8:  {OpData8, "OP_DATA_8", 9, opcodePushData},
	OpData9:  {OpData9, "OP_DATA_9", 10, opcodePushData},
	OpData10: {OpData10, "OP_DATA_10", 11, opcodePushData},
	OpData11: {OpData11, "OP_DATA_11", 12, opcodePushData},
	OpData12: {OpData12, "OP_DATA_12", 13, opcodePushData},
	OpData13: {OpData13, "OP_DATA_13", 14, opcodePushData},
	OpData14: {OpData14, "OP_DATA_14", 15, opcodePushData},
	OpData15: {OpData15, "OP_DATA_15", 16, opcodePushData},
	OpData16: {OpData16, "OP_DATA_16", 17, opcodePushData},
	OpData17: {OpData17, "OP_DATA_17", 18, opcodePushData},
	OpData18: {OpData18, "OP_DATA_18", 19, opcodePushData},
	OpData19: {OpData19, "OP_DATA_19", 20, opcodePushData},
	OpData20: {OpData20, "OP_DATA_20", 21, opcodePushData},
	OpData21: {OpData21, "OP_DATA_21", 22, opcodePushData},
	OpData22: {OpData22, "OP_DATA_22", 23, opcodePushData},
	OpData23: {OpData23, "OP_DATA_23", 24, opcodePushData},
	OpData24: {OpData24, "OP_DATA_24", 25, opcodePushData},
	OpData25: {OpData25, "OP_DATA_25", 26, opcodePushData},
	OpData26: {OpData26, "OP_DATA_26", 27, opcodePushData},
	OpData27: {OpData27, "OP_DATA_27", 28, opcodePushData},
	OpData28: {OpData28, "OP_DATA_28", 29, opcodePushData},
	OpData29: {OpData29, "OP_DATA_29", 30, opcodePushData},
	OpData30: {OpData30, "OP_DATA_30", 31, opcodePushData},
	OpData31: {OpData31, "OP_DATA_31", 32, opcodePushData},
	OpData32: {OpData32, "OP_DATA_32", 33, opcodePushData},
	OpData33: {OpData33, "OP_DATA_33", 34, opcodePushData},
	OpData34: {OpData34, "OP_DATA_34", 35, opcodePushData},
	OpData35: {OpData35, "OP_DATA_35", 36, opcodePushData},
	OpData36: {OpData36, "OP_DATA_36", 37, opcodePushData},
	OpData37: {OpData37, "OP_DATA_37", 38, opcodePushData},
	OpData38: {OpData38, "OP_DATA_38", 39, opcodePushData},
	OpData39: {OpData39, "OP_DATA_39", 40, opcodePushData},
	OpData40: {OpData40, "OP_DATA_40", 41, opcodePushData},
	OpData41: {OpData41, "OP_DATA_41", 42, opcodePushData},
	OpData42: {OpData42, "OP_DATA_42", 43, opcodePushData},
	OpData43: {OpData43, "OP_DATA_43", 44, opcodePushData},
	OpData44: {OpData44, "OP_DATA_44", 45, opcodePushData},
	OpData45: {OpData45, "OP_DATA_45", 46, opcodePushData},
	OpData46: {OpData46, "OP_DATA_46", 47, opcodePushData},
	OpData47: {OpData47, "OP_DATA_47", 48, opcodePushData},
	OpData48: {OpData48, "OP_DATA_48", 49, opcodePushData},
	OpData49: {OpData49, "OP_DATA_49", 50, opcodePushData},
	OpData50: {OpData50, "OP_DATA_50", 51, opcodePushData},
	OpData51: {OpData51, "OP_DATA_51", 52, opcodePushData},
	OpData52: {OpData52, "OP_DATA_52", 53, opcodePushData},
	OpData53: {OpData53, "OP_DATA_53", 54, opcodePushData},
	OpData54: {OpData54, "OP_DATA_54", 55, opcodePushData},
	OpData55: {OpData55, "OP_DATA_55", 56, opcodePushData},
	OpData56: {OpData56, "OP_DATA_56", 57, opcodePushData},
	OpData57: {OpData57, "OP_DATA_57", 58, opcodePushData},
	OpData58: {OpData58, "OP_DATA_58", 59, opcodePushData},
	OpData59: {OpData59, "OP_DATA_59", 60, opcodePushData},
	OpData60: {OpData60, "OP_DATA_60", 61, opcodePushData},
	OpData61: {OpData61, "OP_DATA_61", 62, opcodePushData},
	OpData62: {OpData62, "OP_DATA_62", 63, opcodePushData},
	OpData63: {OpData63, "OP_DATA_63", 64, opcodePushData},
	OpData64: {OpData64, "OP_DATA_64", 65, opcodePushData},
	OpData65: {OpData65, "OP_DATA_65", 66, opcodePushData},
	OpData66: {OpData66, "OP_DATA_66", 67, opcodePushData},
	OpData67: {OpData67, "OP_DATA_67", 68, opcodePushData},
	OpData68: {OpData68, "OP_DATA_68", 69, opcodePushData},
	OpData69: {OpData69, "OP_DATA_69", 70, opcodePushData},
	OpData70: {OpData70, "OP_DATA_70", 71, opcodePushData},
	OpData71: {OpData71, "OP_DATA_71", 72, opcodePushData},
	OpData72: {OpData72, "OP_DATA_72", 73, opcodePushData},
	OpData73: {OpData73, "OP_DATA_73", 74, opcodePushData},
	OpData74: {OpData74, "OP_DATA_74", 75, opcodePushData},
	OpData75: {OpData75, "OP_DATA_75", 76, opcodePushData},
	// Negative lengths mark a 1/2/4-byte little-endian length prefix.
	OpPushData1: {OpPushData1, "OP_PUSHDATA1", -1, opcodePushData},
	OpPushData2: {OpPushData2, "OP_PUSHDATA2", -2, opcodePushData},
	OpPushData4: {OpPushData4, "OP_PUSHDATA4", -4, opcodePushData},
	Op1Negate:   {Op1Negate, "OP_1NEGATE", 1, opcode1Negate},
	OpReserved:  {OpReserved, "OP_RESERVED", 1, opcodeReserved},
	OpTrue:      {OpTrue, "OP_1", 1, opcodeN},
	Op2:         {Op2, "OP_2", 1, opcodeN},
	Op3:         {Op3, "OP_3", 1, opcodeN},
	Op4:         {Op4, "OP_4", 1, opcodeN},
	Op5:         {Op5, "OP_5", 1, opcodeN},
	Op6:         {Op6, "OP_6", 1, opcodeN},
	Op7:         {Op7, "OP_7", 1, opcodeN},
	Op8:         {Op8, "OP_8", 1, opcodeN},
	Op9:         {Op9, "OP_9", 1, opcodeN},
	Op10:        {Op10, "OP_10", 1, opcodeN},
	Op11:        {Op11, "OP_11", 1, opcodeN},
	Op12:        {Op12, "OP_12", 1, opcodeN},
	Op13:        {Op13, "OP_13", 1, opcodeN},
	Op14:        {Op14, "OP_14", 1, opcodeN},
	Op15:        {Op15, "OP_15", 1, opcodeN},
	Op16:        {Op16, "OP_16", 1, opcodeN},

	// Control opcodes.
	OpNop:      {OpNop, "OP_NOP", 1, opcodeNop},
	OpVer:      {OpVer, "OP_VER", 1, opcodeReserved},
	OpIf:       {OpIf, "OP_IF", 1, opcodeIf},
	OpNotIf:    {OpNotIf, "OP_NOTIF", 1, opcodeNotIf},
	OpVerIf:    {OpVerIf, "OP_VERIF", 1, opcodeReserved},
	OpVerNotIf: {OpVerNotIf, "OP_VERNOTIF", 1, opcodeReserved},
	OpElse:     {OpElse, "OP_ELSE", 1, opcodeElse},
	OpEndIf:    {OpEndIf, "OP_ENDIF", 1, opcodeEndif},
	OpVerify:   {OpVerify, "OP_VERIFY", 1, opcodeVerify},
	OpReturn:   {OpReturn, "OP_RETURN", 1, opcodeReturn},
	OpCheckLockTimeVerify: {OpCheckLockTimeVerify, "OP_CHECKLOCKTIMEVERIFY", 1, opcodeCheckLockTimeVerify},
	OpCheckSequenceVerify: {OpCheckSequenceVerify, "OP_CHECKSEQUENCEVERIFY", 1, opcodeCheckSequenceVerify},

	// Stack opcodes.
	OpToAltStack:   {OpToAltStack, "OP_TOALTSTACK", 1, opcodeToAltStack},
	OpFromAltStack: {OpFromAltStack, "OP_FROMALTSTACK", 1, opcodeFromAltStack},
	Op2Drop:        {Op2Drop, "OP_2DROP", 1, opcode2Drop},
	Op2Dup:         {Op2Dup, "OP_2DUP", 1, opcode2Dup},
	Op3Dup:         {Op3Dup, "OP_3DUP", 1, opcode3Dup},
	Op2Over:        {Op2Over, "OP_2OVER", 1, opcode2Over},
	Op2Rot:         {Op2Rot, "OP_2ROT", 1, opcode2Rot},
	Op2Swap:        {Op2Swap, "OP_2SWAP", 1, opcode2Swap},
	OpIfDup:        {OpIfDup, "OP_IFDUP", 1, opcodeIfDup},
	OpDepth:        {OpDepth, "OP_DEPTH", 1, opcodeDepth},
	OpDrop:         {OpDrop, "OP_DROP", 1, opcodeDrop},
	OpDup:          {OpDup, "OP_DUP", 1, opcodeDup},
	OpNip:          {OpNip, "OP_NIP", 1, opcodeNip},
	OpOver:         {OpOver, "OP_OVER", 1, opcodeOver},
	OpPick:         {OpPick, "OP_PICK", 1, opcodePick},
	OpRoll:         {OpRoll, "OP_ROLL", 1, opcodeRoll},
	OpRot:          {OpRot, "OP_ROT", 1, opcodeRot},
	OpSwap:         {OpSwap, "OP_SWAP", 1, opcodeSwap},
	OpTuck:         {OpTuck, "OP_TUCK", 1, opcodeTuck},

	// Splice opcodes.
	OpCat:    {OpCat, "OP_CAT", 1, opcodeDisabled},
	OpSubStr: {OpSubStr, "OP_SUBSTR", 1, opcodeDisabled},
	OpLeft:   {OpLeft, "OP_LEFT", 1, opcodeDisabled},
	OpRight:  {OpRight, "OP_RIGHT", 1, opcodeDisabled},
	OpSize:   {OpSize, "OP_SIZE", 1, opcodeSize},

	// Bitwise logic opcodes.
	OpInvert:      {OpInvert, "OP_INVERT", 1, opcodeDisabled},
	OpAnd:         {OpAnd, "OP_AND", 1, opcodeDisabled},
	OpOr:          {OpOr, "OP_OR", 1, opcodeDisabled},
	OpXor:         {OpXor, "OP_XOR", 1, opcodeDisabled},
	OpEqual:       {OpEqual, "OP_EQUAL", 1, opcodeEqual},
	OpEqualVerify: {OpEqualVerify, "OP_EQUALVERIFY", 1, opcodeEqualVerify},
	OpReserved1:   {OpReserved1, "OP_RESERVED1", 1, opcodeReserved},
	OpReserved2:   {OpReserved2, "OP_RESERVED2", 1, opcodeReserved},

	// Numeric related opcodes.
	Op1Add:      {Op1Add, "OP_1ADD", 1, opcode1Add},
	Op1Sub:      {Op1Sub, "OP_1SUB", 1, opcode1Sub},
	Op2Mul:      {Op2Mul, "OP_2MUL", 1, opcodeDisabled},
	Op2Div:      {Op2Div, "OP_2DIV", 1, opcodeDisabled},
	OpNegate:    {OpNegate, "OP_NEGATE", 1, opcodeNegate},
	OpAbs:       {OpAbs, "OP_ABS", 1, opcodeAbs},
	OpNot:       {OpNot, "OP_NOT", 1, opcodeNot},
	Op0NotEqual: {Op0NotEqual, "OP_0NOTEQUAL", 1, opcode0NotEqual},
	OpAdd:       {OpAdd, "OP_ADD", 1, opcodeAdd},
	OpSub:       {OpSub, "OP_SUB", 1, opcodeSub},
	OpMul:       {OpMul, "OP_MUL", 1, opcodeDisabled},
	OpDiv:       {OpDiv, "OP_DIV", 1, opcodeDisabled},
	OpMod:       {OpMod, "OP_MOD", 1, opcodeDisabled},
	OpLShift:    {OpLShift, "OP_LSHIFT", 1, opcodeDisabled},
	OpRShift:    {OpRShift, "OP_RSHIFT", 1, opcodeDisabled},
	OpBoolAnd:   {OpBoolAnd, "OP_BOOLAND", 1, opcodeBoolAnd},
	OpBoolOr:    {OpBoolOr, "OP_BOOLOR", 1, opcodeBoolOr},
	OpNumEqual:  {OpNumEqual, "OP_NUMEQUAL", 1, opcodeNumEqual},
	OpNumEqualVerify:     {OpNumEqualVerify, "OP_NUMEQUALVERIFY", 1, opcodeNumEqualVerify},
	OpNumNotEqual:        {OpNumNotEqual, "OP_NUMNOTEQUAL", 1, opcodeNumNotEqual},
	OpLessThan:           {OpLessThan, "OP_LESSTHAN", 1, opcodeLessThan},
	OpGreaterThan:        {OpGreaterThan, "OP_GREATERTHAN", 1, opcodeGreaterThan},
	OpLessThanOrEqual:    {OpLessThanOrEqual, "OP_LESSTHANOREQUAL", 1, opcodeLessThanOrEqual},
	OpGreaterThanOrEqual: {OpGreaterThanOrEqual, "OP_GREATERTHANOREQUAL", 1, opcodeGreaterThanOrEqual},
	OpMin:                {OpMin, "OP_MIN", 1, opcodeMin},
	OpMax:                {OpMax, "OP_MAX", 1, opcodeMax},
	OpWithin:             {OpWithin, "OP_WITHIN", 1,
opcodeWithin},

	// Crypto opcodes.
	OpCheckMultiSigECDSA: {OpCheckMultiSigECDSA, "OP_CHECKMULTISIGECDSA", 1, opcodeCheckMultiSigECDSA},
	OpSHA256:             {OpSHA256, "OP_SHA256", 1, opcodeSha256},
	OpBlake2b:            {OpBlake2b, "OP_BLAKE2B", 1, opcodeBlake2b},
	OpCheckSigECDSA:      {OpCheckSigECDSA, "OP_CHECKSIGECDSA", 1, opcodeCheckSigECDSA},
	OpCheckSig:           {OpCheckSig, "OP_CHECKSIG", 1, opcodeCheckSig},
	OpCheckSigVerify:     {OpCheckSigVerify, "OP_CHECKSIGVERIFY", 1, opcodeCheckSigVerify},
	OpCheckMultiSig:      {OpCheckMultiSig, "OP_CHECKMULTISIG", 1, opcodeCheckMultiSig},
	OpCheckMultiSigVerify: {OpCheckMultiSigVerify, "OP_CHECKMULTISIGVERIFY", 1, opcodeCheckMultiSigVerify},

	// Undefined opcodes.
	OpUnknown166: {OpUnknown166, "OP_UNKNOWN166", 1, opcodeInvalid},
	OpUnknown167: {OpUnknown167, "OP_UNKNOWN167", 1, opcodeInvalid},
	// NOTE(fix): each entry's value field must equal its own opcode (the
	// array index). Entries 178-187 previously stored OpUnknown188-197 in
	// the value field, which would make parsedOpcode.bytes() re-serialize
	// these opcodes as the wrong byte (0xbc-0xc5 instead of 0xb2-0xbb).
	OpUnknown178: {OpUnknown178, "OP_UNKNOWN178", 1, opcodeInvalid},
	OpUnknown179: {OpUnknown179, "OP_UNKNOWN179", 1, opcodeInvalid},
	OpUnknown180: {OpUnknown180, "OP_UNKNOWN180", 1, opcodeInvalid},
	OpUnknown181: {OpUnknown181, "OP_UNKNOWN181", 1, opcodeInvalid},
	OpUnknown182: {OpUnknown182, "OP_UNKNOWN182", 1, opcodeInvalid},
	OpUnknown183: {OpUnknown183, "OP_UNKNOWN183", 1, opcodeInvalid},
	OpUnknown184: {OpUnknown184, "OP_UNKNOWN184", 1, opcodeInvalid},
	OpUnknown185: {OpUnknown185, "OP_UNKNOWN185", 1, opcodeInvalid},
	OpUnknown186: {OpUnknown186, "OP_UNKNOWN186", 1, opcodeInvalid},
	OpUnknown187: {OpUnknown187, "OP_UNKNOWN187", 1, opcodeInvalid},
	OpUnknown188: {OpUnknown188, "OP_UNKNOWN188", 1, opcodeInvalid},
	OpUnknown189: {OpUnknown189, "OP_UNKNOWN189", 1, opcodeInvalid},
	OpUnknown190: {OpUnknown190, "OP_UNKNOWN190", 1, opcodeInvalid},
	OpUnknown191: {OpUnknown191, "OP_UNKNOWN191", 1, opcodeInvalid},
	OpUnknown192: {OpUnknown192, "OP_UNKNOWN192", 1, opcodeInvalid},
	OpUnknown193: {OpUnknown193, "OP_UNKNOWN193", 1, opcodeInvalid},
	OpUnknown194: {OpUnknown194, "OP_UNKNOWN194", 1, opcodeInvalid},
	OpUnknown195: {OpUnknown195,
"OP_UNKNOWN195", 1, opcodeInvalid},
	OpUnknown196: {OpUnknown196, "OP_UNKNOWN196", 1, opcodeInvalid},
	OpUnknown197: {OpUnknown197, "OP_UNKNOWN197", 1, opcodeInvalid},
	OpUnknown198: {OpUnknown198, "OP_UNKNOWN198", 1, opcodeInvalid},
	OpUnknown199: {OpUnknown199, "OP_UNKNOWN199", 1, opcodeInvalid},
	OpUnknown200: {OpUnknown200, "OP_UNKNOWN200", 1, opcodeInvalid},
	OpUnknown201: {OpUnknown201, "OP_UNKNOWN201", 1, opcodeInvalid},
	OpUnknown202: {OpUnknown202, "OP_UNKNOWN202", 1, opcodeInvalid},
	OpUnknown203: {OpUnknown203, "OP_UNKNOWN203", 1, opcodeInvalid},
	OpUnknown204: {OpUnknown204, "OP_UNKNOWN204", 1, opcodeInvalid},
	OpUnknown205: {OpUnknown205, "OP_UNKNOWN205", 1, opcodeInvalid},
	OpUnknown206: {OpUnknown206, "OP_UNKNOWN206", 1, opcodeInvalid},
	OpUnknown207: {OpUnknown207, "OP_UNKNOWN207", 1, opcodeInvalid},
	OpUnknown208: {OpUnknown208, "OP_UNKNOWN208", 1, opcodeInvalid},
	OpUnknown209: {OpUnknown209, "OP_UNKNOWN209", 1, opcodeInvalid},
	OpUnknown210: {OpUnknown210, "OP_UNKNOWN210", 1, opcodeInvalid},
	OpUnknown211: {OpUnknown211, "OP_UNKNOWN211", 1, opcodeInvalid},
	OpUnknown212: {OpUnknown212, "OP_UNKNOWN212", 1, opcodeInvalid},
	OpUnknown213: {OpUnknown213, "OP_UNKNOWN213", 1, opcodeInvalid},
	OpUnknown214: {OpUnknown214, "OP_UNKNOWN214", 1, opcodeInvalid},
	OpUnknown215: {OpUnknown215, "OP_UNKNOWN215", 1, opcodeInvalid},
	OpUnknown216: {OpUnknown216, "OP_UNKNOWN216", 1, opcodeInvalid},
	OpUnknown217: {OpUnknown217, "OP_UNKNOWN217", 1, opcodeInvalid},
	OpUnknown218: {OpUnknown218, "OP_UNKNOWN218", 1, opcodeInvalid},
	OpUnknown219: {OpUnknown219, "OP_UNKNOWN219", 1, opcodeInvalid},
	OpUnknown220: {OpUnknown220, "OP_UNKNOWN220", 1, opcodeInvalid},
	OpUnknown221: {OpUnknown221, "OP_UNKNOWN221", 1, opcodeInvalid},
	OpUnknown222: {OpUnknown222, "OP_UNKNOWN222", 1, opcodeInvalid},
	OpUnknown223: {OpUnknown223, "OP_UNKNOWN223", 1, opcodeInvalid},
	OpUnknown224: {OpUnknown224, "OP_UNKNOWN224", 1, opcodeInvalid},
	OpUnknown225: {OpUnknown225, "OP_UNKNOWN225", 1, opcodeInvalid},
	OpUnknown226: {OpUnknown226, "OP_UNKNOWN226", 1, opcodeInvalid},
	OpUnknown227: {OpUnknown227, "OP_UNKNOWN227", 1, opcodeInvalid},
	OpUnknown228: {OpUnknown228, "OP_UNKNOWN228", 1, opcodeInvalid},
	OpUnknown229: {OpUnknown229, "OP_UNKNOWN229", 1, opcodeInvalid},
	OpUnknown230: {OpUnknown230, "OP_UNKNOWN230", 1, opcodeInvalid},
	OpUnknown231: {OpUnknown231, "OP_UNKNOWN231", 1, opcodeInvalid},
	OpUnknown232: {OpUnknown232, "OP_UNKNOWN232", 1, opcodeInvalid},
	OpUnknown233: {OpUnknown233, "OP_UNKNOWN233", 1, opcodeInvalid},
	OpUnknown234: {OpUnknown234, "OP_UNKNOWN234", 1, opcodeInvalid},
	OpUnknown235: {OpUnknown235, "OP_UNKNOWN235", 1, opcodeInvalid},
	OpUnknown236: {OpUnknown236, "OP_UNKNOWN236", 1, opcodeInvalid},
	OpUnknown237: {OpUnknown237, "OP_UNKNOWN237", 1, opcodeInvalid},
	OpUnknown238: {OpUnknown238, "OP_UNKNOWN238", 1, opcodeInvalid},
	OpUnknown239: {OpUnknown239, "OP_UNKNOWN239", 1, opcodeInvalid},
	OpUnknown240: {OpUnknown240, "OP_UNKNOWN240", 1, opcodeInvalid},
	OpUnknown241: {OpUnknown241, "OP_UNKNOWN241", 1, opcodeInvalid},
	OpUnknown242: {OpUnknown242, "OP_UNKNOWN242", 1, opcodeInvalid},
	OpUnknown243: {OpUnknown243, "OP_UNKNOWN243", 1, opcodeInvalid},
	OpUnknown244: {OpUnknown244, "OP_UNKNOWN244", 1, opcodeInvalid},
	OpUnknown245: {OpUnknown245, "OP_UNKNOWN245", 1, opcodeInvalid},
	OpUnknown246: {OpUnknown246, "OP_UNKNOWN246", 1, opcodeInvalid},
	OpUnknown247: {OpUnknown247, "OP_UNKNOWN247", 1, opcodeInvalid},
	OpUnknown248: {OpUnknown248, "OP_UNKNOWN248", 1, opcodeInvalid},
	OpUnknown249: {OpUnknown249, "OP_UNKNOWN249", 1, opcodeInvalid},

	OpSmallInteger: {OpSmallInteger, "OP_SMALLINTEGER", 1, opcodeInvalid},
	OpPubKeys:      {OpPubKeys, "OP_PUBKEYS", 1, opcodeInvalid},
	OpUnknown252:   {OpUnknown252, "OP_UNKNOWN252", 1, opcodeInvalid},
	OpPubKeyHash:   {OpPubKeyHash, "OP_PUBKEYHASH", 1, opcodeInvalid},
	OpPubKey:       {OpPubKey, "OP_PUBKEY", 1, opcodeInvalid},

	OpInvalidOpCode: {OpInvalidOpCode, "OP_INVALIDOPCODE", 1, opcodeInvalid},
}

// opcodeOnelineRepls defines opcode names which are replaced when doing a
// one-line disassembly. This is done to match the output of the reference
// implementation while not changing the opcode names in the nicer full
// disassembly.
var opcodeOnelineRepls = map[string]string{
	"OP_1NEGATE": "-1",
	"OP_0":       "0",
	"OP_1":       "1",
	"OP_2":       "2",
	"OP_3":       "3",
	"OP_4":       "4",
	"OP_5":       "5",
	"OP_6":       "6",
	"OP_7":       "7",
	"OP_8":       "8",
	"OP_9":       "9",
	"OP_10":      "10",
	"OP_11":      "11",
	"OP_12":      "12",
	"OP_13":      "13",
	"OP_14":      "14",
	"OP_15":      "15",
	"OP_16":      "16",
}

// parsedOpcode represents an opcode that has been parsed and includes any
// potential data associated with it.
type parsedOpcode struct {
	opcode *opcode // static opcode details from opcodeArray
	data   []byte  // pushed data, if any (nil for non-push opcodes)
}

// isDisabled returns whether or not the opcode is disabled and thus is always
// bad to see in the instruction stream (even if turned off by a conditional).
// The set of cases here mirrors the entries wired to opcodeDisabled in
// opcodeArray.
func (pop *parsedOpcode) isDisabled() bool {
	switch pop.opcode.value {
	case OpCat:
		return true
	case OpSubStr:
		return true
	case OpLeft:
		return true
	case OpRight:
		return true
	case OpInvert:
		return true
	case OpAnd:
		return true
	case OpOr:
		return true
	case OpXor:
		return true
	case Op2Mul:
		return true
	case Op2Div:
		return true
	case OpMul:
		return true
	case OpDiv:
		return true
	case OpMod:
		return true
	case OpLShift:
		return true
	case OpRShift:
		return true
	default:
		return false
	}
}

// alwaysIllegal returns whether or not the opcode is always illegal when passed
// over by the program counter even if in a non-executed branch (it isn't a
// coincidence that they are conditionals).
func (pop *parsedOpcode) alwaysIllegal() bool {
	switch pop.opcode.value {
	case OpVerIf:
		return true
	case OpVerNotIf:
		return true
	default:
		return false
	}
}

// isConditional returns whether or not the opcode is a conditional opcode which
// changes the conditional execution stack when executed.
func (pop *parsedOpcode) isConditional() bool {
	switch pop.opcode.value {
	case OpIf:
		return true
	case OpNotIf:
		return true
	case OpElse:
		return true
	case OpEndIf:
		return true
	default:
		return false
	}
}

// checkMinimalDataPush returns whether or not the current data push uses the
// smallest possible opcode to represent it. For example, the value 15 could
// be pushed with OP_DATA_1 15 (among other variations); however, OP_15 is a
// single opcode that represents the same value and is only a single byte versus
// two bytes.
//
// Returns a script error with code ErrMinimalData when a smaller encoding
// exists; nil otherwise.
func (pop *parsedOpcode) checkMinimalDataPush() error {
	data := pop.data
	dataLen := len(data)
	opcode := pop.opcode.value

	if dataLen == 0 && opcode != Op0 {
		str := fmt.Sprintf("zero length data push is encoded with "+
			"opcode %s instead of OP_0", pop.opcode.name)
		return scriptError(ErrMinimalData, str)
	} else if dataLen == 1 && data[0] >= 1 && data[0] <= 16 {
		if opcode != Op1+data[0]-1 {
			// Should have used OP_1 .. OP_16
			str := fmt.Sprintf("data push of the value %d encoded "+
				"with opcode %s instead of OP_%d", data[0],
				pop.opcode.name, data[0])
			return scriptError(ErrMinimalData, str)
		}
	} else if dataLen == 1 && data[0] == 0x81 {
		// 0x81 is the single-byte encoding of -1.
		if opcode != Op1Negate {
			str := fmt.Sprintf("data push of the value -1 encoded "+
				"with opcode %s instead of OP_1NEGATE",
				pop.opcode.name)
			return scriptError(ErrMinimalData, str)
		}
	} else if dataLen <= 75 {
		// OP_DATA_n opcodes have value n, so a direct comparison works.
		if int(opcode) != dataLen {
			// Should have used a direct push
			str := fmt.Sprintf("data push of %d bytes encoded "+
				"with opcode %s instead of OP_DATA_%d", dataLen,
				pop.opcode.name, dataLen)
			return scriptError(ErrMinimalData, str)
		}
	} else if dataLen <= 255 {
		if opcode != OpPushData1 {
			str := fmt.Sprintf("data push of %d bytes encoded "+
				"with opcode %s instead of OP_PUSHDATA1",
				dataLen, pop.opcode.name)
			return scriptError(ErrMinimalData, str)
		}
	} else if dataLen <= 65535 {
		if opcode != OpPushData2 {
			str := fmt.Sprintf("data push of %d bytes encoded "+
				"with opcode %s instead of OP_PUSHDATA2",
				dataLen, pop.opcode.name)
			return scriptError(ErrMinimalData, str)
		}
	}
	return nil
}

// print returns a human-readable string representation of the opcode for use
// in script disassembly.
func (pop *parsedOpcode) print(oneline bool) string {
	// The reference implementation one-line disassembly replaces opcodes
	// which represent values (e.g. OP_0 through OP_16 and OP_1NEGATE)
	// with the raw value. However, when not doing a one-line dissassembly,
	// we prefer to show the actual opcode names. Thus, only replace the
	// opcodes in question when the oneline flag is set.
	opcodeName := pop.opcode.name
	if oneline {
		if replName, ok := opcodeOnelineRepls[opcodeName]; ok {
			opcodeName = replName
		}

		// Nothing more to do for non-data push opcodes.
		if pop.opcode.length == 1 {
			return opcodeName
		}

		return fmt.Sprintf("%x", pop.data)
	}

	// Nothing more to do for non-data push opcodes.
	if pop.opcode.length == 1 {
		return opcodeName
	}

	// Add length for the OP_PUSHDATA# opcodes.
	retString := opcodeName
	switch pop.opcode.length {
	case -1:
		retString += fmt.Sprintf(" 0x%02x", len(pop.data))
	case -2:
		retString += fmt.Sprintf(" 0x%04x", len(pop.data))
	case -4:
		retString += fmt.Sprintf(" 0x%08x", len(pop.data))
	}

	return fmt.Sprintf("%s 0x%02x", retString, pop.data)
}

// bytes returns any data associated with the opcode encoded as it would be in
// a script. This is used for unparsing scripts from parsed opcodes.
//
// It returns a script error with code ErrInternal when the parsed opcode's
// data length is inconsistent with its declared length.
func (pop *parsedOpcode) bytes() ([]byte, error) {
	var retbytes []byte
	if pop.opcode.length > 0 {
		retbytes = make([]byte, 1, pop.opcode.length)
	} else {
		// Negative length: 1 opcode byte + |length| prefix bytes + data.
		retbytes = make([]byte, 1, 1+len(pop.data)-
			pop.opcode.length)
	}

	retbytes[0] = pop.opcode.value
	if pop.opcode.length == 1 {
		if len(pop.data) != 0 {
			str := fmt.Sprintf("internal consistency error - "+
				"parsed opcode %s has data length %d when %d "+
				"was expected", pop.opcode.name, len(pop.data),
				0)
			return nil, scriptError(ErrInternal, str)
		}
		return retbytes, nil
	}
	nbytes := pop.opcode.length
	if pop.opcode.length < 0 {
		l := len(pop.data)
		// tempting just to hardcode to avoid the complexity here.
		switch pop.opcode.length {
		case -1:
			retbytes = append(retbytes, byte(l))
			// nbytes is recomputed from the serialized prefix so the
			// final length check below validates the round trip.
			nbytes = int(retbytes[1]) + len(retbytes)
		case -2:
			retbytes = append(retbytes, byte(l&0xff),
				byte(l>>8&0xff))
			nbytes = int(binary.LittleEndian.Uint16(retbytes[1:])) +
				len(retbytes)
		case -4:
			retbytes = append(retbytes, byte(l&0xff),
				byte((l>>8)&0xff), byte((l>>16)&0xff),
				byte((l>>24)&0xff))
			nbytes = int(binary.LittleEndian.Uint32(retbytes[1:])) +
				len(retbytes)
		}
	}

	retbytes = append(retbytes, pop.data...)

	if len(retbytes) != nbytes {
		str := fmt.Sprintf("internal consistency error - "+
			"parsed opcode %s has data length %d when %d was "+
			"expected", pop.opcode.name, len(retbytes), nbytes)
		return nil, scriptError(ErrInternal, str)
	}

	return retbytes, nil
}

// *******************************************
// Opcode implementation functions start here.
// *******************************************

// opcodeDisabled is a common handler for disabled opcodes. It returns an
// appropriate error indicating the opcode is disabled. While it would
// ordinarily make more sense to detect if the script contains any disabled
// opcodes before executing in an initial parse step, the consensus rules
// dictate the script doesn't fail until the program counter passes over a
// disabled opcode (even when they appear in a branch that is not executed).
func opcodeDisabled(op *parsedOpcode, vm *Engine) error {
	str := fmt.Sprintf("attempt to execute disabled opcode %s",
		op.opcode.name)
	return scriptError(ErrDisabledOpcode, str)
}

// opcodeReserved is a common handler for all reserved opcodes. It returns an
// appropriate error indicating the opcode is reserved.
func opcodeReserved(op *parsedOpcode, vm *Engine) error {
	str := fmt.Sprintf("attempt to execute reserved opcode %s",
		op.opcode.name)
	return scriptError(ErrReservedOpcode, str)
}

// opcodeInvalid is a common handler for all invalid opcodes. It returns an
// appropriate error indicating the opcode is invalid.
// NOTE(review): this reuses ErrReservedOpcode rather than a dedicated
// invalid-opcode error code — presumably intentional (matches upstream);
// confirm before changing.
func opcodeInvalid(op *parsedOpcode, vm *Engine) error {
	str := fmt.Sprintf("attempt to execute invalid opcode %s",
		op.opcode.name)
	return scriptError(ErrReservedOpcode, str)
}

// opcodeFalse pushes an empty array to the data stack to represent false. Note
// that 0, when encoded as a number according to the numeric encoding consensus
// rules, is an empty array.
+func opcodeFalse(op *parsedOpcode, vm *Engine) error { + vm.dstack.PushByteArray(nil) + return nil +} + +// opcodePushData is a common handler for the vast majority of opcodes that push +// raw data (bytes) to the data stack. +func opcodePushData(op *parsedOpcode, vm *Engine) error { + vm.dstack.PushByteArray(op.data) + return nil +} + +// opcode1Negate pushes -1, encoded as a number, to the data stack. +func opcode1Negate(op *parsedOpcode, vm *Engine) error { + vm.dstack.PushInt(scriptNum(-1)) + return nil +} + +// opcodeN is a common handler for the small integer data push opcodes. It +// pushes the numeric value the opcode represents (which will be from 1 to 16) +// onto the data stack. +func opcodeN(op *parsedOpcode, vm *Engine) error { + // The opcodes are all defined consecutively, so the numeric value is + // the difference. + vm.dstack.PushInt(scriptNum((op.opcode.value - (Op1 - 1)))) + return nil +} + +// opcodeNop is a common handler for the NOP family of opcodes. As the name +// implies it generally does nothing, however, it will return an error when +// the flag to discourage use of NOPs is set for select opcodes. +func opcodeNop(op *parsedOpcode, vm *Engine) error { + return nil +} + +// popIfBool enforces the "minimal if" policy. In order to +// eliminate an additional source of nuisance malleability, we +// require the following: for OP_IF and OP_NOTIF, the top stack item MUST +// either be an empty byte slice, or [0x01]. Otherwise, the item at the top of +// the stack will be popped and interpreted as a boolean. 
+func popIfBool(vm *Engine) (bool, error) { + so, err := vm.dstack.PopByteArray() + if err != nil { + return false, err + } + + if len(so) == 1 && so[0] == 0x01 { + return true, nil + } + + if len(so) == 0 { + return false, nil + } + + str := fmt.Sprintf("with OP_IF or OP_NOTIF top stack item MUST "+ + "be an empty byte array or 0x01, and is instead: %x", + so) + return false, scriptError(ErrMinimalIf, str) +} + +// opcodeIf treats the top item on the data stack as a boolean and removes it. +// +// An appropriate entry is added to the conditional stack depending on whether +// the boolean is true and whether this if is on an executing branch in order +// to allow proper execution of further opcodes depending on the conditional +// logic. When the boolean is true, the first branch will be executed (unless +// this opcode is nested in a non-executed branch). +// +// if [statements] [else [statements]] endif +// +// Note that, unlike for all non-conditional opcodes, this is executed even when +// it is on a non-executing branch so proper nesting is maintained. +// +// Data stack transformation: [... bool] -> [...] +// Conditional stack transformation: [...] -> [... OpCondValue] +func opcodeIf(op *parsedOpcode, vm *Engine) error { + condVal := OpCondFalse + if vm.isBranchExecuting() { + ok, err := popIfBool(vm) + + if err != nil { + return err + } + + if ok { + condVal = OpCondTrue + } + } else { + condVal = OpCondSkip + } + vm.condStack = append(vm.condStack, condVal) + return nil +} + +// opcodeNotIf treats the top item on the data stack as a boolean and removes +// it. +// +// An appropriate entry is added to the conditional stack depending on whether +// the boolean is true and whether this if is on an executing branch in order +// to allow proper execution of further opcodes depending on the conditional +// logic. When the boolean is false, the first branch will be executed (unless +// this opcode is nested in a non-executed branch). 
+// +// notif [statements] [else [statements]] endif +// +// Note that, unlike for all non-conditional opcodes, this is executed even when +// it is on a non-executing branch so proper nesting is maintained. +// +// Data stack transformation: [... bool] -> [...] +// Conditional stack transformation: [...] -> [... OpCondValue] +func opcodeNotIf(op *parsedOpcode, vm *Engine) error { + condVal := OpCondFalse + if vm.isBranchExecuting() { + ok, err := popIfBool(vm) + if err != nil { + return err + } + + if !ok { + condVal = OpCondTrue + } + } else { + condVal = OpCondSkip + } + vm.condStack = append(vm.condStack, condVal) + return nil +} + +// opcodeElse inverts conditional execution for other half of if/else/endif. +// +// An error is returned if there has not already been a matching OP_IF. +// +// Conditional stack transformation: [... OpCondValue] -> [... !OpCondValue] +func opcodeElse(op *parsedOpcode, vm *Engine) error { + if len(vm.condStack) == 0 { + str := fmt.Sprintf("encountered opcode %s with no matching "+ + "opcode to begin conditional execution", op.opcode.name) + return scriptError(ErrUnbalancedConditional, str) + } + + conditionalIdx := len(vm.condStack) - 1 + switch vm.condStack[conditionalIdx] { + case OpCondTrue: + vm.condStack[conditionalIdx] = OpCondFalse + case OpCondFalse: + vm.condStack[conditionalIdx] = OpCondTrue + case OpCondSkip: + // Value doesn't change in skip since it indicates this opcode + // is nested in a non-executed branch. + } + return nil +} + +// opcodeEndif terminates a conditional block, removing the value from the +// conditional execution stack. +// +// An error is returned if there has not already been a matching OP_IF. +// +// Conditional stack transformation: [... OpCondValue] -> [...] 
+func opcodeEndif(op *parsedOpcode, vm *Engine) error {
+	if len(vm.condStack) == 0 {
+		str := fmt.Sprintf("encountered opcode %s with no matching "+
+			"opcode to begin conditional execution", op.opcode.name)
+		return scriptError(ErrUnbalancedConditional, str)
+	}
+
+	vm.condStack = vm.condStack[:len(vm.condStack)-1]
+	return nil
+}
+
+// abstractVerify examines the top item on the data stack as a boolean value and
+// verifies it evaluates to true. An error is returned either when there is no
+// item on the stack or when that item evaluates to false. In the latter case
+// where the verification fails specifically due to the top item evaluating
+// to false, the returned error will use the passed error code.
+func abstractVerify(op *parsedOpcode, vm *Engine, c ErrorCode) error {
+	verified, err := vm.dstack.PopBool()
+	if err != nil {
+		return err
+	}
+
+	if !verified {
+		str := fmt.Sprintf("%s failed", op.opcode.name)
+		return scriptError(c, str)
+	}
+	return nil
+}
+
+// opcodeVerify examines the top item on the data stack as a boolean value and
+// verifies it evaluates to true. An error is returned if it does not.
+func opcodeVerify(op *parsedOpcode, vm *Engine) error {
+	return abstractVerify(op, vm, ErrVerify)
+}
+
+// opcodeReturn returns an appropriate error since it is always an error to
+// return early from a script.
+func opcodeReturn(op *parsedOpcode, vm *Engine) error {
+	return scriptError(ErrEarlyReturn, "script returned early")
+}
+
+// verifyLockTimeWithThreshold verifies that txLockTime and lockTime are of the
+// same kind (both below or both at/above threshold) and that lockTime does not
+// exceed txLockTime. It combines verifyLockTimeThreshold and verifyLockTime.
+func verifyLockTimeWithThreshold(txLockTime, threshold, lockTime uint64) error {
+	err := verifyLockTimeThreshold(txLockTime, threshold, lockTime)
+	if err != nil {
+		return err
+	}
+	return verifyLockTime(txLockTime, lockTime)
+}
+
+// verifyLockTime is a helper function used to validate locktimes.
+func verifyLockTime(txLockTime, lockTime uint64) error {
+	if lockTime > txLockTime {
+		str := fmt.Sprintf("locktime requirement not satisfied -- "+
+			"locktime is greater than the transaction locktime: "+
+			"%d > %d", lockTime, txLockTime)
+		return scriptError(ErrUnsatisfiedLockTime, str)
+	}
+
+	return nil
+}
+
+// verifyLockTimeThreshold is a helper function used to verify the lockTimes in both the script and transaction have the same type.
+func verifyLockTimeThreshold(txLockTime, threshold, lockTime uint64) error {
+	if !((txLockTime < threshold && lockTime < threshold) ||
+		(txLockTime >= threshold && lockTime >= threshold)) {
+		str := fmt.Sprintf("mismatched locktime types -- tx locktime "+
+			"%d, stack locktime %d", txLockTime, lockTime)
+		return scriptError(ErrUnsatisfiedLockTime, str)
+	}
+
+	return nil
+}
+
+// opcodeCheckLockTimeVerify compares the top item on the data stack to the
+// LockTime field of the transaction containing the script signature
+// validating if the transaction outputs are spendable yet.
+func opcodeCheckLockTimeVerify(op *parsedOpcode, vm *Engine) error {
+	lockTimeBytes, err := vm.dstack.PopByteArray()
+	if err != nil {
+		return err
+	}
+
+	// Make sure lockTimeBytes is exactly 8 bytes.
+	// If more - return ErrNumberTooBig
+	// If less - pad with 0's
+	if len(lockTimeBytes) > 8 {
+		str := fmt.Sprintf("lockTime value represented as %x is longer than 8 bytes", lockTimeBytes)
+		return scriptError(ErrNumberTooBig, str)
+	}
+	if len(lockTimeBytes) < 8 {
+		paddedLockTimeBytes := make([]byte, 8)
+		copy(paddedLockTimeBytes, lockTimeBytes)
+		lockTimeBytes = paddedLockTimeBytes
+	}
+	stackLockTime := binary.LittleEndian.Uint64(lockTimeBytes)
+	// The lock time field of a transaction is either a DAA score at
+	// which the transaction is finalized or a timestamp depending on if the
+	// value is before the constants.LockTimeThreshold. When it is under the
+	// threshold it is a DAA score. 
+ err = verifyLockTimeWithThreshold(vm.tx.LockTime, constants.LockTimeThreshold, stackLockTime) + if err != nil { + return err + } + + // The lock time feature can also be disabled, thereby bypassing + // OP_CHECKLOCKTIMEVERIFY, if every transaction input has been finalized by + // setting its sequence to the maximum value (constants.MaxTxInSequenceNum). This + // condition would result in the transaction being allowed into the blockDAG + // making the opcode ineffective. + // + // This condition is prevented by enforcing that the input being used by + // the opcode is unlocked (its sequence number is less than the max + // value). This is sufficient to prove correctness without having to + // check every input. + // + // NOTE: This implies that even if the transaction is not finalized due to + // another input being unlocked, the opcode execution will still fail when the + // input being used by the opcode is locked. + if vm.tx.Inputs[vm.txIdx].Sequence == constants.MaxTxInSequenceNum { + return scriptError(ErrUnsatisfiedLockTime, + "transaction input is finalized") + } + + return nil +} + +// opcodeCheckSequenceVerify compares the top item on the data stack to the +// LockTime field of the transaction containing the script signature +// validating if the transaction outputs are spendable yet. +func opcodeCheckSequenceVerify(op *parsedOpcode, vm *Engine) error { + sequenceBytes, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + + // Make sure sequenceBytes is exactly 8 bytes. 
+	// If more - return ErrNumberTooBig
+	// If less - pad with 0's
+	if len(sequenceBytes) > 8 {
+		str := fmt.Sprintf("sequence value represented as %x is longer than 8 bytes", sequenceBytes)
+		return scriptError(ErrNumberTooBig, str)
+	}
+	if len(sequenceBytes) < 8 {
+		paddedSequenceBytes := make([]byte, 8)
+		copy(paddedSequenceBytes, sequenceBytes)
+		sequenceBytes = paddedSequenceBytes
+	}
+
+	// Don't use makeScriptNum here, since sequence is not an actual number, minimal encoding rules don't apply to it,
+	// and is more convenient to be represented as an unsigned int.
+	stackSequence := binary.LittleEndian.Uint64(sequenceBytes)
+
+	// To provide for future soft-fork extensibility, if the
+	// operand has the disabled lock-time flag set,
+	// CHECKSEQUENCEVERIFY behaves as a NOP.
+	if stackSequence&constants.SequenceLockTimeDisabled != 0 {
+		return nil
+	}
+
+	// Sequence numbers with their most significant bit set are not
+	// consensus constrained. Testing that the transaction's sequence
+	// number does not have this bit set prevents using this property
+	// to get around a CHECKSEQUENCEVERIFY check.
+	txSequence := vm.tx.Inputs[vm.txIdx].Sequence
+	if txSequence&constants.SequenceLockTimeDisabled != 0 {
+		str := fmt.Sprintf("transaction sequence has sequence "+
+			"locktime disabled bit set: 0x%x", txSequence)
+		return scriptError(ErrUnsatisfiedLockTime, str)
+	}
+
+	// Mask off non-consensus bits before doing comparisons.
+	maskedTxSequence := txSequence & constants.SequenceLockTimeMask
+	maskedStackSequence := stackSequence & constants.SequenceLockTimeMask
+	return verifyLockTime(maskedTxSequence, maskedStackSequence)
+}
+
+// opcodeToAltStack removes the top item from the main data stack and pushes it
+// onto the alternate data stack.
+//
+// Main data stack transformation: [... x1 x2 x3] -> [... x1 x2]
+// Alt data stack transformation: [... y1 y2 y3] -> [... 
y1 y2 y3 x3] +func opcodeToAltStack(op *parsedOpcode, vm *Engine) error { + so, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + vm.astack.PushByteArray(so) + + return nil +} + +// opcodeFromAltStack removes the top item from the alternate data stack and +// pushes it onto the main data stack. +// +// Main data stack transformation: [... x1 x2 x3] -> [... x1 x2 x3 y3] +// Alt data stack transformation: [... y1 y2 y3] -> [... y1 y2] +func opcodeFromAltStack(op *parsedOpcode, vm *Engine) error { + so, err := vm.astack.PopByteArray() + if err != nil { + return err + } + vm.dstack.PushByteArray(so) + + return nil +} + +// opcode2Drop removes the top 2 items from the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1] +func opcode2Drop(op *parsedOpcode, vm *Engine) error { + return vm.dstack.DropN(2) +} + +// opcode2Dup duplicates the top 2 items on the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1 x2 x3 x2 x3] +func opcode2Dup(op *parsedOpcode, vm *Engine) error { + return vm.dstack.DupN(2) +} + +// opcode3Dup duplicates the top 3 items on the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1 x2 x3 x1 x2 x3] +func opcode3Dup(op *parsedOpcode, vm *Engine) error { + return vm.dstack.DupN(3) +} + +// opcode2Over duplicates the 2 items before the top 2 items on the data stack. +// +// Stack transformation: [... x1 x2 x3 x4] -> [... x1 x2 x3 x4 x1 x2] +func opcode2Over(op *parsedOpcode, vm *Engine) error { + return vm.dstack.OverN(2) +} + +// opcode2Rot rotates the top 6 items on the data stack to the left twice. +// +// Stack transformation: [... x1 x2 x3 x4 x5 x6] -> [... x3 x4 x5 x6 x1 x2] +func opcode2Rot(op *parsedOpcode, vm *Engine) error { + return vm.dstack.RotN(2) +} + +// opcode2Swap swaps the top 2 items on the data stack with the 2 that come +// before them. +// +// Stack transformation: [... x1 x2 x3 x4] -> [... 
x3 x4 x1 x2] +func opcode2Swap(op *parsedOpcode, vm *Engine) error { + return vm.dstack.SwapN(2) +} + +// opcodeIfDup duplicates the top item of the stack if it is not zero. +// +// Stack transformation (x1==0): [... x1] -> [... x1] +// Stack transformation (x1!=0): [... x1] -> [... x1 x1] +func opcodeIfDup(op *parsedOpcode, vm *Engine) error { + so, err := vm.dstack.PeekByteArray(0) + if err != nil { + return err + } + + // Push copy of data iff it isn't zero + if asBool(so) { + vm.dstack.PushByteArray(so) + } + + return nil +} + +// opcodeDepth pushes the depth of the data stack prior to executing this +// opcode, encoded as a number, onto the data stack. +// +// Stack transformation: [...] -> [... ] +// Example with 2 items: [x1 x2] -> [x1 x2 2] +// Example with 3 items: [x1 x2 x3] -> [x1 x2 x3 3] +func opcodeDepth(op *parsedOpcode, vm *Engine) error { + vm.dstack.PushInt(scriptNum(vm.dstack.Depth())) + return nil +} + +// opcodeDrop removes the top item from the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1 x2] +func opcodeDrop(op *parsedOpcode, vm *Engine) error { + return vm.dstack.DropN(1) +} + +// opcodeDup duplicates the top item on the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1 x2 x3 x3] +func opcodeDup(op *parsedOpcode, vm *Engine) error { + return vm.dstack.DupN(1) +} + +// opcodeNip removes the item before the top item on the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1 x3] +func opcodeNip(op *parsedOpcode, vm *Engine) error { + return vm.dstack.NipN(1) +} + +// opcodeOver duplicates the item before the top item on the data stack. +// +// Stack transformation: [... x1 x2 x3] -> [... x1 x2 x3 x2] +func opcodeOver(op *parsedOpcode, vm *Engine) error { + return vm.dstack.OverN(1) +} + +// opcodePick treats the top item on the data stack as an integer and duplicates +// the item on the stack that number of items back to the top. +// +// Stack transformation: [xn ... 
x2 x1 x0 n] -> [xn ... x2 x1 x0 xn] +// Example with n=1: [x2 x1 x0 1] -> [x2 x1 x0 x1] +// Example with n=2: [x2 x1 x0 2] -> [x2 x1 x0 x2] +func opcodePick(op *parsedOpcode, vm *Engine) error { + val, err := vm.dstack.PopInt() + if err != nil { + return err + } + + return vm.dstack.PickN(val.Int32()) +} + +// opcodeRoll treats the top item on the data stack as an integer and moves +// the item on the stack that number of items back to the top. +// +// Stack transformation: [xn ... x2 x1 x0 n] -> [... x2 x1 x0 xn] +// Example with n=1: [x2 x1 x0 1] -> [x2 x0 x1] +// Example with n=2: [x2 x1 x0 2] -> [x1 x0 x2] +func opcodeRoll(op *parsedOpcode, vm *Engine) error { + val, err := vm.dstack.PopInt() + if err != nil { + return err + } + + return vm.dstack.RollN(val.Int32()) +} + +// opcodeRot rotates the top 3 items on the data stack to the left. +// +// Stack transformation: [... x1 x2 x3] -> [... x2 x3 x1] +func opcodeRot(op *parsedOpcode, vm *Engine) error { + return vm.dstack.RotN(1) +} + +// opcodeSwap swaps the top two items on the stack. +// +// Stack transformation: [... x1 x2] -> [... x2 x1] +func opcodeSwap(op *parsedOpcode, vm *Engine) error { + return vm.dstack.SwapN(1) +} + +// opcodeTuck inserts a duplicate of the top item of the data stack before the +// second-to-top item. +// +// Stack transformation: [... x1 x2] -> [... x2 x1 x2] +func opcodeTuck(op *parsedOpcode, vm *Engine) error { + return vm.dstack.Tuck() +} + +// opcodeSize pushes the size of the top item of the data stack onto the data +// stack. +// +// Stack transformation: [... x1] -> [... x1 len(x1)] +func opcodeSize(op *parsedOpcode, vm *Engine) error { + so, err := vm.dstack.PeekByteArray(0) + if err != nil { + return err + } + + vm.dstack.PushInt(scriptNum(len(so))) + return nil +} + +// opcodeEqual removes the top 2 items of the data stack, compares them as raw +// bytes, and pushes the result, encoded as a boolean, back to the stack. +// +// Stack transformation: [... x1 x2] -> [... 
bool] +func opcodeEqual(op *parsedOpcode, vm *Engine) error { + a, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + b, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + + vm.dstack.PushBool(bytes.Equal(a, b)) + return nil +} + +// opcodeEqualVerify is a combination of opcodeEqual and opcodeVerify. +// Specifically, it removes the top 2 items of the data stack, compares them, +// and pushes the result, encoded as a boolean, back to the stack. Then, it +// examines the top item on the data stack as a boolean value and verifies it +// evaluates to true. An error is returned if it does not. +// +// Stack transformation: [... x1 x2] -> [... bool] -> [...] +func opcodeEqualVerify(op *parsedOpcode, vm *Engine) error { + err := opcodeEqual(op, vm) + if err == nil { + err = abstractVerify(op, vm, ErrEqualVerify) + } + return err +} + +// opcode1Add treats the top item on the data stack as an integer and replaces +// it with its incremented value (plus 1). +// +// Stack transformation: [... x1 x2] -> [... x1 x2+1] +func opcode1Add(op *parsedOpcode, vm *Engine) error { + m, err := vm.dstack.PopInt() + if err != nil { + return err + } + + vm.dstack.PushInt(m + 1) + return nil +} + +// opcode1Sub treats the top item on the data stack as an integer and replaces +// it with its decremented value (minus 1). +// +// Stack transformation: [... x1 x2] -> [... x1 x2-1] +func opcode1Sub(op *parsedOpcode, vm *Engine) error { + m, err := vm.dstack.PopInt() + if err != nil { + return err + } + vm.dstack.PushInt(m - 1) + + return nil +} + +// opcodeNegate treats the top item on the data stack as an integer and replaces +// it with its negation. +// +// Stack transformation: [... x1 x2] -> [... 
x1 -x2]
+func opcodeNegate(op *parsedOpcode, vm *Engine) error {
+	m, err := vm.dstack.PopInt()
+	if err != nil {
+		return err
+	}
+
+	vm.dstack.PushInt(-m)
+	return nil
+}
+
+// opcodeAbs treats the top item on the data stack as an integer and replaces
+// it with its absolute value.
+//
+// Stack transformation: [... x1 x2] -> [... x1 abs(x2)]
+func opcodeAbs(op *parsedOpcode, vm *Engine) error {
+	m, err := vm.dstack.PopInt()
+	if err != nil {
+		return err
+	}
+
+	if m < 0 {
+		m = -m
+	}
+	vm.dstack.PushInt(m)
+	return nil
+}
+
+// opcodeNot treats the top item on the data stack as an integer and replaces
+// it with its "inverted" value (0 becomes 1, non-zero becomes 0).
+//
+// NOTE: While it would probably make more sense to treat the top item as a
+// boolean, and push the opposite, which is really what the intention of this
+// opcode is, it is extremely important that is not done because integers are
+// interpreted differently than booleans and the consensus rules for this opcode
+// dictate the item is interpreted as an integer.
+//
+// Stack transformation (x2==0): [... x1 0] -> [... x1 1]
+// Stack transformation (x2!=0): [... x1 1] -> [... x1 0]
+// Stack transformation (x2!=0): [... x1 17] -> [... x1 0]
+func opcodeNot(op *parsedOpcode, vm *Engine) error {
+	m, err := vm.dstack.PopInt()
+	if err != nil {
+		return err
+	}
+
+	if m == 0 {
+		vm.dstack.PushInt(scriptNum(1))
+	} else {
+		vm.dstack.PushInt(scriptNum(0))
+	}
+	return nil
+}
+
+// opcode0NotEqual treats the top item on the data stack as an integer and
+// replaces it with either a 0 if it is zero, or a 1 if it is not zero.
+//
+// Stack transformation (x2==0): [... x1 0] -> [... x1 0]
+// Stack transformation (x2!=0): [... x1 1] -> [... x1 1]
+// Stack transformation (x2!=0): [... x1 17] -> [... 
x1 1] +func opcode0NotEqual(op *parsedOpcode, vm *Engine) error { + m, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if m != 0 { + m = 1 + } + vm.dstack.PushInt(m) + return nil +} + +// opcodeAdd treats the top two items on the data stack as integers and replaces +// them with their sum. +// +// Stack transformation: [... x1 x2] -> [... x1+x2] +func opcodeAdd(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + vm.dstack.PushInt(v0 + v1) + return nil +} + +// opcodeSub treats the top two items on the data stack as integers and replaces +// them with the result of subtracting the top entry from the second-to-top +// entry. +// +// Stack transformation: [... x1 x2] -> [... x1-x2] +func opcodeSub(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + vm.dstack.PushInt(v1 - v0) + return nil +} + +// opcodeBoolAnd treats the top two items on the data stack as integers. When +// both of them are not zero, they are replaced with a 1, otherwise a 0. +// +// Stack transformation (x1==0, x2==0): [... 0 0] -> [... 0] +// Stack transformation (x1!=0, x2==0): [... 5 0] -> [... 0] +// Stack transformation (x1==0, x2!=0): [... 0 7] -> [... 0] +// Stack transformation (x1!=0, x2!=0): [... 4 8] -> [... 1] +func opcodeBoolAnd(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v0 != 0 && v1 != 0 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + + return nil +} + +// opcodeBoolOr treats the top two items on the data stack as integers. When +// either of them are not zero, they are replaced with a 1, otherwise a 0. 
+// +// Stack transformation (x1==0, x2==0): [... 0 0] -> [... 0] +// Stack transformation (x1!=0, x2==0): [... 5 0] -> [... 1] +// Stack transformation (x1==0, x2!=0): [... 0 7] -> [... 1] +// Stack transformation (x1!=0, x2!=0): [... 4 8] -> [... 1] +func opcodeBoolOr(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v0 != 0 || v1 != 0 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + + return nil +} + +// opcodeNumEqual treats the top two items on the data stack as integers. When +// they are equal, they are replaced with a 1, otherwise a 0. +// +// Stack transformation (x1==x2): [... 5 5] -> [... 1] +// Stack transformation (x1!=x2): [... 5 7] -> [... 0] +func opcodeNumEqual(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v0 == v1 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + + return nil +} + +// opcodeNumEqualVerify is a combination of opcodeNumEqual and opcodeVerify. +// +// Specifically, treats the top two items on the data stack as integers. When +// they are equal, they are replaced with a 1, otherwise a 0. Then, it examines +// the top item on the data stack as a boolean value and verifies it evaluates +// to true. An error is returned if it does not. +// +// Stack transformation: [... x1 x2] -> [... bool] -> [...] +func opcodeNumEqualVerify(op *parsedOpcode, vm *Engine) error { + err := opcodeNumEqual(op, vm) + if err == nil { + err = abstractVerify(op, vm, ErrNumEqualVerify) + } + return err +} + +// opcodeNumNotEqual treats the top two items on the data stack as integers. +// When they are NOT equal, they are replaced with a 1, otherwise a 0. +// +// Stack transformation (x1==x2): [... 5 5] -> [... 
0] +// Stack transformation (x1!=x2): [... 5 7] -> [... 1] +func opcodeNumNotEqual(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v0 != v1 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + + return nil +} + +// opcodeLessThan treats the top two items on the data stack as integers. When +// the second-to-top item is less than the top item, they are replaced with a 1, +// otherwise a 0. +// +// Stack transformation: [... x1 x2] -> [... bool] +func opcodeLessThan(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v1 < v0 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + + return nil +} + +// opcodeGreaterThan treats the top two items on the data stack as integers. +// When the second-to-top item is greater than the top item, they are replaced +// with a 1, otherwise a 0. +// +// Stack transformation: [... x1 x2] -> [... bool] +func opcodeGreaterThan(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v1 > v0 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + return nil +} + +// opcodeLessThanOrEqual treats the top two items on the data stack as integers. +// When the second-to-top item is less than or equal to the top item, they are +// replaced with a 1, otherwise a 0. +// +// Stack transformation: [... x1 x2] -> [... 
bool] +func opcodeLessThanOrEqual(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v1 <= v0 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + return nil +} + +// opcodeGreaterThanOrEqual treats the top two items on the data stack as +// integers. When the second-to-top item is greater than or equal to the top +// item, they are replaced with a 1, otherwise a 0. +// +// Stack transformation: [... x1 x2] -> [... bool] +func opcodeGreaterThanOrEqual(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v1 >= v0 { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + + return nil +} + +// opcodeMin treats the top two items on the data stack as integers and replaces +// them with the minimum of the two. +// +// Stack transformation: [... x1 x2] -> [... min(x1, x2)] +func opcodeMin(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v1 < v0 { + vm.dstack.PushInt(v1) + } else { + vm.dstack.PushInt(v0) + } + + return nil +} + +// opcodeMax treats the top two items on the data stack as integers and replaces +// them with the maximum of the two. +// +// Stack transformation: [... x1 x2] -> [... max(x1, x2)] +func opcodeMax(op *parsedOpcode, vm *Engine) error { + v0, err := vm.dstack.PopInt() + if err != nil { + return err + } + + v1, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if v1 > v0 { + vm.dstack.PushInt(v1) + } else { + vm.dstack.PushInt(v0) + } + + return nil +} + +// opcodeWithin treats the top 3 items on the data stack as integers. 
When the +// value to test is within the specified range (left inclusive), they are +// replaced with a 1, otherwise a 0. +// +// The top item is the max value, the second-top-item is the minimum value, and +// the third-to-top item is the value to test. +// +// Stack transformation: [... x1 min max] -> [... bool] +func opcodeWithin(op *parsedOpcode, vm *Engine) error { + maxVal, err := vm.dstack.PopInt() + if err != nil { + return err + } + + minVal, err := vm.dstack.PopInt() + if err != nil { + return err + } + + x, err := vm.dstack.PopInt() + if err != nil { + return err + } + + if x >= minVal && x < maxVal { + vm.dstack.PushInt(scriptNum(1)) + } else { + vm.dstack.PushInt(scriptNum(0)) + } + return nil +} + +// calcHash calculates the hash of hasher over buf. +func calcHash(buf []byte, hasher hash.Hash) []byte { + hasher.Write(buf) + return hasher.Sum(nil) +} + +// opcodeSha256 treats the top item of the data stack as raw bytes and replaces +// it with sha256(data). +// +// Stack transformation: [... x1] -> [... sha256(x1)] +func opcodeSha256(op *parsedOpcode, vm *Engine) error { + buf, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + + hash := sha256.Sum256(buf) + vm.dstack.PushByteArray(hash[:]) + return nil +} + +// opcodeBlake2b treats the top item of the data stack as raw bytes and replaces +// it with blake2b(data). +// +// Stack transformation: [... x1] -> [... blake2b(x1)] +func opcodeBlake2b(op *parsedOpcode, vm *Engine) error { + buf, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + hash := blake2b.Sum256(buf) + vm.dstack.PushByteArray(hash[:]) + return nil +} + +// opcodeCheckSig treats the top 2 items on the stack as a public key and a +// signature and replaces them with a bool which indicates if the signature was +// successfully verified. +// +// The process of verifying a signature requires calculating a signature hash in +// the same way the transaction signer did. 
It involves hashing portions of the
+// transaction based on the hash type byte (which is the final byte of the
+// signature) and the script.
+// Once this "script hash" is calculated, the signature is checked using standard
+// cryptographic methods against the provided public key.
+//
+// Stack transformation: [... signature pubkey] -> [... bool]
+func opcodeCheckSig(op *parsedOpcode, vm *Engine) error {
+	pkBytes, err := vm.dstack.PopByteArray()
+	if err != nil {
+		return err
+	}
+
+	fullSigBytes, err := vm.dstack.PopByteArray()
+	if err != nil {
+		return err
+	}
+
+	// The signature actually needs to be longer than this, but at
+	// least 1 byte is needed for the hash type below. The full length is
+	// checked depending on the script flags and upon parsing the signature.
+	if len(fullSigBytes) < 1 {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+
+	// Trim off hashtype from the signature string and check if the
+	// signature and pubkey conform to the strict encoding requirements
+	// depending on the flags.
+	//
+	// NOTE: When the strict encoding flags are set, any errors in the
+	// signature or public encoding here result in an immediate script error
+	// (and thus no result bool is pushed to the data stack). This differs
+	// from the logic below where any errors in parsing the signature is
+	// treated as the signature failure resulting in false being pushed to
+	// the data stack. This is required because the more general script
+	// validation consensus rules do not have the new strict encoding
+	// requirements enabled by the flags.
+	hashType := consensushashing.SigHashType(fullSigBytes[len(fullSigBytes)-1])
+	sigBytes := fullSigBytes[:len(fullSigBytes)-1]
+	if !hashType.IsStandardSigHashType() {
+		return scriptError(ErrInvalidSigHashType, fmt.Sprintf("invalid hash type 0x%x", hashType))
+	}
+	if err := vm.checkSignatureLength(sigBytes); err != nil {
+		return err
+	}
+	if err := vm.checkPubKeyEncoding(pkBytes); err != nil {
+		return err
+	}
+
+	// Generate the signature hash based on the signature hash type.
+	sigHash, err := consensushashing.CalculateSignatureHashSchnorr(&vm.tx, vm.txIdx, hashType, vm.sigHashReusedValues)
+	if err != nil {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+
+	pubKey, err := secp256k1.DeserializeSchnorrPubKey(pkBytes)
+	if err != nil {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+	signature, err := secp256k1.DeserializeSchnorrSignatureFromSlice(sigBytes)
+	if err != nil {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+
+	var valid bool
+	secpHash := secp256k1.Hash(*sigHash.ByteArray())
+	if vm.sigCache != nil {
+
+		valid = vm.sigCache.Exists(secpHash, signature, pubKey)
+		if !valid && pubKey.SchnorrVerify(&secpHash, signature) {
+			vm.sigCache.Add(secpHash, signature, pubKey)
+			valid = true
+		}
+	} else {
+		valid = pubKey.SchnorrVerify(&secpHash, signature)
+	}
+
+	if !valid && len(sigBytes) > 0 {
+		str := "signature not empty on failed checksig"
+		return scriptError(ErrNullFail, str)
+	}
+
+	vm.dstack.PushBool(valid)
+	return nil
+}
+
+// opcodeCheckSigECDSA treats the top 2 items on the stack as a public key and
+// an ECDSA signature and replaces them with a bool which indicates if the
+// signature was successfully verified. It mirrors opcodeCheckSig but uses
+// ECDSA rather than Schnorr verification.
+//
+// Stack transformation: [... signature pubkey] -> [... bool]
+func opcodeCheckSigECDSA(op *parsedOpcode, vm *Engine) error {
+	pkBytes, err := vm.dstack.PopByteArray()
+	if err != nil {
+		return err
+	}
+
+	fullSigBytes, err := vm.dstack.PopByteArray()
+	if err != nil {
+		return err
+	}
+
+	// The signature actually needs to be longer than this, but at
+	// least 1 byte is needed for the hash type below. The full length is
+	// checked depending on the script flags and upon parsing the signature.
+ if len(fullSigBytes) < 1 { + vm.dstack.PushBool(false) + return nil + } + + // Trim off hashtype from the signature string and check if the + // signature and pubkey conform to the strict encoding requirements + // depending on the flags. + // + // NOTE: When the strict encoding flags are set, any errors in the + // signature or public encoding here result in an immediate script error + // (and thus no result bool is pushed to the data stack). This differs + // from the logic below where any errors in parsing the signature is + // treated as the signature failure resulting in false being pushed to + // the data stack. This is required because the more general script + // validation consensus rules do not have the new strict encoding + // requirements enabled by the flags. + hashType := consensushashing.SigHashType(fullSigBytes[len(fullSigBytes)-1]) + sigBytes := fullSigBytes[:len(fullSigBytes)-1] + if !hashType.IsStandardSigHashType() { + return scriptError(ErrInvalidSigHashType, fmt.Sprintf("invalid hash type 0x%x", hashType)) + } + if err := vm.checkSignatureLengthECDSA(sigBytes); err != nil { + return err + } + if err := vm.checkPubKeyEncodingECDSA(pkBytes); err != nil { + return err + } + + // Generate the signature hash based on the signature hash type. 
+	sigHash, err := consensushashing.CalculateSignatureHashECDSA(&vm.tx, vm.txIdx, hashType, vm.sigHashReusedValues)
+	if err != nil {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+
+	pubKey, err := secp256k1.DeserializeECDSAPubKey(pkBytes)
+	if err != nil {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+	signature, err := secp256k1.DeserializeECDSASignatureFromSlice(sigBytes)
+	if err != nil {
+		vm.dstack.PushBool(false)
+		return nil
+	}
+
+	var valid bool
+	secpHash := secp256k1.Hash(*sigHash.ByteArray())
+	if vm.sigCacheECDSA != nil {
+
+		valid = vm.sigCacheECDSA.Exists(secpHash, signature, pubKey)
+		if !valid && pubKey.ECDSAVerify(&secpHash, signature) {
+			vm.sigCacheECDSA.Add(secpHash, signature, pubKey)
+			valid = true
+		}
+	} else {
+		valid = pubKey.ECDSAVerify(&secpHash, signature)
+	}
+
+	if !valid && len(sigBytes) > 0 {
+		str := "signature not empty on failed checksig"
+		return scriptError(ErrNullFail, str)
+	}
+
+	vm.dstack.PushBool(valid)
+	return nil
+}
+
+// opcodeCheckSigVerify is a combination of opcodeCheckSig and opcodeVerify.
+// The opcodeCheckSig function is invoked followed by opcodeVerify. See the
+// documentation for each of those opcodes for more details.
+//
+// Stack transformation: [... signature pubkey] -> [... bool] -> [...]
+func opcodeCheckSigVerify(op *parsedOpcode, vm *Engine) error {
+	err := opcodeCheckSig(op, vm)
+	if err == nil {
+		err = abstractVerify(op, vm, ErrCheckSigVerify)
+	}
+	return err
+}
+
+// parsedSigInfo houses a raw signature along with its parsed form and a flag
+// for whether or not it has already been parsed. It is used to prevent parsing
+// the same signature multiple times when verifying a multisig.
+type parsedSigInfo struct { + signature []byte + parsedSignature *secp256k1.SchnorrSignature + parsed bool +} + +type parsedSigInfoECDSA struct { + signature []byte + parsedSignature *secp256k1.ECDSASignature + parsed bool +} + +// opcodeCheckMultiSig treats the top item on the stack as an integer number of +// public keys, followed by that many entries as raw data representing the public +// keys, followed by the integer number of signatures, followed by that many +// entries as raw data representing the signatures. +// +// All of the aforementioned stack items are replaced with a bool which +// indicates if the requisite number of signatures were successfully verified. +// +// See the opcodeCheckSigVerify documentation for more details about the process +// for verifying each signature. +// +// Stack transformation: +// [... [sig ...] numsigs [pubkey ...] numpubkeys] -> [... bool] +func opcodeCheckMultiSig(op *parsedOpcode, vm *Engine) error { + numKeys, err := vm.dstack.PopInt() + if err != nil { + return err + } + + numPubKeys := int(numKeys.Int32()) + if numPubKeys < 0 { + str := fmt.Sprintf("number of pubkeys %d is negative", + numPubKeys) + return scriptError(ErrInvalidPubKeyCount, str) + } + if numPubKeys > MaxPubKeysPerMultiSig { + str := fmt.Sprintf("too many pubkeys: %d > %d", + numPubKeys, MaxPubKeysPerMultiSig) + return scriptError(ErrInvalidPubKeyCount, str) + } + vm.numOps += numPubKeys + if vm.numOps > MaxOpsPerScript { + str := fmt.Sprintf("exceeded max operation limit of %d", + MaxOpsPerScript) + return scriptError(ErrTooManyOperations, str) + } + + pubKeys := make([][]byte, 0, numPubKeys) + for i := 0; i < numPubKeys; i++ { + pubKey, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + pubKeys = append(pubKeys, pubKey) + } + + numSigs, err := vm.dstack.PopInt() + if err != nil { + return err + } + numSignatures := int(numSigs.Int32()) + if numSignatures < 0 { + str := fmt.Sprintf("number of signatures %d is negative", + 
numSignatures) + return scriptError(ErrInvalidSignatureCount, str) + + } + if numSignatures > numPubKeys { + str := fmt.Sprintf("more signatures than pubkeys: %d > %d", + numSignatures, numPubKeys) + return scriptError(ErrInvalidSignatureCount, str) + } + + signatures := make([]*parsedSigInfo, 0, numSignatures) + for i := 0; i < numSignatures; i++ { + signature, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + sigInfo := &parsedSigInfo{signature: signature} + signatures = append(signatures, sigInfo) + } + + success := true + numPubKeys++ + pubKeyIdx := -1 + signatureIdx := 0 + + for numSignatures > 0 { + // When there are more signatures than public keys remaining, + // there is no way to succeed since too many signatures are + // invalid, so exit early. + pubKeyIdx++ + numPubKeys-- + if numSignatures > numPubKeys { + success = false + break + } + + sigInfo := signatures[signatureIdx] + pubKey := pubKeys[pubKeyIdx] + + // The order of the signature and public key evaluation is + // important here since it can be distinguished by an + // OP_CHECKMULTISIG NOT when the strict encoding flag is set. + + rawSig := sigInfo.signature + if len(rawSig) == 0 { + // Skip to the next pubkey if signature is empty. + continue + } + + // Split the signature into hash type and signature components. + hashType := consensushashing.SigHashType(rawSig[len(rawSig)-1]) + signature := rawSig[:len(rawSig)-1] + + // Only parse and check the signature encoding once. + var parsedSig *secp256k1.SchnorrSignature + if !sigInfo.parsed { + if !hashType.IsStandardSigHashType() { + return scriptError(ErrInvalidSigHashType, fmt.Sprintf("invalid hash type 0x%x", hashType)) + } + if err := vm.checkSignatureLength(signature); err != nil { + return err + } + + // Parse the signature. 
+ parsedSig, err = secp256k1.DeserializeSchnorrSignatureFromSlice(signature) + sigInfo.parsed = true + if err != nil { + continue + } + + sigInfo.parsedSignature = parsedSig + } else { + // Skip to the next pubkey if the signature is invalid. + if sigInfo.parsedSignature == nil { + continue + } + + // Use the already parsed signature. + parsedSig = sigInfo.parsedSignature + } + + if err := vm.checkPubKeyEncoding(pubKey); err != nil { + return err + } + + // Parse the pubkey. + parsedPubKey, err := secp256k1.DeserializeSchnorrPubKey(pubKey) + if err != nil { + continue + } + + // Generate the signature hash based on the signature hash type. + sigHash, err := consensushashing.CalculateSignatureHashSchnorr(&vm.tx, vm.txIdx, hashType, vm.sigHashReusedValues) + if err != nil { + return err + } + + secpHash := secp256k1.Hash(*sigHash.ByteArray()) + var valid bool + if vm.sigCache != nil { + valid = vm.sigCache.Exists(secpHash, parsedSig, parsedPubKey) + if !valid && parsedPubKey.SchnorrVerify(&secpHash, parsedSig) { + vm.sigCache.Add(secpHash, parsedSig, parsedPubKey) + valid = true + } + } else { + valid = parsedPubKey.SchnorrVerify(&secpHash, parsedSig) + } + + if valid { + // PubKey verified, move on to the next signature. 
+ signatureIdx++ + numSignatures-- + } + } + + if !success { + for _, sig := range signatures { + if len(sig.signature) > 0 { + str := "not all signatures empty on failed checkmultisig" + return scriptError(ErrNullFail, str) + } + } + } + + vm.dstack.PushBool(success) + return nil +} + +func opcodeCheckMultiSigECDSA(op *parsedOpcode, vm *Engine) error { + numKeys, err := vm.dstack.PopInt() + if err != nil { + return err + } + + numPubKeys := int(numKeys.Int32()) + if numPubKeys < 0 { + str := fmt.Sprintf("number of pubkeys %d is negative", + numPubKeys) + return scriptError(ErrInvalidPubKeyCount, str) + } + if numPubKeys > MaxPubKeysPerMultiSig { + str := fmt.Sprintf("too many pubkeys: %d > %d", + numPubKeys, MaxPubKeysPerMultiSig) + return scriptError(ErrInvalidPubKeyCount, str) + } + vm.numOps += numPubKeys + if vm.numOps > MaxOpsPerScript { + str := fmt.Sprintf("exceeded max operation limit of %d", + MaxOpsPerScript) + return scriptError(ErrTooManyOperations, str) + } + + pubKeys := make([][]byte, 0, numPubKeys) + for i := 0; i < numPubKeys; i++ { + pubKey, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + pubKeys = append(pubKeys, pubKey) + } + + numSigs, err := vm.dstack.PopInt() + if err != nil { + return err + } + numSignatures := int(numSigs.Int32()) + if numSignatures < 0 { + str := fmt.Sprintf("number of signatures %d is negative", + numSignatures) + return scriptError(ErrInvalidSignatureCount, str) + + } + if numSignatures > numPubKeys { + str := fmt.Sprintf("more signatures than pubkeys: %d > %d", + numSignatures, numPubKeys) + return scriptError(ErrInvalidSignatureCount, str) + } + + signatures := make([]*parsedSigInfoECDSA, 0, numSignatures) + for i := 0; i < numSignatures; i++ { + signature, err := vm.dstack.PopByteArray() + if err != nil { + return err + } + sigInfo := &parsedSigInfoECDSA{signature: signature} + signatures = append(signatures, sigInfo) + } + + success := true + numPubKeys++ + pubKeyIdx := -1 + signatureIdx := 0 + 
+ for numSignatures > 0 { + // When there are more signatures than public keys remaining, + // there is no way to succeed since too many signatures are + // invalid, so exit early. + pubKeyIdx++ + numPubKeys-- + if numSignatures > numPubKeys { + success = false + break + } + + sigInfo := signatures[signatureIdx] + pubKey := pubKeys[pubKeyIdx] + + // The order of the signature and public key evaluation is + // important here since it can be distinguished by an + // OP_CHECKMULTISIG NOT when the strict encoding flag is set. + + rawSig := sigInfo.signature + if len(rawSig) == 0 { + // Skip to the next pubkey if signature is empty. + continue + } + + // Split the signature into hash type and signature components. + hashType := consensushashing.SigHashType(rawSig[len(rawSig)-1]) + signature := rawSig[:len(rawSig)-1] + + // Only parse and check the signature encoding once. + var parsedSig *secp256k1.ECDSASignature + if !sigInfo.parsed { + if !hashType.IsStandardSigHashType() { + return scriptError(ErrInvalidSigHashType, fmt.Sprintf("invalid hash type 0x%x", hashType)) + } + if err := vm.checkSignatureLengthECDSA(signature); err != nil { + return err + } + + // Parse the signature. + parsedSig, err = secp256k1.DeserializeECDSASignatureFromSlice(signature) + sigInfo.parsed = true + if err != nil { + continue + } + + sigInfo.parsedSignature = parsedSig + } else { + // Skip to the next pubkey if the signature is invalid. + if sigInfo.parsedSignature == nil { + continue + } + + // Use the already parsed signature. + parsedSig = sigInfo.parsedSignature + } + + if err := vm.checkPubKeyEncodingECDSA(pubKey); err != nil { + return err + } + + // Parse the pubkey. + parsedPubKey, err := secp256k1.DeserializeECDSAPubKey(pubKey) + if err != nil { + continue + } + + // Generate the signature hash based on the signature hash type. 
+ sigHash, err := consensushashing.CalculateSignatureHashECDSA(&vm.tx, vm.txIdx, hashType, vm.sigHashReusedValues) + if err != nil { + return err + } + + secpHash := secp256k1.Hash(*sigHash.ByteArray()) + var valid bool + if vm.sigCacheECDSA != nil { + valid = vm.sigCacheECDSA.Exists(secpHash, parsedSig, parsedPubKey) + if !valid && parsedPubKey.ECDSAVerify(&secpHash, parsedSig) { + vm.sigCacheECDSA.Add(secpHash, parsedSig, parsedPubKey) + valid = true + } + } else { + valid = parsedPubKey.ECDSAVerify(&secpHash, parsedSig) + } + + if valid { + // PubKey verified, move on to the next signature. + signatureIdx++ + numSignatures-- + } + } + + if !success { + for _, sig := range signatures { + if len(sig.signature) > 0 { + str := "not all signatures empty on failed checkmultisig" + return scriptError(ErrNullFail, str) + } + } + } + + vm.dstack.PushBool(success) + return nil +} + +// opcodeCheckMultiSigVerify is a combination of opcodeCheckMultiSig and +// opcodeVerify. The opcodeCheckMultiSig is invoked followed by opcodeVerify. +// See the documentation for each of those opcodes for more details. +// +// Stack transformation: +// [... [sig ...] numsigs [pubkey ...] numpubkeys] -> [... bool] -> [...] +func opcodeCheckMultiSigVerify(op *parsedOpcode, vm *Engine) error { + err := opcodeCheckMultiSig(op, vm) + if err == nil { + err = abstractVerify(op, vm, ErrCheckMultiSigVerify) + } + return err +} + +// OpcodeByName is a map that can be used to lookup an opcode by its +// human-readable name (OP_CHECKMULTISIG, OP_CHECKSIG, etc). +var OpcodeByName = make(map[string]byte) + +func init() { + // Initialize the opcode name to value map using the contents of the + // opcode array. Also add entries for "OP_FALSE" and "OP_TRUE" + // since they are aliases for "OP_0" and "OP_1" respectively. 
+ for _, op := range opcodeArray { + OpcodeByName[op.name] = op.value + } + OpcodeByName["OP_FALSE"] = OpFalse + OpcodeByName["OP_TRUE"] = OpTrue +} diff --git a/domain/consensus/utils/txscript/opcode_test.go b/domain/consensus/utils/txscript/opcode_test.go new file mode 100644 index 0000000..c52217b --- /dev/null +++ b/domain/consensus/utils/txscript/opcode_test.go @@ -0,0 +1,183 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "testing" +) + +// TestOpcodeDisabled tests the opcodeDisabled function manually because all +// disabled opcodes result in a script execution failure when executed normally, +// so the function is not called under normal circumstances. +func TestOpcodeDisabled(t *testing.T) { + t.Parallel() + + tests := []byte{OpCat, OpSubStr, OpLeft, OpRight, OpInvert, + OpAnd, OpOr, Op2Mul, Op2Div, OpMul, OpDiv, OpMod, + OpLShift, OpRShift, + } + for _, opcodeVal := range tests { + pop := parsedOpcode{opcode: &opcodeArray[opcodeVal], data: nil} + err := opcodeDisabled(&pop, nil) + if !IsErrorCode(err, ErrDisabledOpcode) { + t.Errorf("opcodeDisabled: unexpected error - got %v, "+ + "want %v", err, ErrDisabledOpcode) + continue + } + } +} + +// TestOpcodeDisasm tests the print function for all opcodes in both the oneline +// and full modes to ensure it provides the expected disassembly. +func TestOpcodeDisasm(t *testing.T) { + t.Parallel() + + // First, test the oneline disassembly. + + // The expected strings for the data push opcodes are replaced in the + // test loops below since they involve repeating bytes. Also, the + // OP_NOP# and OP_UNKNOWN# are replaced below too, since it's easier + // than manually listing them here. 
+ oneBytes := []byte{0x01} + oneStr := "01" + expectedStrings := [256]string{0x00: "0", 0x4f: "-1", + 0x50: "OP_RESERVED", 0x61: "OP_NOP", 0x62: "OP_VER", + 0x63: "OP_IF", 0x64: "OP_NOTIF", 0x65: "OP_VERIF", + 0x66: "OP_VERNOTIF", 0x67: "OP_ELSE", 0x68: "OP_ENDIF", + 0x69: "OP_VERIFY", 0x6a: "OP_RETURN", 0x6b: "OP_TOALTSTACK", + 0x6c: "OP_FROMALTSTACK", 0x6d: "OP_2DROP", 0x6e: "OP_2DUP", + 0x6f: "OP_3DUP", 0x70: "OP_2OVER", 0x71: "OP_2ROT", + 0x72: "OP_2SWAP", 0x73: "OP_IFDUP", 0x74: "OP_DEPTH", + 0x75: "OP_DROP", 0x76: "OP_DUP", 0x77: "OP_NIP", + 0x78: "OP_OVER", 0x79: "OP_PICK", 0x7a: "OP_ROLL", + 0x7b: "OP_ROT", 0x7c: "OP_SWAP", 0x7d: "OP_TUCK", + 0x7e: "OP_CAT", 0x7f: "OP_SUBSTR", 0x80: "OP_LEFT", + 0x81: "OP_RIGHT", 0x82: "OP_SIZE", 0x83: "OP_INVERT", + 0x84: "OP_AND", 0x85: "OP_OR", 0x86: "OP_XOR", + 0x87: "OP_EQUAL", 0x88: "OP_EQUALVERIFY", 0x89: "OP_RESERVED1", + 0x8a: "OP_RESERVED2", 0x8b: "OP_1ADD", 0x8c: "OP_1SUB", + 0x8d: "OP_2MUL", 0x8e: "OP_2DIV", 0x8f: "OP_NEGATE", + 0x90: "OP_ABS", 0x91: "OP_NOT", 0x92: "OP_0NOTEQUAL", + 0x93: "OP_ADD", 0x94: "OP_SUB", 0x95: "OP_MUL", 0x96: "OP_DIV", + 0x97: "OP_MOD", 0x98: "OP_LSHIFT", 0x99: "OP_RSHIFT", + 0x9a: "OP_BOOLAND", 0x9b: "OP_BOOLOR", 0x9c: "OP_NUMEQUAL", + 0x9d: "OP_NUMEQUALVERIFY", 0x9e: "OP_NUMNOTEQUAL", + 0x9f: "OP_LESSTHAN", 0xa0: "OP_GREATERTHAN", + 0xa1: "OP_LESSTHANOREQUAL", 0xa2: "OP_GREATERTHANOREQUAL", + 0xa3: "OP_MIN", 0xa4: "OP_MAX", 0xa5: "OP_WITHIN", + 0xa8: "OP_SHA256", 0xa9: "OP_CHECKMULTISIGECDSA", + 0xaa: "OP_BLAKE2B", + 0xab: "OP_CHECKSIGECDSA", 0xac: "OP_CHECKSIG", 0xad: "OP_CHECKSIGVERIFY", + 0xae: "OP_CHECKMULTISIG", 0xaf: "OP_CHECKMULTISIGVERIFY", + 0xb0: "OP_CHECKLOCKTIMEVERIFY", 0xb1: "OP_CHECKSEQUENCEVERIFY", + 0xfa: "OP_SMALLINTEGER", 0xfb: "OP_PUBKEYS", + 0xfd: "OP_PUBKEYHASH", 0xfe: "OP_PUBKEY", + 0xff: "OP_INVALIDOPCODE", + } + for opcodeVal, expectedStr := range expectedStrings { + var data []byte + switch { + // OP_DATA_1 through OP_DATA_65 display the pushed data. 
+ case opcodeVal >= 0x01 && opcodeVal < 0x4c: + data = bytes.Repeat(oneBytes, opcodeVal) + expectedStr = strings.Repeat(oneStr, opcodeVal) + + // OP_PUSHDATA1. + case opcodeVal == 0x4c: + data = bytes.Repeat(oneBytes, 1) + expectedStr = strings.Repeat(oneStr, 1) + + // OP_PUSHDATA2. + case opcodeVal == 0x4d: + data = bytes.Repeat(oneBytes, 2) + expectedStr = strings.Repeat(oneStr, 2) + + // OP_PUSHDATA4. + case opcodeVal == 0x4e: + data = bytes.Repeat(oneBytes, 3) + expectedStr = strings.Repeat(oneStr, 3) + + // OP_1 through OP_16 display the numbers themselves. + case opcodeVal >= 0x51 && opcodeVal <= 0x60: + val := byte(opcodeVal - (0x51 - 1)) + data = []byte{val} + expectedStr = strconv.Itoa(int(val)) + + // OP_UNKNOWN#. + case isOpUnknown(opcodeVal): + expectedStr = "OP_UNKNOWN" + strconv.Itoa(int(opcodeVal)) + } + + pop := parsedOpcode{opcode: &opcodeArray[opcodeVal], data: data} + gotStr := pop.print(true) + if gotStr != expectedStr { + t.Errorf("pop.print (opcode %x): Unexpected disasm "+ + "string - got %v, want %v", opcodeVal, gotStr, + expectedStr) + continue + } + } + + // Now, replace the relevant fields and test the full disassembly. + expectedStrings[0x00] = "OP_0" + expectedStrings[0x4f] = "OP_1NEGATE" + for opcodeVal, expectedStr := range expectedStrings { + var data []byte + switch { + // OP_DATA_1 through OP_DATA_65 display the opcode followed by + // the pushed data. + case opcodeVal >= 0x01 && opcodeVal < 0x4c: + data = bytes.Repeat(oneBytes, opcodeVal) + expectedStr = fmt.Sprintf("OP_DATA_%d 0x%s", opcodeVal, + strings.Repeat(oneStr, opcodeVal)) + + // OP_PUSHDATA1. + case opcodeVal == 0x4c: + data = bytes.Repeat(oneBytes, 1) + expectedStr = fmt.Sprintf("OP_PUSHDATA1 0x%02x 0x%s", + len(data), strings.Repeat(oneStr, 1)) + + // OP_PUSHDATA2. + case opcodeVal == 0x4d: + data = bytes.Repeat(oneBytes, 2) + expectedStr = fmt.Sprintf("OP_PUSHDATA2 0x%04x 0x%s", + len(data), strings.Repeat(oneStr, 2)) + + // OP_PUSHDATA4. 
+ case opcodeVal == 0x4e: + data = bytes.Repeat(oneBytes, 3) + expectedStr = fmt.Sprintf("OP_PUSHDATA4 0x%08x 0x%s", + len(data), strings.Repeat(oneStr, 3)) + + // OP_1 through OP_16. + case opcodeVal >= 0x51 && opcodeVal <= 0x60: + val := byte(opcodeVal - (0x51 - 1)) + data = []byte{val} + expectedStr = "OP_" + strconv.Itoa(int(val)) + + // OP_UNKNOWN#. + case isOpUnknown(opcodeVal): + expectedStr = "OP_UNKNOWN" + strconv.Itoa(int(opcodeVal)) + } + + pop := parsedOpcode{opcode: &opcodeArray[opcodeVal], data: data} + gotStr := pop.print(false) + if gotStr != expectedStr { + t.Errorf("pop.print (opcode %x): Unexpected disasm "+ + "string - got %v, want %v", opcodeVal, gotStr, + expectedStr) + continue + } + } +} + +func isOpUnknown(opcodeVal int) bool { + return opcodeVal >= 0xb2 && opcodeVal <= 0xf9 || opcodeVal == 0xfc || + opcodeVal == 0xa6 || opcodeVal == 0xa7 +} diff --git a/domain/consensus/utils/txscript/reference_test.go b/domain/consensus/utils/txscript/reference_test.go new file mode 100644 index 0000000..03e253f --- /dev/null +++ b/domain/consensus/utils/txscript/reference_test.go @@ -0,0 +1,402 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "strconv" + "strings" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// scriptTestName returns a descriptive test name for the given reference script +// test data. +func scriptTestName(test []interface{}) (string, error) { + // The test must consist of a signature script, public key script, flags, + // and expected error. 
Finally, it may optionally contain a comment. + if len(test) < 4 || len(test) > 5 { + return "", errors.Errorf("invalid test length %d", len(test)) + } + + // Use the comment for the test name if one is specified, otherwise, + // construct the name based on the signature script, public key script, + // and flags. + var name string + if len(test) == 5 { + name = fmt.Sprintf("test (%s)", test[4]) + } else { + name = fmt.Sprintf("test ([%s, %s, %s])", test[0], + test[1], test[2]) + } + return name, nil +} + +// parse hex string into a []byte. +func parseHex(tok string) ([]byte, error) { + if !strings.HasPrefix(tok, "0x") { + return nil, errors.New("not a hex number") + } + return hex.DecodeString(tok[2:]) +} + +// shortFormOps holds a map of opcode names to values for use in short form +// parsing. It is declared here so it only needs to be created once. +var shortFormOps map[string]byte + +// parseShortForm parses a string into a script as follows: +// - Opcodes other than the push opcodes and unknown are present as +// either OP_NAME or just NAME +// - Plain numbers are made into push operations +// - Numbers beginning with 0x are inserted into the []byte as-is (so +// 0x14 is OP_DATA_20) +// - Single quoted strings are pushed as data +// - Anything else is an error +func parseShortForm(script string, version uint16) ([]byte, error) { + if version > constants.MaxScriptPublicKeyVersion { + return nil, errors.Errorf("unknown version %d (max: %d)", + version, constants.MaxScriptPublicKeyVersion) + } + + // Only create the short form opcode map once. + if shortFormOps == nil { + ops := make(map[string]byte) + for opcodeName, opcodeValue := range OpcodeByName { + if strings.Contains(opcodeName, "OP_UNKNOWN") { + continue + } + ops[opcodeName] = opcodeValue + + // The opcodes named OP_# can't have the OP_ prefix + // stripped or they would conflict with the plain + // numbers. 
Also, since OP_FALSE and OP_TRUE are + // aliases for the OP_0, and OP_1, respectively, they + // have the same value, so detect those by name and + // allow them. + if (opcodeName == "OP_FALSE" || opcodeName == "OP_TRUE") || + (opcodeValue != Op0 && (opcodeValue < Op1 || + opcodeValue > Op16)) { + + ops[strings.TrimPrefix(opcodeName, "OP_")] = opcodeValue + } + } + shortFormOps = ops + } + + // Split only does one separator so convert all \n and tab into space. + script = strings.Replace(script, "\n", " ", -1) + script = strings.Replace(script, "\t", " ", -1) + tokens := strings.Split(script, " ") + builder := NewScriptBuilder() + + for _, tok := range tokens { + if len(tok) == 0 { + continue + } + // if parses as a plain number + if num, err := strconv.ParseInt(tok, 10, 64); err == nil { + builder.AddInt64(num) + continue + } else if bts, err := parseHex(tok); err == nil { + // Concatenate the bytes manually since the test code + // intentionally creates scripts that are too large and + // would cause the builder to error otherwise. + if builder.err == nil { + builder.script = append(builder.script, bts...) + } + } else if len(tok) >= 2 && + tok[0] == '\'' && tok[len(tok)-1] == '\'' { + builder.AddFullData([]byte(tok[1 : len(tok)-1])) + } else if opcode, ok := shortFormOps[tok]; ok { + builder.AddOp(opcode) + } else { + return nil, errors.Errorf("bad token %q", tok) + } + + } + return builder.Script() +} + +// parseScriptFlags parses the provided flags string from the format used in the +// reference tests into ScriptFlags suitable for use in the script engine. +func parseScriptFlags(flagStr string) (ScriptFlags, error) { + var flags ScriptFlags + + sFlags := strings.Split(flagStr, ",") + for _, flag := range sFlags { + switch flag { + case "": + // Nothing. + default: + return flags, errors.Errorf("invalid flag: %s", flag) + } + } + return flags, nil +} + +// parseExpectedResult parses the provided expected result string into allowed +// script error codes. 
An error is returned if the expected result string is +// not supported. +func parseExpectedResult(expected string) ([]ErrorCode, error) { + switch expected { + case "OK": + return nil, nil + case "UNKNOWN_ERROR": + return []ErrorCode{ErrNumberTooBig, ErrMinimalData}, nil + case "PUBKEYFORMAT": + return []ErrorCode{ErrPubKeyFormat}, nil + case "EVAL_FALSE": + return []ErrorCode{ErrEvalFalse, ErrEmptyStack}, nil + case "EMPTY_STACK": + return []ErrorCode{ErrEmptyStack}, nil + case "EQUALVERIFY": + return []ErrorCode{ErrEqualVerify}, nil + case "NULLFAIL": + return []ErrorCode{ErrNullFail}, nil + case "SIG_HIGH_S": + return []ErrorCode{ErrSigHighS}, nil + case "SIG_HASHTYPE": + return []ErrorCode{ErrInvalidSigHashType}, nil + case "SIG_PUSHONLY": + return []ErrorCode{ErrNotPushOnly}, nil + case "CLEANSTACK": + return []ErrorCode{ErrCleanStack}, nil + case "BAD_OPCODE": + return []ErrorCode{ErrReservedOpcode, ErrMalformedPush}, nil + case "UNBALANCED_CONDITIONAL": + return []ErrorCode{ErrUnbalancedConditional, + ErrInvalidStackOperation}, nil + case "OP_RETURN": + return []ErrorCode{ErrEarlyReturn}, nil + case "VERIFY": + return []ErrorCode{ErrVerify}, nil + case "INVALID_STACK_OPERATION", "INVALID_ALTSTACK_OPERATION": + return []ErrorCode{ErrInvalidStackOperation}, nil + case "DISABLED_OPCODE": + return []ErrorCode{ErrDisabledOpcode}, nil + case "PUSH_SIZE": + return []ErrorCode{ErrElementTooBig}, nil + case "OP_COUNT": + return []ErrorCode{ErrTooManyOperations}, nil + case "STACK_SIZE": + return []ErrorCode{ErrStackOverflow}, nil + case "SCRIPT_SIZE": + return []ErrorCode{ErrScriptTooBig}, nil + case "PUBKEY_COUNT": + return []ErrorCode{ErrInvalidPubKeyCount}, nil + case "SIG_COUNT": + return []ErrorCode{ErrInvalidSignatureCount}, nil + case "MINIMALDATA": + return []ErrorCode{ErrMinimalData}, nil + case "NEGATIVE_LOCKTIME": + return []ErrorCode{ErrNegativeLockTime}, nil + case "UNSATISFIED_LOCKTIME": + return []ErrorCode{ErrUnsatisfiedLockTime}, nil + case 
"MINIMALIF": + return []ErrorCode{ErrMinimalIf}, nil + } + + return nil, errors.Errorf("unrecognized expected result in test data: %v", + expected) +} + +// createSpendTx generates a basic spending transaction given the passed +// signature and public key scripts. +func createSpendingTx(sigScript []byte, scriptPubKey *externalapi.ScriptPublicKey) *externalapi.DomainTransaction { + outpoint := externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: ^uint32(0), + } + input := &externalapi.DomainTransactionInput{ + PreviousOutpoint: outpoint, + SignatureScript: []byte{Op0, Op0}, + Sequence: constants.MaxTxInSequenceNum, + } + output := &externalapi.DomainTransactionOutput{Value: 0, ScriptPublicKey: scriptPubKey} + coinbaseTx := &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: []*externalapi.DomainTransactionOutput{output}, + } + + outpoint = externalapi.DomainOutpoint{ + TransactionID: *consensushashing.TransactionID(coinbaseTx), + Index: 0, + } + input = &externalapi.DomainTransactionInput{ + PreviousOutpoint: outpoint, + SignatureScript: sigScript, + Sequence: constants.MaxTxInSequenceNum, + } + output = &externalapi.DomainTransactionOutput{Value: 0, ScriptPublicKey: nil} + spendingTx := &externalapi.DomainTransaction{ + Version: 1, + Inputs: []*externalapi.DomainTransactionInput{input}, + Outputs: []*externalapi.DomainTransactionOutput{output}, + } + + return spendingTx +} + +// testScripts ensures all of the passed script tests execute with the expected +// results with or without using a signature cache, as specified by the +// parameter. +func testScripts(t *testing.T, tests [][]interface{}, useSigCache bool) { + // Create a signature cache to use only if requested. 
+ var sigCache *SigCache + var sigCacheECDSA *SigCacheECDSA + if useSigCache { + sigCache = NewSigCache(10) + sigCacheECDSA = NewSigCacheECDSA(10) + } + + for i, test := range tests { + // "Format is: [[wit..., amount]?, scriptSig, scriptPubKey, + // flags, expected_scripterror, ... comments]" + + // Skip single line comments. + if len(test) == 1 { + continue + } + + // Construct a name for the test based on the comment and test + // data. + name, err := scriptTestName(test) + if err != nil { + t.Errorf("TestScripts: invalid test #%d: %v", i, err) + continue + } + + // Extract and parse the signature script from the test fields. + scriptSigStr, ok := test[0].(string) + if !ok { + t.Errorf("%s: signature script is not a string", name) + continue + } + scriptSig, err := parseShortForm(scriptSigStr, 0) + if err != nil { + t.Errorf("%s: can't parse signature script: %v", name, + err) + continue + } + + // Extract and parse the public key script from the test fields. + scriptPubKeyStr, ok := test[1].(string) + if !ok { + t.Errorf("%s: public key script is not a string", name) + continue + } + script, err := parseShortForm(scriptPubKeyStr, 0) + if err != nil { + t.Errorf("%s: can't parse public key script: %v", name, + err) + continue + } + scriptPubKey := &externalapi.ScriptPublicKey{Script: script, Version: 0} + + // Extract and parse the script flags from the test fields. + flagsStr, ok := test[2].(string) + if !ok { + t.Errorf("%s: flags field is not a string", name) + continue + } + flags, err := parseScriptFlags(flagsStr) + if err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Extract and parse the expected result from the test fields. + // + // Convert the expected result string into the allowed script + // error codes. This is necessary because txscript is more + // fine grained with its errors than the reference test data, so + // some of the reference test data errors map to more than one + // possibility. 
+ resultStr, ok := test[3].(string) + if !ok { + t.Errorf("%s: result field is not a string", name) + continue + } + allowedErrorCodes, err := parseExpectedResult(resultStr) + if err != nil { + t.Errorf("%s: %v", name, err) + continue + } + + // Generate a transaction pair such that one spends from the + // other and the provided signature and public key scripts are + // used, then create a new engine to execute the scripts. + tx := createSpendingTx(scriptSig, scriptPubKey) + + vm, err := NewEngine(scriptPubKey, tx, 0, flags, sigCache, sigCacheECDSA, &consensushashing.SighashReusedValues{}) + if err == nil { + err = vm.Execute() + } + + // Ensure there were no errors when the expected result is OK. + if resultStr == "OK" { + if err != nil { + t.Errorf("%s failed to execute: %v", name, err) + } + continue + } + + // At this point an error was expected so ensure the result of + // the execution matches it. + success := false + for _, code := range allowedErrorCodes { + if IsErrorCode(err, code) { + success = true + break + } + } + if !success { + var scriptErr Error + if ok := errors.As(err, &scriptErr); ok { + t.Errorf("%s: want error codes %v, got %v", name, + allowedErrorCodes, scriptErr.ErrorCode) + continue + } + t.Errorf("%s: want error codes %v, got err: %v (%T)", + name, allowedErrorCodes, err, err) + continue + } + } +} + +// TestScripts ensures all of the tests in script_tests.json execute with the +// expected results as defined in the test data. +func TestScripts(t *testing.T) { + file, err := ioutil.ReadFile("data/script_tests.json") + if err != nil { + t.Fatalf("TestScripts: %v\n", err) + } + + var tests [][]interface{} + err = json.Unmarshal(file, &tests) + if err != nil { + t.Fatalf("TestScripts couldn't Unmarshal: %v", err) + } + + // Disable non-test logs + logLevel := log.Level() + log.SetLevel(logger.LevelOff) + defer log.SetLevel(logLevel) + + // Run all script tests with and without the signature cache. 
+ testScripts(t, tests, true) + testScripts(t, tests, false) +} diff --git a/domain/consensus/utils/txscript/script.go b/domain/consensus/utils/txscript/script.go new file mode 100644 index 0000000..1641f9b --- /dev/null +++ b/domain/consensus/utils/txscript/script.go @@ -0,0 +1,333 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "bytes" + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// These are the constants specified for maximums in individual scripts. +const ( + MaxOpsPerScript = 201 // Max number of non-push operations. + MaxPubKeysPerMultiSig = 20 // Multisig can't have more sigs than this. + MaxScriptElementSize = 520 // Max bytes pushable to the stack. +) + +// isSmallInt returns whether or not the opcode is considered a small integer, +// which is an OP_0, or OP_1 through OP_16. +func isSmallInt(op *opcode) bool { + if op.value == Op0 || (op.value >= Op1 && op.value <= Op16) { + return true + } + return false +} + +// isScriptHash returns true if the script passed is a pay-to-script-hash +// transaction, false otherwise. +func isScriptHash(pops []parsedOpcode) bool { + return len(pops) == 3 && + pops[0].opcode.value == OpBlake2b && + pops[1].opcode.value == OpData32 && + pops[2].opcode.value == OpEqual +} + +// IsPayToScriptHash returns true if the script is in the standard +// pay-to-script-hash (P2SH) format, false otherwise. +func IsPayToScriptHash(script *externalapi.ScriptPublicKey) bool { + pops, err := parseScript(script.Script) + if err != nil { + return false + } + return isScriptHash(pops) +} + +// isPushOnly returns true if the script only pushes data, false otherwise. 
func isPushOnly(pops []parsedOpcode) bool {
	// NOTE: This function does NOT verify opcodes directly since it is
	// internal and is only called with parsed opcodes for scripts that did
	// not have any parse errors. Thus, consensus is properly maintained.

	for _, pop := range pops {
		// All opcodes up to OP_16 are data push instructions.
		// NOTE: This does consider OP_RESERVED to be a data push
		// instruction, but execution of OP_RESERVED will fail anyways
		// and matches the behavior required by consensus.
		if pop.opcode.value > Op16 {
			return false
		}
	}
	return true
}

// parseScriptTemplate is the same as parseScript but allows the passing of the
// template list for testing purposes. When there are parse errors, it returns
// the list of parsed opcodes up to the point of failure along with the error.
//
// The opcodes table maps each possible opcode byte to its descriptor; the
// descriptor's length field drives how many bytes of the script each
// instruction consumes (see the switch below).
func parseScriptTemplate(script []byte, opcodes *[256]opcode) ([]parsedOpcode, error) {
	retScript := make([]parsedOpcode, 0, len(script))
	for i := 0; i < len(script); {
		instr := script[i]
		op := &opcodes[instr]
		pop := parsedOpcode{opcode: op}

		// Parse data out of instruction.
		switch {
		// No additional data. Note that some of the opcodes, notably
		// OP_1NEGATE, OP_0, and OP_[1-16] represent the data
		// themselves.
		case op.length == 1:
			i++

		// Data pushes of specific lengths -- OP_DATA_[1-75].
		// op.length includes the opcode byte itself, so the pushed
		// data is the following op.length-1 bytes.
		case op.length > 1:
			if len(script[i:]) < op.length {
				str := fmt.Sprintf("opcode %s requires %d "+
					"bytes, but script only has %d remaining",
					op.name, op.length, len(script[i:]))
				return retScript, scriptError(ErrMalformedPush,
					str)
			}

			// Slice out the data.
			pop.data = script[i+1 : i+op.length]
			i += op.length

		// Data pushes with parsed lengths -- OP_PUSHDATA{1,2,4}.
		// A negative op.length means the next -op.length bytes encode
		// the data length.
		case op.length < 0:
			var l uint
			off := i + 1

			if len(script[off:]) < -op.length {
				str := fmt.Sprintf("opcode %s requires %d "+
					"bytes, but script only has %d remaining",
					op.name, -op.length, len(script[off:]))
				return retScript, scriptError(ErrMalformedPush,
					str)
			}

			// Next -length bytes are little endian length of data.
			switch op.length {
			case -1:
				l = uint(script[off])
			case -2:
				l = ((uint(script[off+1]) << 8) |
					uint(script[off]))
			case -4:
				l = ((uint(script[off+3]) << 24) |
					(uint(script[off+2]) << 16) |
					(uint(script[off+1]) << 8) |
					uint(script[off]))
			default:
				str := fmt.Sprintf("invalid opcode length %d",
					op.length)
				return retScript, scriptError(ErrMalformedPush,
					str)
			}

			// Move offset to beginning of the data.
			off += -op.length

			// Disallow entries that do not fit script or were
			// sign extended.
			if int(l) > len(script[off:]) || int(l) < 0 {
				str := fmt.Sprintf("opcode %s pushes %d bytes, "+
					"but script only has %d remaining",
					op.name, int(l), len(script[off:]))
				return retScript, scriptError(ErrMalformedPush,
					str)
			}

			pop.data = script[off : off+int(l)]
			i += 1 - op.length + int(l)
		}

		retScript = append(retScript, pop)
	}

	return retScript, nil
}

// parseScript preparses the script in bytes into a list of parsedOpcodes while
// applying a number of sanity checks.
func parseScript(script []byte) ([]parsedOpcode, error) {
	return parseScriptTemplate(script, &opcodeArray)
}

// unparseScript reverses the action of parseScript and returns the
// parsedOpcodes as a list of bytes.
func unparseScript(pops []parsedOpcode) ([]byte, error) {
	script := make([]byte, 0, len(pops))
	for _, pop := range pops {
		b, err := pop.bytes()
		if err != nil {
			return nil, err
		}
		script = append(script, b...)
	}
	return script, nil
}

// DisasmString formats a disassembled script for one line printing. When the
// script fails to parse, the returned string will contain the disassembled
// script up to the point the failure occurred along with the string '[error]'
// appended. In addition, the reason the script failed to parse is returned
// if the caller wants more information about the failure.
func DisasmString(version uint16, buf []byte) (string, error) {
	// Currently only one script public key version exists, so it equals
	// the max version.
	if version == constants.MaxScriptPublicKeyVersion {
		var disbuf bytes.Buffer
		opcodes, err := parseScript(buf)
		// Disassemble everything that parsed, even on error; the error
		// marker is appended afterwards.
		for _, pop := range opcodes {
			disbuf.WriteString(pop.print(true))
			disbuf.WriteByte(' ')
		}
		// Drop the trailing separator space.
		if disbuf.Len() > 0 {
			disbuf.Truncate(disbuf.Len() - 1)
		}
		if err != nil {
			disbuf.WriteString("[error]")
		}
		return disbuf.String(), err
	}
	// NOTE(review): this error message contains typos ("scriptPublicHash",
	// "then" instead of "than") — left byte-identical here since it is
	// runtime output; confirm whether callers match on the text before
	// correcting it.
	return "", scriptError(ErrPubKeyFormat, "the version of the scriptPublicHash is higher then the known version")
}

// canonicalPush returns true if the object is either not a push instruction
// or is a push instruction that matches the canonical form, i.e. it uses the
// smallest possible instruction to do the job. False otherwise.
func canonicalPush(pop parsedOpcode) bool {
	opcode := pop.opcode.value
	data := pop.data
	dataLen := len(pop.data)
	// Anything above OP_16 is not a push instruction at all.
	if opcode > Op16 {
		return true
	}

	// A single byte <= 16 must be pushed via OP_0..OP_16 / OP_1NEGATE,
	// not via a data-push opcode.
	if opcode < OpPushData1 && opcode > Op0 && (dataLen == 1 && data[0] <= 16) {
		return false
	}
	// OP_PUSHDATA1 must not be used where a direct OP_DATA_n would fit.
	if opcode == OpPushData1 && dataLen < OpPushData1 {
		return false
	}
	// OP_PUSHDATA2 must not be used where OP_PUSHDATA1 would fit.
	if opcode == OpPushData2 && dataLen <= 0xff {
		return false
	}
	// OP_PUSHDATA4 must not be used where OP_PUSHDATA2 would fit.
	if opcode == OpPushData4 && dataLen <= 0xffff {
		return false
	}
	return true
}

// asSmallInt returns the passed opcode, which must be true according to
// isSmallInt(), as an integer.
func asSmallInt(op *opcode) int {
	if op.value == Op0 {
		return 0
	}

	// OP_1 through OP_16 map to 1..16.
	return int(op.value - (Op1 - 1))
}

// getSigOpCount is the implementation function for counting the number of
// signature operations in the script provided by pops. If precise mode is
// requested then we attempt to count the number of operations for a multisig
// op. Otherwise we use the maximum.
func getSigOpCount(pops []parsedOpcode, precise bool) int {
	nSigs := 0
	for i, pop := range pops {
		switch pop.opcode.value {
		case OpCheckSig, OpCheckSigVerify, OpCheckSigECDSA:
			nSigs++
		case OpCheckMultiSig, OpCheckMultiSigVerify, OpCheckMultiSigECDSA:
			// If we are being precise then look for familiar
			// patterns for multisig, for now all we recognize is
			// OP_1 - OP_16 to signify the number of pubkeys.
			// Otherwise, we use the max of 20.
			if precise && i > 0 &&
				pops[i-1].opcode.value >= Op1 &&
				pops[i-1].opcode.value <= Op16 {
				nSigs += asSmallInt(pops[i-1].opcode)
			} else {
				nSigs += MaxPubKeysPerMultiSig
			}
		default:
			// Not a sigop.
		}
	}

	return nSigs
}

// GetSigOpCount provides a quick count of the number of signature operations
// in a script. a CHECKSIG operations counts for 1, and a CHECK_MULTISIG for 20.
// If the script fails to parse, then the count up to the point of failure is
// returned.
func GetSigOpCount(script []byte) int {
	// Don't check error since parseScript returns the parsed-up-to-error
	// list of pops.
	pops, _ := parseScript(script)
	return getSigOpCount(pops, false)
}

// GetPreciseSigOpCount returns the number of signature operations in
// scriptPubKey. If isP2SH is true then scriptSig may be searched for the
// Pay-To-Script-Hash script in order to find the precise number of signature
// operations in the transaction. If the script fails to parse, then the count
// up to the point of failure is returned.
func GetPreciseSigOpCount(scriptSig []byte, scriptPubKey *externalapi.ScriptPublicKey, isP2SH bool) int {
	// Don't check error since parseScript returns the parsed-up-to-error
	// list of pops.
	pops, _ := parseScript(scriptPubKey.Script)

	// Treat non P2SH transactions as normal.
	if !(isP2SH && isScriptHash(pops)) {
		return getSigOpCount(pops, true)
	}

	// The public key script is a pay-to-script-hash, so parse the signature
	// script to get the final item. Scripts that fail to fully parse count
	// as 0 signature operations.
	sigPops, err := parseScript(scriptSig)
	if err != nil {
		return 0
	}

	// The signature script must only push data to the stack for P2SH to be
	// a valid pair, so the signature operation count is 0 when that is not
	// the case.
	if !isPushOnly(sigPops) || len(sigPops) == 0 {
		return 0
	}

	// The P2SH script is the last item the signature script pushes to the
	// stack. When the script is empty, there are no signature operations.
	shScript := sigPops[len(sigPops)-1].data
	if len(shScript) == 0 {
		return 0
	}

	// Parse the P2SH script and don't check the error since parseScript
	// returns the parsed-up-to-error list of pops and the consensus rules
	// dictate signature operations are counted up to the first parse
	// failure.
	shPops, _ := parseScript(shScript)
	return getSigOpCount(shPops, true)
}

// IsUnspendable returns whether the passed public key script is unspendable, or
// guaranteed to fail at execution. This allows inputs to be pruned instantly
// when entering the UTXO set.
func IsUnspendable(scriptPubKey []byte) bool {
	// A script that fails to parse can never be executed successfully.
	pops, err := parseScript(scriptPubKey)
	if err != nil {
		return true
	}

	// A script beginning with OP_RETURN always fails execution.
	return len(pops) > 0 && pops[0].opcode.value == OpReturn
}
diff --git a/domain/consensus/utils/txscript/script_test.go b/domain/consensus/utils/txscript/script_test.go
new file mode 100644
index 0000000..9c93492
--- /dev/null
+++ b/domain/consensus/utils/txscript/script_test.go
@@ -0,0 +1,3822 @@
+// Copyright (c) 2013-2017 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+ +package txscript + +import ( + "bytes" + "encoding/hex" + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// TestParseOpcode tests for opcode parsing with bad data templates. +func TestParseOpcode(t *testing.T) { + // Deep copy the array and make one of the opcodes invalid by setting it + // to the wrong length. + fakeArray := opcodeArray + fakeArray[OpPushData4] = opcode{value: OpPushData4, + name: "OP_PUSHDATA4", length: -8, opfunc: opcodePushData} + + // This script would be fine if -8 was a valid length. + _, err := parseScriptTemplate([]byte{OpPushData4, 0x1, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00}, &fakeArray) + if err == nil { + t.Errorf("no error with dodgy opcode array!") + } +} + +// TestUnparsingInvalidOpcodes tests for errors when unparsing invalid parsed +// opcodes. +func TestUnparsingInvalidOpcodes(t *testing.T) { + tests := []struct { + name string + pop *parsedOpcode + expectedErr error + }{ + { + name: "OP_FALSE", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpFalse], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_FALSE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpFalse], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_1 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData1], + data: nil, + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_1", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData1], + data: make([]byte, 1), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_1 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData1], + data: make([]byte, 2), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_2 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData2], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_2", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData2], + data: make([]byte, 2), + 
}, + expectedErr: nil, + }, + { + name: "OP_DATA_2 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData2], + data: make([]byte, 3), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_3 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData3], + data: make([]byte, 2), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_3", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData3], + data: make([]byte, 3), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_3 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData3], + data: make([]byte, 4), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_4 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData4], + data: make([]byte, 3), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_4", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData4], + data: make([]byte, 4), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_4 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData4], + data: make([]byte, 5), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_5 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData5], + data: make([]byte, 4), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_5", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData5], + data: make([]byte, 5), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_5 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData5], + data: make([]byte, 6), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_6 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData6], + data: make([]byte, 5), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_6", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData6], + data: make([]byte, 6), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_6 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData6], + data: make([]byte, 
7), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_7 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData7], + data: make([]byte, 6), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_7", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData7], + data: make([]byte, 7), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_7 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData7], + data: make([]byte, 8), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_8 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData8], + data: make([]byte, 7), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_8", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData8], + data: make([]byte, 8), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_8 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData8], + data: make([]byte, 9), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_9 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData9], + data: make([]byte, 8), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_9", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData9], + data: make([]byte, 9), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_9 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData9], + data: make([]byte, 10), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_10 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData10], + data: make([]byte, 9), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_10", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData10], + data: make([]byte, 10), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_10 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData10], + data: make([]byte, 11), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_11 short", + pop: &parsedOpcode{ + opcode: 
&opcodeArray[OpData11], + data: make([]byte, 10), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_11", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData11], + data: make([]byte, 11), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_11 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData11], + data: make([]byte, 12), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_12 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData12], + data: make([]byte, 11), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_12", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData12], + data: make([]byte, 12), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_12 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData12], + data: make([]byte, 13), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_13 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData13], + data: make([]byte, 12), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_13", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData13], + data: make([]byte, 13), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_13 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData13], + data: make([]byte, 14), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_14 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData14], + data: make([]byte, 13), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_14", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData14], + data: make([]byte, 14), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_14 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData14], + data: make([]byte, 15), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_15 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData15], + data: make([]byte, 14), + }, + expectedErr: scriptError(ErrInternal, ""), 
+ }, + { + name: "OP_DATA_15", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData15], + data: make([]byte, 15), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_15 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData15], + data: make([]byte, 16), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_16 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData16], + data: make([]byte, 15), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_16", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData16], + data: make([]byte, 16), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_16 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData16], + data: make([]byte, 17), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_17 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData17], + data: make([]byte, 16), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_17", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData17], + data: make([]byte, 17), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_17 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData17], + data: make([]byte, 18), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_18 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData18], + data: make([]byte, 17), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_18", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData18], + data: make([]byte, 18), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_18 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData18], + data: make([]byte, 19), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_19 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData19], + data: make([]byte, 18), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_19", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData19], + data: 
make([]byte, 19), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_19 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData19], + data: make([]byte, 20), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_20 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData20], + data: make([]byte, 19), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_20", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData20], + data: make([]byte, 20), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_20 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData20], + data: make([]byte, 21), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_21 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData21], + data: make([]byte, 20), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_21", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData21], + data: make([]byte, 21), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_21 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData21], + data: make([]byte, 22), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_22 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData22], + data: make([]byte, 21), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_22", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData22], + data: make([]byte, 22), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_22 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData22], + data: make([]byte, 23), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_23 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData23], + data: make([]byte, 22), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_23", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData23], + data: make([]byte, 23), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_23 long", + pop: 
&parsedOpcode{ + opcode: &opcodeArray[OpData23], + data: make([]byte, 24), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_24 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData24], + data: make([]byte, 23), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_24", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData24], + data: make([]byte, 24), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_24 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData24], + data: make([]byte, 25), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_25 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData25], + data: make([]byte, 24), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_25", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData25], + data: make([]byte, 25), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_25 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData25], + data: make([]byte, 26), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_26 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData26], + data: make([]byte, 25), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_26", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData26], + data: make([]byte, 26), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_26 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData26], + data: make([]byte, 27), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_27 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData27], + data: make([]byte, 26), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_27", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData27], + data: make([]byte, 27), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_27 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData27], + data: make([]byte, 28), + }, + expectedErr: 
scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_28 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData28], + data: make([]byte, 27), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_28", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData28], + data: make([]byte, 28), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_28 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData28], + data: make([]byte, 29), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_29 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData29], + data: make([]byte, 28), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_29", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData29], + data: make([]byte, 29), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_29 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData29], + data: make([]byte, 30), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_30 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData30], + data: make([]byte, 29), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_30", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData30], + data: make([]byte, 30), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_30 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData30], + data: make([]byte, 31), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_31 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData31], + data: make([]byte, 30), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_31", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData31], + data: make([]byte, 31), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_31 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData31], + data: make([]byte, 32), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_32 short", + pop: &parsedOpcode{ + opcode: 
&opcodeArray[OpData32], + data: make([]byte, 31), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_32", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData32], + data: make([]byte, 32), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_32 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData32], + data: make([]byte, 33), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_33 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData33], + data: make([]byte, 32), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_33", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData33], + data: make([]byte, 33), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_33 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData33], + data: make([]byte, 34), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_34 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData34], + data: make([]byte, 33), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_34", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData34], + data: make([]byte, 34), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_34 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData34], + data: make([]byte, 35), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_35 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData35], + data: make([]byte, 34), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_35", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData35], + data: make([]byte, 35), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_35 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData35], + data: make([]byte, 36), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_36 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData36], + data: make([]byte, 35), + }, + expectedErr: scriptError(ErrInternal, ""), 
+ }, + { + name: "OP_DATA_36", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData36], + data: make([]byte, 36), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_36 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData36], + data: make([]byte, 37), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_37 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData37], + data: make([]byte, 36), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_37", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData37], + data: make([]byte, 37), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_37 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData37], + data: make([]byte, 38), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_38 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData38], + data: make([]byte, 37), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_38", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData38], + data: make([]byte, 38), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_38 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData38], + data: make([]byte, 39), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_39 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData39], + data: make([]byte, 38), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_39", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData39], + data: make([]byte, 39), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_39 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData39], + data: make([]byte, 40), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_40 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData40], + data: make([]byte, 39), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_40", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData40], + data: 
make([]byte, 40), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_40 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData40], + data: make([]byte, 41), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_41 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData41], + data: make([]byte, 40), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_41", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData41], + data: make([]byte, 41), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_41 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData41], + data: make([]byte, 42), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_42 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData42], + data: make([]byte, 41), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_42", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData42], + data: make([]byte, 42), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_42 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData42], + data: make([]byte, 43), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_43 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData43], + data: make([]byte, 42), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_43", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData43], + data: make([]byte, 43), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_43 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData43], + data: make([]byte, 44), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_44 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData44], + data: make([]byte, 43), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_44", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData44], + data: make([]byte, 44), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_44 long", + pop: 
&parsedOpcode{ + opcode: &opcodeArray[OpData44], + data: make([]byte, 45), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_45 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData45], + data: make([]byte, 44), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_45", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData45], + data: make([]byte, 45), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_45 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData45], + data: make([]byte, 46), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_46 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData46], + data: make([]byte, 45), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_46", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData46], + data: make([]byte, 46), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_46 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData46], + data: make([]byte, 47), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_47 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData47], + data: make([]byte, 46), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_47", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData47], + data: make([]byte, 47), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_47 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData47], + data: make([]byte, 48), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_48 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData48], + data: make([]byte, 47), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_48", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData48], + data: make([]byte, 48), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_48 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData48], + data: make([]byte, 49), + }, + expectedErr: 
scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_49 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData49], + data: make([]byte, 48), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_49", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData49], + data: make([]byte, 49), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_49 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData49], + data: make([]byte, 50), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_50 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData50], + data: make([]byte, 49), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_50", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData50], + data: make([]byte, 50), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_50 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData50], + data: make([]byte, 51), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_51 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData51], + data: make([]byte, 50), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_51", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData51], + data: make([]byte, 51), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_51 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData51], + data: make([]byte, 52), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_52 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData52], + data: make([]byte, 51), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_52", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData52], + data: make([]byte, 52), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_52 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData52], + data: make([]byte, 53), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_53 short", + pop: &parsedOpcode{ + opcode: 
&opcodeArray[OpData53], + data: make([]byte, 52), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_53", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData53], + data: make([]byte, 53), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_53 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData53], + data: make([]byte, 54), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_54 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData54], + data: make([]byte, 53), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_54", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData54], + data: make([]byte, 54), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_54 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData54], + data: make([]byte, 55), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_55 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData55], + data: make([]byte, 54), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_55", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData55], + data: make([]byte, 55), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_55 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData55], + data: make([]byte, 56), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_56 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData56], + data: make([]byte, 55), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_56", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData56], + data: make([]byte, 56), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_56 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData56], + data: make([]byte, 57), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_57 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData57], + data: make([]byte, 56), + }, + expectedErr: scriptError(ErrInternal, ""), 
+ }, + { + name: "OP_DATA_57", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData57], + data: make([]byte, 57), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_57 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData57], + data: make([]byte, 58), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_58 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData58], + data: make([]byte, 57), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_58", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData58], + data: make([]byte, 58), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_58 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData58], + data: make([]byte, 59), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_59 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData59], + data: make([]byte, 58), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_59", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData59], + data: make([]byte, 59), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_59 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData59], + data: make([]byte, 60), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_60 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData60], + data: make([]byte, 59), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_60", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData60], + data: make([]byte, 60), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_60 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData60], + data: make([]byte, 61), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_61 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData61], + data: make([]byte, 60), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_61", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData61], + data: 
make([]byte, 61), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_61 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData61], + data: make([]byte, 62), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_62 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData62], + data: make([]byte, 61), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_62", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData62], + data: make([]byte, 62), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_62 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData62], + data: make([]byte, 63), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_63 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData63], + data: make([]byte, 62), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_63", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData63], + data: make([]byte, 63), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_63 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData63], + data: make([]byte, 64), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_64 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData64], + data: make([]byte, 63), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_64", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData64], + data: make([]byte, 64), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_64 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData64], + data: make([]byte, 65), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_65 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData65], + data: make([]byte, 64), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_65", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData65], + data: make([]byte, 65), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_65 long", + pop: 
&parsedOpcode{ + opcode: &opcodeArray[OpData65], + data: make([]byte, 66), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_66 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData66], + data: make([]byte, 65), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_66", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData66], + data: make([]byte, 66), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_66 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData66], + data: make([]byte, 67), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_67 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData67], + data: make([]byte, 66), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_67", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData67], + data: make([]byte, 67), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_67 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData67], + data: make([]byte, 68), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_68 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData68], + data: make([]byte, 67), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_68", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData68], + data: make([]byte, 68), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_68 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData68], + data: make([]byte, 69), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_69 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData69], + data: make([]byte, 68), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_69", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData69], + data: make([]byte, 69), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_69 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData69], + data: make([]byte, 70), + }, + expectedErr: 
scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_70 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData70], + data: make([]byte, 69), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_70", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData70], + data: make([]byte, 70), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_70 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData70], + data: make([]byte, 71), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_71 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData71], + data: make([]byte, 70), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_71", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData71], + data: make([]byte, 71), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_71 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData71], + data: make([]byte, 72), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_72 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData72], + data: make([]byte, 71), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_72", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData72], + data: make([]byte, 72), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_72 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData72], + data: make([]byte, 73), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_73 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData73], + data: make([]byte, 72), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_73", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData73], + data: make([]byte, 73), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_73 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData73], + data: make([]byte, 74), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_74 short", + pop: &parsedOpcode{ + opcode: 
&opcodeArray[OpData74], + data: make([]byte, 73), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_74", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData74], + data: make([]byte, 74), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_74 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData74], + data: make([]byte, 75), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_75 short", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData75], + data: make([]byte, 74), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DATA_75", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData75], + data: make([]byte, 75), + }, + expectedErr: nil, + }, + { + name: "OP_DATA_75 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpData75], + data: make([]byte, 76), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_PUSHDATA1", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPushData1], + data: []byte{0, 1, 2, 3, 4}, + }, + expectedErr: nil, + }, + { + name: "OP_PUSHDATA2", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPushData2], + data: []byte{0, 1, 2, 3, 4}, + }, + expectedErr: nil, + }, + { + name: "OP_PUSHDATA4", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPushData1], + data: []byte{0, 1, 2, 3, 4}, + }, + expectedErr: nil, + }, + { + name: "OP_1NEGATE", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op1Negate], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_1NEGATE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op1Negate], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_RESERVED", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReserved], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_RESERVED long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReserved], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_TRUE", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpTrue], 
+ data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_TRUE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpTrue], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_3", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op3], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_3 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op3], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_4", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op4], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_4 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op4], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_5", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op5], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_5 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op5], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_6", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op6], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_6 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op6], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_7", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op7], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_7 long", + pop: &parsedOpcode{ + opcode: 
&opcodeArray[Op7], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_8", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op8], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_8 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op8], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_9", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op9], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_9 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op9], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_10", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op10], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_10 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op10], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_11", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op11], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_11 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op11], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_12", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op12], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_12 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op12], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_13", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op13], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_13 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op13], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_14", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op14], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_14 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op14], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, 
""), + }, + { + name: "OP_15", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op15], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_15 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op15], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_16", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op16], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_16 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op16], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_NOP", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNop], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NOP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNop], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_VER", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVer], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_VER long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVer], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_IF", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpIf], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_IF long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpIf], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_NOTIF", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNotIf], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NOTIF long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNotIf], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_VERIF", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVerIf], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_VERIF long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVerIf], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_VERNOTIF", + pop: 
&parsedOpcode{ + opcode: &opcodeArray[OpVerNotIf], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_VERNOTIF long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVerNotIf], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_ELSE", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpElse], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_ELSE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpElse], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_ENDIF", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpEndIf], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_ENDIF long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpEndIf], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_VERIFY", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVerify], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_VERIFY long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpVerify], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_RETURN", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReturn], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_RETURN long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReturn], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_TOALTSTACK", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpToAltStack], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_TOALTSTACK long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpToAltStack], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_FROMALTSTACK", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpFromAltStack], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_FROMALTSTACK long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpFromAltStack], + data: make([]byte, 1), + }, + expectedErr: 
scriptError(ErrInternal, ""), + }, + { + name: "OP_2DROP", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Drop], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2DROP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Drop], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2DUP", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Dup], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2DUP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Dup], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_3DUP", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op3Dup], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_3DUP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op3Dup], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2OVER", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Over], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2OVER long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Over], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2ROT", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Rot], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2ROT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Rot], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2SWAP", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Swap], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2SWAP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Swap], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_IFDUP", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpIfDup], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_IFDUP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpIfDup], + data: make([]byte, 1), + }, + expectedErr: 
scriptError(ErrInternal, ""), + }, + { + name: "OP_DEPTH", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDepth], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_DEPTH long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDepth], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DROP", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDrop], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_DROP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDrop], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_DUP", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDup], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_DUP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDup], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_NIP", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNip], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NIP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNip], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_OVER", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpOver], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_OVER long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpOver], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_PICK", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPick], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_PICK long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPick], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_ROLL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRoll], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_ROLL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRoll], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), 
+ }, + { + name: "OP_ROT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRot], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_ROT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRot], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_SWAP", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSwap], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_SWAP long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSwap], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_TUCK", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpTuck], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_TUCK long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpTuck], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_CAT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCat], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_CAT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCat], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_SUBSTR", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSubStr], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_SUBSTR long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSubStr], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_LEFT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLeft], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_LEFT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLeft], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_LEFT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLeft], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_LEFT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLeft], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: 
"OP_RIGHT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRight], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_RIGHT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRight], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_SIZE", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSize], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_SIZE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSize], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_INVERT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpInvert], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_INVERT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpInvert], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_AND", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpAnd], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_AND long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpAnd], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_OR", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpOr], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_OR long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpOr], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_XOR", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpXor], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_XOR long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpXor], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_EQUAL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpEqual], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_EQUAL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpEqual], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_EQUALVERIFY", + pop: 
&parsedOpcode{ + opcode: &opcodeArray[OpEqualVerify], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_EQUALVERIFY long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpEqualVerify], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_RESERVED1", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReserved1], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_RESERVED1 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReserved1], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_RESERVED2", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReserved2], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_RESERVED2 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpReserved2], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_1ADD", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op1Add], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_1ADD long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op1Add], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_1SUB", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op1Sub], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_1SUB long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op1Sub], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2MUL", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Mul], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2MUL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Mul], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_2DIV", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Div], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_2DIV long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op2Div], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), 
+ }, + { + name: "OP_NEGATE", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNegate], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NEGATE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNegate], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_ABS", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpAbs], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_ABS long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpAbs], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_NOT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNot], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NOT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNot], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_0NOTEQUAL", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op0NotEqual], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_0NOTEQUAL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[Op0NotEqual], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_ADD", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpAdd], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_ADD long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpAdd], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_SUB", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSub], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_SUB long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSub], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_MUL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMul], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_MUL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMul], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: 
"OP_DIV", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDiv], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_DIV long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpDiv], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_MOD", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMod], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_MOD long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMod], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_LSHIFT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLShift], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_LSHIFT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLShift], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_RSHIFT", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRShift], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_RSHIFT long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpRShift], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_BOOLAND", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpBoolAnd], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_BOOLAND long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpBoolAnd], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_BOOLOR", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpBoolOr], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_BOOLOR long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpBoolOr], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_NUMEQUAL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNumEqual], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NUMEQUAL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNumEqual], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, 
""), + }, + { + name: "OP_NUMEQUALVERIFY", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNumEqualVerify], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NUMEQUALVERIFY long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNumEqualVerify], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_NUMNOTEQUAL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNumNotEqual], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_NUMNOTEQUAL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpNumNotEqual], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_LESSTHAN", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLessThan], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_LESSTHAN long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLessThan], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_GREATERTHAN", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpGreaterThan], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_GREATERTHAN long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpGreaterThan], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_LESSTHANOREQUAL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLessThanOrEqual], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_LESSTHANOREQUAL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpLessThanOrEqual], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_GREATERTHANOREQUAL", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpGreaterThanOrEqual], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_GREATERTHANOREQUAL long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpGreaterThanOrEqual], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_MIN", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMin], + data: 
nil, + }, + expectedErr: nil, + }, + { + name: "OP_MIN long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMin], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_MAX", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMax], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_MAX long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpMax], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_WITHIN", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpWithin], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_WITHIN long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpWithin], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_SHA256", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSHA256], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_SHA256 long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpSHA256], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_BLAKE2B", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpBlake2b], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_BLAKE2B long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpBlake2b], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_CHECKSIG", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckSig], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_CHECKSIG long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckSig], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_CHECKSIGVERIFY", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckSigVerify], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_CHECKSIGVERIFY long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckSigVerify], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_CHECKMULTISIG", 
+ pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckMultiSig], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_CHECKMULTISIG long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckMultiSig], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_CHECKMULTISIGVERIFY", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckMultiSigVerify], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_CHECKMULTISIGVERIFY long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpCheckMultiSigVerify], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_PUBKEYHASH", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPubKeyHash], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_PUBKEYHASH long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPubKeyHash], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_PUBKEY", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPubKey], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_PUBKEY long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpPubKey], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + { + name: "OP_INVALIDOPCODE", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpInvalidOpCode], + data: nil, + }, + expectedErr: nil, + }, + { + name: "OP_INVALIDOPCODE long", + pop: &parsedOpcode{ + opcode: &opcodeArray[OpInvalidOpCode], + data: make([]byte, 1), + }, + expectedErr: scriptError(ErrInternal, ""), + }, + } + + for _, test := range tests { + _, err := test.pop.bytes() + if e := checkScriptError(err, test.expectedErr); e != nil { + t.Errorf("Parsed opcode test '%s': %v", test.name, e) + continue + } + } +} + +// TestPushedData ensured the PushedData function extracts the expected data out +// of various scripts. 
+func TestPushedData(t *testing.T) { + t.Parallel() + + var tests = []struct { + script string + out [][]byte + valid bool + }{ + { + "0 IF 0 ELSE 2 ENDIF", + [][]byte{nil, nil}, + true, + }, + { + "16777216 10000000", + [][]byte{ + {0x00, 0x00, 0x00, 0x01}, // 16777216 + {0x80, 0x96, 0x98, 0x00}, // 10000000 + }, + true, + }, + { + "DUP BLAKE2B '17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem' EQUALVERIFY CHECKSIG", + [][]byte{ + // 17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem + { + 0x31, 0x37, 0x56, 0x5a, 0x4e, 0x58, 0x31, 0x53, 0x4e, 0x35, + 0x4e, 0x74, 0x4b, 0x61, 0x38, 0x55, 0x51, 0x46, 0x78, 0x77, + 0x51, 0x62, 0x46, 0x65, 0x46, 0x63, 0x33, 0x69, 0x71, 0x52, + 0x59, 0x68, 0x65, 0x6d, + }, + }, + true, + }, + { + "PUSHDATA4 1000 EQUAL", + nil, + false, + }, + } + + for i, test := range tests { + script := mustParseShortForm(test.script, 0) + data, err := PushedData(script) + if test.valid && err != nil { + t.Errorf("TestPushedData failed test #%d: %v\n", i, err) + continue + } else if !test.valid && err == nil { + t.Errorf("TestPushedData failed test #%d: test should "+ + "be invalid\n", i) + continue + } + if !reflect.DeepEqual(data, test.out) { + t.Errorf("TestPushedData failed test #%d: want: %x "+ + "got: %x\n", i, test.out, data) + } + } +} + +// isPushOnlyScript returns whether or not the passed script only pushes data. +// +// False will be returned when the script does not parse. +func isPushOnlyScript(script []byte) (bool, error) { + pops, err := parseScript(script) + if err != nil { + return false, err + } + return isPushOnly(pops), nil +} + +// TestHasCanonicalPush ensures the canonicalPush function works as expected. 
+func TestHasCanonicalPush(t *testing.T) { + t.Parallel() + + for i := 0; i < 65535; i++ { + script, err := NewScriptBuilder().AddInt64(int64(i)).Script() + if err != nil { + t.Errorf("Script: test #%d unexpected error: %v\n", i, + err) + continue + } + if result, _ := isPushOnlyScript(script); !result { + t.Errorf("isPushOnlyScript: test #%d failed: %x\n", i, + script) + continue + } + pops, err := parseScript(script) + if err != nil { + t.Errorf("parseScript: #%d failed: %v", i, err) + continue + } + for _, pop := range pops { + if result := canonicalPush(pop); !result { + t.Errorf("canonicalPush: test #%d failed: %x\n", + i, script) + break + } + } + } + for i := 0; i <= MaxScriptElementSize; i++ { + builder := NewScriptBuilder() + builder.AddData(bytes.Repeat([]byte{0x49}, i)) + script, err := builder.Script() + if err != nil { + t.Errorf("StandardPushesTests test #%d unexpected error: %v\n", i, err) + continue + } + if result, _ := isPushOnlyScript(script); !result { + t.Errorf("StandardPushesTests isPushOnlyScript test #%d failed: %x\n", i, script) + continue + } + pops, err := parseScript(script) + if err != nil { + t.Errorf("StandardPushesTests #%d failed to TstParseScript: %v", i, err) + continue + } + for _, pop := range pops { + if result := canonicalPush(pop); !result { + t.Errorf("StandardPushesTests TstHasCanonicalPushes test #%d failed: %x\n", i, script) + break + } + } + } +} + +func hexDecode(src string) []byte { + decoded, err := hex.DecodeString(src) + if err != nil { + panic(err) + } + return decoded +} + +// TestGetPreciseSigOps ensures the more precise signature operation counting +// mechanism which includes signatures in P2SH scripts works as expected. +func TestGetPreciseSigOps(t *testing.T) { + t.Parallel() + + // The signature in the p2sh script is nonsensical for most tests since + // this script will never be executed. What matters is that it matches + // the right pattern. 
+ scriptOnly := mustParseShortForm("BLAKE2B DATA_32 0x433ec2ac1ffa1b7b7d0"+ + "27f564529c57197f9ae88 EQUAL", 0) + scriptPubKey := &externalapi.ScriptPublicKey{scriptOnly, 0} + + tests := []struct { + name string + scriptSig []byte + scriptPublicKey *externalapi.ScriptPublicKey + nSigOps int + }{ + { + name: "scriptSig doesn't parse", + scriptSig: mustParseShortForm("PUSHDATA1 0x02", 0), + scriptPublicKey: scriptPubKey, + }, + { + name: "scriptSig isn't push only", + scriptSig: mustParseShortForm("1 DUP", 0), + nSigOps: 0, + scriptPublicKey: scriptPubKey, + }, + { + name: "scriptSig length 0", + scriptSig: nil, + nSigOps: 0, + scriptPublicKey: scriptPubKey, + }, + { + name: "No script at the end", + // No script at end but still push only. + scriptSig: mustParseShortForm("1 1", 0), + nSigOps: 0, + scriptPublicKey: scriptPubKey, + }, + { + name: "pushed script doesn't parse", + scriptSig: mustParseShortForm("DATA_2 PUSHDATA1 0x02", 0), + scriptPublicKey: scriptPubKey, + }, + { + name: "mainnet multisig transaction 487f94ffa63106f72644068765b9dc629bb63e481210f382667d4a93b69af412", + scriptSig: hexDecode("41eb577889fa28283709201ef5b056745c6cf0546dd31666cecd41c40a581b256e885d941b86b14d44efacec12d614e7fcabf7b341660f95bab16b71d766ab010501411c0eeef117ca485d34e4bc0cf6d5b578aa250c5d13ebff0882a7e2eeea1f31e8ecb6755696d194b1b0fcb853afab28b61f3f7cec487bd611df7e57252802f535014c875220ab64c7691713a32ea6dfced9155c5c26e8186426f0697af0db7a4b1340f992d12041ae738d66fe3d21105483e5851778ad73c5cddf0819c5e8fd8a589260d967e72065120722c36d3fac19646258481dd3661fa767da151304af514cb30af5cb5692203cd7690ecb67cbbe6cafad00a7c9133da535298ab164549e0cce2658f7b3032754ae"), + scriptPublicKey: &externalapi.ScriptPublicKey{ + Script: hexDecode("aa20f38031f61ca23d70844f63a477d07f0b2c2decab907c2e096e548b0e08721c7987"), + Version: 0, + }, + nSigOps: 4, + }, + { + name: "a partially parseable script public key", + scriptSig: nil, + scriptPublicKey: &externalapi.ScriptPublicKey{ + Script: 
mustParseShortForm("CHECKSIG CHECKSIG DATA_1", 0), + Version: 0, + }, + nSigOps: 2, + }, + { + name: "p2pk", + scriptSig: hexDecode("416db0c0ce824a6d076c8e73aae9987416933df768e07760829cb0685dc0a2bbb11e2c0ced0cab806e111a11cbda19784098fd25db176b6a9d7c93e5747674d32301"), + scriptPublicKey: &externalapi.ScriptPublicKey{ + Script: hexDecode("208a457ca74ade0492c44c440da1cab5b008d8449150fe2794f0d8f4cce7e8aa27ac"), + Version: 0, + }, + nSigOps: 1, + }, + } + + for _, test := range tests { + count := GetPreciseSigOpCount(test.scriptSig, test.scriptPublicKey, true) + if count != test.nSigOps { + t.Errorf("%s: expected count of %d, got %d", test.name, + test.nSigOps, count) + + } + } +} + +// TestIsPayToScriptHash ensures the IsPayToScriptHash function returns the +// expected results for all the scripts in scriptClassTests. +func TestIsPayToScriptHash(t *testing.T) { + t.Parallel() + + for _, test := range scriptClassTests { + script := &externalapi.ScriptPublicKey{mustParseShortForm(test.script, 0), 0} + shouldBe := (test.class == ScriptHashTy) + p2sh := IsPayToScriptHash(script) + if p2sh != shouldBe { + t.Errorf("%s: expected p2sh %v, got %v", test.name, + shouldBe, p2sh) + } + } +} + +// TestHasCanonicalPushes ensures the canonicalPush function properly determines +// what is considered a canonical push. 
+func TestHasCanonicalPushes(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + script string + expected bool + }{ + { + name: "does not parse", + script: "0x046708afdb0fe5548271967f1a67130b7105cd6a82" + + "8e03909a67962e0ea1f61d", + expected: false, + }, + { + name: "non-canonical push", + script: "PUSHDATA1 0x04 0x01020304", + expected: false, + }, + } + + for i, test := range tests { + script := mustParseShortForm(test.script, 0) + pops, err := parseScript(script) + if err != nil { + if test.expected { + t.Errorf("TstParseScript #%d failed: %v", i, err) + } + continue + } + for _, pop := range pops { + if canonicalPush(pop) != test.expected { + t.Errorf("canonicalPush: #%d (%s) wrong result"+ + "\ngot: %v\nwant: %v", i, test.name, + true, test.expected) + break + } + } + } +} + +// TestIsPushOnly ensures the isPushOnly function returns the +// expected results. +func TestIsPushOnly(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + script []byte + expectedResult bool + shouldFail bool + }{ + { + name: "does not parse", + script: mustParseShortForm("0x046708afdb0fe5548271967f1a67130"+ + "b7105cd6a828e03909a67962e0ea1f61d", 0), + expectedResult: false, + shouldFail: true, + }, + { + name: "non push only script", + script: mustParseShortForm("0x515293", 0), //OP_1 OP_2 OP_ADD + expectedResult: false, + shouldFail: false, + }, + { + name: "push only script", + script: mustParseShortForm("0x5152", 0), //OP_1 OP_2 + expectedResult: true, + shouldFail: false, + }, + } + + for _, test := range tests { + isPushOnly, err := isPushOnlyScript(test.script) + + if isPushOnly != test.expectedResult { + t.Errorf("isPushOnlyScript (%s) wrong result\ngot: %v\nwant: "+ + "%v", test.name, isPushOnly, test.expectedResult) + } + + if test.shouldFail && err == nil { + t.Errorf("isPushOnlyScript (%s) expected an error but got ", test.name) + } + + if !test.shouldFail && err != nil { + t.Errorf("isPushOnlyScript (%s) expected no error but got: 
%v", test.name, err) + } + } +} + +// TestIsUnspendable ensures the IsUnspendable function returns the expected +// results. +func TestIsUnspendable(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + scriptPubKey []byte + expected bool + }{ + { + // Unspendable + scriptPubKey: []byte{0x6a, 0x04, 0x74, 0x65, 0x73, 0x74}, + expected: true, + }, + { + // Spendable + scriptPubKey: []byte{0x76, 0xa9, 0x14, 0x29, 0x95, 0xa0, + 0xfe, 0x68, 0x43, 0xfa, 0x9b, 0x95, 0x45, + 0x97, 0xf0, 0xdc, 0xa7, 0xa4, 0x4d, 0xf6, + 0xfa, 0x0b, 0x5c, 0x88, 0xac}, + expected: false, + }, + } + + for i, test := range tests { + res := IsUnspendable(test.scriptPubKey) + if res != test.expected { + t.Errorf("TestIsUnspendable #%d failed: got %v want %v", + i, res, test.expected) + continue + } + } +} diff --git a/domain/consensus/utils/txscript/scriptbuilder.go b/domain/consensus/utils/txscript/scriptbuilder.go new file mode 100644 index 0000000..a3206b2 --- /dev/null +++ b/domain/consensus/utils/txscript/scriptbuilder.go @@ -0,0 +1,300 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "encoding/binary" + "fmt" +) + +const ( + // defaultScriptAlloc is the default size used for the backing array + // for a script being built by the ScriptBuilder. The array will + // dynamically grow as needed, but this figure is intended to provide + // enough space for vast majority of scripts without needing to grow the + // backing array multiple times. + defaultScriptAlloc = 500 +) + +// ErrScriptNotCanonical identifies a non-canonical script. The caller can use +// a type assertion to detect this error type. +type ErrScriptNotCanonical string + +// Error implements the error interface. +func (e ErrScriptNotCanonical) Error() string { + return string(e) +} + +// ScriptBuilder provides a facility for building custom scripts. 
It allows +// you to push opcodes, ints, and data while respecting canonical encoding. In +// general it does not ensure the script will execute correctly, however any +// data pushes which would exceed the maximum allowed script engine limits and +// are therefore guaranteed not to execute will not be pushed and will result in +// the Script function returning an error. +// +// For example, the following would build a 2-of-3 multisig script for usage in +// a pay-to-script-hash (although in this situation MultiSigScript() would be a +// better choice to generate the script): +// +// builder := txscript.NewScriptBuilder() +// builder.AddOp(txscript.OP_2).AddData(pubKey1).AddData(pubKey2) +// builder.AddData(pubKey3).AddOp(txscript.OP_3) +// builder.AddOp(txscript.OP_CHECKMULTISIG) +// script, err := builder.Script() +// if err != nil { +// // Handle the error. +// return +// } +// fmt.Printf("Final multi-sig script: %x\n", script) +type ScriptBuilder struct { + script []byte + err error +} + +// AddOp pushes the passed opcode to the end of the script. The script will not +// be modified if pushing the opcode would cause the script to exceed the +// maximum allowed script engine size. +func (b *ScriptBuilder) AddOp(opcode byte) *ScriptBuilder { + if b.err != nil { + return b + } + + // Pushes that would cause the script to exceed the largest allowed + // script size would result in a non-canonical script. + if len(b.script)+1 > MaxScriptSize { + str := fmt.Sprintf("adding an opcode would exceed the maximum "+ + "allowed canonical script length of %d", MaxScriptSize) + b.err = ErrScriptNotCanonical(str) + return b + } + + b.script = append(b.script, opcode) + return b +} + +// AddOps pushes the passed opcodes to the end of the script. The script will +// not be modified if pushing the opcodes would cause the script to exceed the +// maximum allowed script engine size. 
+func (b *ScriptBuilder) AddOps(opcodes []byte) *ScriptBuilder { + if b.err != nil { + return b + } + + // Pushes that would cause the script to exceed the largest allowed + // script size would result in a non-canonical script. + if len(b.script)+len(opcodes) > MaxScriptSize { + str := fmt.Sprintf("adding opcodes would exceed the maximum "+ + "allowed canonical script length of %d", MaxScriptSize) + b.err = ErrScriptNotCanonical(str) + return b + } + + b.script = append(b.script, opcodes...) + return b +} + +// canonicalDataSize returns the number of bytes the canonical encoding of the +// data will take. +func canonicalDataSize(data []byte) int { + dataLen := len(data) + + // When the data consists of a single number that can be represented + // by one of the "small integer" opcodes, that opcode will be instead + // of a data push opcode followed by the number. + if dataLen == 0 { + return 1 + } else if dataLen == 1 && data[0] <= 16 { + return 1 + } else if dataLen == 1 && data[0] == 0x81 { + return 1 + } + + if dataLen < OpPushData1 { + return 1 + dataLen + } else if dataLen <= 0xff { + return 2 + dataLen + } else if dataLen <= 0xffff { + return 3 + dataLen + } + + return 5 + dataLen +} + +// addData is the internal function that actually pushes the passed data to the +// end of the script. It automatically chooses canonical opcodes depending on +// the length of the data. A zero length buffer will lead to a push of empty +// data onto the stack (OP_0). No data limits are enforced with this function. +func (b *ScriptBuilder) addData(data []byte) *ScriptBuilder { + dataLen := len(data) + + // When the data consists of a single number that can be represented + // by one of the "small integer" opcodes, use that opcode instead of + // a data push opcode followed by the number. 
+ if dataLen == 0 || dataLen == 1 && data[0] == 0 { + b.script = append(b.script, Op0) + return b + } else if dataLen == 1 && data[0] <= 16 { + b.script = append(b.script, (Op1-1)+data[0]) + return b + } else if dataLen == 1 && data[0] == 0x81 { + b.script = append(b.script, byte(Op1Negate)) + return b + } + + // Use one of the OP_DATA_# opcodes if the length of the data is small + // enough so the data push instruction is only a single byte. + // Otherwise, choose the smallest possible OP_PUSHDATA# opcode that + // can represent the length of the data. + if dataLen < OpPushData1 { + b.script = append(b.script, byte((OpData1-1)+dataLen)) + } else if dataLen <= 0xff { + b.script = append(b.script, OpPushData1, byte(dataLen)) + } else if dataLen <= 0xffff { + buf := make([]byte, 2) + binary.LittleEndian.PutUint16(buf, uint16(dataLen)) + b.script = append(b.script, OpPushData2) + b.script = append(b.script, buf...) + } else { + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, uint32(dataLen)) + b.script = append(b.script, OpPushData4) + b.script = append(b.script, buf...) + } + + // Append the actual data. + b.script = append(b.script, data...) + + return b +} + +// AddFullData should not typically be used by ordinary users as it does not +// include the checks which prevent data pushes larger than the maximum allowed +// sizes which leads to scripts that can't be executed. This is provided for +// testing purposes such as tests where sizes are intentionally made larger +// than allowed. +// +// Use AddData instead. +func (b *ScriptBuilder) AddFullData(data []byte) *ScriptBuilder { + if b.err != nil { + return b + } + + return b.addData(data) +} + +// AddData pushes the passed data to the end of the script. It automatically +// chooses canonical opcodes depending on the length of the data. 
A zero length +// buffer will lead to a push of empty data onto the stack (OP_0) and any push +// of data greater than MaxScriptElementSize will not modify the script since +// that is not allowed by the script engine. Also, the script will not be +// modified if pushing the data would cause the script to exceed the maximum +// allowed script engine size. +func (b *ScriptBuilder) AddData(data []byte) *ScriptBuilder { + if b.err != nil { + return b + } + + // Pushes that would cause the script to exceed the largest allowed + // script size would result in a non-canonical script. + dataSize := canonicalDataSize(data) + if len(b.script)+dataSize > MaxScriptSize { + str := fmt.Sprintf("adding %d bytes of data would exceed the "+ + "maximum allowed canonical script length of %d", + dataSize, MaxScriptSize) + b.err = ErrScriptNotCanonical(str) + return b + } + + // Pushes larger than the max script element size would result in a + // script that is not canonical. + dataLen := len(data) + if dataLen > MaxScriptElementSize { + str := fmt.Sprintf("adding a data element of %d bytes would "+ + "exceed the maximum allowed script element size of %d", + dataLen, MaxScriptElementSize) + b.err = ErrScriptNotCanonical(str) + return b + } + + return b.addData(data) +} + +// AddInt64 pushes the passed integer to the end of the script. The script will +// not be modified if pushing the data would cause the script to exceed the +// maximum allowed script engine size. +func (b *ScriptBuilder) AddInt64(val int64) *ScriptBuilder { + if b.err != nil { + return b + } + + // Pushes that would cause the script to exceed the largest allowed + // script size would result in a non-canonical script. + if len(b.script)+1 > MaxScriptSize { + str := fmt.Sprintf("adding an integer would exceed the "+ + "maximum allow canonical script length of %d", + MaxScriptSize) + b.err = ErrScriptNotCanonical(str) + return b + } + + // Fast path for small integers and OP_1NEGATE. 
+ if val == 0 { + b.script = append(b.script, Op0) + return b + } + if val == -1 || (val >= 1 && val <= 16) { + b.script = append(b.script, byte((Op1-1)+val)) + return b + } + + return b.AddData(scriptNum(val).Bytes()) +} + +// AddLockTimeNumber gets a uint64 lockTime,converts it to byte array in little-endian, and then used the AddData function. +func (b *ScriptBuilder) AddLockTimeNumber(lockTime uint64) *ScriptBuilder { + return b.addLockTimeOrSequence(lockTime) +} + +// AddSequenceNumber gets a uint64 sequence, converts it to byte array in little-endian, and then used the AddData function. +func (b *ScriptBuilder) AddSequenceNumber(sequence uint64) *ScriptBuilder { + return b.addLockTimeOrSequence(sequence) +} + +// addLockTimeOrSequence gets a uint64 lockTime/sequence, converts it to byte array in little-endian, and then used the AddData function. +func (b *ScriptBuilder) addLockTimeOrSequence(lockTimeOrSequence uint64) *ScriptBuilder { + if b.err != nil { + return b + } + lockTimeOrSequenceBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(lockTimeOrSequenceBytes, lockTimeOrSequence) + unpaddedSize := 8 + for lockTimeOrSequenceBytes[unpaddedSize-1] == 0 { + unpaddedSize-- + } + fixedLockTimeOrSequenceBytesBytes := lockTimeOrSequenceBytes[:unpaddedSize] + return b.AddData(fixedLockTimeOrSequenceBytesBytes) +} + +// Reset resets the script so it has no content. +func (b *ScriptBuilder) Reset() *ScriptBuilder { + b.script = b.script[0:0] + b.err = nil + return b +} + +// Script returns the currently built script. When any errors occurred while +// building the script, the script will be returned up the point of the first +// error along with the error. +func (b *ScriptBuilder) Script() ([]byte, error) { + return b.script, b.err +} + +// NewScriptBuilder returns a new instance of a script builder. See +// ScriptBuilder for details. 
+func NewScriptBuilder() *ScriptBuilder { + return &ScriptBuilder{ + script: make([]byte, 0, defaultScriptAlloc), + } +} diff --git a/domain/consensus/utils/txscript/scriptbuilder_test.go b/domain/consensus/utils/txscript/scriptbuilder_test.go new file mode 100644 index 0000000..b62b1a6 --- /dev/null +++ b/domain/consensus/utils/txscript/scriptbuilder_test.go @@ -0,0 +1,414 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "bytes" + "github.com/pkg/errors" + "testing" +) + +// TestScriptBuilderAddOp tests that pushing opcodes to a script via the +// ScriptBuilder API works as expected. +func TestScriptBuilderAddOp(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + opcodes []byte + expected []byte + }{ + { + name: "push OP_0", + opcodes: []byte{Op0}, + expected: []byte{Op0}, + }, + { + name: "push OP_1 OP_2", + opcodes: []byte{Op1, Op2}, + expected: []byte{Op1, Op2}, + }, + { + name: "push OP_BLAKE2B OP_EQUAL", + opcodes: []byte{OpBlake2b, OpEqual}, + expected: []byte{OpBlake2b, OpEqual}, + }, + } + + // Run tests and individually add each op via AddOp. + builder := NewScriptBuilder() + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + builder.Reset() + for _, opcode := range test.opcodes { + builder.AddOp(opcode) + } + result, err := builder.Script() + if err != nil { + t.Errorf("ScriptBuilder.AddOp #%d (%s) unexpected "+ + "error: %v", i, test.name, err) + continue + } + if !bytes.Equal(result, test.expected) { + t.Errorf("ScriptBuilder.AddOp #%d (%s) wrong result\n"+ + "got: %x\nwant: %x", i, test.name, result, + test.expected) + continue + } + } + + // Run tests and bulk add ops via AddOps. 
+ t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + builder.Reset() + result, err := builder.AddOps(test.opcodes).Script() + if err != nil { + t.Errorf("ScriptBuilder.AddOps #%d (%s) unexpected "+ + "error: %v", i, test.name, err) + continue + } + if !bytes.Equal(result, test.expected) { + t.Errorf("ScriptBuilder.AddOps #%d (%s) wrong result\n"+ + "got: %x\nwant: %x", i, test.name, result, + test.expected) + continue + } + } + +} + +// TestScriptBuilderAddInt64 tests that pushing signed integers to a script via +// the ScriptBuilder API works as expected. +func TestScriptBuilderAddInt64(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + val int64 + expected []byte + }{ + {name: "push -1", val: -1, expected: []byte{Op1Negate}}, + {name: "push small int 0", val: 0, expected: []byte{Op0}}, + {name: "push small int 1", val: 1, expected: []byte{Op1}}, + {name: "push small int 2", val: 2, expected: []byte{Op2}}, + {name: "push small int 3", val: 3, expected: []byte{Op3}}, + {name: "push small int 4", val: 4, expected: []byte{Op4}}, + {name: "push small int 5", val: 5, expected: []byte{Op5}}, + {name: "push small int 6", val: 6, expected: []byte{Op6}}, + {name: "push small int 7", val: 7, expected: []byte{Op7}}, + {name: "push small int 8", val: 8, expected: []byte{Op8}}, + {name: "push small int 9", val: 9, expected: []byte{Op9}}, + {name: "push small int 10", val: 10, expected: []byte{Op10}}, + {name: "push small int 11", val: 11, expected: []byte{Op11}}, + {name: "push small int 12", val: 12, expected: []byte{Op12}}, + {name: "push small int 13", val: 13, expected: []byte{Op13}}, + {name: "push small int 14", val: 14, expected: []byte{Op14}}, + {name: "push small int 15", val: 15, expected: []byte{Op15}}, + {name: "push small int 16", val: 16, expected: []byte{Op16}}, + {name: "push 17", val: 17, expected: []byte{OpData1, 0x11}}, + {name: "push 65", val: 65, expected: []byte{OpData1, 0x41}}, + {name: "push 127", val: 127, 
expected: []byte{OpData1, 0x7f}}, + {name: "push 128", val: 128, expected: []byte{OpData2, 0x80, 0}}, + {name: "push 255", val: 255, expected: []byte{OpData2, 0xff, 0}}, + {name: "push 256", val: 256, expected: []byte{OpData2, 0, 0x01}}, + {name: "push 32767", val: 32767, expected: []byte{OpData2, 0xff, 0x7f}}, + {name: "push 32768", val: 32768, expected: []byte{OpData3, 0, 0x80, 0}}, + {name: "push -2", val: -2, expected: []byte{OpData1, 0x82}}, + {name: "push -3", val: -3, expected: []byte{OpData1, 0x83}}, + {name: "push -4", val: -4, expected: []byte{OpData1, 0x84}}, + {name: "push -5", val: -5, expected: []byte{OpData1, 0x85}}, + {name: "push -17", val: -17, expected: []byte{OpData1, 0x91}}, + {name: "push -65", val: -65, expected: []byte{OpData1, 0xc1}}, + {name: "push -127", val: -127, expected: []byte{OpData1, 0xff}}, + {name: "push -128", val: -128, expected: []byte{OpData2, 0x80, 0x80}}, + {name: "push -255", val: -255, expected: []byte{OpData2, 0xff, 0x80}}, + {name: "push -256", val: -256, expected: []byte{OpData2, 0x00, 0x81}}, + {name: "push -32767", val: -32767, expected: []byte{OpData2, 0xff, 0xff}}, + {name: "push -32768", val: -32768, expected: []byte{OpData3, 0x00, 0x80, 0x80}}, + } + + builder := NewScriptBuilder() + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + builder.Reset().AddInt64(test.val) + result, err := builder.Script() + if err != nil { + t.Errorf("ScriptBuilder.AddInt64 #%d (%s) unexpected "+ + "error: %v", i, test.name, err) + continue + } + if !bytes.Equal(result, test.expected) { + t.Errorf("ScriptBuilder.AddInt64 #%d (%s) wrong result\n"+ + "got: %x\nwant: %x", i, test.name, result, + test.expected) + continue + } + } +} + +// TestScriptBuilderAddData tests that pushing data to a script via the +// ScriptBuilder API works as expected and conforms to BIP0062. 
+func TestScriptBuilderAddData(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + data []byte + expected []byte + useFull bool // use AddFullData instead of AddData. + }{ + // BIP0062: Pushing an empty byte sequence must use OP_0. + {name: "push empty byte sequence", data: nil, expected: []byte{Op0}}, + {name: "push 1 byte 0x00", data: []byte{0x00}, expected: []byte{Op0}}, + + // BIP0062: Pushing a 1-byte sequence of byte 0x01 through 0x10 must use OP_n. + {name: "push 1 byte 0x01", data: []byte{0x01}, expected: []byte{Op1}}, + {name: "push 1 byte 0x02", data: []byte{0x02}, expected: []byte{Op2}}, + {name: "push 1 byte 0x03", data: []byte{0x03}, expected: []byte{Op3}}, + {name: "push 1 byte 0x04", data: []byte{0x04}, expected: []byte{Op4}}, + {name: "push 1 byte 0x05", data: []byte{0x05}, expected: []byte{Op5}}, + {name: "push 1 byte 0x06", data: []byte{0x06}, expected: []byte{Op6}}, + {name: "push 1 byte 0x07", data: []byte{0x07}, expected: []byte{Op7}}, + {name: "push 1 byte 0x08", data: []byte{0x08}, expected: []byte{Op8}}, + {name: "push 1 byte 0x09", data: []byte{0x09}, expected: []byte{Op9}}, + {name: "push 1 byte 0x0a", data: []byte{0x0a}, expected: []byte{Op10}}, + {name: "push 1 byte 0x0b", data: []byte{0x0b}, expected: []byte{Op11}}, + {name: "push 1 byte 0x0c", data: []byte{0x0c}, expected: []byte{Op12}}, + {name: "push 1 byte 0x0d", data: []byte{0x0d}, expected: []byte{Op13}}, + {name: "push 1 byte 0x0e", data: []byte{0x0e}, expected: []byte{Op14}}, + {name: "push 1 byte 0x0f", data: []byte{0x0f}, expected: []byte{Op15}}, + {name: "push 1 byte 0x10", data: []byte{0x10}, expected: []byte{Op16}}, + + // BIP0062: Pushing the byte 0x81 must use OP_1NEGATE. + {name: "push 1 byte 0x81", data: []byte{0x81}, expected: []byte{Op1Negate}}, + + // BIP0062: Pushing any other byte sequence up to 75 bytes must + // use the normal data push (opcode byte n, with n the number of + // bytes, followed n bytes of data being pushed). 
+ {name: "push 1 byte 0x11", data: []byte{0x11}, expected: []byte{OpData1, 0x11}}, + {name: "push 1 byte 0x80", data: []byte{0x80}, expected: []byte{OpData1, 0x80}}, + {name: "push 1 byte 0x82", data: []byte{0x82}, expected: []byte{OpData1, 0x82}}, + {name: "push 1 byte 0xff", data: []byte{0xff}, expected: []byte{OpData1, 0xff}}, + { + name: "push data len 17", + data: bytes.Repeat([]byte{0x49}, 17), + expected: append([]byte{OpData17}, bytes.Repeat([]byte{0x49}, 17)...), + }, + { + name: "push data len 75", + data: bytes.Repeat([]byte{0x49}, 75), + expected: append([]byte{OpData75}, bytes.Repeat([]byte{0x49}, 75)...), + }, + + // BIP0062: Pushing 76 to 255 bytes must use OP_PUSHDATA1. + { + name: "push data len 76", + data: bytes.Repeat([]byte{0x49}, 76), + expected: append([]byte{OpPushData1, 76}, bytes.Repeat([]byte{0x49}, 76)...), + }, + { + name: "push data len 255", + data: bytes.Repeat([]byte{0x49}, 255), + expected: append([]byte{OpPushData1, 255}, bytes.Repeat([]byte{0x49}, 255)...), + }, + + // BIP0062: Pushing 256 to 520 bytes must use OP_PUSHDATA2. + { + name: "push data len 256", + data: bytes.Repeat([]byte{0x49}, 256), + expected: append([]byte{OpPushData2, 0, 1}, bytes.Repeat([]byte{0x49}, 256)...), + }, + { + name: "push data len 520", + data: bytes.Repeat([]byte{0x49}, 520), + expected: append([]byte{OpPushData2, 0x08, 0x02}, bytes.Repeat([]byte{0x49}, 520)...), + }, + + // BIP0062: OP_PUSHDATA4 can never be used, as pushes over 520 + // bytes are not allowed, and those below can be done using + // other operators. 
+ { + name: "push data len 521", + data: bytes.Repeat([]byte{0x49}, 521), + expected: nil, + }, + { + name: "push data len 32767 (canonical)", + data: bytes.Repeat([]byte{0x49}, 32767), + expected: nil, + }, + { + name: "push data len 65536 (canonical)", + data: bytes.Repeat([]byte{0x49}, 65536), + expected: nil, + }, + + // Additional tests for the PushFullData function that + // intentionally allows data pushes to exceed the limit for + // testing purposes. + + // 3-byte data push via OP_PUSHDATA_2. + { + name: "push data len 32767 (non-canonical)", + data: bytes.Repeat([]byte{0x49}, 32767), + expected: append([]byte{OpPushData2, 255, 127}, bytes.Repeat([]byte{0x49}, 32767)...), + useFull: true, + }, + + // 5-byte data push via OP_PUSHDATA_4. + { + name: "push data len 65536 (non-canonical)", + data: bytes.Repeat([]byte{0x49}, 65536), + expected: append([]byte{OpPushData4, 0, 0, 1, 0}, bytes.Repeat([]byte{0x49}, 65536)...), + useFull: true, + }, + } + + builder := NewScriptBuilder() + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + if !test.useFull { + builder.Reset().AddData(test.data) + } else { + builder.Reset().AddFullData(test.data) + } + result, _ := builder.Script() + if !bytes.Equal(result, test.expected) { + t.Errorf("ScriptBuilder.AddData #%d (%s) wrong result\n"+ + "got: %x\nwant: %x", i, test.name, result, + test.expected) + continue + } + } +} + +// TestExceedMaxScriptSize ensures that all of the functions that can be used +// to add data to a script don't allow the script to exceed the max allowed +// size. +func TestExceedMaxScriptSize(t *testing.T) { + t.Parallel() + + // Start off by constructing a max size script. 
+ builder := NewScriptBuilder() + builder.Reset().AddFullData(make([]byte, MaxScriptSize-3)) + origScript, err := builder.Script() + if err != nil { + t.Fatalf("Unexpected error for max size script: %v", err) + } + + // Ensure adding data that would exceed the maximum size of the script + // does not add the data. + script, err := builder.AddData([]byte{0x00}).Script() + var errScriptNotCanonical ErrScriptNotCanonical + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatalf("ScriptBuilder.AddData allowed exceeding max script "+ + "size: %v", len(script)) + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddData unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + + // Ensure adding an opcode that would exceed the maximum size of the + // script does not add the data. + builder.Reset().AddFullData(make([]byte, MaxScriptSize-3)) + script, err = builder.AddOp(Op0).Script() + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatalf("ScriptBuilder.AddOp unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddOp unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + + // Ensure adding an integer that would exceed the maximum size of the + // script does not add the data. 
+ builder.Reset().AddFullData(make([]byte, MaxScriptSize-3)) + script, err = builder.AddInt64(0).Script() + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatalf("ScriptBuilder.AddInt64 unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddInt64 unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } +} + +// TestErroredScript ensures that all of the functions that can be used to add +// data to a script don't modify the script once an error has happened. +func TestErroredScript(t *testing.T) { + t.Parallel() + + // Start off by constructing a near max size script that has enough + // space left to add each data type without an error and force an + // initial error condition. + builder := NewScriptBuilder() + builder.Reset().AddFullData(make([]byte, MaxScriptSize-8)) + origScript, err := builder.Script() + if err != nil { + t.Fatalf("ScriptBuilder.AddFullData unexpected error: %v", err) + } + script, err := builder.AddData([]byte{0x00, 0x00, 0x00, 0x00, 0x00}).Script() + var errScriptNotCanonical ErrScriptNotCanonical + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatalf("ScriptBuilder.AddData allowed exceeding max script "+ + "size: %v", len(script)) + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddData unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + + // Ensure adding data, even using the non-canonical path, to a script + // that has errored doesn't succeed. 
+ script, err = builder.AddFullData([]byte{0x00}).Script() + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatal("ScriptBuilder.AddFullData succeeded on errored script") + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddFullData unexpected modified "+ + "script - got len %d, want len %d", len(script), + len(origScript)) + } + + // Ensure adding data to a script that has errored doesn't succeed. + script, err = builder.AddData([]byte{0x00}).Script() + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatal("ScriptBuilder.AddData succeeded on errored script") + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddData unexpected modified "+ + "script - got len %d, want len %d", len(script), + len(origScript)) + } + + // Ensure adding an opcode to a script that has errored doesn't succeed. + script, err = builder.AddOp(Op0).Script() + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatal("ScriptBuilder.AddOp succeeded on errored script") + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddOp unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + + // Ensure adding an integer to a script that has errored doesn't + // succeed. + script, err = builder.AddInt64(0).Script() + if !errors.As(err, &errScriptNotCanonical) || err == nil { + t.Fatal("ScriptBuilder.AddInt64 succeeded on errored script") + } + if !bytes.Equal(script, origScript) { + t.Fatalf("ScriptBuilder.AddInt64 unexpected modified script - "+ + "got len %d, want len %d", len(script), len(origScript)) + } + + // Ensure the error has a message set. 
+ if err.Error() == "" { + t.Fatal("ErrScriptNotCanonical.Error does not have any text") + } +} diff --git a/domain/consensus/utils/txscript/scriptnum.go b/domain/consensus/utils/txscript/scriptnum.go new file mode 100644 index 0000000..7fe3be3 --- /dev/null +++ b/domain/consensus/utils/txscript/scriptnum.go @@ -0,0 +1,227 @@ +// Copyright (c) 2015-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "fmt" +) + +const ( + maxInt32 = 1<<31 - 1 + minInt32 = -1 << 31 + + // defaultScriptNumLen is the default number of bytes + // data being interpreted as an integer may be. + defaultScriptNumLen = 4 +) + +// scriptNum represents a numeric value used in the scripting engine with +// special handling to deal with the subtle semantics required by consensus. +// +// All numbers are stored on the data and alternate stacks encoded as little +// endian with a sign bit. All numeric opcodes such as OP_ADD, OP_SUB, +// and OP_MUL, are only allowed to operate on 4-byte integers in the range +// [-2^31 + 1, 2^31 - 1], however the results of numeric operations may overflow +// and remain valid so long as they are not used as inputs to other numeric +// operations or otherwise interpreted as an integer. +// +// For example, it is possible for OP_ADD to have 2^31 - 1 for its two operands +// resulting 2^32 - 2, which overflows, but is still pushed to the stack as the +// result of the addition. That value can then be used as input to OP_VERIFY +// which will succeed because the data is being interpreted as a boolean. +// However, if that same value were to be used as input to another numeric +// opcode, such as OP_SUB, it must fail. +// +// This type handles the aforementioned requirements by storing all numeric +// operation results as an int64 to handle overflow and provides the Bytes +// method to get the serialized representation (including values that overflow). 
+//
+// Then, whenever data is interpreted as an integer, it is converted to this
+// type by using the makeScriptNum function which will return an error if the
+// number is out of range or not minimally encoded depending on parameters.
+// Since all numeric opcodes involve pulling data from the stack and
+// interpreting it as an integer, it provides the required behavior.
+type scriptNum int64
+
+// checkMinimalDataEncoding returns whether or not the passed byte array adheres
+// to the minimal encoding requirements.
+// An error will be returned if it is determined that
+// the encoding is not represented with the smallest possible
+// number of bytes or is the negative 0 encoding, [0x80]. For example, consider
+// the number 127. It could be encoded as [0x7f], [0x7f 0x00],
+// [0x7f 0x00 0x00 ...], etc. All forms except [0x7f] will return an error
+func checkMinimalDataEncoding(v []byte) error {
+	if len(v) == 0 {
+		return nil
+	}
+
+	// Check that the number is encoded with the minimum possible
+	// number of bytes.
+	//
+	// If the most-significant-byte - excluding the sign bit - is zero
+	// then we're not minimal. Note how this test also rejects the
+	// negative-zero encoding, [0x80].
+	if v[len(v)-1]&0x7f == 0 {
+		// One exception: if there's more than one byte and the most
+		// significant bit of the second-most-significant-byte is set
+		// it would conflict with the sign bit. An example of this case
+		// is +-255, which encode to 0xff00 and 0xff80 respectively.
+		// (big-endian).
+		if len(v) == 1 || v[len(v)-2]&0x80 == 0 {
+			str := fmt.Sprintf("numeric value encoded as %x is "+
+				"not minimally encoded", v)
+			return scriptError(ErrMinimalData, str)
+		}
+	}
+
+	return nil
+}
+
+// Bytes returns the number serialized as a little endian with a sign bit.
+// +// Example encodings: +// +// 127 -> [0x7f] +// -127 -> [0xff] +// 128 -> [0x80 0x00] +// -128 -> [0x80 0x80] +// 129 -> [0x81 0x00] +// -129 -> [0x81 0x80] +// 256 -> [0x00 0x01] +// -256 -> [0x00 0x81] +// 32767 -> [0xff 0x7f] +// -32767 -> [0xff 0xff] +// 32768 -> [0x00 0x80 0x00] +// -32768 -> [0x00 0x80 0x80] +func (n scriptNum) Bytes() []byte { + // Zero encodes as an empty byte slice. + if n == 0 { + return nil + } + + // Take the absolute value and keep track of whether it was originally + // negative. + isNegative := n < 0 + if isNegative { + n = -n + } + + // Encode to little endian. The maximum number of encoded bytes is 9 + // (8 bytes for max int64 plus a potential byte for sign extension). + result := make([]byte, 0, 9) + for n > 0 { + result = append(result, byte(n&0xff)) + n >>= 8 + } + + // When the most significant byte already has the high bit set, an + // additional high byte is required to indicate whether the number is + // negative or positive. The additional byte is removed when converting + // back to an integral and its high bit is used to denote the sign. + // + // Otherwise, when the most significant byte does not already have the + // high bit set, use it to indicate the value is negative, if needed. + if result[len(result)-1]&0x80 != 0 { + extraByte := byte(0x00) + if isNegative { + extraByte = 0x80 + } + result = append(result, extraByte) + + } else if isNegative { + result[len(result)-1] |= 0x80 + } + + return result +} + +// Int32 returns the script number clamped to a valid int32. That is to say +// when the script number is higher than the max allowed int32, the max int32 +// value is returned and vice versa for the minimum value. Note that this +// behavior is different from a simple int32 cast because that truncates +// and the consensus rules dictate numbers which are directly cast to ints +// provide this behavior. 
+//
+// In practice, for most opcodes, the number should never be out of range since
+// it will have been created with makeScriptNum using the defaultScriptNumLen
+// value, which rejects them. In case something in the future ends up calling
+// this function against the result of some arithmetic, which IS allowed to be
+// out of range before being reinterpreted as an integer, this will provide the
+// correct behavior.
+func (n scriptNum) Int32() int32 {
+	if n > maxInt32 {
+		return maxInt32
+	}
+
+	if n < minInt32 {
+		return minInt32
+	}
+
+	return int32(n)
+}
+
+// makeScriptNum interprets the passed serialized bytes as an encoded integer
+// and returns the result as a script number.
+//
+// Since the consensus rules dictate that serialized bytes interpreted as ints
+// are only allowed to be in the range determined by a maximum number of bytes,
+// on a per opcode basis, an error will be returned when the provided bytes
+// would result in a number outside of that range. In particular, the range for
+// the vast majority of opcodes dealing with numeric values are limited to 4
+// bytes and therefore will pass that value to this function resulting in an
+// allowed range of [-2^31 + 1, 2^31 - 1].
+//
+// The scriptNumLen is the maximum number of bytes the encoded value can be
+// before an ErrNumberTooBig is returned. This effectively limits the
+// range of allowed values.
+// WARNING: Great care should be taken if passing a value larger than
+// defaultScriptNumLen, which could lead to addition and multiplication
+// overflows.
+//
+// See the Bytes function documentation for example encodings.
+func makeScriptNum(v []byte, scriptNumLen int) (scriptNum, error) {
+	// Interpreting data requires that it is not larger than
+	// the passed scriptNumLen value.
+	if len(v) > scriptNumLen {
+		str := fmt.Sprintf("numeric value encoded as %x is %d bytes "+
+			"which exceeds the max allowed of %d", v, len(v),
+			scriptNumLen)
+		return 0, scriptError(ErrNumberTooBig, str)
+	}
+
+	// Disallow any numerical value larger than 8 bytes, so that it fits in int64.
+	if len(v) > 8 {
+		str := fmt.Sprintf("numeric value encoded as %x is longer than 8 bytes", v)
+		return 0, scriptError(ErrNumberTooBig, str)
+	}
+
+	if err := checkMinimalDataEncoding(v); err != nil {
+		return 0, err
+	}
+
+	// Zero is encoded as an empty byte slice.
+	if len(v) == 0 {
+		return 0, nil
+	}
+
+	// Decode from little endian.
+	var result int64
+	for i, val := range v {
+		result |= int64(val) << uint8(8*i)
+	}
+
+	// When the most significant byte of the input bytes has the sign bit
+	// set, the result is negative. So, remove the sign bit from the result
+	// and make it negative.
+	if v[len(v)-1]&0x80 != 0 {
+		// The maximum length of v has already been determined to be 8
+		// above, so uint8 is enough to cover the max possible shift
+		// value of 56.
+		result &= ^(int64(0x80) << uint8(8*(len(v)-1)))
+		return scriptNum(-result), nil
+	}
+
+	return scriptNum(result), nil
+}
diff --git a/domain/consensus/utils/txscript/scriptnum_test.go b/domain/consensus/utils/txscript/scriptnum_test.go
new file mode 100644
index 0000000..36bf7f3
--- /dev/null
+++ b/domain/consensus/utils/txscript/scriptnum_test.go
@@ -0,0 +1,255 @@
+// Copyright (c) 2015-2017 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package txscript
+
+import (
+	"bytes"
+	"encoding/hex"
+	"testing"
+)
+
+// hexToBytes converts the passed hex string into bytes and will panic if there
+// is an error. This is only provided for the hard-coded constants so errors in
+// the source code can be detected. It will only (and must only) be called with
+// hard-coded values.
+func hexToBytes(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic("invalid hex in source file: " + s) + } + return b +} + +// TestScriptNumBytes ensures that converting from integral script numbers to +// byte representations works as expected. +func TestScriptNumBytes(t *testing.T) { + t.Parallel() + + tests := []struct { + num scriptNum + serialized []byte + }{ + {0, nil}, + {1, hexToBytes("01")}, + {-1, hexToBytes("81")}, + {127, hexToBytes("7f")}, + {-127, hexToBytes("ff")}, + {128, hexToBytes("8000")}, + {-128, hexToBytes("8080")}, + {129, hexToBytes("8100")}, + {-129, hexToBytes("8180")}, + {256, hexToBytes("0001")}, + {-256, hexToBytes("0081")}, + {32767, hexToBytes("ff7f")}, + {-32767, hexToBytes("ffff")}, + {32768, hexToBytes("008000")}, + {-32768, hexToBytes("008080")}, + {65535, hexToBytes("ffff00")}, + {-65535, hexToBytes("ffff80")}, + {524288, hexToBytes("000008")}, + {-524288, hexToBytes("000088")}, + {7340032, hexToBytes("000070")}, + {-7340032, hexToBytes("0000f0")}, + {8388608, hexToBytes("00008000")}, + {-8388608, hexToBytes("00008080")}, + {2147483647, hexToBytes("ffffff7f")}, + {-2147483647, hexToBytes("ffffffff")}, + + // Values that are out of range for data that is interpreted as + // numbers, but are allowed as the result of numeric operations. 
+ {2147483648, hexToBytes("0000008000")}, + {-2147483648, hexToBytes("0000008080")}, + {2415919104, hexToBytes("0000009000")}, + {-2415919104, hexToBytes("0000009080")}, + {4294967295, hexToBytes("ffffffff00")}, + {-4294967295, hexToBytes("ffffffff80")}, + {4294967296, hexToBytes("0000000001")}, + {-4294967296, hexToBytes("0000000081")}, + {281474976710655, hexToBytes("ffffffffffff00")}, + {-281474976710655, hexToBytes("ffffffffffff80")}, + {72057594037927935, hexToBytes("ffffffffffffff00")}, + {-72057594037927935, hexToBytes("ffffffffffffff80")}, + {9223372036854775807, hexToBytes("ffffffffffffff7f")}, + {-9223372036854775807, hexToBytes("ffffffffffffffff")}, + } + + for _, test := range tests { + gotBytes := test.num.Bytes() + if !bytes.Equal(gotBytes, test.serialized) { + t.Errorf("Bytes: did not get expected bytes for %d - "+ + "got %x, want %x", test.num, gotBytes, + test.serialized) + continue + } + } +} + +// TestMakeScriptNum ensures that converting from byte representations to +// integral script numbers works as expected. +func TestMakeScriptNum(t *testing.T) { + t.Parallel() + + // Errors used in the tests below defined here for convenience and to + // keep the horizontal test size shorter. + errNumTooBig := scriptError(ErrNumberTooBig, "") + errMinimalData := scriptError(ErrMinimalData, "") + + tests := []struct { + serialized []byte + num scriptNum + numLen int + err error + }{ + // Minimal encoding must reject negative 0. + {hexToBytes("80"), 0, defaultScriptNumLen, errMinimalData}, + + // Minimally encoded valid values with minimal encoding flag. + // Should not error and return expected integral number. 
+ {nil, 0, defaultScriptNumLen, nil}, + {hexToBytes("01"), 1, defaultScriptNumLen, nil}, + {hexToBytes("81"), -1, defaultScriptNumLen, nil}, + {hexToBytes("7f"), 127, defaultScriptNumLen, nil}, + {hexToBytes("ff"), -127, defaultScriptNumLen, nil}, + {hexToBytes("8000"), 128, defaultScriptNumLen, nil}, + {hexToBytes("8080"), -128, defaultScriptNumLen, nil}, + {hexToBytes("8100"), 129, defaultScriptNumLen, nil}, + {hexToBytes("8180"), -129, defaultScriptNumLen, nil}, + {hexToBytes("0001"), 256, defaultScriptNumLen, nil}, + {hexToBytes("0081"), -256, defaultScriptNumLen, nil}, + {hexToBytes("ff7f"), 32767, defaultScriptNumLen, nil}, + {hexToBytes("ffff"), -32767, defaultScriptNumLen, nil}, + {hexToBytes("008000"), 32768, defaultScriptNumLen, nil}, + {hexToBytes("008080"), -32768, defaultScriptNumLen, nil}, + {hexToBytes("ffff00"), 65535, defaultScriptNumLen, nil}, + {hexToBytes("ffff80"), -65535, defaultScriptNumLen, nil}, + {hexToBytes("000008"), 524288, defaultScriptNumLen, nil}, + {hexToBytes("000088"), -524288, defaultScriptNumLen, nil}, + {hexToBytes("000070"), 7340032, defaultScriptNumLen, nil}, + {hexToBytes("0000f0"), -7340032, defaultScriptNumLen, nil}, + {hexToBytes("00008000"), 8388608, defaultScriptNumLen, nil}, + {hexToBytes("00008080"), -8388608, defaultScriptNumLen, nil}, + {hexToBytes("ffffff7f"), 2147483647, defaultScriptNumLen, nil}, + {hexToBytes("ffffffff"), -2147483647, defaultScriptNumLen, nil}, + {hexToBytes("ffffffff7f"), 549755813887, 5, nil}, + {hexToBytes("ffffffffff"), -549755813887, 5, nil}, + {hexToBytes("ffffffffffffff7f"), 9223372036854775807, 8, nil}, + {hexToBytes("ffffffffffffffff"), -9223372036854775807, 8, nil}, + + // Minimally encoded values that are out of range for data that + // is interpreted as script numbers with the minimal encoding + // flag set. Should error and return 0. 
+ {hexToBytes("0000008000"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("0000008080"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("0000009000"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("0000009080"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffff00"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffff80"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("0000000001"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("0000000081"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffffffff00"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffffffff80"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffffffffff00"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffffffffff80"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffffffffff7f"), 0, defaultScriptNumLen, errNumTooBig}, + {hexToBytes("ffffffffffffffff"), 0, defaultScriptNumLen, errNumTooBig}, + + // Non-minimally encoded, but otherwise valid values with + // minimal encoding flag. Should error and return 0. 
+ {hexToBytes("00"), 0, defaultScriptNumLen, errMinimalData}, // 0 + {hexToBytes("0100"), 0, defaultScriptNumLen, errMinimalData}, // 1 + {hexToBytes("7f00"), 0, defaultScriptNumLen, errMinimalData}, // 127 + {hexToBytes("800000"), 0, defaultScriptNumLen, errMinimalData}, // 128 + {hexToBytes("810000"), 0, defaultScriptNumLen, errMinimalData}, // 129 + {hexToBytes("000100"), 0, defaultScriptNumLen, errMinimalData}, // 256 + {hexToBytes("ff7f00"), 0, defaultScriptNumLen, errMinimalData}, // 32767 + {hexToBytes("00800000"), 0, defaultScriptNumLen, errMinimalData}, // 32768 + {hexToBytes("ffff0000"), 0, defaultScriptNumLen, errMinimalData}, // 65535 + {hexToBytes("00000800"), 0, defaultScriptNumLen, errMinimalData}, // 524288 + {hexToBytes("00007000"), 0, defaultScriptNumLen, errMinimalData}, // 7340032 + {hexToBytes("0009000100"), 0, 5, errMinimalData}, // 16779520 + // Values above 8 bytes should always return error + {hexToBytes("ffffffffffffffffff"), 0, 9, errNumTooBig}, + {hexToBytes("00000000000000000000"), 0, 10, errNumTooBig}, + } + + for _, test := range tests { + // Ensure the error code is of the expected type and the error + // code matches the value specified in the test instance. + gotNum, err := makeScriptNum(test.serialized, + test.numLen) + if e := checkScriptError(err, test.err); e != nil { + t.Errorf("makeScriptNum(%#x): %v", test.serialized, e) + continue + } + + if gotNum != test.num { + t.Errorf("makeScriptNum(%#x): did not get expected "+ + "number - got %d, want %d", test.serialized, + gotNum, test.num) + continue + } + } +} + +// TestScriptNumInt32 ensures that the Int32 function on script number behaves +// as expected. +func TestScriptNumInt32(t *testing.T) { + t.Parallel() + + tests := []struct { + in scriptNum + want int32 + }{ + // Values inside the valid int32 range are just the values + // themselves cast to an int32. 
+ {0, 0}, + {1, 1}, + {-1, -1}, + {127, 127}, + {-127, -127}, + {128, 128}, + {-128, -128}, + {129, 129}, + {-129, -129}, + {256, 256}, + {-256, -256}, + {32767, 32767}, + {-32767, -32767}, + {32768, 32768}, + {-32768, -32768}, + {65535, 65535}, + {-65535, -65535}, + {524288, 524288}, + {-524288, -524288}, + {7340032, 7340032}, + {-7340032, -7340032}, + {8388608, 8388608}, + {-8388608, -8388608}, + {2147483647, 2147483647}, + {-2147483647, -2147483647}, + {-2147483648, -2147483648}, + + // Values outside of the valid int32 range are limited to int32. + {2147483648, 2147483647}, + {-2147483649, -2147483648}, + {1152921504606846975, 2147483647}, + {-1152921504606846975, -2147483648}, + {2305843009213693951, 2147483647}, + {-2305843009213693951, -2147483648}, + {4611686018427387903, 2147483647}, + {-4611686018427387903, -2147483648}, + {9223372036854775807, 2147483647}, + {-9223372036854775808, -2147483648}, + } + + for _, test := range tests { + got := test.in.Int32() + if got != test.want { + t.Errorf("Int32: did not get expected value for %d - "+ + "got %d, want %d", test.in, got, test.want) + continue + } + } +} diff --git a/domain/consensus/utils/txscript/sigcache.go b/domain/consensus/utils/txscript/sigcache.go new file mode 100644 index 0000000..3bab97b --- /dev/null +++ b/domain/consensus/utils/txscript/sigcache.go @@ -0,0 +1,90 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "github.com/spectre-project/go-secp256k1" +) + +// sigCacheEntry represents an entry in the SigCache. Entries within the +// SigCache are keyed according to the sigHash of the signature. In the +// scenario of a cache-hit (according to the sigHash), an additional comparison +// of the signature, and public key will be executed in order to ensure a complete +// match. 
In the occasion that two sigHashes collide, the newer sigHash will +// simply overwrite the existing entry. +type sigCacheEntry struct { + sig *secp256k1.SchnorrSignature + pubKey *secp256k1.SchnorrPublicKey +} + +// SigCache implements an Schnorr signature verification cache with a randomized +// entry eviction policy. Only valid signatures will be added to the cache. The +// benefits of SigCache are two fold. Firstly, usage of SigCache mitigates a DoS +// attack wherein an attack causes a victim's client to hang due to worst-case +// behavior triggered while processing attacker crafted invalid transactions. A +// detailed description of the mitigated DoS attack can be found here: +// https://bitslog.wordpress.com/2013/01/23/fixed-bitcoin-vulnerability-explanation-why-the-signature-cache-is-a-dos-protection/. +// Secondly, usage of the SigCache introduces a signature verification +// optimization which speeds up the validation of transactions within a block, +// if they've already been seen and verified within the mempool. +type SigCache struct { + validSigs map[secp256k1.Hash]sigCacheEntry + maxEntries uint +} + +// NewSigCache creates and initializes a new instance of SigCache. Its sole +// parameter 'maxEntries' represents the maximum number of entries allowed to +// exist in the SigCache at any particular moment. Random entries are evicted +// to make room for new entries that would cause the number of entries in the +// cache to exceed the max. +func NewSigCache(maxEntries uint) *SigCache { + return &SigCache{ + validSigs: make(map[secp256k1.Hash]sigCacheEntry, maxEntries), + maxEntries: maxEntries, + } +} + +// Exists returns true if an existing entry of 'sig' over 'sigHash' for public +// key 'pubKey' is found within the SigCache. Otherwise, false is returned. +// +// NOTE: This function is safe for concurrent access. Readers won't be blocked +// unless there exists a writer, adding an entry to the SigCache. 
+func (s *SigCache) Exists(sigHash secp256k1.Hash, sig *secp256k1.SchnorrSignature, pubKey *secp256k1.SchnorrPublicKey) bool { + entry, ok := s.validSigs[sigHash] + + return ok && entry.pubKey.IsEqual(pubKey) && entry.sig.IsEqual(sig) +} + +// Add adds an entry for a signature over 'sigHash' under public key 'pubKey' +// to the signature cache. In the event that the SigCache is 'full', an +// existing entry is randomly chosen to be evicted in order to make space for +// the new entry. +// +// NOTE: This function is safe for concurrent access. Writers will block +// simultaneous readers until function execution has concluded. +func (s *SigCache) Add(sigHash secp256k1.Hash, sig *secp256k1.SchnorrSignature, pubKey *secp256k1.SchnorrPublicKey) { + if s.maxEntries == 0 { + return + } + + // If adding this new entry will put us over the max number of allowed + // entries, then evict an entry. + if uint(len(s.validSigs)+1) > s.maxEntries { + // Remove a random entry from the map. Relying on the random + // starting point of Go's map iteration. It's worth noting that + // the random iteration starting point is not 100% guaranteed + // by the spec, however most Go compilers support it. + // Ultimately, the iteration order isn't important here because + // in order to manipulate which items are evicted, an adversary + // would need to be able to execute preimage attacks on the + // hashing function in order to start eviction at a specific + // entry. 
+ for sigEntry := range s.validSigs { + delete(s.validSigs, sigEntry) + break + } + } + s.validSigs[sigHash] = sigCacheEntry{sig, pubKey} +} diff --git a/domain/consensus/utils/txscript/sigcache_ecdsa.go b/domain/consensus/utils/txscript/sigcache_ecdsa.go new file mode 100644 index 0000000..0b6d29f --- /dev/null +++ b/domain/consensus/utils/txscript/sigcache_ecdsa.go @@ -0,0 +1,90 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "github.com/spectre-project/go-secp256k1" +) + +// sigCacheEntryECDSA represents an entry in the SigCache. Entries within the +// SigCache are keyed according to the sigHash of the signature. In the +// scenario of a cache-hit (according to the sigHash), an additional comparison +// of the signature, and public key will be executed in order to ensure a complete +// match. In the occasion that two sigHashes collide, the newer sigHash will +// simply overwrite the existing entry. +type sigCacheEntryECDSA struct { + sig *secp256k1.ECDSASignature + pubKey *secp256k1.ECDSAPublicKey +} + +// SigCacheECDSA implements an ECDSA signature verification cache with a randomized +// entry eviction policy. Only valid signatures will be added to the cache. The +// benefits of SigCache are two fold. Firstly, usage of SigCache mitigates a DoS +// attack wherein an attack causes a victim's client to hang due to worst-case +// behavior triggered while processing attacker crafted invalid transactions. A +// detailed description of the mitigated DoS attack can be found here: +// https://bitslog.wordpress.com/2013/01/23/fixed-bitcoin-vulnerability-explanation-why-the-signature-cache-is-a-dos-protection/. +// Secondly, usage of the SigCache introduces a signature verification +// optimization which speeds up the validation of transactions within a block, +// if they've already been seen and verified within the mempool. 
+type SigCacheECDSA struct { + validSigs map[secp256k1.Hash]sigCacheEntryECDSA + maxEntries uint +} + +// NewSigCacheECDSA creates and initializes a new instance of SigCache. Its sole +// parameter 'maxEntries' represents the maximum number of entries allowed to +// exist in the SigCache at any particular moment. Random entries are evicted +// to make room for new entries that would cause the number of entries in the +// cache to exceed the max. +func NewSigCacheECDSA(maxEntries uint) *SigCacheECDSA { + return &SigCacheECDSA{ + validSigs: make(map[secp256k1.Hash]sigCacheEntryECDSA, maxEntries), + maxEntries: maxEntries, + } +} + +// Exists returns true if an existing entry of 'sig' over 'sigHash' for public +// key 'pubKey' is found within the SigCache. Otherwise, false is returned. +// +// NOTE: This function is safe for concurrent access. Readers won't be blocked +// unless there exists a writer, adding an entry to the SigCache. +func (s *SigCacheECDSA) Exists(sigHash secp256k1.Hash, sig *secp256k1.ECDSASignature, pubKey *secp256k1.ECDSAPublicKey) bool { + entry, ok := s.validSigs[sigHash] + + return ok && entry.pubKey.IsEqual(pubKey) && entry.sig.IsEqual(sig) +} + +// Add adds an entry for a signature over 'sigHash' under public key 'pubKey' +// to the signature cache. In the event that the SigCache is 'full', an +// existing entry is randomly chosen to be evicted in order to make space for +// the new entry. +// +// NOTE: This function is safe for concurrent access. Writers will block +// simultaneous readers until function execution has concluded. +func (s *SigCacheECDSA) Add(sigHash secp256k1.Hash, sig *secp256k1.ECDSASignature, pubKey *secp256k1.ECDSAPublicKey) { + if s.maxEntries == 0 { + return + } + + // If adding this new entry will put us over the max number of allowed + // entries, then evict an entry. + if uint(len(s.validSigs)+1) > s.maxEntries { + // Remove a random entry from the map. Relying on the random + // starting point of Go's map iteration. 
It's worth noting that + // the random iteration starting point is not 100% guaranteed + // by the spec, however most Go compilers support it. + // Ultimately, the iteration order isn't important here because + // in order to manipulate which items are evicted, an adversary + // would need to be able to execute preimage attacks on the + // hashing function in order to start eviction at a specific + // entry. + for sigEntry := range s.validSigs { + delete(s.validSigs, sigEntry) + break + } + } + s.validSigs[sigHash] = sigCacheEntryECDSA{sig, pubKey} +} diff --git a/domain/consensus/utils/txscript/sigcache_test.go b/domain/consensus/utils/txscript/sigcache_test.go new file mode 100644 index 0000000..cf694b5 --- /dev/null +++ b/domain/consensus/utils/txscript/sigcache_test.go @@ -0,0 +1,148 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "crypto/rand" + "testing" + + "github.com/spectre-project/go-secp256k1" +) + +// genRandomSig returns a random message, a signature of the message under the +// public key and the public key. This function is used to generate randomized +// test data. +func genRandomSig() (*secp256k1.Hash, *secp256k1.SchnorrSignature, *secp256k1.SchnorrPublicKey, error) { + privKey, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + return nil, nil, nil, err + } + + msgHash := &secp256k1.Hash{} + if _, err := rand.Read(msgHash[:]); err != nil { + return nil, nil, nil, err + } + + sig, err := privKey.SchnorrSign(msgHash) + if err != nil { + return nil, nil, nil, err + } + + pubkey, err := privKey.SchnorrPublicKey() + if err != nil { + return nil, nil, nil, err + } + + return msgHash, sig, pubkey, nil +} + +// TestSigCacheAddExists tests the ability to add, and later check the +// existence of a signature triplet in the signature cache. 
+func TestSigCacheAddExists(t *testing.T) {
+	sigCache := NewSigCache(200)
+
+	// Generate a random sigCache entry triplet.
+	msg1, sig1, key1, err := genRandomSig()
+	if err != nil {
+		t.Fatalf("unable to generate random signature test data")
+	}
+
+	// Add the triplet to the signature cache.
+	sigCache.Add(*msg1, sig1, key1)
+
+	// The previously added triplet should now be found within the sigcache.
+	sig1Copy := secp256k1.DeserializeSchnorrSignature(sig1.Serialize())
+	key1Serialized, _ := key1.Serialize()
+	key1Copy, _ := secp256k1.DeserializeSchnorrPubKey(key1Serialized[:])
+	if !sigCache.Exists(*msg1, sig1Copy, key1Copy) {
+		t.Errorf("previously added item not found in signature cache")
+	}
+}
+
+// TestSigCacheAddEvictEntry tests the eviction case where a new signature
+// triplet is added to a full signature cache which should trigger randomized
+// eviction, followed by adding the new element to the cache.
+func TestSigCacheAddEvictEntry(t *testing.T) {
+	// Create a sigcache that can hold up to 100 entries.
+	sigCacheSize := uint(100)
+	sigCache := NewSigCache(sigCacheSize)
+
+	// Fill the sigcache up with some random sig triplets.
+	for i := uint(0); i < sigCacheSize; i++ {
+		msg, sig, key, err := genRandomSig()
+		if err != nil {
+			t.Fatalf("unable to generate random signature test data")
+		}
+
+		sigCache.Add(*msg, sig, key)
+
+		sigCopy := secp256k1.DeserializeSchnorrSignature(sig.Serialize())
+		keySerialized, _ := key.Serialize()
+		keyCopy, _ := secp256k1.DeserializeSchnorrPubKey(keySerialized[:])
+		if !sigCache.Exists(*msg, sigCopy, keyCopy) {
+			t.Errorf("previously added item not found in signature " +
+				"cache")
+		}
+	}
+
+	// The sigcache should now have sigCacheSize entries within it.
+	if uint(len(sigCache.validSigs)) != sigCacheSize {
+		t.Fatalf("sigcache should now have %v entries, instead it has %v",
+			sigCacheSize, len(sigCache.validSigs))
+	}
+
+	// Add a new entry, this should cause eviction of a randomly chosen
+	// previous entry.
+ msgNew, sigNew, keyNew, err := genRandomSig() + if err != nil { + t.Fatalf("unable to generate random signature test data") + } + sigCache.Add(*msgNew, sigNew, keyNew) + + // The sigcache should still have sigCache entries. + if uint(len(sigCache.validSigs)) != sigCacheSize { + t.Fatalf("sigcache should now have %v entries, instead it has %v", + sigCacheSize, len(sigCache.validSigs)) + } + + // The entry added above should be found within the sigcache. + sigNewCopy := secp256k1.DeserializeSchnorrSignature(sigNew.Serialize()) + keyNewSerialized, _ := keyNew.Serialize() + keyNewCopy, _ := secp256k1.DeserializeSchnorrPubKey(keyNewSerialized[:]) + if !sigCache.Exists(*msgNew, sigNewCopy, keyNewCopy) { + t.Fatalf("previously added item not found in signature cache") + } +} + +// TestSigCacheAddMaxEntriesZeroOrNegative tests that if a sigCache is created +// with a max size <= 0, then no entries are added to the sigcache at all. +func TestSigCacheAddMaxEntriesZeroOrNegative(t *testing.T) { + // Create a sigcache that can hold up to 0 entries. + sigCache := NewSigCache(0) + + // Generate a random sigCache entry triplet. + msg1, sig1, key1, err := genRandomSig() + if err != nil { + t.Fatalf("unable to generate random signature test data") + } + + // Add the triplet to the signature cache. + sigCache.Add(*msg1, sig1, key1) + + // The generated triplet should not be found. + sig1Copy := secp256k1.DeserializeSchnorrSignature(sig1.Serialize()) + key1Serialized, _ := key1.Serialize() + key1Copy, _ := secp256k1.DeserializeSchnorrPubKey(key1Serialized[:]) + if sigCache.Exists(*msg1, sig1Copy, key1Copy) { + t.Errorf("previously added signature found in sigcache, but" + + "shouldn't have been") + } + + // There shouldn't be any entries in the sigCache. 
+	if len(sigCache.validSigs) != 0 {
+		t.Errorf("%v items found in sigcache, no items should have "+
+			"been added", len(sigCache.validSigs))
+	}
+}
diff --git a/domain/consensus/utils/txscript/sign.go b/domain/consensus/utils/txscript/sign.go
new file mode 100644
index 0000000..c8aacd0
--- /dev/null
+++ b/domain/consensus/utils/txscript/sign.go
@@ -0,0 +1,261 @@
+// Copyright (c) 2013-2015 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package txscript
+
+import (
+	"github.com/pkg/errors"
+	"github.com/spectre-project/go-secp256k1"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing"
+
+	"github.com/spectre-project/spectred/domain/dagconfig"
+	"github.com/spectre-project/spectred/util"
+)
+
+// RawTxInSignature returns the serialized Schnorr signature for the input idx of
+// the given transaction, with hashType appended to it.
+func RawTxInSignature(tx *externalapi.DomainTransaction, idx int, hashType consensushashing.SigHashType,
+	key *secp256k1.SchnorrKeyPair, sighashReusedValues *consensushashing.SighashReusedValues) ([]byte, error) {
+
+	hash, err := consensushashing.CalculateSignatureHashSchnorr(tx, idx, hashType, sighashReusedValues)
+	if err != nil {
+		return nil, err
+	}
+	secpHash := secp256k1.Hash(*hash.ByteArray())
+	signature, err := key.SchnorrSign(&secpHash)
+	if err != nil {
+		return nil, errors.Errorf("cannot sign tx input: %s", err)
+	}
+
+	return append(signature.Serialize()[:], byte(hashType)), nil
+}
+
+// RawTxInSignatureECDSA returns the serialized ECDSA signature for the input idx of
+// the given transaction, with hashType appended to it.
+func RawTxInSignatureECDSA(tx *externalapi.DomainTransaction, idx int, hashType consensushashing.SigHashType, + key *secp256k1.ECDSAPrivateKey, sighashReusedValues *consensushashing.SighashReusedValues) ([]byte, error) { + + hash, err := consensushashing.CalculateSignatureHashECDSA(tx, idx, hashType, sighashReusedValues) + if err != nil { + return nil, err + } + secpHash := secp256k1.Hash(*hash.ByteArray()) + signature, err := key.ECDSASign(&secpHash) + if err != nil { + return nil, errors.Errorf("cannot sign tx input: %s", err) + } + + return append(signature.Serialize()[:], byte(hashType)), nil +} + +// SignatureScript creates an input signature script for tx to spend SPR sent +// from a previous output to the owner of a Schnorr private key. tx must include all +// transaction inputs and outputs, however txin scripts are allowed to be filled +// or empty. The returned script is calculated to be used as the idx'th txin +// sigscript for tx. script is the ScriptPublicKey of the previous output being used +// as the idx'th input. privKey is serialized in either a compressed or +// uncompressed format based on compress. This format must match the same format +// used to generate the payment address, or the script validation will fail. +func SignatureScript(tx *externalapi.DomainTransaction, idx int, hashType consensushashing.SigHashType, + privKey *secp256k1.SchnorrKeyPair, sighashReusedValues *consensushashing.SighashReusedValues) ([]byte, error) { + + sig, err := RawTxInSignature(tx, idx, hashType, privKey, sighashReusedValues) + if err != nil { + return nil, err + } + + return NewScriptBuilder().AddData(sig).Script() +} + +// SignatureScriptECDSA creates an input signature script for tx to spend SPR sent +// from a previous output to the owner of an ECDSA private key. tx must include all +// transaction inputs and outputs, however txin scripts are allowed to be filled +// or empty. 
The returned script is calculated to be used as the idx'th txin +// sigscript for tx. script is the ScriptPublicKey of the previous output being used +// as the idx'th input. privKey is serialized in either a compressed or +// uncompressed format based on compress. This format must match the same format +// used to generate the payment address, or the script validation will fail. +func SignatureScriptECDSA(tx *externalapi.DomainTransaction, idx int, hashType consensushashing.SigHashType, + privKey *secp256k1.ECDSAPrivateKey, sighashReusedValues *consensushashing.SighashReusedValues) ([]byte, error) { + + sig, err := RawTxInSignatureECDSA(tx, idx, hashType, privKey, sighashReusedValues) + if err != nil { + return nil, err + } + + return NewScriptBuilder().AddData(sig).Script() +} + +func sign(dagParams *dagconfig.Params, tx *externalapi.DomainTransaction, idx int, + script *externalapi.ScriptPublicKey, hashType consensushashing.SigHashType, + sighashReusedValues *consensushashing.SighashReusedValues, kdb KeyDB, sdb ScriptDB) ( + []byte, ScriptClass, util.Address, error) { + + class, address, err := ExtractScriptPubKeyAddress(script, dagParams) + if err != nil { + return nil, NonStandardTy, nil, err + } + + switch class { + case PubKeyTy: + // look up key for address + key, err := kdb.GetKey(address) + if err != nil { + return nil, class, nil, err + } + + signedScript, err := SignatureScript(tx, idx, hashType, key, sighashReusedValues) + if err != nil { + return nil, class, nil, err + } + + return signedScript, class, address, nil + case ScriptHashTy: + script, err := sdb.GetScript(address) + if err != nil { + return nil, class, nil, err + } + + return script, class, address, nil + default: + return nil, class, nil, errors.New("can't sign unknown transactions") + } +} + +// mergeScripts merges sigScript and prevScript assuming they are both +// partial solutions for scriptPubKey spending output idx of tx. 
class, addresses +// and nrequired are the result of extracting the addresses from scriptPubKey. +// The return value is the best effort merging of the two scripts. Calling this +// function with addresses, class and nrequired that do not match scriptPubKey is +// an error and results in undefined behaviour. +func mergeScripts(dagParams *dagconfig.Params, tx *externalapi.DomainTransaction, idx int, + class ScriptClass, sigScript []byte, prevScript *externalapi.ScriptPublicKey) ([]byte, error) { + + switch class { + case ScriptHashTy: + // Remove the last push in the script and then recurse. + // this could be a lot less inefficient. + sigPops, err := parseScript(sigScript) + if err != nil || len(sigPops) == 0 { + return prevScript.Script, nil + } + prevPops, err := parseScript(prevScript.Script) + if err != nil || len(prevPops) == 0 { + return sigScript, nil + } + + // assume that script in sigPops is the correct one, we just + // made it. + script := sigPops[len(sigPops)-1].data + scriptPubKey := &externalapi.ScriptPublicKey{ + Script: script, + Version: prevScript.Version, + } + // We already know this information somewhere up the stack. + class, _, _ := + ExtractScriptPubKeyAddress(scriptPubKey, dagParams) + + // regenerate scripts. + sigScript, _ := unparseScript(sigPops) + prevScriptByte, _ := unparseScript(prevPops) + prevScript = &externalapi.ScriptPublicKey{ + Script: prevScriptByte, + Version: prevScript.Version, + } + // Merge + mergedScript, err := mergeScripts(dagParams, tx, idx, class, sigScript, prevScript) + if err != nil { + return nil, err + } + + // Reappend the script and return the result. + builder := NewScriptBuilder() + builder.AddOps(mergedScript) + builder.AddData(script) + return builder.Script() + + // It doesn't actually make sense to merge anything other than multiig + // and scripthash (because it could contain multisig). 
Everything else + // has either zero signature, can't be spent, or has a single signature + // which is either present or not. The other two cases are handled + // above. In the conflict case here we just assume the longest is + // correct (this matches behaviour of the reference implementation). + default: + if len(sigScript) > len(prevScript.Script) { + return sigScript, nil + } + return prevScript.Script, nil + } +} + +// KeyDB is an interface type provided to SignTxOutput, it encapsulates +// any user state required to get the private keys for an address. +type KeyDB interface { + GetKey(util.Address) (*secp256k1.SchnorrKeyPair, error) +} + +// KeyClosure implements KeyDB with a closure. +type KeyClosure func(util.Address) (*secp256k1.SchnorrKeyPair, error) + +// GetKey implements KeyDB by returning the result of calling the closure. +func (kc KeyClosure) GetKey(address util.Address) (*secp256k1.SchnorrKeyPair, error) { + return kc(address) +} + +// ScriptDB is an interface type provided to SignTxOutput, it encapsulates any +// user state required to get the scripts for an pay-to-script-hash address. +type ScriptDB interface { + GetScript(util.Address) ([]byte, error) +} + +// ScriptClosure implements ScriptDB with a closure. +type ScriptClosure func(util.Address) ([]byte, error) + +// GetScript implements ScriptDB by returning the result of calling the closure. +func (sc ScriptClosure) GetScript(address util.Address) ([]byte, error) { + return sc(address) +} + +// SignTxOutput signs output idx of the given tx to resolve the script given in +// scriptPublicKey with a signature type of hashType. Any keys required will be +// looked up by calling getKey() with the string of the given address. +// Any pay-to-script-hash signatures will be similarly looked up by calling +// getScript. If previousScript is provided then the results in previousScript +// will be merged in a type-dependent manner with the newly generated. +// signature script. 
+func SignTxOutput(dagParams *dagconfig.Params, tx *externalapi.DomainTransaction, idx int, + scriptPublicKey *externalapi.ScriptPublicKey, hashType consensushashing.SigHashType, + sighashReusedValues *consensushashing.SighashReusedValues, kdb KeyDB, sdb ScriptDB, + previousScript *externalapi.ScriptPublicKey) ([]byte, error) { + + sigScript, class, _, err := sign(dagParams, tx, + idx, scriptPublicKey, hashType, sighashReusedValues, kdb, sdb) + if err != nil { + return nil, err + } + + if class == ScriptHashTy { + scriptHashPreimageScriptPublicKey := &externalapi.ScriptPublicKey{ + Script: sigScript, + Version: scriptPublicKey.Version, + } + + realSigScript, _, _, err := sign(dagParams, tx, idx, + scriptHashPreimageScriptPublicKey, hashType, sighashReusedValues, kdb, sdb) + if err != nil { + return nil, err + } + + // Append the p2sh script as the last push in the script. + builder := NewScriptBuilder() + builder.AddOps(realSigScript) + builder.AddData(sigScript) + + sigScript, _ = builder.Script() + } + + // Merge scripts. with any previous data, if any. + return mergeScripts(dagParams, tx, idx, class, sigScript, previousScript) +} diff --git a/domain/consensus/utils/txscript/sign_test.go b/domain/consensus/utils/txscript/sign_test.go new file mode 100644 index 0000000..c0fd7f5 --- /dev/null +++ b/domain/consensus/utils/txscript/sign_test.go @@ -0,0 +1,893 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package txscript + +import ( + "fmt" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/pkg/errors" + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" +) + +func mkGetKey(keys map[string]*secp256k1.SchnorrKeyPair) KeyDB { + if keys == nil { + return KeyClosure(func(addr util.Address) (*secp256k1.SchnorrKeyPair, error) { + return nil, errors.New("nope") + }) + } + return KeyClosure(func(addr util.Address) (*secp256k1.SchnorrKeyPair, error) { + key, ok := keys[addr.EncodeAddress()] + if !ok { + return nil, errors.New("nope") + } + return key, nil + }) +} + +func mkGetScript(scripts map[string][]byte) ScriptDB { + if scripts == nil { + return ScriptClosure(func(addr util.Address) ([]byte, error) { + return nil, errors.New("nope") + }) + } + return ScriptClosure(func(addr util.Address) ([]byte, error) { + script, ok := scripts[addr.EncodeAddress()] + if !ok { + return nil, errors.New("nope") + } + return script, nil + }) +} + +func checkScripts(msg string, tx *externalapi.DomainTransaction, idx int, sigScript []byte, scriptPubKey *externalapi.ScriptPublicKey) error { + tx.Inputs[idx].SignatureScript = sigScript + var flags ScriptFlags + vm, err := NewEngine(scriptPubKey, tx, idx, + flags, nil, nil, &consensushashing.SighashReusedValues{}) + if err != nil { + return errors.Errorf("failed to make script engine for %s: %v", + msg, err) + } + + err = vm.Execute() + if err != nil { + return errors.Errorf("invalid script signature for %s: %v", msg, + err) + } + + return nil +} + +func signAndCheck(msg string, tx *externalapi.DomainTransaction, idx int, scriptPubKey *externalapi.ScriptPublicKey, + hashType consensushashing.SigHashType, kdb KeyDB, sdb ScriptDB) error { + + 
sigScript, err := SignTxOutput(&dagconfig.TestnetParams, tx, idx, + scriptPubKey, hashType, &consensushashing.SighashReusedValues{}, kdb, sdb, + &externalapi.ScriptPublicKey{Script: nil, Version: 0}) + if err != nil { + return errors.Errorf("failed to sign output %s: %v", msg, err) + } + + return checkScripts(msg, tx, idx, sigScript, scriptPubKey) +} + +func TestSignTxOutput(t *testing.T) { + t.Parallel() + + // make key + // make script based on key. + // sign with magic pixie dust. + hashTypes := []consensushashing.SigHashType{ + consensushashing.SigHashAll, + consensushashing.SigHashNone, + consensushashing.SigHashSingle, + consensushashing.SigHashAll | consensushashing.SigHashAnyOneCanPay, + consensushashing.SigHashNone | consensushashing.SigHashAnyOneCanPay, + consensushashing.SigHashSingle | consensushashing.SigHashAnyOneCanPay, + } + inputs := []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: 0, + }, + Sequence: 4294967295, + }, + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: 1, + }, + Sequence: 4294967295, + }, + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: externalapi.DomainTransactionID{}, + Index: 2, + }, + Sequence: 4294967295, + }, + } + outputs := []*externalapi.DomainTransactionOutput{ + { + Value: 1, + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + }, + { + Value: 2, + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + }, + { + Value: 3, + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + }, + } + tx := &externalapi.DomainTransaction{ + Version: 0, + Inputs: inputs, + Outputs: outputs, + } + + key, scriptPubKey, address, err := generateKeys() + if err != nil { + t.Fatal(err) + } + // Pay to Pubkey (merging with correct) + for _, hashType := range hashTypes { + for _, input := range tx.Inputs 
{ + input.UTXOEntry = utxo.NewUTXOEntry(500, scriptPubKey, false, 100) + } + for i := range tx.Inputs { + msg := fmt.Sprintf("%d:%d", hashType, i) + + sigScript, err := SignTxOutput(&dagconfig.TestnetParams, + tx, i, scriptPubKey, hashType, &consensushashing.SighashReusedValues{}, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), mkGetScript(nil), &externalapi.ScriptPublicKey{Script: nil, Version: 0}) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. + sigScript, err = SignTxOutput(&dagconfig.TestnetParams, + tx, i, scriptPubKey, hashType, &consensushashing.SighashReusedValues{}, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), mkGetScript(nil), &externalapi.ScriptPublicKey{ + Script: sigScript, + Version: scriptPubKey.Version, + }) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptPubKey) + if err != nil { + t.Fatalf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } + } + } + + // Pay to Pubkey + for _, hashType := range hashTypes { + for i := range tx.Inputs { + msg := fmt.Sprintf("%d:%d", hashType, i) + + key, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + t.Errorf("failed to make privKey for %s: %s", + msg, err) + break + } + + pubKey, err := key.SchnorrPublicKey() + if err != nil { + t.Errorf("failed to make a publickey for %s: %s", + key, err) + break + } + + serializedPubKey, err := pubKey.Serialize() + if err != nil { + t.Errorf("failed to make a pubkey for %s: %s", + key, err) + break + } + + address, err := util.NewAddressPublicKey(serializedPubKey[:], util.Bech32PrefixSpectreTest) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + scriptPubKey, err := PayToAddrScript(address) + if err != 
nil { + t.Errorf("failed to make scriptPubKey "+ + "for %s: %v", msg, err) + } + err = signAndCheck(msg, tx, i, scriptPubKey, hashType, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), + mkGetScript(nil)) + if err != nil { + t.Error(err) + break + } + } + } + + // Pay to Pubkey with duplicate merge + for _, hashType := range hashTypes { + for i := range tx.Inputs { + msg := fmt.Sprintf("%d:%d", hashType, i) + + key, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + t.Errorf("failed to make privKey for %s: %s", + msg, err) + break + } + + pubKey, err := key.SchnorrPublicKey() + if err != nil { + t.Errorf("failed to make a publickey for %s: %s", + key, err) + break + } + + serializedPubKey, err := pubKey.Serialize() + if err != nil { + t.Errorf("failed to make a pubkey for %s: %s", + key, err) + break + } + + address, err := util.NewAddressPublicKey(serializedPubKey[:], util.Bech32PrefixSpectreTest) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + scriptPubKey, err := PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make scriptPubKey "+ + "for %s: %v", msg, err) + } + + sigScript, err := SignTxOutput(&dagconfig.TestnetParams, + tx, i, scriptPubKey, hashType, &consensushashing.SighashReusedValues{}, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), mkGetScript(nil), &externalapi.ScriptPublicKey{Script: nil, Version: 0}) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. 
+ sigScript, err = SignTxOutput(&dagconfig.TestnetParams, + tx, i, scriptPubKey, hashType, &consensushashing.SighashReusedValues{}, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), mkGetScript(nil), &externalapi.ScriptPublicKey{ + Script: sigScript, + Version: scriptPubKey.Version, + }) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptPubKey) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } + } + } + + // As before, but with p2sh now. + + // Pay to Pubkey + for _, hashType := range hashTypes { + for i := range tx.Inputs { + msg := fmt.Sprintf("%d:%d", hashType, i) + + key, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + t.Errorf("failed to make privKey for %s: %s", + msg, err) + break + } + + pubKey, err := key.SchnorrPublicKey() + if err != nil { + t.Errorf("failed to make a publickey for %s: %s", + key, err) + break + } + + serializedPubKey, err := pubKey.Serialize() + if err != nil { + t.Errorf("failed to make a pubkey for %s: %s", + key, err) + break + } + + address, err := util.NewAddressPublicKey(serializedPubKey[:], util.Bech32PrefixSpectreTest) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + scriptPubKey, err := PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make scriptPubKey "+ + "for %s: %v", msg, err) + } + + scriptAddr, err := util.NewAddressScriptHash( + scriptPubKey.Script, util.Bech32PrefixSpectreTest) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } + + scriptScriptPubKey, err := PayToAddrScript(scriptAddr) + if err != nil { + t.Errorf("failed to make script scriptPubKey for "+ + "%s: %v", msg, err) + break + } + + err = signAndCheck(msg, tx, i, scriptScriptPubKey, hashType, + 
mkGetKey(map[string]*secp256k1.SchnorrKeyPair{address.EncodeAddress(): key}), + mkGetScript(map[string][]byte{scriptAddr.EncodeAddress(): scriptPubKey.Script})) + if err != nil { + t.Error(err) + break + } + } + } + + // Pay to Pubkey with duplicate merge + for _, hashType := range hashTypes { + for i := range tx.Inputs { + msg := fmt.Sprintf("%d:%d", hashType, i) + + key, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + t.Errorf("failed to make privKey for %s: %s", + msg, err) + break + } + + pubKey, err := key.SchnorrPublicKey() + if err != nil { + t.Errorf("failed to make a publickey for %s: %s", + key, err) + break + } + + serializedPubKey, err := pubKey.Serialize() + if err != nil { + t.Errorf("failed to make a pubkey for %s: %s", + key, err) + break + } + + address, err := util.NewAddressPublicKey(serializedPubKey[:], util.Bech32PrefixSpectreTest) + if err != nil { + t.Errorf("failed to make address for %s: %v", + msg, err) + break + } + + scriptPubKey, err := PayToAddrScript(address) + if err != nil { + t.Errorf("failed to make scriptPubKey "+ + "for %s: %v", msg, err) + } + + scriptAddr, err := util.NewAddressScriptHash( + scriptPubKey.Script, util.Bech32PrefixSpectreTest) + if err != nil { + t.Errorf("failed to make p2sh addr for %s: %v", + msg, err) + break + } + + scriptScriptPubKey, err := PayToAddrScript(scriptAddr) + if err != nil { + t.Errorf("failed to make script scriptPubKey for "+ + "%s: %v", msg, err) + break + } + _, err = SignTxOutput(&dagconfig.TestnetParams, + tx, i, scriptScriptPubKey, hashType, &consensushashing.SighashReusedValues{}, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): scriptPubKey.Script, + }), &externalapi.ScriptPublicKey{Script: nil, Version: 0}) + if err != nil { + t.Errorf("failed to sign output %s: %v", msg, + err) + break + } + + // by the above loop, this should be valid, now sign + // again and merge. 
+ sigScript, err := SignTxOutput(&dagconfig.TestnetParams, + tx, i, scriptScriptPubKey, hashType, &consensushashing.SighashReusedValues{}, + mkGetKey(map[string]*secp256k1.SchnorrKeyPair{ + address.EncodeAddress(): key, + }), mkGetScript(map[string][]byte{ + scriptAddr.EncodeAddress(): scriptPubKey.Script, + }), &externalapi.ScriptPublicKey{Script: nil, Version: 0}) + if err != nil { + t.Errorf("failed to sign output %s a "+ + "second time: %v", msg, err) + break + } + + err = checkScripts(msg, tx, i, sigScript, scriptScriptPubKey) + if err != nil { + t.Errorf("twice signed script invalid for "+ + "%s: %v", msg, err) + break + } + } + } +} + +func generateKeys() (keyPair *secp256k1.SchnorrKeyPair, scriptPublicKey *externalapi.ScriptPublicKey, + addressPubKeyHash *util.AddressPublicKey, err error) { + + key, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + return nil, nil, nil, errors.Errorf("failed to make privKey: %s", err) + } + + pubKey, err := key.SchnorrPublicKey() + if err != nil { + return nil, nil, nil, errors.Errorf("failed to make a publickey for %s: %s", key, err) + } + + serializedPubKey, err := pubKey.Serialize() + if err != nil { + return nil, nil, nil, errors.Errorf("failed to serialize a pubkey for %s: %s", pubKey, err) + } + address, err := util.NewAddressPublicKey(serializedPubKey[:], util.Bech32PrefixSpectreTest) + if err != nil { + return nil, nil, nil, errors.Errorf("failed to make address for %s: %s", serializedPubKey, err) + } + + scriptPubKey, err := PayToAddrScript(address) + if err != nil { + return nil, nil, nil, errors.Errorf("failed to make scriptPubKey for %s: %s", address, err) + } + return key, scriptPubKey, address, err +} + +type tstInput struct { + txout *externalapi.DomainTransactionOutput + sigscriptGenerates bool + inputValidates bool + indexOutOfRange bool +} + +type tstSigScript struct { + name string + inputs []tstInput + hashType consensushashing.SigHashType + scriptAtWrongIndex bool +} + +var coinbaseOutpoint 
= &externalapi.DomainOutpoint{ + Index: (1 << 32) - 1, +} + +// Pregenerated private key, with associated public key and scriptPubKeys +// for the uncompressed and compressed hash160. +var ( + privKeyD = secp256k1.SerializedPrivateKey{0x6b, 0x0f, 0xd8, 0xda, 0x54, 0x22, 0xd0, 0xb7, + 0xb4, 0xfc, 0x4e, 0x55, 0xd4, 0x88, 0x42, 0xb3, 0xa1, 0x65, + 0xac, 0x70, 0x7f, 0x3d, 0xa4, 0x39, 0x5e, 0xcb, 0x3b, 0xb0, + 0xd6, 0x0e, 0x06, 0x92} + oldUncompressedScriptPubKey = &externalapi.ScriptPublicKey{[]byte{0x76, 0xa9, 0x14, 0xd1, 0x7c, 0xb5, + 0xeb, 0xa4, 0x02, 0xcb, 0x68, 0xe0, 0x69, 0x56, 0xbf, 0x32, + 0x53, 0x90, 0x0e, 0x0a, 0x86, 0xc9, 0xfa, 0x88, 0xac}, 0} + oldCompressedScriptPubKey = &externalapi.ScriptPublicKey{[]byte{0x76, 0xa9, 0x14, 0x27, 0x4d, 0x9f, 0x7f, + 0x61, 0x7e, 0x7c, 0x7a, 0x1c, 0x1f, 0xb2, 0x75, 0x79, 0x10, + 0x43, 0x65, 0x68, 0x27, 0x9d, 0x86, 0x88, 0xac}, 0} + p2pkScriptPubKey = &externalapi.ScriptPublicKey{[]byte{0x20, 0xb2, 0x52, 0xf0, 0x49, 0x85, 0x78, 0x03, 0x03, + 0xc8, 0x7d, 0xce, 0x51, 0x7f, 0xa8, 0x69, 0x0b, + 0x91, 0x95, 0xf4, 0xf3, 0x5c, 0x26, 0x73, 0x05, + 0x05, 0xa2, 0xee, 0xbc, 0x09, 0x38, 0x34, 0x3a, 0xac}, 0} + shortScriptPubKey = &externalapi.ScriptPublicKey{[]byte{0x76, 0xa9, 0x14, 0xd1, 0x7c, 0xb5, + 0xeb, 0xa4, 0x02, 0xcb, 0x68, 0xe0, 0x69, 0x56, 0xbf, 0x32, + 0x53, 0x90, 0x0e, 0x0a, 0x88, 0xac}, 0} +) + +// Pretend output amounts. 
+const coinbaseVal = 2500000000 +const fee = 5000000 + +var sigScriptTests = []tstSigScript{ + { + name: "one input old uncompressed", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: oldUncompressedScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: false, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: false, + }, + { + name: "two inputs old uncompressed", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: oldUncompressedScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: false, + indexOutOfRange: false, + }, + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal + fee, + ScriptPublicKey: oldUncompressedScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: false, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: false, + }, + { + name: "one input old compressed", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: oldCompressedScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: false, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: false, + }, + { + name: "two inputs old compressed", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: oldCompressedScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: false, + indexOutOfRange: false, + }, + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal + fee, + ScriptPublicKey: oldCompressedScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: false, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: false, + }, + { + name: "one input 32byte pubkey", + inputs: []tstInput{ + { + 
txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: false, + }, + { + name: "two inputs 32byte pubkey", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal + fee, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: false, + }, + { + name: "hashType SigHashNone", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashNone, + scriptAtWrongIndex: false, + }, + { + name: "hashType SigHashSingle", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashSingle, + scriptAtWrongIndex: false, + }, + { + name: "hashType SigHashAll | SigHashAnyoneCanPay", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll | consensushashing.SigHashAnyOneCanPay, + scriptAtWrongIndex: false, + }, + { + name: "hashType SigHashAnyoneCanPay", + inputs: []tstInput{ + { + txout: 
&externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: false, + inputValidates: false, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAnyOneCanPay, + scriptAtWrongIndex: false, + }, + { + name: "hashType non-exist", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: false, + inputValidates: false, + indexOutOfRange: false, + }, + }, + hashType: 0b00000011, + scriptAtWrongIndex: false, + }, + { + name: "valid script at wrong index", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal + fee, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: true, + }, + { + name: "index out of range", + inputs: []tstInput{ + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + { + txout: &externalapi.DomainTransactionOutput{ + Value: coinbaseVal + fee, + ScriptPublicKey: p2pkScriptPubKey, + }, + sigscriptGenerates: true, + inputValidates: true, + indexOutOfRange: false, + }, + }, + hashType: consensushashing.SigHashAll, + scriptAtWrongIndex: true, + }, +} + +// Test the sigscript generation for valid and invalid inputs, all +// hashTypes, and with and without compression. 
This test creates +// sigscripts to spend fake coinbase inputs, as sigscripts cannot be +// created for the DomainTransactions in txTests, since they come from the blockDAG +// and we don't have the private keys. +func TestSignatureScript(t *testing.T) { + t.Parallel() + + privKey, _ := secp256k1.DeserializeSchnorrPrivateKey(&privKeyD) + +nexttest: + for i := range sigScriptTests { + outputs := []*externalapi.DomainTransactionOutput{ + {Value: 500, ScriptPublicKey: &externalapi.ScriptPublicKey{[]byte{OpReturn}, 0}}, + } + + inputs := []*externalapi.DomainTransactionInput{} + for j := range sigScriptTests[i].inputs { + txOut := sigScriptTests[i].inputs[j].txout + inputs = append(inputs, &externalapi.DomainTransactionInput{ + PreviousOutpoint: *coinbaseOutpoint, + UTXOEntry: utxo.NewUTXOEntry(txOut.Value, txOut.ScriptPublicKey, false, 10), + }) + } + tx := &externalapi.DomainTransaction{ + Version: 0, + Inputs: inputs, + Outputs: outputs, + } + + var script []byte + var err error + for j := range tx.Inputs { + var idx int + if sigScriptTests[i].inputs[j].indexOutOfRange { + t.Errorf("at test %v", sigScriptTests[i].name) + idx = len(sigScriptTests[i].inputs) + } else { + idx = j + } + script, err = SignatureScript(tx, idx, sigScriptTests[i].hashType, privKey, + &consensushashing.SighashReusedValues{}) + + if (err == nil) != sigScriptTests[i].inputs[j].sigscriptGenerates { + if err == nil { + t.Errorf("passed test '%v' incorrectly", + sigScriptTests[i].name) + } else { + t.Errorf("failed test '%v': %v", + sigScriptTests[i].name, err) + } + continue nexttest + } + if !sigScriptTests[i].inputs[j].sigscriptGenerates { + // done with this test + continue nexttest + } + + tx.Inputs[j].SignatureScript = script + } + + // If testing using a correct sigscript but for an incorrect + // index, use last input script for first input. Requires > 0 + // inputs for test. 
+ if sigScriptTests[i].scriptAtWrongIndex { + tx.Inputs[0].SignatureScript = script + sigScriptTests[i].inputs[0].inputValidates = false + } + + // Validate tx input scripts + var scriptFlags ScriptFlags + for j := range tx.Inputs { + vm, err := NewEngine(sigScriptTests[i].inputs[j].txout.ScriptPublicKey, tx, j, scriptFlags, nil, nil, + &consensushashing.SighashReusedValues{}) + if err != nil { + t.Errorf("cannot create script vm for test %v: %v", + sigScriptTests[i].name, err) + continue nexttest + } + err = vm.Execute() + if (err == nil) != sigScriptTests[i].inputs[j].inputValidates { + if err == nil { + t.Errorf("passed test '%v' validation incorrectly: %v", + sigScriptTests[i].name, err) + } else { + t.Errorf("failed test '%v' validation: %v", + sigScriptTests[i].name, err) + } + continue nexttest + } + } + } +} diff --git a/domain/consensus/utils/txscript/stack.go b/domain/consensus/utils/txscript/stack.go new file mode 100644 index 0000000..761ace7 --- /dev/null +++ b/domain/consensus/utils/txscript/stack.go @@ -0,0 +1,360 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "encoding/hex" + "fmt" +) + +// asBool gets the boolean value of the byte array. +func asBool(t []byte) bool { + for i := range t { + if t[i] != 0 { + // Negative 0 is also considered false. + if i == len(t)-1 && t[i] == 0x80 { + return false + } + return true + } + } + return false +} + +// fromBool converts a boolean into the appropriate byte array. +func fromBool(v bool) []byte { + if v { + return []byte{1} + } + return nil +} + +// stack represents a stack of immutable objects to be used with spectre +// scripts. Objects may be shared, therefore in usage if a value is to be +// changed it *must* be deep-copied first to avoid changing other values on the +// stack. 
type stack struct {
	// stk holds the stack items; index 0 is the bottom and
	// len(stk)-1 is the top of the stack.
	stk [][]byte
}

// Depth returns the number of items on the stack.
func (s *stack) Depth() int32 {
	return int32(len(s.stk))
}

// PushByteArray adds the given byte array to the top of the stack.
// The slice is stored as-is (not copied); see the shared-object note on the
// stack type.
//
// Stack transformation: [... x1 x2] -> [... x1 x2 data]
func (s *stack) PushByteArray(so []byte) {
	s.stk = append(s.stk, so)
}

// PushInt converts the provided scriptNum to a suitable byte array then pushes
// it onto the top of the stack.
//
// Stack transformation: [... x1 x2] -> [... x1 x2 int]
func (s *stack) PushInt(val scriptNum) {
	s.PushByteArray(val.Bytes())
}

// PushBool converts the provided boolean to a suitable byte array then pushes
// it onto the top of the stack.
//
// Stack transformation: [... x1 x2] -> [... x1 x2 bool]
func (s *stack) PushBool(val bool) {
	s.PushByteArray(fromBool(val))
}

// PopByteArray pops the value off the top of the stack and returns it.
// Returns an ErrInvalidStackOperation script error when the stack is empty.
//
// Stack transformation: [... x1 x2 x3] -> [... x1 x2]
func (s *stack) PopByteArray() ([]byte, error) {
	return s.nipN(0)
}

// PopInt pops the value off the top of the stack, converts it into a script
// num, and returns it. The act of converting to a script num enforces the
// consensus rules imposed on data interpreted as numbers.
//
// Stack transformation: [... x1 x2 x3] -> [... x1 x2]
func (s *stack) PopInt() (scriptNum, error) {
	so, err := s.PopByteArray()
	if err != nil {
		return 0, err
	}

	return makeScriptNum(so, defaultScriptNumLen)
}

// PopBool pops the value off the top of the stack, converts it into a bool, and
// returns it.
//
// Stack transformation: [... x1 x2 x3] -> [... x1 x2]
func (s *stack) PopBool() (bool, error) {
	so, err := s.PopByteArray()
	if err != nil {
		return false, err
	}

	return asBool(so), nil
}

// PeekByteArray returns the Nth item on the stack without removing it.
// PeekByteArray returns the Nth item from the top of the stack without
// removing it (idx 0 is the top). Returns an ErrInvalidStackOperation script
// error when idx is out of range.
func (s *stack) PeekByteArray(idx int32) ([]byte, error) {
	sz := int32(len(s.stk))
	if idx < 0 || idx >= sz {
		str := fmt.Sprintf("index %d is invalid for stack size %d", idx,
			sz)
		return nil, scriptError(ErrInvalidStackOperation, str)
	}

	return s.stk[sz-idx-1], nil
}

// PeekInt returns the Nth item on the stack as a script num without removing
// it. The act of converting to a script num enforces the consensus rules
// imposed on data interpreted as numbers.
func (s *stack) PeekInt(idx int32) (scriptNum, error) {
	so, err := s.PeekByteArray(idx)
	if err != nil {
		return 0, err
	}

	return makeScriptNum(so, defaultScriptNumLen)
}

// PeekBool returns the Nth item on the stack as a bool without removing it.
func (s *stack) PeekBool(idx int32) (bool, error) {
	so, err := s.PeekByteArray(idx)
	if err != nil {
		return false, err
	}

	return asBool(so), nil
}

// nipN is an internal function that removes the nth item on the stack and
// returns it.
//
// Stack transformation:
// nipN(0): [... x1 x2 x3] -> [... x1 x2]
// nipN(1): [... x1 x2 x3] -> [... x1 x3]
// nipN(2): [... x1 x2 x3] -> [... x2 x3]
func (s *stack) nipN(idx int32) ([]byte, error) {
	sz := int32(len(s.stk))
	if idx < 0 || idx > sz-1 {
		str := fmt.Sprintf("index %d is invalid for stack size %d", idx,
			sz)
		return nil, scriptError(ErrInvalidStackOperation, str)
	}

	so := s.stk[sz-idx-1]
	if idx == 0 {
		// Removing the top item: just shrink the slice.
		s.stk = s.stk[:sz-1]
	} else if idx == sz-1 {
		// Removing the bottom item: copy the remainder into a fresh
		// backing array rather than shifting in place.
		s1 := make([][]byte, sz-1)
		copy(s1, s.stk[1:])
		s.stk = s1
	} else {
		// Removing a middle item: append the tail (which aliases the
		// original backing array) after truncating just before the
		// removed element.
		s1 := s.stk[sz-idx : sz]
		s.stk = s.stk[:sz-idx-1]
		s.stk = append(s.stk, s1...)
	}
	return so, nil
}

// NipN removes the Nth object on the stack
//
// Stack transformation:
// NipN(0): [... x1 x2 x3] -> [... x1 x2]
// NipN(1): [... x1 x2 x3] -> [... x1 x3]
// NipN(2): [... x1 x2 x3] -> [... x2 x3]
func (s *stack) NipN(idx int32) error {
	_, err := s.nipN(idx)
	return err
}

// Tuck copies the item at the top of the stack and inserts it before the 2nd
// to top item. Requires at least two items on the stack.
//
// Stack transformation: [... x1 x2] -> [... x2 x1 x2]
func (s *stack) Tuck() error {
	so2, err := s.PopByteArray()
	if err != nil {
		return err
	}
	so1, err := s.PopByteArray()
	if err != nil {
		return err
	}
	s.PushByteArray(so2) // stack [... x2]
	s.PushByteArray(so1) // stack [... x2 x1]
	s.PushByteArray(so2) // stack [... x2 x1 x2]

	return nil
}

// DropN removes the top N items from the stack. Items are popped one at a
// time, so on underflow the stack is left with the items that were already
// removed gone.
//
// Stack transformation:
// DropN(1): [... x1 x2] -> [... x1]
// DropN(2): [... x1 x2] -> [...]
func (s *stack) DropN(n int32) error {
	if n < 1 {
		str := fmt.Sprintf("attempt to drop %d items from stack", n)
		return scriptError(ErrInvalidStackOperation, str)
	}

	for ; n > 0; n-- {
		_, err := s.PopByteArray()
		if err != nil {
			return err
		}
	}
	return nil
}

// DupN duplicates the top N items on the stack.
//
// Stack transformation:
// DupN(1): [... x1 x2] -> [... x1 x2 x2]
// DupN(2): [... x1 x2] -> [... x1 x2 x1 x2]
func (s *stack) DupN(n int32) error {
	if n < 1 {
		str := fmt.Sprintf("attempt to dup %d stack items", n)
		return scriptError(ErrInvalidStackOperation, str)
	}

	// Iteratively duplicate the value n-1 down the stack n times.
	// This leaves an in-order duplicate of the top n items on the stack.
	// Peeking at depth n-1 works each iteration because every push moves
	// the next original item to that depth.
	for i := n; i > 0; i-- {
		so, err := s.PeekByteArray(n - 1)
		if err != nil {
			return err
		}
		s.PushByteArray(so)
	}
	return nil
}

// RotN rotates the top 3N items on the stack to the left N times.
//
// Stack transformation:
// RotN(1): [... x1 x2 x3] -> [... x2 x3 x1]
// RotN(2): [... x1 x2 x3 x4 x5 x6] -> [... x3 x4 x5 x6 x1 x2]
func (s *stack) RotN(n int32) error {
	if n < 1 {
		str := fmt.Sprintf("attempt to rotate %d stack items", n)
		return scriptError(ErrInvalidStackOperation, str)
	}

	// Nip the 3n-1th item from the stack to the top n times to rotate
	// them up to the head of the stack.
	entry := 3*n - 1
	for i := n; i > 0; i-- {
		so, err := s.nipN(entry)
		if err != nil {
			return err
		}

		s.PushByteArray(so)
	}
	return nil
}

// SwapN swaps the top N items on the stack with those below them.
//
// Stack transformation:
// SwapN(1): [... x1 x2] -> [... x2 x1]
// SwapN(2): [... x1 x2 x3 x4] -> [... x3 x4 x1 x2]
func (s *stack) SwapN(n int32) error {
	if n < 1 {
		str := fmt.Sprintf("attempt to swap %d stack items", n)
		return scriptError(ErrInvalidStackOperation, str)
	}

	entry := 2*n - 1
	for i := n; i > 0; i-- {
		// Swap 2n-1th entry to top.
		so, err := s.nipN(entry)
		if err != nil {
			return err
		}

		s.PushByteArray(so)
	}
	return nil
}

// OverN copies N items N items back to the top of the stack.
//
// Stack transformation:
// OverN(1): [... x1 x2 x3] -> [... x1 x2 x3 x2]
// OverN(2): [... x1 x2 x3 x4] -> [... x1 x2 x3 x4 x1 x2]
func (s *stack) OverN(n int32) error {
	if n < 1 {
		str := fmt.Sprintf("attempt to perform over on %d stack items",
			n)
		return scriptError(ErrInvalidStackOperation, str)
	}

	// Copy 2n-1th entry to top of the stack. Peeking at a fixed depth
	// works each iteration because every push shifts the next source
	// item to that depth.
	entry := 2*n - 1
	for ; n > 0; n-- {
		so, err := s.PeekByteArray(entry)
		if err != nil {
			return err
		}
		s.PushByteArray(so)
	}

	return nil
}

// PickN copies the item N items back in the stack to the top.
+// +// Stack transformation: +// PickN(0): [x1 x2 x3] -> [x1 x2 x3 x3] +// PickN(1): [x1 x2 x3] -> [x1 x2 x3 x2] +// PickN(2): [x1 x2 x3] -> [x1 x2 x3 x1] +func (s *stack) PickN(n int32) error { + so, err := s.PeekByteArray(n) + if err != nil { + return err + } + s.PushByteArray(so) + + return nil +} + +// RollN moves the item N items back in the stack to the top. +// +// Stack transformation: +// RollN(0): [x1 x2 x3] -> [x1 x2 x3] +// RollN(1): [x1 x2 x3] -> [x1 x3 x2] +// RollN(2): [x1 x2 x3] -> [x2 x3 x1] +func (s *stack) RollN(n int32) error { + so, err := s.nipN(n) + if err != nil { + return err + } + + s.PushByteArray(so) + + return nil +} + +// String returns the stack in a readable format. +func (s *stack) String() string { + var result string + for _, stack := range s.stk { + if len(stack) == 0 { + result += "00000000 \n" + } + result += hex.Dump(stack) + } + + return result +} diff --git a/domain/consensus/utils/txscript/stack_test.go b/domain/consensus/utils/txscript/stack_test.go new file mode 100644 index 0000000..70b1902 --- /dev/null +++ b/domain/consensus/utils/txscript/stack_test.go @@ -0,0 +1,864 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "bytes" + "fmt" + "github.com/pkg/errors" + "reflect" + "testing" +) + +// checkScriptError ensures the type of the two passed errors are of the +// same type (either both nil or both of type Error) and their error codes +// match when not nil. +func checkScriptError(gotErr, wantErr error) error { + // Ensure the error code is of the expected type and the error + // code matches the value specified in the test instance. + if reflect.TypeOf(gotErr) != reflect.TypeOf(wantErr) { + return errors.Errorf("wrong error - got %T (%[1]v), want %T", + gotErr, wantErr) + } + if gotErr == nil { + return nil + } + + // Ensure the want error type is a script error. 
+ werr, ok := wantErr.(Error) + if !ok { + return errors.Errorf("unexpected test error type %T", wantErr) + } + + // Ensure the error codes match. It's safe to use a raw type assert + // here since the code above already proved they are the same type and + // the want error is a script error. + gotErrorCode := gotErr.(Error).ErrorCode + if gotErrorCode != werr.ErrorCode { + return errors.Errorf("mismatched error code - got %v (%v), want %v", + gotErrorCode, gotErr, werr.ErrorCode) + } + + return nil +} + +// TestStack tests that all of the stack operations work as expected. +func TestStack(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + before [][]byte + operation func(*stack) error + err error + after [][]byte + }{ + { + "noop", + [][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + return nil + }, + nil, + [][]byte{{1}, {2}, {3}, {4}, {5}}, + }, + { + "peek underflow (byte)", + [][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + _, err := s.PeekByteArray(5) + return err + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "peek underflow (int)", + [][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + _, err := s.PeekInt(5) + return err + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "peek underflow (bool)", + [][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + _, err := s.PeekBool(5) + return err + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "pop", + [][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + val, err := s.PopByteArray() + if err != nil { + return err + } + if !bytes.Equal(val, []byte{5}) { + return errors.New("not equal") + } + return err + }, + nil, + [][]byte{{1}, {2}, {3}, {4}}, + }, + { + "pop everything", + [][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + for i := 0; i < 5; i++ { + _, err := s.PopByteArray() + if err != nil { + return err + } + } + return nil + }, + nil, + nil, + }, + { + "pop underflow", + 
[][]byte{{1}, {2}, {3}, {4}, {5}}, + func(s *stack) error { + for i := 0; i < 6; i++ { + _, err := s.PopByteArray() + if err != nil { + return err + } + } + return nil + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "pop bool", + [][]byte{nil}, + func(s *stack) error { + val, err := s.PopBool() + if err != nil { + return err + } + + if val { + return errors.New("unexpected value") + } + return nil + }, + nil, + nil, + }, + { + "pop bool", + [][]byte{{1}}, + func(s *stack) error { + val, err := s.PopBool() + if err != nil { + return err + } + + if !val { + return errors.New("unexpected value") + } + return nil + }, + nil, + nil, + }, + { + "pop bool", + nil, + func(s *stack) error { + _, err := s.PopBool() + return err + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "popInt 0", + [][]byte{{}}, + func(s *stack) error { + v, err := s.PopInt() + if err != nil { + return err + } + if v != 0 { + return errors.New("0 != 0 on popInt") + } + return nil + }, + nil, + nil, + }, + { + "popInt 1", + [][]byte{{0x01}}, + func(s *stack) error { + v, err := s.PopInt() + if err != nil { + return err + } + if v != 1 { + return errors.New("1 != 1 on popInt") + } + return nil + }, + nil, + nil, + }, + { + "popInt -1", + [][]byte{{0x81}}, + func(s *stack) error { + v, err := s.PopInt() + if err != nil { + return err + } + if v != -1 { + return errors.New("-1 != -1 on popInt") + } + return nil + }, + nil, + nil, + }, + // Triggers the multibyte case in asInt + { + "popInt -513", + [][]byte{{0x1, 0x82}}, + func(s *stack) error { + v, err := s.PopInt() + if err != nil { + return err + } + if v != -513 { + fmt.Printf("%v != %v\n", v, -513) + return errors.New("1 != 1 on popInt") + } + return nil + }, + nil, + nil, + }, + { + "PushInt 0", + nil, + func(s *stack) error { + s.PushInt(scriptNum(0)) + return nil + }, + nil, + [][]byte{{}}, + }, + { + "PushInt 1", + nil, + func(s *stack) error { + s.PushInt(scriptNum(1)) + return nil + }, + nil, + 
[][]byte{{0x1}}, + }, + { + "PushInt -1", + nil, + func(s *stack) error { + s.PushInt(scriptNum(-1)) + return nil + }, + nil, + [][]byte{{0x81}}, + }, + { + "PushInt two bytes", + nil, + func(s *stack) error { + s.PushInt(scriptNum(256)) + return nil + }, + nil, + // little endian.. *sigh* + [][]byte{{0x00, 0x01}}, + }, + { + "PushInt leading zeros", + nil, + func(s *stack) error { + // this will have the highbit set + s.PushInt(scriptNum(128)) + return nil + }, + nil, + [][]byte{{0x80, 0x00}}, + }, + { + "dup", + [][]byte{{1}}, + func(s *stack) error { + return s.DupN(1) + }, + nil, + [][]byte{{1}, {1}}, + }, + { + "dup2", + [][]byte{{1}, {2}}, + func(s *stack) error { + return s.DupN(2) + }, + nil, + [][]byte{{1}, {2}, {1}, {2}}, + }, + { + "dup3", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.DupN(3) + }, + nil, + [][]byte{{1}, {2}, {3}, {1}, {2}, {3}}, + }, + { + "dup0", + [][]byte{{1}}, + func(s *stack) error { + return s.DupN(0) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "dup-1", + [][]byte{{1}}, + func(s *stack) error { + return s.DupN(-1) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "dup too much", + [][]byte{{1}}, + func(s *stack) error { + return s.DupN(2) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "PushBool true", + nil, + func(s *stack) error { + s.PushBool(true) + + return nil + }, + nil, + [][]byte{{1}}, + }, + { + "PushBool false", + nil, + func(s *stack) error { + s.PushBool(false) + + return nil + }, + nil, + [][]byte{nil}, + }, + { + "PushBool PopBool", + nil, + func(s *stack) error { + s.PushBool(true) + val, err := s.PopBool() + if err != nil { + return err + } + if !val { + return errors.New("unexpected value") + } + + return nil + }, + nil, + nil, + }, + { + "PushBool PopBool 2", + nil, + func(s *stack) error { + s.PushBool(false) + val, err := s.PopBool() + if err != nil { + return err + } + if val { + return errors.New("unexpected value") + } + + return 
nil + }, + nil, + nil, + }, + { + "PushInt PopBool", + nil, + func(s *stack) error { + s.PushInt(scriptNum(1)) + val, err := s.PopBool() + if err != nil { + return err + } + if !val { + return errors.New("unexpected value") + } + + return nil + }, + nil, + nil, + }, + { + "PushInt PopBool 2", + nil, + func(s *stack) error { + s.PushInt(scriptNum(0)) + val, err := s.PopBool() + if err != nil { + return err + } + if val { + return errors.New("unexpected value") + } + + return nil + }, + nil, + nil, + }, + { + "Nip top", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.NipN(0) + }, + nil, + [][]byte{{1}, {2}}, + }, + { + "Nip middle", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.NipN(1) + }, + nil, + [][]byte{{1}, {3}}, + }, + { + "Nip low", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.NipN(2) + }, + nil, + [][]byte{{2}, {3}}, + }, + { + "Nip too much", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + // bite off more than we can chew + return s.NipN(3) + }, + scriptError(ErrInvalidStackOperation, ""), + [][]byte{{2}, {3}}, + }, + { + "keep on tucking", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.Tuck() + }, + nil, + [][]byte{{1}, {3}, {2}, {3}}, + }, + { + "a little tucked up", + [][]byte{{1}}, // too few arguments for tuck + func(s *stack) error { + return s.Tuck() + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "all tucked up", + nil, // too few arguments for tuck + func(s *stack) error { + return s.Tuck() + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "drop 1", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.DropN(1) + }, + nil, + [][]byte{{1}, {2}, {3}}, + }, + { + "drop 2", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.DropN(2) + }, + nil, + [][]byte{{1}, {2}}, + }, + { + "drop 3", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.DropN(3) + }, + nil, + [][]byte{{1}}, + }, + { + "drop 4", + 
[][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.DropN(4) + }, + nil, + nil, + }, + { + "drop 4/5", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.DropN(5) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "drop invalid", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.DropN(0) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Rot1", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.RotN(1) + }, + nil, + [][]byte{{1}, {3}, {4}, {2}}, + }, + { + "Rot2", + [][]byte{{1}, {2}, {3}, {4}, {5}, {6}}, + func(s *stack) error { + return s.RotN(2) + }, + nil, + [][]byte{{3}, {4}, {5}, {6}, {1}, {2}}, + }, + { + "Rot too little", + [][]byte{{1}, {2}}, + func(s *stack) error { + return s.RotN(1) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Rot0", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.RotN(0) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Swap1", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.SwapN(1) + }, + nil, + [][]byte{{1}, {2}, {4}, {3}}, + }, + { + "Swap2", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.SwapN(2) + }, + nil, + [][]byte{{3}, {4}, {1}, {2}}, + }, + { + "Swap too little", + [][]byte{{1}}, + func(s *stack) error { + return s.SwapN(1) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Swap0", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.SwapN(0) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Over1", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.OverN(1) + }, + nil, + [][]byte{{1}, {2}, {3}, {4}, {3}}, + }, + { + "Over2", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.OverN(2) + }, + nil, + [][]byte{{1}, {2}, {3}, {4}, {1}, {2}}, + }, + { + "Over too little", + [][]byte{{1}}, + func(s *stack) error { + return s.OverN(1) + }, + 
scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Over0", + [][]byte{{1}, {2}, {3}}, + func(s *stack) error { + return s.OverN(0) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Pick1", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.PickN(1) + }, + nil, + [][]byte{{1}, {2}, {3}, {4}, {3}}, + }, + { + "Pick2", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.PickN(2) + }, + nil, + [][]byte{{1}, {2}, {3}, {4}, {2}}, + }, + { + "Pick too little", + [][]byte{{1}}, + func(s *stack) error { + return s.PickN(1) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Roll1", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.RollN(1) + }, + nil, + [][]byte{{1}, {2}, {4}, {3}}, + }, + { + "Roll2", + [][]byte{{1}, {2}, {3}, {4}}, + func(s *stack) error { + return s.RollN(2) + }, + nil, + [][]byte{{1}, {3}, {4}, {2}}, + }, + { + "Roll too little", + [][]byte{{1}}, + func(s *stack) error { + return s.RollN(1) + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + { + "Peek bool", + [][]byte{{1}}, + func(s *stack) error { + // Peek bool is otherwise pretty well tested, + // just check it works. + val, err := s.PeekBool(0) + if err != nil { + return err + } + if !val { + return errors.New("invalid result") + } + return nil + }, + nil, + [][]byte{{1}}, + }, + { + "Peek bool 2", + [][]byte{nil}, + func(s *stack) error { + // Peek bool is otherwise pretty well tested, + // just check it works. + val, err := s.PeekBool(0) + if err != nil { + return err + } + if val { + return errors.New("invalid result") + } + return nil + }, + nil, + [][]byte{nil}, + }, + { + "Peek int", + [][]byte{{1}}, + func(s *stack) error { + // Peek int is otherwise pretty well tested, + // just check it works. 
+ val, err := s.PeekInt(0) + if err != nil { + return err + } + if val != 1 { + return errors.New("invalid result") + } + return nil + }, + nil, + [][]byte{{1}}, + }, + { + "pop int", + nil, + func(s *stack) error { + s.PushInt(scriptNum(1)) + // Peek int is otherwise pretty well tested, + // just check it works. + val, err := s.PopInt() + if err != nil { + return err + } + if val != 1 { + return errors.New("invalid result") + } + return nil + }, + nil, + nil, + }, + { + "pop empty", + nil, + func(s *stack) error { + // Peek int is otherwise pretty well tested, + // just check it works. + _, err := s.PopInt() + return err + }, + scriptError(ErrInvalidStackOperation, ""), + nil, + }, + } + + for _, test := range tests { + // Setup the initial stack state and perform the test operation. + s := stack{} + for i := range test.before { + s.PushByteArray(test.before[i]) + } + err := test.operation(&s) + + // Ensure the error code is of the expected type and the error + // code matches the value specified in the test instance. + if e := checkScriptError(err, test.err); e != nil { + t.Errorf("%s: %v", test.name, e) + continue + } + if err != nil { + continue + } + + // Ensure the resulting stack is the expected length. + if int32(len(test.after)) != s.Depth() { + t.Errorf("%s: stack depth doesn't match expected: %v "+ + "vs %v", test.name, len(test.after), + s.Depth()) + continue + } + + // Ensure all items of the resulting stack are the expected + // values. 
+ for i := range test.after { + val, err := s.PeekByteArray(s.Depth() - int32(i) - 1) + if err != nil { + t.Errorf("%s: can't peek %dth stack entry: %v", + test.name, i, err) + break + } + + if !bytes.Equal(val, test.after[i]) { + t.Errorf("%s: %dth stack entry doesn't match "+ + "expected: %v vs %v", test.name, i, val, + test.after[i]) + break + } + } + } +} diff --git a/domain/consensus/utils/txscript/standard.go b/domain/consensus/utils/txscript/standard.go new file mode 100644 index 0000000..c1d18c2 --- /dev/null +++ b/domain/consensus/utils/txscript/standard.go @@ -0,0 +1,443 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" +) + +// ScriptClass is an enumeration for the list of standard types of script. +type ScriptClass byte + +// Classes of script payment known about in the blockDAG. +const ( + NonStandardTy ScriptClass = iota // None of the recognized forms. + PubKeyTy // Pay to pubkey. + PubKeyECDSATy // Pay to pubkey ECDSA. + ScriptHashTy // Pay to script hash. +) + +// Script public key versions for address types. +const ( + addressPublicKeyScriptPublicKeyVersion = 0 + addressPublicKeyECDSAScriptPublicKeyVersion = 0 + addressScriptHashScriptPublicKeyVersion = 0 +) + +// scriptClassToName houses the human-readable strings which describe each +// script class. +var scriptClassToName = []string{ + NonStandardTy: "nonstandard", + PubKeyTy: "pubkey", + PubKeyECDSATy: "pubkeyecdsa", + ScriptHashTy: "scripthash", +} + +// String implements the Stringer interface by returning the name of +// the enum script class. 
If the enum is invalid then "Invalid" will be
+// returned.
+func (t ScriptClass) String() string {
+	if int(t) >= len(scriptClassToName) {
+		return "Invalid"
+	}
+	return scriptClassToName[t]
+}
+
+// isPayToPubkey returns true if the script passed is a pay-to-pubkey
+// transaction, false otherwise.
+func isPayToPubkey(pops []parsedOpcode) bool {
+	return len(pops) == 2 &&
+		pops[0].opcode.value == OpData32 &&
+		pops[1].opcode.value == OpCheckSig
+}
+
+// isPayToPubkeyECDSA returns true if the script passed is an ECDSA pay-to-pubkey
+// transaction, false otherwise.
+func isPayToPubkeyECDSA(pops []parsedOpcode) bool {
+	return len(pops) == 2 &&
+		pops[0].opcode.value == OpData33 &&
+		pops[1].opcode.value == OpCheckSigECDSA
+
+}
+
+// typeOfScript returns the type of the script being inspected from the known
+// standard types.
+func typeOfScript(pops []parsedOpcode) ScriptClass {
+	switch {
+	case isPayToPubkey(pops):
+		return PubKeyTy
+	case isPayToPubkeyECDSA(pops):
+		return PubKeyECDSATy
+	case isScriptHash(pops):
+		return ScriptHashTy
+	}
+	return NonStandardTy
+}
+
+// GetScriptClass returns the class of the script passed.
+//
+// NonStandardTy will be returned when the script does not parse.
+func GetScriptClass(script []byte) ScriptClass {
+	pops, err := parseScript(script)
+	if err != nil {
+		return NonStandardTy
+	}
+	return typeOfScript(pops)
+}
+
+// expectedInputs returns the number of arguments required by a script.
+// If the script is of unknown type such that the number can not be determined
+// then -1 is returned. We are an internal function and thus assume that class
+// is the real class of pops (and we can thus assume things that were determined
+// while finding out the type).
+func expectedInputs(pops []parsedOpcode, class ScriptClass) int {
+	switch class {
+
+	case PubKeyTy:
+		return 1
+
+	case ScriptHashTy:
+		// Not including script. That is handled by the caller.
+ return 1 + + default: + return -1 + } +} + +// ScriptInfo houses information about a script pair that is determined by +// CalcScriptInfo. +type ScriptInfo struct { + // ScriptPubKeyClass is the class of the public key script and is equivalent + // to calling GetScriptClass on it. + ScriptPubKeyClass ScriptClass + + // NumInputs is the number of inputs provided by the public key script. + NumInputs int + + // ExpectedInputs is the number of outputs required by the signature + // script and any pay-to-script-hash scripts. The number will be -1 if + // unknown. + ExpectedInputs int + + // SigOps is the number of signature operations in the script pair. + SigOps int +} + +// CalcScriptInfo returns a structure providing data about the provided script +// pair. It will error if the pair is in someway invalid such that they can not +// be analysed, i.e. if they do not parse or the scriptPubKey is not a push-only +// script +func CalcScriptInfo(sigScript, scriptPubKey []byte, isP2SH bool) (*ScriptInfo, error) { + sigPops, err := parseScript(sigScript) + if err != nil { + return nil, err + } + + scriptPubKeyPops, err := parseScript(scriptPubKey) + if err != nil { + return nil, err + } + + // Push only sigScript makes little sense. + si := new(ScriptInfo) + si.ScriptPubKeyClass = typeOfScript(scriptPubKeyPops) + + // Can't have a signature script that doesn't just push data. + if !isPushOnly(sigPops) { + return nil, scriptError(ErrNotPushOnly, + "signature script is not push only") + } + + si.ExpectedInputs = expectedInputs(scriptPubKeyPops, si.ScriptPubKeyClass) + + // All entries pushed to stack (or are OP_RESERVED and exec will fail). + si.NumInputs = len(sigPops) + + if si.ScriptPubKeyClass == ScriptHashTy && isP2SH { + // The pay-to-hash-script is the final data push of the + // signature script. 
+ script := sigPops[len(sigPops)-1].data + shPops, err := parseScript(script) + if err != nil { + return nil, err + } + + shInputs := expectedInputs(shPops, typeOfScript(shPops)) + if shInputs == -1 { + si.ExpectedInputs = -1 + } else { + si.ExpectedInputs += shInputs + } + si.SigOps = getSigOpCount(shPops, true) + } else { + si.SigOps = getSigOpCount(scriptPubKeyPops, true) + } + + return si, nil +} + +// payToPubKeyScript creates a new script to pay a transaction +// output to a 32-byte pubkey. +func payToPubKeyScript(pubKey []byte) ([]byte, error) { + return NewScriptBuilder(). + AddData(pubKey). + AddOp(OpCheckSig). + Script() +} + +// payToPubKeyScript creates a new script to pay a transaction +// output to a 33-byte pubkey. +func payToPubKeyScriptECDSA(pubKey []byte) ([]byte, error) { + return NewScriptBuilder(). + AddData(pubKey). + AddOp(OpCheckSigECDSA). + Script() +} + +// payToScriptHashScript creates a new script to pay a transaction output to a +// script hash. It is expected that the input is a valid hash. +func payToScriptHashScript(scriptHash []byte) ([]byte, error) { + return NewScriptBuilder().AddOp(OpBlake2b).AddData(scriptHash). + AddOp(OpEqual).Script() +} + +// PayToAddrScript creates a new script to pay a transaction output to a the +// specified address. 
+func PayToAddrScript(addr util.Address) (*externalapi.ScriptPublicKey, error) { + const nilAddrErrStr = "unable to generate payment script for nil address" + switch addr := addr.(type) { + case *util.AddressPublicKey: + if addr == nil { + return nil, scriptError(ErrUnsupportedAddress, + nilAddrErrStr) + } + script, err := payToPubKeyScript(addr.ScriptAddress()) + if err != nil { + return nil, err + } + + return &externalapi.ScriptPublicKey{script, addressPublicKeyScriptPublicKeyVersion}, err + + case *util.AddressPublicKeyECDSA: + if addr == nil { + return nil, scriptError(ErrUnsupportedAddress, + nilAddrErrStr) + } + script, err := payToPubKeyScriptECDSA(addr.ScriptAddress()) + if err != nil { + return nil, err + } + + return &externalapi.ScriptPublicKey{script, addressPublicKeyECDSAScriptPublicKeyVersion}, err + + case *util.AddressScriptHash: + if addr == nil { + return nil, scriptError(ErrUnsupportedAddress, + nilAddrErrStr) + } + script, err := payToScriptHashScript(addr.ScriptAddress()) + if err != nil { + return nil, err + } + + return &externalapi.ScriptPublicKey{script, addressScriptHashScriptPublicKeyVersion}, err + } + + str := fmt.Sprintf("unable to generate payment script for unsupported "+ + "address type %T", addr) + return nil, scriptError(ErrUnsupportedAddress, str) +} + +// PayToScriptHashScript takes a script and returns an equivalent pay-to-script-hash script +func PayToScriptHashScript(redeemScript []byte) ([]byte, error) { + redeemScriptHash := util.HashBlake2b(redeemScript) + script, err := NewScriptBuilder(). + AddOp(OpBlake2b).AddData(redeemScriptHash). 
+ AddOp(OpEqual).Script() + if err != nil { + return nil, err + } + return script, nil +} + +// PayToScriptHashSignatureScript generates a signature script that fits a pay-to-script-hash script +func PayToScriptHashSignatureScript(redeemScript []byte, signature []byte) ([]byte, error) { + redeemScriptAsData, err := NewScriptBuilder().AddData(redeemScript).Script() + if err != nil { + return nil, err + } + signatureScript := make([]byte, len(signature)+len(redeemScriptAsData)) + copy(signatureScript, signature) + copy(signatureScript[len(signature):], redeemScriptAsData) + return signatureScript, nil +} + +// PushedData returns an array of byte slices containing any pushed data found +// in the passed script. This includes OP_0, but not OP_1 - OP_16. +func PushedData(script []byte) ([][]byte, error) { + pops, err := parseScript(script) + if err != nil { + return nil, err + } + + var data [][]byte + for _, pop := range pops { + if pop.data != nil { + data = append(data, pop.data) + } else if pop.opcode.value == Op0 { + data = append(data, nil) + } + } + return data, nil +} + +// ExtractScriptPubKeyAddress returns the type of script and its addresses. +// Note that it only works for 'standard' transaction script types. Any data such +// as public keys which are invalid will return a nil address. +func ExtractScriptPubKeyAddress(scriptPubKey *externalapi.ScriptPublicKey, dagParams *dagconfig.Params) (ScriptClass, util.Address, error) { + if scriptPubKey.Version > constants.MaxScriptPublicKeyVersion { + return NonStandardTy, nil, nil + } + // No valid address if the script doesn't parse. + pops, err := parseScript(scriptPubKey.Script) + if err != nil { + return NonStandardTy, nil, err + } + + scriptClass := typeOfScript(pops) + switch scriptClass { + case PubKeyTy: + // A pay-to-pubkey script is of the form: + // OP_CHECKSIG + // Therefore the pubkey is the first item on the stack. + // If the pubkey is invalid for some reason, return a nil address. 
+ addr, err := util.NewAddressPublicKey(pops[0].data, + dagParams.Prefix) + if err != nil { + return scriptClass, nil, nil + } + return scriptClass, addr, nil + + case PubKeyECDSATy: + // A pay-to-pubkey script is of the form: + // OP_CHECKSIGECDSA + // Therefore the pubkey is the first item on the stack. + // If the pubkey is invalid for some reason, return a nil address. + addr, err := util.NewAddressPublicKeyECDSA(pops[0].data, + dagParams.Prefix) + if err != nil { + return scriptClass, nil, nil + } + return scriptClass, addr, nil + + case ScriptHashTy: + // A pay-to-script-hash script is of the form: + // OP_BLAKE2B OP_EQUAL + // Therefore the script hash is the 2nd item on the stack. + // If the script hash ss invalid for some reason, return a nil address. + addr, err := util.NewAddressScriptHashFromHash(pops[1].data, + dagParams.Prefix) + if err != nil { + return scriptClass, nil, nil + } + return scriptClass, addr, nil + + case NonStandardTy: + // Don't attempt to extract addresses or required signatures for + // nonstandard transactions. + return NonStandardTy, nil, nil + } + + return NonStandardTy, nil, errors.Errorf("Cannot handle script class %s", scriptClass) +} + +// AtomicSwapDataPushes houses the data pushes found in atomic swap contracts. +type AtomicSwapDataPushes struct { + RecipientBlake2b [32]byte + RefundBlake2b [32]byte + SecretHash [32]byte + SecretSize int64 + LockTime uint64 +} + +// ExtractAtomicSwapDataPushes returns the data pushes from an atomic swap +// contract. If the script is not an atomic swap contract, +// ExtractAtomicSwapDataPushes returns (nil, nil). Non-nil errors are returned +// for unparsable scripts. +// +// NOTE: Atomic swaps are not considered standard script types by the dcrd +// mempool policy and should be used with P2SH. The atomic swap format is also +// expected to change to use a more secure hash function in the future. 
+// +// This function is only defined in the txscript package due to API limitations +// which prevent callers using txscript to parse nonstandard scripts. +func ExtractAtomicSwapDataPushes(version uint16, scriptPubKey []byte) (*AtomicSwapDataPushes, error) { + pops, err := parseScript(scriptPubKey) + if err != nil { + return nil, err + } + + if len(pops) != 19 { + return nil, nil + } + isAtomicSwap := pops[0].opcode.value == OpIf && + pops[1].opcode.value == OpSize && + canonicalPush(pops[2]) && + pops[3].opcode.value == OpEqualVerify && + pops[4].opcode.value == OpSHA256 && + pops[5].opcode.value == OpData32 && + pops[6].opcode.value == OpEqualVerify && + pops[7].opcode.value == OpDup && + pops[8].opcode.value == OpBlake2b && + pops[9].opcode.value == OpData32 && + pops[10].opcode.value == OpElse && + canonicalPush(pops[11]) && + pops[12].opcode.value == OpCheckLockTimeVerify && + pops[13].opcode.value == OpDup && + pops[14].opcode.value == OpBlake2b && + pops[15].opcode.value == OpData32 && + pops[16].opcode.value == OpEndIf && + pops[17].opcode.value == OpEqualVerify && + pops[18].opcode.value == OpCheckSig + if !isAtomicSwap { + return nil, nil + } + + pushes := new(AtomicSwapDataPushes) + copy(pushes.SecretHash[:], pops[5].data) + copy(pushes.RecipientBlake2b[:], pops[9].data) + copy(pushes.RefundBlake2b[:], pops[15].data) + if pops[2].data != nil { + locktime, err := makeScriptNum(pops[2].data, 8) + if err != nil { + return nil, nil + } + pushes.SecretSize = int64(locktime) + } else if op := pops[2].opcode; isSmallInt(op) { + pushes.SecretSize = int64(asSmallInt(op)) + } else { + return nil, nil + } + if pops[11].data != nil { + locktime, err := makeScriptNum(pops[11].data, 8) + if err != nil { + return nil, nil + } + pushes.LockTime = uint64(locktime) + } else if op := pops[11].opcode; isSmallInt(op) { + pushes.LockTime = uint64(asSmallInt(op)) + } else { + return nil, nil + } + return pushes, nil +} diff --git 
a/domain/consensus/utils/txscript/standard_test.go b/domain/consensus/utils/txscript/standard_test.go new file mode 100644 index 0000000..f756a1d --- /dev/null +++ b/domain/consensus/utils/txscript/standard_test.go @@ -0,0 +1,569 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "bytes" + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util" +) + +// mustParseShortForm parses the passed short form script and returns the +// resulting bytes. It panics if an error occurs. This is only used in the +// tests as a helper since the only way it can fail is if there is an error in +// the test source code. +func mustParseShortForm(script string, version uint16) []byte { + s, err := parseShortForm(script, version) + if err != nil { + panic("invalid short form script in test source: err " + + err.Error() + ", script: " + script) + } + + return s +} + +// newAddressPublicKey returns a new util.AddressPublicKey from the +// provided public key. It panics if an error occurs. This is only used in the tests +// as a helper since the only way it can fail is if there is an error in the +// test source code. +func newAddressPublicKey(publicKey []byte) util.Address { + addr, err := util.NewAddressPublicKey(publicKey, util.Bech32PrefixSpectre) + if err != nil { + panic("invalid public key in test source") + } + + return addr +} + +// newAddressPublicKeyECDSA returns a new util.AddressPublicKeyECDSA from the +// provided public key. It panics if an error occurs. This is only used in the tests +// as a helper since the only way it can fail is if there is an error in the +// test source code. 
+func newAddressPublicKeyECDSA(publicKey []byte) util.Address { + addr, err := util.NewAddressPublicKeyECDSA(publicKey, util.Bech32PrefixSpectre) + if err != nil { + panic("invalid public key in test source") + } + + return addr +} + +// newAddressScriptHash returns a new util.AddressScriptHash from the +// provided hash. It panics if an error occurs. This is only used in the tests +// as a helper since the only way it can fail is if there is an error in the +// test source code. +func newAddressScriptHash(scriptHash []byte) util.Address { + addr, err := util.NewAddressScriptHashFromHash(scriptHash, + util.Bech32PrefixSpectre) + if err != nil { + panic("invalid script hash in test source") + } + + return addr +} + +// TestExtractScriptPubKeyAddrs ensures that extracting the type, addresses, and +// number of required signatures from scriptPubKeys works as intended. +func TestExtractScriptPubKeyAddrs(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + script *externalapi.ScriptPublicKey + addr util.Address + class ScriptClass + }{ + { + name: "standard p2pk", + script: &externalapi.ScriptPublicKey{ + Script: hexToBytes("202454a285d8566b0cb2792919536ee0f1b6f69b58ba59e9850ecbc91eef722daeac"), + Version: 0, + }, + addr: newAddressPublicKey(hexToBytes("2454a285d8566b0cb2792919536ee0f1b6f69b58ba59e9850ecbc91eef722dae")), + class: PubKeyTy, + }, + { + name: "standard p2pk ECDSA", + script: &externalapi.ScriptPublicKey{ + Script: hexToBytes("212454a285d8566b0cb2792919536ee0f1b6f69b58ba59e9850ecbc91eef722daeaaab"), + Version: 0, + }, + addr: newAddressPublicKeyECDSA(hexToBytes("2454a285d8566b0cb2792919536ee0f1b6f69b58ba59e9850ecbc91eef722daeaa")), + class: PubKeyECDSATy, + }, + { + name: "standard p2sh", + script: &externalapi.ScriptPublicKey{ + Script: hexToBytes("aa2063bcc565f9e68ee0189dd5cc67f1b" + + "0e5f02f45cbad06dd6ddee55cbca9a9e37187"), + Version: 0, + }, + addr: newAddressScriptHash(hexToBytes("63bcc565f9e6" + + 
"8ee0189dd5cc67f1b0e5f02f45cbad06dd6ddee55cbca9a9e371")), + class: ScriptHashTy, + }, + + // The below are nonstandard script due to things such as + // invalid pubkeys, failure to parse, and not being of a + // standard form. + + { + name: "p2pk with uncompressed pk missing OP_CHECKSIG", + script: &externalapi.ScriptPublicKey{ + Script: hexToBytes("410411db93e1dcdb8a016b49840f8c53b" + + "c1eb68a382e97b1482ecad7b148a6909a5cb2e0eaddf" + + "b84ccf9744464f82e160bfa9b8b64f9d4c03f999b864" + + "3f656b412a3"), + Version: 0, + }, + addr: nil, + class: NonStandardTy, + }, + { + name: "valid signature from a sigscript - no addresses", + script: &externalapi.ScriptPublicKey{ + Script: hexToBytes("47304402204e45e16932b8af514961a1d" + + "3a1a25fdf3f4f7732e9d624c6c61548ab5fb8cd41022" + + "0181522ec8eca07de4860a4acdd12909d831cc56cbba" + + "c4622082221a8768d1d0901"), + Version: 0, + }, + addr: nil, + class: NonStandardTy, + }, + // Note the technically the pubkey is the second item on the + // stack, but since the address extraction intentionally only + // works with standard scriptPubKeys, this should not return any + // addresses. 
+ { + name: "valid sigscript to reedeem p2pk - no addresses", + script: &externalapi.ScriptPublicKey{ + Script: hexToBytes("493046022100ddc69738bf2336318e4e0" + + "41a5a77f305da87428ab1606f023260017854350ddc0" + + "22100817af09d2eec36862d16009852b7e3a0f6dd765" + + "98290b7834e1453660367e07a014104cd4240c198e12" + + "523b6f9cb9f5bed06de1ba37e96a1bbd13745fcf9d11" + + "c25b1dff9a519675d198804ba9962d3eca2d5937d58e" + + "5a75a71042d40388a4d307f887d"), + Version: 0, + }, + addr: nil, + class: NonStandardTy, + }, + { + name: "empty script", + script: &externalapi.ScriptPublicKey{ + Script: []byte{}, + Version: 0, + }, + addr: nil, + class: NonStandardTy, + }, + { + name: "script that does not parse", + script: &externalapi.ScriptPublicKey{ + Script: []byte{OpData45}, + Version: 0, + }, + addr: nil, + class: NonStandardTy, + }, + } + + t.Logf("Running %d tests.", len(tests)) + for i, test := range tests { + class, addr, _ := ExtractScriptPubKeyAddress( + test.script, &dagconfig.MainnetParams) + + if !reflect.DeepEqual(addr, test.addr) { + t.Errorf("ExtractScriptPubKeyAddress #%d (%s) unexpected "+ + "address\ngot %v\nwant %v", i, test.name, + addr, test.addr) + continue + } + + if class != test.class { + t.Errorf("ExtractScriptPubKeyAddress #%d (%s) unexpected "+ + "script type - got %s, want %s", i, test.name, + class, test.class) + continue + } + } +} + +// TestCalcScriptInfo ensures the CalcScriptInfo provides the expected results +// for various valid and invalid script pairs. 
+func TestCalcScriptInfo(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + sigScript string + scriptPubKey string + + isP2SH bool + + scriptInfo ScriptInfo + scriptInfoErr error + }{ + { + // Invented scripts, the hashes do not match + // Truncated version of test below: + name: "scriptPubKey doesn't parse", + sigScript: "1 81 DATA_8 2DUP EQUAL NOT VERIFY ABS " + + "SWAP ABS EQUAL", + scriptPubKey: "BLAKE2B DATA_32 0xfe441065b6532231de2fac56" + + "3152205ec4f59cfe441065b6532231de2fac56", + isP2SH: true, + scriptInfoErr: scriptError(ErrMalformedPush, ""), + }, + { + name: "sigScript doesn't parse", + // Truncated version of p2sh script below. + sigScript: "1 81 DATA_8 2DUP EQUAL NOT VERIFY ABS " + + "SWAP ABS", + scriptPubKey: "BLAKE2B DATA_32 0xfe441065b6532231de2fac56" + + "3152205ec4f59c74fe441065b6532231de2fac56 EQUAL", + isP2SH: true, + scriptInfoErr: scriptError(ErrMalformedPush, ""), + }, + { + // Invented scripts, the hashes do not match + name: "p2sh standard script", + sigScript: "1 81 DATA_34 DATA_32 0x010203" + + "0405060708090a0b0c0d0e0f1011121314fe441065b6532231de2fac56 " + + "CHECKSIG", + scriptPubKey: "BLAKE2B DATA_32 0xfe441065b6532231de2fac56" + + "3152205ec4f59c74fe441065b6532231de2fac56 EQUAL", + isP2SH: true, + scriptInfo: ScriptInfo{ + ScriptPubKeyClass: ScriptHashTy, + NumInputs: 3, + ExpectedInputs: 2, // nonstandard p2sh. + SigOps: 1, + }, + }, + { + name: "p2sh nonstandard script", + sigScript: "1 81 DATA_8 2DUP EQUAL NOT VERIFY ABS " + + "SWAP ABS EQUAL", + scriptPubKey: "BLAKE2B DATA_32 0xfe441065b6532231de2fac56" + + "3152205ec4f59c74fe441065b6532231de2fac56 EQUAL", + isP2SH: true, + scriptInfo: ScriptInfo{ + ScriptPubKeyClass: ScriptHashTy, + NumInputs: 3, + ExpectedInputs: -1, // nonstandard p2sh. 
+ SigOps: 0, + }, + }, + } + + for _, test := range tests { + sigScript := mustParseShortForm(test.sigScript, 0) + scriptPubKey := mustParseShortForm(test.scriptPubKey, 0) + + si, err := CalcScriptInfo(sigScript, scriptPubKey, test.isP2SH) + if e := checkScriptError(err, test.scriptInfoErr); e != nil { + t.Errorf("scriptinfo test %q: %v", test.name, e) + continue + } + if err != nil { + continue + } + + if *si != test.scriptInfo { + t.Errorf("%s: scriptinfo doesn't match expected. "+ + "got: %q expected %q", test.name, *si, + test.scriptInfo) + continue + } + } +} + +// bogusAddress implements the util.Address interface so the tests can ensure +// unsupported address types are handled properly. +type bogusAddress struct{} + +// EncodeAddress simply returns an empty string. It exists to satisfy the +// util.Address interface. +func (b *bogusAddress) EncodeAddress() string { + return "" +} + +// ScriptAddress simply returns an empty byte slice. It exists to satisfy the +// util.Address interface. +func (b *bogusAddress) ScriptAddress() []byte { + return nil +} + +// IsForPrefix lies blatantly to satisfy the util.Address interface. +func (b *bogusAddress) IsForPrefix(prefix util.Bech32Prefix) bool { + return true // why not? +} + +// String simply returns an empty string. It exists to satisfy the +// util.Address interface. +func (b *bogusAddress) String() string { + return "" +} + +func (b *bogusAddress) Prefix() util.Bech32Prefix { + return util.Bech32PrefixUnknown +} + +// TestPayToAddrScript ensures the PayToAddrScript function generates the +// correct scripts for the various types of addresses. 
+func TestPayToAddrScript(t *testing.T) { + t.Parallel() + + p2pkMain, err := util.NewAddressPublicKey(hexToBytes("e34cce70c86"+ + "373273efcc54ce7d2a491bb4a0e84e34cce70c86373273efcc54c"), util.Bech32PrefixSpectre) + if err != nil { + t.Fatalf("Unable to create public key address: %v", err) + } + + p2shMain, err := util.NewAddressScriptHashFromHash(hexToBytes("e8c300"+ + "c87986efa84c37c0519929019ef86eb5b4e34cce70c86373273efcc54c"), util.Bech32PrefixSpectre) + if err != nil { + t.Fatalf("Unable to create script hash address: %v", err) + } + + // Errors used in the tests below defined here for convenience and to + // keep the horizontal test size shorter. + errUnsupportedAddress := scriptError(ErrUnsupportedAddress, "") + + tests := []struct { + in util.Address + expectedScript string + expectedVersion uint16 + err error + }{ + // pay-to-pubkey address on mainnet + { + p2pkMain, + "DATA_32 0xe34cce70c86373273efcc54ce7d2a4" + + "91bb4a0e84e34cce70c86373273efcc54c CHECKSIG", + 0, + nil, + }, + // pay-to-script-hash address on mainnet + { + p2shMain, + "BLAKE2B DATA_32 0xe8c300c87986efa84c37c0519929019ef8" + + "6eb5b4e34cce70c86373273efcc54c EQUAL", + 0, + nil, + }, + + // Supported address types with nil pointers. + {(*util.AddressPublicKey)(nil), "", 0, errUnsupportedAddress}, + {(*util.AddressScriptHash)(nil), "", 0, errUnsupportedAddress}, + + // Unsupported address type. 
+ {&bogusAddress{}, "", 0, errUnsupportedAddress}, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + scriptPublicKey, err := PayToAddrScript(test.in) + if e := checkScriptError(err, test.err); e != nil { + t.Errorf("PayToAddrScript #%d unexpected error - "+ + "got %v, want %v", i, err, test.err) + continue + } + + var scriptPublicKeyScript []byte + var scriptPublicKeyVersion uint16 + if scriptPublicKey != nil { + scriptPublicKeyScript = scriptPublicKey.Script + scriptPublicKeyVersion = scriptPublicKey.Version + } + + expectedVersion := test.expectedVersion + expectedScript := mustParseShortForm(test.expectedScript, test.expectedVersion) + if !bytes.Equal(scriptPublicKeyScript, expectedScript) { + t.Errorf("PayToAddrScript #%d got: %x\nwant: %x", + i, scriptPublicKey, expectedScript) + continue + } + if scriptPublicKeyVersion != expectedVersion { + t.Errorf("PayToAddrScript #%d got version: %d\nwant: %d", + i, scriptPublicKeyVersion, expectedVersion) + continue + } + } +} + +// scriptClassTests houses several test scripts used to ensure various class +// determination is working as expected. It's defined as a test global versus +// inside a function scope since this spans both the standard tests and the +// consensus tests (pay-to-script-hash is part of consensus). 
+var scriptClassTests = []struct { + name string + script string + class ScriptClass +}{ + // p2pk + { + name: "Pay Pubkey", + script: "DATA_32 0x89ac24ea10bb751af4939623ccc5e550d96842b64e8fca0f63e94b4373fd555e CHECKSIG", + class: PubKeyTy, + }, + // p2pk ECDSA + { + name: "Pay Pubkey ECDSA", + script: "DATA_33 0x89ac24ea10bb751af4939623ccc5e550d96842b64e8fca0f63e94b4373fd555eab CHECKSIGECDSA", + class: PubKeyECDSATy, + }, + { + name: "Pay PubkeyHash", + script: "DUP BLAKE2B DATA_32 0x660d4ef3a743e3e696ad990364e55543e3e696ad990364e555e555" + + "c271ad504b EQUALVERIFY CHECKSIG", + class: NonStandardTy, + }, + // mutlisig + { + name: "multisig", + script: "1 DATA_33 0x0232abdc893e7f0631364d7fd01cb33d24da4" + + "5329a00357b3a7886211ab414d55a 1 CHECKMULTISIG", + class: NonStandardTy, + }, + { + name: "P2SH", + script: "BLAKE2B DATA_32 0x433ec2ac1ffa1b7b7d027f564529c57197fa1b7b7d027f564529c57197f" + + "9ae88 EQUAL", + class: ScriptHashTy, + }, + + { + // Nulldata. It is standard in Bitcoin but not in Spectre + name: "nulldata", + script: "RETURN 0", + class: NonStandardTy, + }, + + // The next few are almost multisig (it is the more complex script type) + // but with various changes to make it fail. + { + // Multisig but invalid nsigs. + name: "strange 1", + script: "DUP DATA_33 0x0232abdc893e7f0631364d7fd01cb33d24da45" + + "329a00357b3a7886211ab414d55a 1 CHECKMULTISIG", + class: NonStandardTy, + }, + { + // Multisig but invalid pubkey. + name: "strange 2", + script: "1 1 1 CHECKMULTISIG", + class: NonStandardTy, + }, + { + // Multisig but no matching npubkeys opcode. + name: "strange 3", + script: "1 DATA_33 0x0232abdc893e7f0631364d7fd01cb33d24da4532" + + "9a00357b3a7886211ab414d55a DATA_33 0x0232abdc893e7f0" + + "631364d7fd01cb33d24da45329a00357b3a7886211ab414d55a " + + "CHECKMULTISIG", + class: NonStandardTy, + }, + { + // Multisig but with multisigverify. 
+ name: "strange 4", + script: "1 DATA_33 0x0232abdc893e7f0631364d7fd01cb33d24da4532" + + "9a00357b3a7886211ab414d55a 1 CHECKMULTISIGVERIFY", + class: NonStandardTy, + }, + { + // Multisig but wrong length. + name: "strange 5", + script: "1 CHECKMULTISIG", + class: NonStandardTy, + }, + { + name: "doesn't parse", + script: "DATA_5 0x01020304", + class: NonStandardTy, + }, + { + name: "multisig script with wrong number of pubkeys", + script: "2 " + + "DATA_33 " + + "0x027adf5df7c965a2d46203c781bd4dd8" + + "21f11844136f6673af7cc5a4a05cd29380 " + + "DATA_33 " + + "0x02c08f3de8ee2de9be7bd770f4c10eb0" + + "d6ff1dd81ee96eedd3a9d4aeaf86695e80 " + + "3 CHECKMULTISIG", + class: NonStandardTy, + }, +} + +// TestScriptClass ensures all the scripts in scriptClassTests have the expected +// class. +func TestScriptClass(t *testing.T) { + t.Parallel() + + for _, test := range scriptClassTests { + script := mustParseShortForm(test.script, 0) + class := GetScriptClass(script) + if class != test.class { + t.Errorf("%s: expected %s got %s (script %x)", test.name, + test.class, class, script) + continue + } + } +} + +// TestStringifyClass ensures the script class string returns the expected +// string for each script class. 
+func TestStringifyClass(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name     string
+		class    ScriptClass
+		stringed string
+	}{
+		{
+			name:     "nonstandardty",
+			class:    NonStandardTy,
+			stringed: "nonstandard",
+		},
+		{
+			name:     "pubkey",
+			class:    PubKeyTy,
+			stringed: "pubkey",
+		},
+		{
+			name:     "pubkeyecdsa",
+			class:    PubKeyECDSATy,
+			stringed: "pubkeyecdsa",
+		},
+		{
+			name:     "scripthash",
+			class:    ScriptHashTy,
+			stringed: "scripthash",
+		},
+		{
+			name:     "broken",
+			class:    ScriptClass(255),
+			stringed: "Invalid",
+		},
+	}
+
+	for _, test := range tests {
+		typeString := test.class.String()
+		if typeString != test.stringed {
+			t.Errorf("%s: got %#q, want %#q", test.name,
+				typeString, test.stringed)
+		}
+	}
+}
diff --git a/domain/consensus/utils/utxo/diff_algebra.go b/domain/consensus/utils/utxo/diff_algebra.go
new file mode 100644
index 0000000..797e27e
--- /dev/null
+++ b/domain/consensus/utils/utxo/diff_algebra.go
@@ -0,0 +1,222 @@
+package utxo
+
+import (
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+// checkIntersection checks if there is an intersection between two utxoCollections
+func checkIntersection(collection1 utxoCollection, collection2 utxoCollection) bool {
+	for outpoint := range collection1 {
+		if collection2.Contains(&outpoint) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// checkIntersectionWithRule checks if there is an intersection between two utxoCollections satisfying arbitrary rule
+// returns the first outpoint in the two collections' intersection satisfying the rule, and a boolean indicating whether
+// such outpoint exists
+func checkIntersectionWithRule(collection1 utxoCollection, collection2 utxoCollection,
+	extraRule func(*externalapi.DomainOutpoint, externalapi.UTXOEntry, externalapi.UTXOEntry) bool) (
+	*externalapi.DomainOutpoint, bool) {
+
+	for outpoint, utxoEntry := range collection1 {
+		if diffEntry, ok := collection2.Get(&outpoint); ok {
+			if 
extraRule(&outpoint, utxoEntry, diffEntry) { + return &outpoint, true + } + } + } + + return nil, false +} + +// intersectionWithRemainderHavingDAAScoreInPlace calculates an intersection between two utxoCollections +// having same DAA score, puts it into result and into remainder from collection1 +func intersectionWithRemainderHavingDAAScoreInPlace(collection1, collection2, result, remainder utxoCollection) { + for outpoint, utxoEntry := range collection1 { + if collection2.containsWithDAAScore(&outpoint, utxoEntry.BlockDAAScore()) { + result.add(&outpoint, utxoEntry) + } else { + remainder.add(&outpoint, utxoEntry) + } + } +} + +// subtractionHavingDAAScoreInPlace calculates a subtraction between collection1 and collection2 +// having same DAA score, puts it into result +func subtractionHavingDAAScoreInPlace(collection1, collection2, result utxoCollection) { + for outpoint, utxoEntry := range collection1 { + if !collection2.containsWithDAAScore(&outpoint, utxoEntry.BlockDAAScore()) { + result.add(&outpoint, utxoEntry) + } + } +} + +// subtractionWithRemainderHavingDAAScoreInPlace calculates a subtraction between collection1 and collection2 +// having same DAA score, puts it into result and into remainder from collection1 +func subtractionWithRemainderHavingDAAScoreInPlace(collection1, collection2, result, remainder utxoCollection) { + for outpoint, utxoEntry := range collection1 { + if !collection2.containsWithDAAScore(&outpoint, utxoEntry.BlockDAAScore()) { + result.add(&outpoint, utxoEntry) + } else { + remainder.add(&outpoint, utxoEntry) + } + } +} + +// DiffFrom returns a new mutableUTXODiff with the difference between this mutableUTXODiff and another +// Assumes that: +// Both mutableUTXODiffs are from the same base +// If a txOut exists in both mutableUTXODiffs, its underlying values would be the same +// +// diffFrom follows a set of rules represented by the following 3 by 3 table: +// +// | | this | | +// +// 
---------+-----------+-----------+-----------+----------- +// +// | | toAdd | toRemove | None +// +// ---------+-----------+-----------+-----------+----------- +// other | toAdd | - | X | toAdd +// ---------+-----------+-----------+-----------+----------- +// +// | toRemove | X | - | toRemove +// +// ---------+-----------+-----------+-----------+----------- +// +// | None | toRemove | toAdd | - +// +// Key: +// - Don't add anything to the result +// X Return an error +// toAdd Add the UTXO into the toAdd collection of the result +// toRemove Add the UTXO into the toRemove collection of the result +// +// Examples: +// 1. This diff contains a UTXO in toAdd, and the other diff contains it in toRemove +// diffFrom results in an error +// 2. This diff contains a UTXO in toRemove, and the other diff does not contain it +// diffFrom results in the UTXO being added to toAdd +func diffFrom(this, other *mutableUTXODiff) (*mutableUTXODiff, error) { + // Note that the following cases are not accounted for, as they are impossible + // as long as the base utxoSet is the same: + // - if utxoEntry is in this.toAdd and other.toRemove + // - if utxoEntry is in this.toRemove and other.toAdd + + // check that NOT (entries with unequal DAA scores AND utxoEntry is in this.toAdd and/or other.toRemove) -> Error + isNotAddedOutputRemovedWithDAAScore := func(outpoint *externalapi.DomainOutpoint, utxoEntry, diffEntry externalapi.UTXOEntry) bool { + return !(diffEntry.BlockDAAScore() != utxoEntry.BlockDAAScore() && + (this.toAdd.containsWithDAAScore(outpoint, diffEntry.BlockDAAScore()) || + other.toRemove.containsWithDAAScore(outpoint, utxoEntry.BlockDAAScore()))) + } + + if offendingOutpoint, ok := + checkIntersectionWithRule(this.toRemove, other.toAdd, isNotAddedOutputRemovedWithDAAScore); ok { + return nil, errors.Errorf("diffFrom: outpoint %s both in this.toAdd and in other.toRemove", offendingOutpoint) + } + + //check that NOT (entries with unequal DAA score AND utxoEntry is in 
this.toRemove and/or other.toAdd) -> Error + isNotRemovedOutputAddedWithDAAScore := + func(outpoint *externalapi.DomainOutpoint, utxoEntry, diffEntry externalapi.UTXOEntry) bool { + + return !(diffEntry.BlockDAAScore() != utxoEntry.BlockDAAScore() && + (this.toRemove.containsWithDAAScore(outpoint, diffEntry.BlockDAAScore()) || + other.toAdd.containsWithDAAScore(outpoint, utxoEntry.BlockDAAScore()))) + } + + if offendingOutpoint, ok := + checkIntersectionWithRule(this.toAdd, other.toRemove, isNotRemovedOutputAddedWithDAAScore); ok { + return nil, errors.Errorf("diffFrom: outpoint %s both in this.toRemove and in other.toAdd", offendingOutpoint) + } + + // if have the same entry in this.toRemove and other.toRemove + // and existing entry is with different DAA score, in this case - this is an error + if offendingOutpoint, ok := checkIntersectionWithRule(this.toRemove, other.toRemove, + func(outpoint *externalapi.DomainOutpoint, utxoEntry, diffEntry externalapi.UTXOEntry) bool { + return utxoEntry.BlockDAAScore() != diffEntry.BlockDAAScore() + }); ok { + return nil, errors.Errorf("diffFrom: outpoint %s both in this.toRemove and other.toRemove with different "+ + "DAA scores, with no corresponding entry in this.toAdd", offendingOutpoint) + } + + result := &mutableUTXODiff{ + toAdd: make(utxoCollection), + toRemove: make(utxoCollection), + } + + // All transactions in this.toAdd: + // If they are not in other.toAdd - should be added in result.toRemove + inBothToAdd := make(utxoCollection) + subtractionWithRemainderHavingDAAScoreInPlace(this.toAdd, other.toAdd, result.toRemove, inBothToAdd) + // If they are in other.toRemove - base utxoSet is not the same + if checkIntersection(inBothToAdd, this.toRemove) != checkIntersection(inBothToAdd, other.toRemove) { + return nil, errors.New( + "diffFrom: outpoint both in this.toAdd, other.toAdd, and only one of this.toRemove and other.toRemove") + } + + // All transactions in other.toRemove: + // If they are not in this.toRemove - 
should be added in result.toRemove
+	subtractionHavingDAAScoreInPlace(other.toRemove, this.toRemove, result.toRemove)
+
+	// All transactions in this.toRemove:
+	// If they are not in other.toRemove - should be added in result.toAdd
+	subtractionHavingDAAScoreInPlace(this.toRemove, other.toRemove, result.toAdd)
+
+	// All transactions in other.toAdd:
+	// If they are not in this.toAdd - should be added in result.toAdd
+	subtractionHavingDAAScoreInPlace(other.toAdd, this.toAdd, result.toAdd)
+
+	return result, nil
+}
+
+// withDiffInPlace applies provided diff to this diff in-place, that would be the result if
+// first this, and then other were applied to the same base
+func withDiffInPlace(this *mutableUTXODiff, other *mutableUTXODiff) error {
+	if offendingOutpoint, ok := checkIntersectionWithRule(other.toRemove, this.toRemove,
+		func(outpoint *externalapi.DomainOutpoint, entryToAdd, existingEntry externalapi.UTXOEntry) bool {
+			return !this.toAdd.containsWithDAAScore(outpoint, entryToAdd.BlockDAAScore())
+		}); ok {
+		return errors.Errorf(
+			"withDiffInPlace: outpoint %s both in this.toRemove and in other.toRemove", offendingOutpoint)
+	}
+
+	if offendingOutpoint, ok := checkIntersectionWithRule(other.toAdd, this.toAdd,
+		func(outpoint *externalapi.DomainOutpoint, entryToAdd, existingEntry externalapi.UTXOEntry) bool {
+			return !other.toRemove.containsWithDAAScore(outpoint, existingEntry.BlockDAAScore())
+		}); ok {
+		return errors.Errorf(
+			"withDiffInPlace: outpoint %s both in this.toAdd and in other.toAdd", offendingOutpoint)
+	}
+
+	intersection := make(utxoCollection)
+	// If not exists neither in toAdd nor in toRemove - add to toRemove
+	intersectionWithRemainderHavingDAAScoreInPlace(other.toRemove, this.toAdd, intersection, this.toRemove)
+	// If already exists in toAdd with the same DAA score - remove from toAdd
+	this.toAdd.removeMultiple(intersection)
+
+	intersection = make(utxoCollection)
+	// If not exists neither in toAdd nor in toRemove, or exists in 
toRemove with different DAA score - add to toAdd
+	intersectionWithRemainderHavingDAAScoreInPlace(other.toAdd, this.toRemove, intersection, this.toAdd)
+	// If already exists in toRemove with the same DAA score - remove from toRemove
+	this.toRemove.removeMultiple(intersection)
+
+	return nil
+}
+
+// withDiff applies provided diff to this diff, creating a new mutableUTXODiff, that would be the result if
+// first this, and then diff were applied to some base
+func withDiff(this *mutableUTXODiff, diff *mutableUTXODiff) (*mutableUTXODiff, error) {
+	clone := this.clone()
+
+	err := withDiffInPlace(clone, diff)
+	if err != nil {
+		return nil, err
+	}
+
+	return clone, nil
+}
diff --git a/domain/consensus/utils/utxo/diff_algebra_test.go b/domain/consensus/utils/utxo/diff_algebra_test.go
new file mode 100644
index 0000000..c896209
--- /dev/null
+++ b/domain/consensus/utils/utxo/diff_algebra_test.go
@@ -0,0 +1,616 @@
+package utxo
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/spectre-project/spectred/domain/consensus/utils/transactionid"
+
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+func (mud *mutableUTXODiff) equal(other *mutableUTXODiff) bool {
+	if mud == nil || other == nil {
+		return mud == other
+	}
+
+	return reflect.DeepEqual(mud.toAdd, other.toAdd) &&
+		reflect.DeepEqual(mud.toRemove, other.toRemove)
+}
+
+// TestUTXOCollection makes sure that utxoCollection cloning and string representations work as expected. 
+func TestUTXOCollection(t *testing.T) { + txID0, _ := transactionid.FromString("0000000000000000000000000000000000000000000000000000000000000000") + txID1, _ := transactionid.FromString("1111111111111111111111111111111111111111111111111111111111111111") + outpoint0 := externalapi.NewDomainOutpoint(txID0, 0) + outpoint1 := externalapi.NewDomainOutpoint(txID1, 0) + utxoEntry0 := NewUTXOEntry(10, &externalapi.ScriptPublicKey{[]byte{}, 0}, true, 0) + utxoEntry1 := NewUTXOEntry(20, &externalapi.ScriptPublicKey{[]byte{}, 0}, false, 1) + + // For each of the following test cases, we will: + // .String() the given collection and compare it to expectedStringWithMultiset + // .clone() the given collection and compare its value to itself (expected: equals) and its reference to itself (expected: not equal) + tests := []struct { + name string + collection utxoCollection + expectedString string + }{ + { + name: "empty collection", + collection: utxoCollection{}, + expectedString: "[ ]", + }, + { + name: "one member", + collection: utxoCollection{ + *outpoint0: utxoEntry1, + }, + expectedString: "[ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 20, daaScore: 1 ]", + }, + { + name: "two members", + collection: utxoCollection{ + *outpoint0: utxoEntry0, + *outpoint1: utxoEntry1, + }, + expectedString: "[ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, daaScore: 0, (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, daaScore: 1 ]", + }, + } + + for _, test := range tests { + // Test utxoCollection string representation + collectionString := test.collection.String() + if collectionString != test.expectedString { + t.Errorf("unexpected string in test \"%s\". 
"+ + "Expected: \"%s\", got: \"%s\".", test.name, test.expectedString, collectionString) + } + + // Test utxoCollection cloning + collectionClone := test.collection.Clone() + if reflect.ValueOf(collectionClone).Pointer() == reflect.ValueOf(test.collection).Pointer() { + t.Errorf("collection is reference-equal to its clone in test \"%s\". ", test.name) + } + if !reflect.DeepEqual(test.collection, collectionClone) { + t.Errorf("collection is not equal to its clone in test \"%s\". "+ + "Expected: \"%s\", got: \"%s\".", test.name, collectionString, collectionClone.String()) + } + } +} + +// TestutxoDiff makes sure that mutableUTXODiff creation, cloning, and string representations work as expected. +func TestUTXODiff(t *testing.T) { + txID0, _ := transactionid.FromString("0000000000000000000000000000000000000000000000000000000000000000") + txID1, _ := transactionid.FromString("1111111111111111111111111111111111111111111111111111111111111111") + outpoint0 := externalapi.NewDomainOutpoint(txID0, 0) + outpoint1 := externalapi.NewDomainOutpoint(txID1, 0) + utxoEntry0 := NewUTXOEntry(10, &externalapi.ScriptPublicKey{[]byte{}, 0}, true, 0) + utxoEntry1 := NewUTXOEntry(20, &externalapi.ScriptPublicKey{[]byte{}, 0}, false, 1) + + diff := newMutableUTXODiff() + + if len(diff.toAdd) != 0 || len(diff.toRemove) != 0 { + t.Errorf("new diff is not empty") + } + + err := diff.addEntry(outpoint0, utxoEntry0) + if err != nil { + t.Fatalf("error adding entry to utxo diff: %s", err) + } + + err = diff.removeEntry(outpoint1, utxoEntry1) + if err != nil { + t.Fatalf("error adding entry to utxo diff: %s", err) + } + + // Test mutableUTXODiff cloning + clonedDiff := diff.clone() + if clonedDiff == diff { + t.Errorf("cloned diff is reference-equal to the original") + } + if !reflect.DeepEqual(clonedDiff, diff) { + t.Errorf("cloned diff not equal to the original"+ + "Original: \"%v\", cloned: \"%v\".", diff, clonedDiff) + } + + // Test mutableUTXODiff string representation + expectedDiffString 
:= "toAdd: [ (0000000000000000000000000000000000000000000000000000000000000000, 0) => 10, daaScore: 0 ]; toRemove: [ (1111111111111111111111111111111111111111111111111111111111111111, 0) => 20, daaScore: 1 ]" + diffString := clonedDiff.String() + if diffString != expectedDiffString { + t.Errorf("unexpected diff string. "+ + "Expected: \"%s\", got: \"%s\".", expectedDiffString, diffString) + } +} + +// TestutxoDiffRules makes sure that all diffFrom and WithDiff rules are followed. +// Each test case represents a cell in the two tables outlined in the documentation for mutableUTXODiff. +func TestUTXODiffRules(t *testing.T) { + txID0, _ := transactionid.FromString("0000000000000000000000000000000000000000000000000000000000000000") + outpoint0 := externalapi.NewDomainOutpoint(txID0, 0) + utxoEntry1 := NewUTXOEntry(10, &externalapi.ScriptPublicKey{[]byte{}, 0}, true, 0) + utxoEntry2 := NewUTXOEntry(20, &externalapi.ScriptPublicKey{[]byte{}, 0}, true, 1) + + // For each of the following test cases, we will: + // this.diffFrom(other) and compare it to expectedDiffFromResult + // this.WithDiff(other) and compare it to expectedWithDiffResult + // this.withDiffInPlace(other) and compare it to expectedWithDiffResult + // + // Note: an expected nil result means that we expect the respective operation to fail + // See the following spreadsheet for a summary of all test-cases: + // https://docs.google.com/spreadsheets/d/1E8G3mp5y1-yifouwLLXRLueSRfXdDRwRKFieYE07buY/edit?usp=sharing + tests := []struct { + name string + this *mutableUTXODiff + other *mutableUTXODiff + expectedDiffFromResult *mutableUTXODiff + expectedWithDiffResult *mutableUTXODiff + }{ + { + name: "first toAdd in this, first toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: 
utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd in this, second in toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd in this, second in toRemove in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + }, + { + name: "first in toAdd in this and other, second in toRemove in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd in this and toRemove in other, second in toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{}, + }, + }, + { + name: "first in toAdd in this, empty other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + 
toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + }, + { + name: "first in toRemove in this and in toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + }, + { + name: "first in toRemove in this, second in toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + }, + { + name: "first in toRemove in this and other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: nil, + }, + { + name: "first in toRemove in this, second in toRemove in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + 
expectedDiffFromResult: nil, + expectedWithDiffResult: nil, + }, + { + name: "first in toRemove in this and toAdd in other, second in toRemove in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: nil, + }, + { + name: "first in toRemove in this and other, second in toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: nil, + }, + { + name: "first in toRemove in this, empty other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + }, + { + name: "first in toAdd in this and other, second in toRemove in this", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd in this, second in toRemove in this and toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: 
utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd in this and toRemove in other, second in toRemove in this", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + }, + { + name: "first in toAdd in this, second in toRemove in this and in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd and second in toRemove in both this and other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: nil, + }, + { + name: "first in toAdd in this and toRemove in other, second in toRemove in this and toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: 
utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: nil, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + }, + { + name: "first in toAdd and second in toRemove in this, empty other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry2}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + }, + { + name: "empty this, first in toAdd in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{}, + }, + }, + { + name: "empty this, first in toRemove in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{*outpoint0: utxoEntry1}, + }, + }, + { + name: "empty this, first in toAdd and second in toRemove in other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + 
toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{*outpoint0: utxoEntry1}, + toRemove: utxoCollection{*outpoint0: utxoEntry2}, + }, + }, + { + name: "empty this, empty other", + this: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + other: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedDiffFromResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + expectedWithDiffResult: &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + }, + }, + } + + for _, test := range tests { + // diffFrom from test.this to test.other + diffResult, err := diffFrom(test.this, test.other) + + // Test whether diffFrom returned an error + isDiffFromOk := err == nil + expectedIsDiffFromOk := test.expectedDiffFromResult != nil + if isDiffFromOk != expectedIsDiffFromOk { + t.Errorf("unexpected diffFrom error in test \"%s\". "+ + "Expected: \"%t\", got: \"%t\".", test.name, expectedIsDiffFromOk, isDiffFromOk) + } + + // If not error, test the diffFrom result + if isDiffFromOk && !test.expectedDiffFromResult.equal(diffResult) { + t.Errorf("unexpected diffFrom result in test \"%s\". "+ + "Expected: \"%v\", got: \"%v\".", test.name, test.expectedDiffFromResult, diffResult) + } + + // Make sure that WithDiff after diffFrom results in the original test.other + if isDiffFromOk { + otherResult, err := withDiff(test.this, diffResult) + if err != nil { + t.Errorf("WithDiff unexpectedly failed in test \"%s\": %s", test.name, err) + } + if !test.other.equal(otherResult) { + t.Errorf("unexpected WithDiff result in test \"%s\". 
"+ + "Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult) + } + } + + // WithDiff from test.this to test.other + withDiffResult, err := withDiff(test.this, test.other) + + // Test whether WithDiff returned an error + isWithDiffOk := err == nil + expectedIsWithDiffOk := test.expectedWithDiffResult != nil + if isWithDiffOk != expectedIsWithDiffOk { + t.Errorf("unexpected WithDiff error in test \"%s\". "+ + "Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffOk, isWithDiffOk) + } + + // If not error, test the WithDiff result + if isWithDiffOk && !withDiffResult.equal(test.expectedWithDiffResult) { + t.Errorf("unexpected WithDiff result in test \"%s\". "+ + "Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, withDiffResult) + } + + // Repeat WithDiff check test.this time using withDiffInPlace + thisClone := test.this.clone() + err = withDiffInPlace(thisClone, test.other) + + // Test whether withDiffInPlace returned an error + isWithDiffInPlaceOk := err == nil + expectedIsWithDiffInPlaceOk := test.expectedWithDiffResult != nil + if isWithDiffInPlaceOk != expectedIsWithDiffInPlaceOk { + t.Errorf("unexpected withDiffInPlace error in test \"%s\". "+ + "Expected: \"%t\", got: \"%t\".", test.name, expectedIsWithDiffInPlaceOk, isWithDiffInPlaceOk) + } + + // If not error, test the withDiffInPlace result + if isWithDiffInPlaceOk && !thisClone.equal(test.expectedWithDiffResult) { + t.Errorf("unexpected withDiffInPlace result in test \"%s\". "+ + "Expected: \"%v\", got: \"%v\".", test.name, test.expectedWithDiffResult, thisClone) + } + + // Make sure that diffFrom after WithDiff results in the original test.other + if isWithDiffOk { + otherResult, err := diffFrom(test.this, withDiffResult) + if err != nil { + t.Errorf("diffFrom unexpectedly failed in test \"%s\": %s", test.name, err) + } + if !test.other.equal(otherResult) { + t.Errorf("unexpected diffFrom result in test \"%s\". 
"+ + "Expected: \"%v\", got: \"%v\".", test.name, test.other, otherResult) + } + } + } +} diff --git a/domain/consensus/utils/utxo/immutable_utxo_diff.go b/domain/consensus/utils/utxo/immutable_utxo_diff.go new file mode 100644 index 0000000..d59f4ef --- /dev/null +++ b/domain/consensus/utils/utxo/immutable_utxo_diff.go @@ -0,0 +1,103 @@ +package utxo + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type immutableUTXODiff struct { + mutableUTXODiff *mutableUTXODiff + + isInvalidated bool +} + +func (iud *immutableUTXODiff) ToAdd() externalapi.UTXOCollection { + if iud.isInvalidated { + panic(errors.New("Attempt to read from an invalidated UTXODiff")) + } + + return iud.mutableUTXODiff.ToAdd() +} + +func (iud *immutableUTXODiff) ToRemove() externalapi.UTXOCollection { + if iud.isInvalidated { + panic(errors.New("Attempt to read from an invalidated UTXODiff")) + } + + return iud.mutableUTXODiff.ToRemove() +} + +func (iud *immutableUTXODiff) WithDiff(other externalapi.UTXODiff) (externalapi.UTXODiff, error) { + if iud.isInvalidated { + panic(errors.New("Attempt to read from an invalidated UTXODiff")) + } + + return iud.mutableUTXODiff.WithDiff(other) +} + +func (iud *immutableUTXODiff) DiffFrom(other externalapi.UTXODiff) (externalapi.UTXODiff, error) { + if iud.isInvalidated { + panic(errors.New("Attempt to read from an invalidated UTXODiff")) + } + + return iud.mutableUTXODiff.DiffFrom(other) +} + +// NewUTXODiff creates an empty UTXODiff +func NewUTXODiff() externalapi.UTXODiff { + return newUTXODiff() +} + +func newUTXODiff() *immutableUTXODiff { + return &immutableUTXODiff{ + mutableUTXODiff: newMutableUTXODiff(), + isInvalidated: false, + } +} + +// NewUTXODiffFromCollections returns a new UTXODiff with the given toAdd and toRemove collections +func NewUTXODiffFromCollections(toAdd, toRemove externalapi.UTXOCollection) (externalapi.UTXODiff, error) { + add, ok := toAdd.(utxoCollection) + if 
!ok { + return nil, errors.New("toAdd is not of type utxoCollection") + } + remove, ok := toRemove.(utxoCollection) + if !ok { + return nil, errors.New("toRemove is not of type utxoCollection") + } + return &immutableUTXODiff{ + mutableUTXODiff: &mutableUTXODiff{ + toAdd: add, + toRemove: remove, + }, + }, nil +} + +func (iud *immutableUTXODiff) CloneMutable() externalapi.MutableUTXODiff { + if iud.isInvalidated { + panic(errors.New("Attempt to read from an invalidated UTXODiff")) + } + return iud.cloneMutable() +} + +func (iud *immutableUTXODiff) Reversed() externalapi.UTXODiff { + if iud.isInvalidated { + panic(errors.New("Attempt to read from an invalidated UTXODiff")) + } + return &immutableUTXODiff{ + mutableUTXODiff: iud.mutableUTXODiff.Reversed(), + isInvalidated: false, + } +} + +func (iud *immutableUTXODiff) cloneMutable() *mutableUTXODiff { + if iud == nil { + return nil + } + + return iud.mutableUTXODiff.clone() +} + +func (iud immutableUTXODiff) String() string { + return iud.mutableUTXODiff.String() +} diff --git a/domain/consensus/utils/utxo/mutable_utxo_diff.go b/domain/consensus/utils/utxo/mutable_utxo_diff.go new file mode 100644 index 0000000..bc3cb10 --- /dev/null +++ b/domain/consensus/utils/utxo/mutable_utxo_diff.go @@ -0,0 +1,167 @@ +package utxo + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +type mutableUTXODiff struct { + toAdd utxoCollection + toRemove utxoCollection + + immutableReferences []*immutableUTXODiff +} + +// NewMutableUTXODiff creates an empty mutable UTXO-Diff +func NewMutableUTXODiff() externalapi.MutableUTXODiff { + return newMutableUTXODiff() +} + +func newMutableUTXODiff() *mutableUTXODiff { + return &mutableUTXODiff{ + toAdd: utxoCollection{}, + toRemove: utxoCollection{}, + } +} + 
+func (mud *mutableUTXODiff) ToImmutable() externalapi.UTXODiff { + immutableReference := &immutableUTXODiff{ + mutableUTXODiff: mud, + isInvalidated: false, + } + + mud.immutableReferences = append(mud.immutableReferences, immutableReference) + + return immutableReference +} + +func (mud *mutableUTXODiff) invalidateImmutableReferences() { + for _, immutableReference := range mud.immutableReferences { + immutableReference.isInvalidated = true + } + + mud.immutableReferences = nil +} + +func (mud *mutableUTXODiff) WithDiff(other externalapi.UTXODiff) (externalapi.UTXODiff, error) { + o, ok := other.(*immutableUTXODiff) + if !ok { + return nil, errors.New("other is not of type *immutableUTXODiff") + } + + result, err := withDiff(mud, o.mutableUTXODiff) + if err != nil { + return nil, err + } + + return result.ToImmutable(), nil +} + +func (mud *mutableUTXODiff) WithDiffInPlace(other externalapi.UTXODiff) error { + o, ok := other.(*immutableUTXODiff) + if !ok { + return errors.New("other is not of type *immutableUTXODiff") + } + + mud.invalidateImmutableReferences() + + return withDiffInPlace(mud, o.mutableUTXODiff) +} + +func (mud *mutableUTXODiff) DiffFrom(other externalapi.UTXODiff) (externalapi.UTXODiff, error) { + o, ok := other.(*immutableUTXODiff) + if !ok { + return nil, errors.New("other is not of type *immutableUTXODiff") + } + + result, err := diffFrom(mud, o.mutableUTXODiff) + if err != nil { + return nil, err + } + + return result.ToImmutable(), nil +} + +func (mud *mutableUTXODiff) ToAdd() externalapi.UTXOCollection { + return mud.toAdd +} + +func (mud *mutableUTXODiff) ToRemove() externalapi.UTXOCollection { + return mud.toRemove +} +func (mud *mutableUTXODiff) AddTransaction(transaction *externalapi.DomainTransaction, blockDAAScore uint64) error { + mud.invalidateImmutableReferences() + + for _, input := range transaction.Inputs { + err := mud.removeEntry(&input.PreviousOutpoint, input.UTXOEntry) + if err != nil { + return err + } + } + + isCoinbase := 
transactionhelper.IsCoinBase(transaction) + transactionID := *consensushashing.TransactionID(transaction) + for i, output := range transaction.Outputs { + outpoint := &externalapi.DomainOutpoint{ + TransactionID: transactionID, + Index: uint32(i), + } + entry := NewUTXOEntry(output.Value, output.ScriptPublicKey, isCoinbase, blockDAAScore) + + err := mud.addEntry(outpoint, entry) + if err != nil { + return err + } + } + + return nil +} + +func (mud *mutableUTXODiff) addEntry(outpoint *externalapi.DomainOutpoint, entry externalapi.UTXOEntry) error { + if mud.toRemove.containsWithDAAScore(outpoint, entry.BlockDAAScore()) { + mud.toRemove.remove(outpoint) + } else if mud.toAdd.Contains(outpoint) { + return errors.Errorf("AddEntry: Cannot add outpoint %s twice", outpoint) + } else { + mud.toAdd.add(outpoint, entry) + } + return nil +} + +func (mud *mutableUTXODiff) removeEntry(outpoint *externalapi.DomainOutpoint, entry externalapi.UTXOEntry) error { + if mud.toAdd.containsWithDAAScore(outpoint, entry.BlockDAAScore()) { + mud.toAdd.remove(outpoint) + } else if mud.toRemove.Contains(outpoint) { + return errors.Errorf("removeEntry: Cannot remove outpoint %s twice", outpoint) + } else { + mud.toRemove.add(outpoint, entry) + } + return nil +} + +func (mud *mutableUTXODiff) clone() *mutableUTXODiff { + if mud == nil { + return nil + } + + return &mutableUTXODiff{ + toAdd: mud.toAdd.Clone(), + toRemove: mud.toRemove.Clone(), + } +} + +func (mud *mutableUTXODiff) String() string { + return fmt.Sprintf("toAdd: %s; toRemove: %s", mud.toAdd, mud.toRemove) +} + +func (mud *mutableUTXODiff) Reversed() *mutableUTXODiff { + return &mutableUTXODiff{ + toAdd: mud.toRemove, + toRemove: mud.toAdd, + immutableReferences: mud.immutableReferences, + } +} diff --git a/domain/consensus/utils/utxo/serialization.go b/domain/consensus/utils/utxo/serialization.go new file mode 100644 index 0000000..a6ff46f --- /dev/null +++ b/domain/consensus/utils/utxo/serialization.go @@ -0,0 +1,136 @@ +package 
utxo + +import ( + "bytes" + "io" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/serialization" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" +) + +// SerializeUTXO returns the byte-slice representation for given UTXOEntry-outpoint pair +func SerializeUTXO(entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint) ([]byte, error) { + w := &bytes.Buffer{} + + err := serializeOutpoint(w, outpoint) + if err != nil { + return nil, err + } + + err = serializeUTXOEntry(w, entry) + if err != nil { + return nil, err + } + + return w.Bytes(), nil +} + +// DeserializeUTXO deserializes the given byte slice to UTXOEntry-outpoint pair +func DeserializeUTXO(utxoBytes []byte) (entry externalapi.UTXOEntry, outpoint *externalapi.DomainOutpoint, err error) { + r := bytes.NewReader(utxoBytes) + outpoint, err = deserializeOutpoint(r) + if err != nil { + return nil, nil, err + } + + entry, err = deserializeUTXOEntry(r) + if err != nil { + return nil, nil, err + } + + return entry, outpoint, nil +} + +func serializeOutpoint(w io.Writer, outpoint *externalapi.DomainOutpoint) error { + _, err := w.Write(outpoint.TransactionID.ByteSlice()) + if err != nil { + return err + } + + err = serialization.WriteElement(w, outpoint.Index) + if err != nil { + return errors.WithStack(err) + } + + return nil +} + +func deserializeOutpoint(r io.Reader) (*externalapi.DomainOutpoint, error) { + transactionIDBytes := make([]byte, externalapi.DomainHashSize) + _, err := io.ReadFull(r, transactionIDBytes) + if err != nil { + return nil, err + } + + transactionID, err := transactionid.FromBytes(transactionIDBytes) + if err != nil { + return nil, err + } + + var index uint32 + err = serialization.ReadElement(r, &index) + if err != nil { + return nil, err + } + + return &externalapi.DomainOutpoint{ + TransactionID: *transactionID, + Index: index, + }, nil 
+} + +func serializeUTXOEntry(w io.Writer, entry externalapi.UTXOEntry) error { + err := serialization.WriteElements(w, entry.BlockDAAScore(), entry.Amount(), entry.IsCoinbase()) + if err != nil { + return err + } + err = serialization.WriteElement(w, entry.ScriptPublicKey().Version) + if err != nil { + return err + } + count := uint64(len(entry.ScriptPublicKey().Script)) + err = serialization.WriteElement(w, count) + if err != nil { + return err + } + + _, err = w.Write(entry.ScriptPublicKey().Script) + if err != nil { + return errors.WithStack(err) + } + + return nil +} + +func deserializeUTXOEntry(r io.Reader) (externalapi.UTXOEntry, error) { + var blockDAAScore uint64 + var amount uint64 + var isCoinbase bool + err := serialization.ReadElements(r, &blockDAAScore, &amount, &isCoinbase) + if err != nil { + return nil, err + } + + var version uint16 + err = serialization.ReadElement(r, &version) + if err != nil { + return nil, err + } + + var scriptPubKeyLen uint64 + err = serialization.ReadElement(r, &scriptPubKeyLen) + if err != nil { + return nil, err + } + + scriptPubKeyScript := make([]byte, scriptPubKeyLen) + _, err = io.ReadFull(r, scriptPubKeyScript) + if err != nil { + return nil, errors.WithStack(err) + } + scriptPubKey := externalapi.ScriptPublicKey{scriptPubKeyScript, version} + + return NewUTXOEntry(amount, &scriptPubKey, isCoinbase, blockDAAScore), nil +} diff --git a/domain/consensus/utils/utxo/serialization_test.go b/domain/consensus/utils/utxo/serialization_test.go new file mode 100644 index 0000000..3303630 --- /dev/null +++ b/domain/consensus/utils/utxo/serialization_test.go @@ -0,0 +1,70 @@ +package utxo + +import ( + "encoding/hex" + "reflect" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func Benchmark_serializeUTXO(b *testing.B) { + script, err := hex.DecodeString("76a914ad06dd6ddee55cbca9a9e3713bd7587509a3056488ac") + if err != nil { + b.Fatalf("Error decoding scriptPublicKey string: %s", 
err) + } + scriptPublicKey := &externalapi.ScriptPublicKey{script, 0} + entry := NewUTXOEntry(5000000000, scriptPublicKey, false, 1432432) + outpoint := &externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00, + }), + Index: 0xffffffff, + } + + for i := 0; i < b.N; i++ { + _, err := SerializeUTXO(entry, outpoint) + if err != nil { + b.Fatal(err) + } + } +} + +func Test_serializeUTXO(t *testing.T) { + script, err := hex.DecodeString("76a914ad06dd6ddee55cbca9a9e3713bd7587509a3056488ac") + if err != nil { + t.Fatalf("Error decoding scriptPublicKey script string: %s", err) + } + scriptPublicKey := &externalapi.ScriptPublicKey{Script: script, Version: 0} + entry := NewUTXOEntry(5000000000, scriptPublicKey, false, 1432432) + outpoint := &externalapi.DomainOutpoint{ + TransactionID: *externalapi.NewDomainTransactionIDFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x16, 0x5e, 0x38, 0xe8, 0xb3, 0x91, 0x45, 0x95, + 0xd9, 0xc6, 0x41, 0xf3, 0xb8, 0xee, 0xc2, 0xf3, + 0x46, 0x11, 0x89, 0x6b, 0x82, 0x1a, 0x68, 0x3b, + 0x7a, 0x4e, 0xde, 0xfe, 0x2c, 0x00, 0x00, 0x00, + }), + Index: 0xffffffff, + } + + serialized, err := SerializeUTXO(entry, outpoint) + if err != nil { + t.Fatalf("SerializeUTXO: %+v", err) + } + + deserializedEntry, deserializedOutpoint, err := DeserializeUTXO(serialized) + if err != nil { + t.Fatalf("DeserializeUTXO: %+v", err) + } + + if !reflect.DeepEqual(deserializedEntry, entry) { + t.Fatalf("deserialized entry is not equal to the original") + } + + if !reflect.DeepEqual(deserializedOutpoint, outpoint) { + t.Fatalf("deserialized outpoint is not equal to the original") + } +} diff --git a/domain/consensus/utils/utxo/utxo_collection.go b/domain/consensus/utils/utxo/utxo_collection.go new 
file mode 100644 index 0000000..4a5a154 --- /dev/null +++ b/domain/consensus/utils/utxo/utxo_collection.go @@ -0,0 +1,93 @@ +package utxo + +import ( + "fmt" + "sort" + "strings" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type utxoCollection map[externalapi.DomainOutpoint]externalapi.UTXOEntry + +// NewUTXOCollection creates a UTXO-Collection from the given map from outpoint to UTXOEntry +func NewUTXOCollection(utxoMap map[externalapi.DomainOutpoint]externalapi.UTXOEntry) externalapi.UTXOCollection { + return utxoCollection(utxoMap) +} + +// Get returns the model.UTXOEntry represented by provided outpoint, +// and a boolean value indicating if said model.UTXOEntry is in the set or not +func (uc utxoCollection) Get(outpoint *externalapi.DomainOutpoint) (externalapi.UTXOEntry, bool) { + entry, ok := uc[*outpoint] + return entry, ok +} + +// Contains returns a boolean value indicating whether a UTXO entry is in the set +func (uc utxoCollection) Contains(outpoint *externalapi.DomainOutpoint) bool { + _, ok := uc[*outpoint] + return ok +} + +func (uc utxoCollection) Len() int { + return len(uc) +} + +func (uc utxoCollection) Clone() utxoCollection { + if uc == nil { + return nil + } + + clone := make(utxoCollection, len(uc)) + for outpoint, entry := range uc { + clone[outpoint] = entry + } + + return clone +} + +func (uc utxoCollection) String() string { + utxoStrings := make([]string, len(uc)) + + i := 0 + for outpoint, utxoEntry := range uc { + utxoStrings[i] = fmt.Sprintf("(%s, %d) => %d, daaScore: %d", + outpoint.TransactionID, outpoint.Index, utxoEntry.Amount(), utxoEntry.BlockDAAScore()) + i++ + } + + // Sort strings for determinism. 
+ sort.Strings(utxoStrings) + + return fmt.Sprintf("[ %s ]", strings.Join(utxoStrings, ", ")) +} + +// add adds a new UTXO entry to this collection +func (uc utxoCollection) add(outpoint *externalapi.DomainOutpoint, entry externalapi.UTXOEntry) { + uc[*outpoint] = entry +} + +// addMultiple adds multiple UTXO entries to this collection +func (uc utxoCollection) addMultiple(collectionToAdd utxoCollection) { + for outpoint, entry := range collectionToAdd { + uc[outpoint] = entry + } +} + +// remove removes a UTXO entry from this collection if it exists +func (uc utxoCollection) remove(outpoint *externalapi.DomainOutpoint) { + delete(uc, *outpoint) +} + +// removeMultiple removes multiple UTXO entries from this collection if it exists +func (uc utxoCollection) removeMultiple(collectionToRemove utxoCollection) { + for outpoint := range collectionToRemove { + delete(uc, outpoint) + } +} + +// containsWithDAAScore returns a boolean value indicating whether a model.UTXOEntry +// is in the set and its DAA score is equal to the given DAA score. 
+func (uc utxoCollection) containsWithDAAScore(outpoint *externalapi.DomainOutpoint, daaScore uint64) bool { + entry, ok := uc.Get(outpoint) + return ok && entry.BlockDAAScore() == daaScore +} diff --git a/domain/consensus/utils/utxo/utxo_entry.go b/domain/consensus/utils/utxo/utxo_entry.go new file mode 100644 index 0000000..9cd06e7 --- /dev/null +++ b/domain/consensus/utils/utxo/utxo_entry.go @@ -0,0 +1,75 @@ +package utxo + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type utxoEntry struct { + amount uint64 + scriptPublicKey *externalapi.ScriptPublicKey + blockDAAScore uint64 + isCoinbase bool +} + +// NewUTXOEntry creates a new utxoEntry representing the given txOut +func NewUTXOEntry(amount uint64, scriptPubKey *externalapi.ScriptPublicKey, isCoinbase bool, blockDAAScore uint64) externalapi.UTXOEntry { + scriptPubKeyClone := externalapi.ScriptPublicKey{Script: make([]byte, len(scriptPubKey.Script)), Version: scriptPubKey.Version} + copy(scriptPubKeyClone.Script, scriptPubKey.Script) + return &utxoEntry{ + amount: amount, + scriptPublicKey: &scriptPubKeyClone, + blockDAAScore: blockDAAScore, + isCoinbase: isCoinbase, + } +} + +func (u *utxoEntry) Amount() uint64 { + return u.amount +} + +func (u *utxoEntry) ScriptPublicKey() *externalapi.ScriptPublicKey { + clone := externalapi.ScriptPublicKey{Script: make([]byte, len(u.scriptPublicKey.Script)), Version: u.scriptPublicKey.Version} + copy(clone.Script, u.scriptPublicKey.Script) + return &clone +} + +func (u *utxoEntry) BlockDAAScore() uint64 { + return u.blockDAAScore +} + +func (u *utxoEntry) IsCoinbase() bool { + return u.isCoinbase +} + +// Equal returns whether entry equals to other +func (u *utxoEntry) Equal(other externalapi.UTXOEntry) bool { + if u == nil || other == nil { + return u == other + } + + // If only the underlying value of other is nil it'll + // make `other == nil` return false, so we check it + // explicitly. 
+ downcastedOther := other.(*utxoEntry) + if u == nil || downcastedOther == nil { + return u == downcastedOther + } + + if u.Amount() != other.Amount() { + return false + } + + if !u.ScriptPublicKey().Equal(other.ScriptPublicKey()) { + return false + } + + if u.BlockDAAScore() != other.BlockDAAScore() { + return false + } + + if u.IsCoinbase() != other.IsCoinbase() { + return false + } + + return true +} diff --git a/domain/consensus/utils/utxo/utxo_entry_test.go b/domain/consensus/utils/utxo/utxo_entry_test.go new file mode 100644 index 0000000..da95524 --- /dev/null +++ b/domain/consensus/utils/utxo/utxo_entry_test.go @@ -0,0 +1,114 @@ +package utxo + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func TestUTXOEntry_Equal(t *testing.T) { + type testUTXOEntryToCompare struct { + utxoEntry *utxoEntry + expectedResult bool + } + + tests := []struct { + baseUTXOEntry *utxoEntry + UTXOEntryToCompareTo []testUTXOEntryToCompare + }{ + { + baseUTXOEntry: nil, + UTXOEntryToCompareTo: []testUTXOEntryToCompare{ + { + utxoEntry: nil, + expectedResult: true, + }, + { + utxoEntry: &utxoEntry{ + 0xFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA2, 0xA3}, Version: 0}, + 0xFFFF, + false, + }, + expectedResult: false, + }, + }, + }, { + baseUTXOEntry: &utxoEntry{ + 0xFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA2, 0xA3}, Version: 0}, + 0xFFFF, + true, + }, + UTXOEntryToCompareTo: []testUTXOEntryToCompare{ + { + utxoEntry: &utxoEntry{ + 0xFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA2, 0xA3}, Version: 0}, + 0xFFFF, + true, + }, + expectedResult: true, + }, + { + utxoEntry: nil, + expectedResult: false, + }, + { + utxoEntry: &utxoEntry{ + 0xFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA0, 0xA3}, Version: 0}, // Changed + 0xFFFF, + true, + }, + expectedResult: false, + }, + { + utxoEntry: &utxoEntry{ + 0xFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA2, 
0xA3}, Version: 0}, + 0xFFFF, + false, // Changed + }, + expectedResult: false, + }, + { + utxoEntry: &utxoEntry{ + 0xFFFF, + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA2, 0xA3}, Version: 0}, + 0xFFF0, // Changed + true, + }, + expectedResult: false, + }, + { + utxoEntry: nil, + expectedResult: false, + }, + { + utxoEntry: &utxoEntry{ + 0xFFF0, // Changed + &externalapi.ScriptPublicKey{Script: []byte{0xA1, 0xA2, 0xA3}, Version: 0}, + 0xFFFF, + true, + }, + expectedResult: false, + }, + }, + }, + } + + for i, test := range tests { + for j, subTest := range test.UTXOEntryToCompareTo { + var base externalapi.UTXOEntry = test.baseUTXOEntry + result1 := base.Equal(subTest.utxoEntry) + if result1 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result1) + } + result2 := subTest.utxoEntry.Equal(base) + if result2 != subTest.expectedResult { + t.Fatalf("Test #%d:%d: Expected %t but got %t", i, j, subTest.expectedResult, result2) + } + } + } +} diff --git a/domain/consensus/utils/utxo/utxo_iterator.go b/domain/consensus/utils/utxo/utxo_iterator.go new file mode 100644 index 0000000..0026b13 --- /dev/null +++ b/domain/consensus/utils/utxo/utxo_iterator.go @@ -0,0 +1,79 @@ +package utxo + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type utxoOutpointEntryPair struct { + outpoint externalapi.DomainOutpoint + entry externalapi.UTXOEntry +} + +type utxoCollectionIterator struct { + index int + pairs []utxoOutpointEntryPair + isClosed bool +} + +func (uc utxoCollection) Iterator() externalapi.ReadOnlyUTXOSetIterator { + pairs := make([]utxoOutpointEntryPair, len(uc)) + i := 0 + for outpoint, entry := range uc { + pairs[i] = utxoOutpointEntryPair{ + outpoint: outpoint, + entry: entry, + } + i++ + } + return &utxoCollectionIterator{index: -1, pairs: pairs} +} + +func (uci *utxoCollectionIterator) First() bool { + if uci.isClosed { + 
panic("Tried using a closed utxoCollectionIterator") + } + uci.index = 0 + return len(uci.pairs) > 0 +} + +func (uci *utxoCollectionIterator) Next() bool { + if uci.isClosed { + panic("Tried using a closed utxoCollectionIterator") + } + uci.index++ + return uci.index < len(uci.pairs) +} + +func (uci *utxoCollectionIterator) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) { + if uci.isClosed { + return nil, nil, errors.New("Tried using a closed utxoCollectionIterator") + } + pair := uci.pairs[uci.index] + return &pair.outpoint, pair.entry, nil +} + +func (uci *utxoCollectionIterator) WithDiff(diff externalapi.UTXODiff) (externalapi.ReadOnlyUTXOSetIterator, error) { + if uci.isClosed { + return nil, errors.New("Tried using a closed utxoCollectionIterator") + } + d, ok := diff.(*immutableUTXODiff) + if !ok { + return nil, errors.New("diff is not of type *immutableUTXODiff") + } + + return &readOnlyUTXOIteratorWithDiff{ + baseIterator: uci, + diff: d, + toAddIterator: diff.ToAdd().Iterator(), + }, nil +} + +func (uci *utxoCollectionIterator) Close() error { + if uci.isClosed { + return errors.New("Tried using a closed utxoCollectionIterator") + } + uci.isClosed = true + uci.pairs = nil + return nil +} diff --git a/domain/consensus/utils/utxo/utxo_iterator_with_diff.go b/domain/consensus/utils/utxo/utxo_iterator_with_diff.go new file mode 100644 index 0000000..90ba7ba --- /dev/null +++ b/domain/consensus/utils/utxo/utxo_iterator_with_diff.go @@ -0,0 +1,118 @@ +package utxo + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +type readOnlyUTXOIteratorWithDiff struct { + baseIterator externalapi.ReadOnlyUTXOSetIterator + diff *immutableUTXODiff + + currentOutpoint *externalapi.DomainOutpoint + currentUTXOEntry externalapi.UTXOEntry + currentErr error + + toAddIterator externalapi.ReadOnlyUTXOSetIterator + isClosed bool +} + +// IteratorWithDiff applies a UTXODiff to 
given utxo iterator +func IteratorWithDiff(iterator externalapi.ReadOnlyUTXOSetIterator, diff externalapi.UTXODiff) (externalapi.ReadOnlyUTXOSetIterator, error) { + d, ok := diff.(*immutableUTXODiff) + if !ok { + return nil, errors.New("diff is not of type *immutableUTXODiff") + } + + if iteratorWithDiff, ok := iterator.(*readOnlyUTXOIteratorWithDiff); ok { + combinedDiff, err := iteratorWithDiff.diff.WithDiff(d) + if err != nil { + return nil, err + } + + return IteratorWithDiff(iteratorWithDiff.baseIterator, combinedDiff) + } + + return &readOnlyUTXOIteratorWithDiff{ + baseIterator: iterator, + diff: d, + toAddIterator: d.ToAdd().Iterator(), + }, nil +} + +func (r *readOnlyUTXOIteratorWithDiff) First() bool { + if r.isClosed { + panic("Tried using a closed readOnlyUTXOIteratorWithDiff") + } + baseNotEmpty := r.baseIterator.First() + baseEmpty := !baseNotEmpty + + err := r.toAddIterator.Close() + if err != nil { + r.currentErr = err + return true + } + r.toAddIterator = r.diff.ToAdd().Iterator() + toAddEmpty := r.diff.ToAdd().Len() == 0 + + if baseEmpty { + if toAddEmpty { + return false + } + return r.Next() + } + + r.currentOutpoint, r.currentUTXOEntry, r.currentErr = r.baseIterator.Get() + if r.diff.mutableUTXODiff.toRemove.containsWithDAAScore(r.currentOutpoint, r.currentUTXOEntry.BlockDAAScore()) { + return r.Next() + } + return true +} + +func (r *readOnlyUTXOIteratorWithDiff) Next() bool { + if r.isClosed { + panic("Tried using a closed readOnlyUTXOIteratorWithDiff") + } + for r.baseIterator.Next() { // keep looping until we reach an outpoint/entry pair that is not in r.diff.toRemove + r.currentOutpoint, r.currentUTXOEntry, r.currentErr = r.baseIterator.Get() + if !r.diff.mutableUTXODiff.toRemove.containsWithDAAScore(r.currentOutpoint, r.currentUTXOEntry.BlockDAAScore()) { + return true + } + } + + if r.toAddIterator.Next() { + r.currentOutpoint, r.currentUTXOEntry, r.currentErr = r.toAddIterator.Get() + return true + } + + return false +} + +func (r 
*readOnlyUTXOIteratorWithDiff) Get() (outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry, err error) { + if r.isClosed { + return nil, nil, errors.New("Tried using a closed readOnlyUTXOIteratorWithDiff") + } + return r.currentOutpoint, r.currentUTXOEntry, r.currentErr +} + +func (r *readOnlyUTXOIteratorWithDiff) Close() error { + if r.isClosed { + return errors.New("Tried using a closed readOnlyUTXOIteratorWithDiff") + } + r.isClosed = true + err := r.baseIterator.Close() + if err != nil { + return err + } + err = r.toAddIterator.Close() + if err != nil { + return err + } + r.baseIterator = nil + r.diff = nil + r.currentOutpoint = nil + r.currentUTXOEntry = nil + r.currentErr = nil + r.toAddIterator = nil + return nil +} diff --git a/domain/consensus/utils/utxolrucache/utxolrucache.go b/domain/consensus/utils/utxolrucache/utxolrucache.go new file mode 100644 index 0000000..3ed9597 --- /dev/null +++ b/domain/consensus/utils/utxolrucache/utxolrucache.go @@ -0,0 +1,77 @@ +package utxolrucache + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// LRUCache is a least-recently-used cache for UTXO entries +// indexed by DomainOutpoint +type LRUCache struct { + cache map[externalapi.DomainOutpoint]externalapi.UTXOEntry + capacity int +} + +// New creates a new LRUCache +func New(capacity int, preallocate bool) *LRUCache { + var cache map[externalapi.DomainOutpoint]externalapi.UTXOEntry + if preallocate { + cache = make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry, capacity+1) + } else { + cache = make(map[externalapi.DomainOutpoint]externalapi.UTXOEntry) + } + return &LRUCache{ + cache: cache, + capacity: capacity, + } +} + +// Add adds an entry to the LRUCache +func (c *LRUCache) Add(key *externalapi.DomainOutpoint, value externalapi.UTXOEntry) { + c.cache[*key] = value + + if len(c.cache) > c.capacity { + c.evictRandom() + } +} + +// Get returns the entry for the given key, or (nil, false) otherwise 
+func (c *LRUCache) Get(key *externalapi.DomainOutpoint) (externalapi.UTXOEntry, bool) { + value, ok := c.cache[*key] + if !ok { + return nil, false + } + return value, true +} + +// Has returns whether the LRUCache contains the given key +func (c *LRUCache) Has(key *externalapi.DomainOutpoint) bool { + _, ok := c.cache[*key] + return ok +} + +// Remove removes the entry for the the given key. Does nothing if +// the entry does not exist +func (c *LRUCache) Remove(key *externalapi.DomainOutpoint) { + delete(c.cache, *key) +} + +// Clear clears the cache +func (c *LRUCache) Clear() { + keys := make([]externalapi.DomainOutpoint, len(c.cache)) + for outpoint := range c.cache { + keys = append(keys, outpoint) + } + + for _, key := range keys { + delete(c.cache, key) + } +} + +func (c *LRUCache) evictRandom() { + var keyToEvict externalapi.DomainOutpoint + for key := range c.cache { + keyToEvict = key + break + } + c.Remove(&keyToEvict) +} diff --git a/domain/consensus/utils/virtual/virtual.go b/domain/consensus/utils/virtual/virtual.go new file mode 100644 index 0000000..eb45770 --- /dev/null +++ b/domain/consensus/utils/virtual/virtual.go @@ -0,0 +1,12 @@ +package virtual + +import ( + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// ContainsOnlyVirtualGenesis returns whether the given block hashes contain only the virtual +// genesis hash. 
+func ContainsOnlyVirtualGenesis(blockHashes []*externalapi.DomainHash) bool { + return len(blockHashes) == 1 && blockHashes[0].Equal(model.VirtualGenesisBlockHash) +} diff --git a/domain/consensusreference/consensusreference.go b/domain/consensusreference/consensusreference.go new file mode 100644 index 0000000..85f4ff5 --- /dev/null +++ b/domain/consensusreference/consensusreference.go @@ -0,0 +1,22 @@ +package consensusreference + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// ConsensusReference holds a reference to a consensus object. +// The consensus object may be swapped with a new one entirely +// during the IBD process. Before an atomic consensus operation, +// callers are expected to call Consensus() once and work against +// that instance throughout. +type ConsensusReference struct { + consensus **externalapi.Consensus +} + +// Consensus returns the underlying consensus +func (ref ConsensusReference) Consensus() externalapi.Consensus { + return **ref.consensus +} + +// NewConsensusReference constructs a new ConsensusReference +func NewConsensusReference(consensus **externalapi.Consensus) ConsensusReference { + return ConsensusReference{consensus: consensus} +} diff --git a/domain/dagconfig/README.md b/domain/dagconfig/README.md new file mode 100644 index 0000000..33176f7 --- /dev/null +++ b/domain/dagconfig/README.md @@ -0,0 +1,47 @@ +# DAG Configuration + +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/spectre-project/spectred/dagconfig) + +Package dagconfig defines DAG configuration parameters for the +standard Spectred networks and provides the ability for callers to +define their own custom Spectred networks. 
+ +## Sample Use + +```Go +package main + +import ( + "flag" + "fmt" + "log" + + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +var testnet = flag.Bool("testnet", false, "operate on the testnet Spectre network") + +// By default (without --testnet), use mainnet. +var dagParams = &dagconfig.MainnetParams + +func main() { + flag.Parse() + + // Modify active network parameters if operating on testnet. + if *testnet { + dagParams = &dagconfig.TestnetParams + } + + // later... + + // Create and print new payment address, specific to the active network. + pubKey := make([]byte, 32) + addr, err := util.NewAddressPubKey(pubKey, dagParams) + if err != nil { + log.Fatal(err) + } + fmt.Println(addr) +} +``` diff --git a/domain/dagconfig/consensus_defaults.go b/domain/dagconfig/consensus_defaults.go new file mode 100644 index 0000000..2b6b28f --- /dev/null +++ b/domain/dagconfig/consensus_defaults.go @@ -0,0 +1,86 @@ +package dagconfig + +import ( + "time" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" +) + +// The documentation refers to the following constants which aren't explicated in the code: +// d - an upper bound on the round trip time of a block +// delta - the expected fraction of time the width of the network exceeds defaultGHOSTDAGK +// +// For more information about defaultGHOSTDAGK, and its dependency on delta and defaultTargetTimePerBlock +// please refer to the PHANTOM paper: https://eprint.iacr.org/2018/104.pdf +// +// For more information about the DAA constants defaultDifficultyAdjustmentWindowSize, defaultTimestampDeviationTolerance, +// and their relation to defaultGHOSTDAGK and defaultTargetTimePerBlock see: +// https://research.kas.pa/t/handling-timestamp-manipulations/97 +// +// For more information about defaultMergeSetSizeLimit, defaultFinalityDuration and their relation to pruning, see: +// https://research.kas.pa/t/a-proposal-for-finality-in-ghostdag/66/17 +// 
https://research.kas.pa/t/some-of-the-intuition-behind-the-design-of-the-invalidation-rules-for-pruning/95 +// + +const ( + defaultMaxCoinbasePayloadLength = 204 + // defaultMaxBlockMass is a bound on the mass of a block, larger values increase the bound d + // on the round trip time of a block, which affects the other parameters as described below + defaultMaxBlockMass = 500_000 + // defaultMassPerTxByte, defaultMassPerScriptPubKeyByte and defaultMassPerSigOp define the number of grams per + // transaction byte, script pub key byte and sig op respectively. + // These values are used when calculating a transactions mass. + defaultMassPerTxByte = 1 + defaultMassPerScriptPubKeyByte = 10 + defaultMassPerSigOp = 1000 + // defaultMaxBlockParents is the number of blocks any block can point to. + // Should be about d/defaultTargetTimePerBlock where d is a bound on the round trip time of a block. + defaultMaxBlockParents = 10 + // defaultGHOSTDAGK is a bound on the number of blue blocks in the anticone of a blue block. Approximates the maximal + // width of the network. + // Formula (1) in section 4.2 of the PHANTOM paper shows how to calculate defaultGHOSTDAGK. The delta term represents a bound + // on the expected fraction of the network life in which the width was higher than defaultGHOSTDAGK. The current value of K + // was calculated for d = 5 seconds and delta = 0.05. + defaultGHOSTDAGK = 18 + // defaultMergeSetSizeLimit is a bound on the size of the past of a block and the size of the past + // of its selected parent. Any block which violates this bound is invalid. + // Should be at least an order of magnitude smaller than defaultFinalityDuration/defaultTargetTimePerBlock. 
+ // (Higher values make pruning attacks easier by a constant, lower values make merging after a split or a spike + // in block take longer) + defaultMergeSetSizeLimit = defaultGHOSTDAGK * 10 + defaultSubsidyGenesisReward = 1 * constants.SompiPerSpectre + defaultPreDeflationaryPhaseBaseSubsidy = 15 * constants.SompiPerSpectre + defaultDeflationaryPhaseBaseSubsidy = 12 * constants.SompiPerSpectre + defaultCoinbasePayloadScriptPublicKeyMaxLength = 150 + // defaultDifficultyAdjustmentWindowSize is the number of blocks in a block's past used to calculate its difficulty + // target. + // The DAA should take the median of 2640 blocks, so in order to do that we need 2641 window size. + defaultDifficultyAdjustmentWindowSize = 2641 + // defaultTimestampDeviationTolerance is the allowed deviation of an incoming block's timestamp, measured in block delays. + // A new block can't hold a timestamp lower than the median timestamp of the (defaultTimestampDeviationTolerance*2-1) blocks + // with highest accumulated blue work in its past, such blocks are considered invalid. + // A new block can't hold a timestamp higher than the local system time + defaultTimestampDeviationTolerance/defaultTargetTimePerBlock, + // such blocks are not marked as invalid but are rejected. + defaultTimestampDeviationTolerance = 132 + // defaultFinalityDuration is an approximate lower bound of how old the finality block is. The finality block is chosen to + // be the newest block in the selected chain whose blue score difference from the selected tip is at least + // defaultFinalityDuration/defaultTargetTimePerBlock. 
+ // The pruning block is selected similarly, with the following duration: + // pruning block duration = + // 2*defaultFinalityDuration/defaultTargetTimePerBlock + 4*defaultMergeSetSizeLimit*defaultGHOSTDAGK + 2*defaultGHOSTDAGK + 2 + defaultFinalityDuration = 24 * time.Hour + // defaultTargetTimePerBlock represents how much time should pass on average between two consecutive block creations. + // Should be parametrized such that the average width of the DAG is about defaultMaxBlockParents and such that most of the + // time the width of the DAG is at most defaultGHOSTDAGK. + defaultTargetTimePerBlock = 1 * time.Second + + defaultPruningProofM = 1000 + + // defaultDeflationaryPhaseDaaScore is the DAA score after which the pre-deflationary period + // switches to the deflationary period. This number is calculated as follows: + // We define a year as 365.25 days + // One week in seconds = 7 * 24 * 60 * 60 = 604800 + defaultDeflationaryPhaseDaaScore = 604800 + + defaultMergeDepth = 3600 +) diff --git a/domain/dagconfig/doc.go b/domain/dagconfig/doc.go new file mode 100644 index 0000000..a9c92b9 --- /dev/null +++ b/domain/dagconfig/doc.go @@ -0,0 +1,63 @@ +/* +Package dagconfig defines DAG configuration parameters. + +In addition to the main Spectre network, which is intended for the transfer +of monetary value, there also exists the following standard networks: + - testnet + - simnet + - devnet + +These networks are incompatible with each other (each sharing a different +genesis block) and software should handle errors where input intended for +one network is used on an application instance running on a different +network. + +For library packages, dagconfig provides the ability to lookup DAG +parameters and encoding magics when passed a *Params. + +For main packages, a (typically global) var may be assigned the address of +one of the standard Param vars for use as the application's "active" network. 
+When a network parameter is needed, it may then be looked up through this +variable (either directly, or hidden in a library call). + + package main + + import ( + "flag" + "fmt" + "log" + + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/domain/dagconfig" + ) + + var testnet = flag.Bool("testnet", false, "operate on the testnet Spectre network") + + // By default (without --testnet), use mainnet. + var dagParams = &dagconfig.MainnetParams + + func main() { + flag.Parse() + + // Modify active network parameters if operating on testnet. + if *testnet { + dagParams = &dagconfig.TestnetParams + } + + // later... + + // Create and print new payment address, specific to the active network. + pubKey := make([]byte, 32) + addr, err := util.NewAddressPubKey(pubKey, dagParams) + if err != nil { + log.Fatal(err) + } + fmt.Println(addr) + } + +If an application does not use one of the standard Spectre networks, a new +Params struct may be created which defines the parameters for the non- +standard network. As a general rule of thumb, all network parameters +should be unique to the network, but parameter collisions can still occur. +*/ +package dagconfig diff --git a/domain/dagconfig/genesis.go b/domain/dagconfig/genesis.go new file mode 100644 index 0000000..8fcf0f7 --- /dev/null +++ b/domain/dagconfig/genesis.go @@ -0,0 +1,232 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package dagconfig + +import ( + "math/big" + + "github.com/spectre-project/go-muhash" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/blockheader" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +var genesisTxOuts = []*externalapi.DomainTransactionOutput{} + +var genesisTxPayload = []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score + 0x00, 0xE1, 0xF5, 0x05, 0x00, 0x00, 0x00, 0x00, // Subsidy + 0x00, 0x00, // Script version + 0x01, // Varint + 0x00, // OP-FALSE + 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45, // Euler's number = 2.718281828459045 +} + +// genesisCoinbaseTx is the coinbase transaction for the genesis blocks for +// the main network. +var genesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, []*externalapi.DomainTransactionInput{}, genesisTxOuts, + &subnetworks.SubnetworkIDCoinbase, 0, genesisTxPayload) + +// genesisHash is the hash of the first block in the block DAG for the main +// network (genesis block). +var genesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x15, 0x27, 0xB0, 0x53, 0x82, 0xBA, 0x00, 0xE3, + 0xFB, 0x19, 0x9C, 0x8D, 0x55, 0x94, 0xB1, 0x1C, + 0x28, 0x73, 0x8E, 0x53, 0xA4, 0x66, 0x9A, 0xC5, + 0x44, 0xB8, 0x72, 0xA9, 0xA0, 0xC5, 0x08, 0x73, +}) + +// genesisMerkleRoot is the hash of the first transaction in the genesis block +// for the main network. 
+var genesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x55, 0x29, 0xA6, 0xB3, 0xB8, 0x7F, 0xC2, 0x09, + 0x12, 0xA6, 0xE6, 0xD7, 0x9E, 0xFF, 0x9B, 0x92, + 0x49, 0xF2, 0x4F, 0xF9, 0xED, 0xDA, 0x4D, 0xEC, + 0x40, 0x59, 0xEF, 0x9E, 0xD7, 0xC5, 0xBD, 0xCB, +}) + +// genesisBlock defines the genesis block of the block DAG which serves as the +// public transaction ledger for the main network. +var genesisBlock = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{}, + genesisMerkleRoot, + &externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()), + 1713884558432, + 510999959, // Prime number + 271828, // Euler's number + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ), + Transactions: []*externalapi.DomainTransaction{genesisCoinbaseTx}, +} + +var devnetGenesisTxOuts = []*externalapi.DomainTransactionOutput{} + +var devnetGenesisTxPayload = []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score + 0x00, 0xE1, 0xF5, 0x05, 0x00, 0x00, 0x00, 0x00, // Subsidy + 0x00, 0x00, // Script version + 0x01, // Varint + 0x00, // OP-FALSE + 0x24, 0x14, 0x21, 0x35, 0x62, 0x37, 0x30, 0x95, // Silver ratio +} + +// devnetGenesisCoinbaseTx is the coinbase transaction for the genesis blocks for +// the development network. +var devnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, + []*externalapi.DomainTransactionInput{}, devnetGenesisTxOuts, + &subnetworks.SubnetworkIDCoinbase, 0, devnetGenesisTxPayload) + +// devGenesisHash is the hash of the first block in the block DAG for the development +// network (genesis block). 
+var devnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xAB, 0x08, 0x47, 0x48, 0x55, 0x48, 0xA7, 0x1D, + 0x8B, 0xE4, 0xCF, 0x22, 0x68, 0x2F, 0x96, 0xF2, + 0x71, 0x06, 0x8D, 0x46, 0xCB, 0x00, 0x17, 0xE5, + 0x61, 0x3E, 0x5F, 0x54, 0x01, 0xAC, 0x35, 0x0B, +}) + +// devnetGenesisMerkleRoot is the hash of the first transaction in the genesis block +// for the development network. +var devnetGenesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x45, 0x7F, 0x6D, 0xF5, 0x76, 0x25, 0xCF, 0xC9, + 0x4A, 0x63, 0x16, 0x9E, 0xBA, 0xC8, 0xE1, 0x86, + 0xCF, 0x1B, 0x5F, 0x1E, 0xF6, 0x8D, 0x1A, 0xEF, + 0x3B, 0x8D, 0x3F, 0xFC, 0xC2, 0x6C, 0x01, 0xE4, +}) + +// devnetGenesisBlock defines the genesis block of the block DAG which serves as the +// public transaction ledger for the development network. +var devnetGenesisBlock = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{}, + devnetGenesisMerkleRoot, + &externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()), + 1713884849877, + 525264379, // Prime number + 241421, // Silver ratio + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ), + Transactions: []*externalapi.DomainTransaction{devnetGenesisCoinbaseTx}, +} + +var simnetGenesisTxOuts = []*externalapi.DomainTransactionOutput{} + +var simnetGenesisTxPayload = []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score + 0x00, 0xE1, 0xF5, 0x05, 0x00, 0x00, 0x00, 0x00, // Subsidy + 0x00, 0x00, // Script version + 0x01, // Varint + 0x00, // OP-FALSE + 0x54, 0x36, 0x56, 0x36, 0x56, 0x91, 0x80, 0x90, // Euler's number * 2 = 5.436563656918090 +} + +// simnetGenesisCoinbaseTx is the coinbase transaction for the simnet genesis block. 
+var simnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, + []*externalapi.DomainTransactionInput{}, simnetGenesisTxOuts, + &subnetworks.SubnetworkIDCoinbase, 0, simnetGenesisTxPayload) + +// simnetGenesisHash is the hash of the first block in the block DAG for +// the simnet (genesis block). +var simnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x56, 0xBB, 0x87, 0xCF, 0x18, 0x77, 0x7B, 0x76, + 0x35, 0x8E, 0xEE, 0xF0, 0x20, 0xA9, 0x01, 0xCD, + 0xDD, 0xDC, 0x0B, 0xA4, 0x46, 0xC0, 0x99, 0x2D, + 0xE2, 0x7C, 0xC2, 0xA8, 0x9E, 0xC7, 0xA1, 0x30, +}) + +// simnetGenesisMerkleRoot is the hash of the first transaction in the genesis block +// for the simulation test network. +var simnetGenesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x85, 0x81, 0x84, 0xD0, 0x98, 0x16, 0x40, 0x4F, + 0xD7, 0xD7, 0x96, 0xFB, 0xDE, 0x60, 0xAC, 0x4B, + 0x99, 0x29, 0xB9, 0x18, 0x63, 0x39, 0xDA, 0x23, + 0x08, 0x3C, 0xDF, 0xC3, 0x5F, 0x13, 0x8F, 0xC6, +}) + +// simnetGenesisBlock defines the genesis block of the block DAG which serves as the +// public transaction ledger for the simulation test network. 
+var simnetGenesisBlock = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{}, + simnetGenesisMerkleRoot, + &externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()), + 1713885012324, + 543656363, // Prime number + 2, // Two + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ), + Transactions: []*externalapi.DomainTransaction{simnetGenesisCoinbaseTx}, +} + +var testnetGenesisTxOuts = []*externalapi.DomainTransactionOutput{} + +var testnetGenesisTxPayload = []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Blue score + 0x00, 0xE1, 0xF5, 0x05, 0x00, 0x00, 0x00, 0x00, // Subsidy + 0x00, 0x00, // Script version + 0x01, // Varint + 0x00, // OP-FALSE + 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93, // Pi = 3.141592653589793 +} + +// testnetGenesisCoinbaseTx is the coinbase transaction for the testnet genesis block. +var testnetGenesisCoinbaseTx = transactionhelper.NewSubnetworkTransaction(0, + []*externalapi.DomainTransactionInput{}, testnetGenesisTxOuts, + &subnetworks.SubnetworkIDCoinbase, 0, testnetGenesisTxPayload) + +// testnetGenesisHash is the hash of the first block in the block DAG for the test +// network (genesis block). +var testnetGenesisHash = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0x48, 0x44, 0xDF, 0x54, 0x95, 0x72, 0x66, 0x0E, + 0xAF, 0xDC, 0x9A, 0xA0, 0xBC, 0x1D, 0x2B, 0xEE, + 0xB8, 0xCA, 0x14, 0x0A, 0x5B, 0x5D, 0x63, 0x15, + 0xDC, 0x41, 0xBA, 0x42, 0x9B, 0xD2, 0x44, 0x00, +}) + +// testnetGenesisMerkleRoot is the hash of the first transaction in the genesis block +// for testnet. 
+var testnetGenesisMerkleRoot = externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{ + 0xC5, 0xAE, 0xEF, 0x98, 0xF3, 0xE4, 0xF2, 0xBA, + 0x2C, 0xB4, 0xAF, 0x00, 0xC1, 0x6F, 0xEC, 0x3D, + 0x59, 0x9A, 0xF8, 0x03, 0x4E, 0xE1, 0xE0, 0x15, + 0xBC, 0x20, 0xCA, 0x60, 0xC9, 0x3E, 0x99, 0x1C, +}) + +// testnetGenesisBlock defines the genesis block of the block DAG which serves as the +// public transaction ledger for testnet. +var testnetGenesisBlock = externalapi.DomainBlock{ + Header: blockheader.NewImmutableBlockHeader( + 0, + []externalapi.BlockLevelParents{}, + testnetGenesisMerkleRoot, + &externalapi.DomainHash{}, + externalapi.NewDomainHashFromByteArray(muhash.EmptyMuHashHash.AsArray()), + 1713884672545, + 511699987, // Prime number + 314159, // Pi number + 0, + 0, + big.NewInt(0), + &externalapi.DomainHash{}, + ), + Transactions: []*externalapi.DomainTransaction{testnetGenesisCoinbaseTx}, +} diff --git a/domain/dagconfig/genesis_test.go b/domain/dagconfig/genesis_test.go new file mode 100644 index 0000000..8ad8543 --- /dev/null +++ b/domain/dagconfig/genesis_test.go @@ -0,0 +1,58 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package dagconfig + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// TestGenesisBlock tests the genesis block of the main network for validity by +// checking the encoded hash. +func TestGenesisBlock(t *testing.T) { + // Check hash of the block against expected hash. + hash := consensushashing.BlockHash(MainnetParams.GenesisBlock) + if !MainnetParams.GenesisHash.Equal(hash) { + t.Fatalf("TestGenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", hash, MainnetParams.GenesisHash) + } +} + +// TestTestnetGenesisBlock tests the genesis block of the test network for +// validity by checking the hash. 
+func TestTestnetGenesisBlock(t *testing.T) { + // Check hash of the block against expected hash. + hash := consensushashing.BlockHash(TestnetParams.GenesisBlock) + if !TestnetParams.GenesisHash.Equal(hash) { + t.Fatalf("TestTestnetGenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", hash, + TestnetParams.GenesisHash) + } +} + +// TestSimnetGenesisBlock tests the genesis block of the simulation test network +// for validity by checking the hash. +func TestSimnetGenesisBlock(t *testing.T) { + // Check hash of the block against expected hash. + hash := consensushashing.BlockHash(SimnetParams.GenesisBlock) + if !SimnetParams.GenesisHash.Equal(hash) { + t.Fatalf("TestSimnetGenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", hash, + SimnetParams.GenesisHash) + } +} + +// TestDevnetGenesisBlock tests the genesis block of the development network +// for validity by checking the encoded hash. +func TestDevnetGenesisBlock(t *testing.T) { + // Check hash of the block against expected hash. + hash := consensushashing.BlockHash(DevnetParams.GenesisBlock) + if !DevnetParams.GenesisHash.Equal(hash) { + t.Fatalf("TestDevnetGenesisBlock: Genesis block hash does "+ + "not appear valid - got %v, want %v", hash, + DevnetParams.GenesisHash) + } +} diff --git a/domain/dagconfig/params.go b/domain/dagconfig/params.go new file mode 100644 index 0000000..a7b61bf --- /dev/null +++ b/domain/dagconfig/params.go @@ -0,0 +1,508 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package dagconfig + +import ( + "math/big" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/util/network" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/util" +) + +// These variables are the DAG proof-of-work limit parameters for each default +// network. +var ( + // bigOne is 1 represented as a big.Int. It is defined here to avoid + // the overhead of creating it multiple times. + bigOne = big.NewInt(1) + + // mainPowMax is the highest proof of work value a Spectre block can + // have for the main network. It is the value 2^255 - 1. + mainPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne) + + // testnetPowMax is the highest proof of work value a Spectre block + // can have for the test network. It is the value 2^255 - 1. + testnetPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne) + + // simnetPowMax is the highest proof of work value a Spectre block + // can have for the simulation test network. It is the value 2^255 - 1. + simnetPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne) + + // devnetPowMax is the highest proof of work value a Spectre block + // can have for the development network. It is the value + // 2^255 - 1. + devnetPowMax = new(big.Int).Sub(new(big.Int).Lsh(bigOne, 255), bigOne) +) + +// KType defines the size of GHOSTDAG consensus algorithm K parameter. +type KType uint8 + +// Params defines a Spectre network by its parameters. These parameters may be +// used by Spectre applications to differentiate networks as well as addresses +// and keys for one network from those intended for use on another network. +type Params struct { + // K defines the K parameter for GHOSTDAG consensus algorithm. + // See ghostdag.go for further details. + K externalapi.KType + + // Name defines a human-readable identifier for the network. 
+ Name string + + // Net defines the magic bytes used to identify the network. + Net appmessage.SpectreNet + + // RPCPort defines the rpc server port + RPCPort string + + // DefaultPort defines the default peer-to-peer port for the network. + DefaultPort string + + // DNSSeeds defines a list of DNS seeds for the network that are used + // as one method to discover peers. + DNSSeeds []string + + // GRPCSeeds defines a list of GRPC seeds for the network that are used + // as one method to discover peers. + GRPCSeeds []string + + // GenesisBlock defines the first block of the DAG. + GenesisBlock *externalapi.DomainBlock + + // GenesisHash is the starting block hash. + GenesisHash *externalapi.DomainHash + + // PowMax defines the highest allowed proof of work value for a block + // as a uint256. + PowMax *big.Int + + // BlockCoinbaseMaturity is the number of blocks required before newly mined + // coins can be spent. + BlockCoinbaseMaturity uint64 + + // SubsidyGenesisReward SubsidyMergeSetRewardMultiplier, and + // SubsidyPastRewardMultiplier are part of the block subsidy equation. + // Further details: https://hashdag.medium.com/kaspa-launch-plan-9a63f4d754a6 + SubsidyGenesisReward uint64 + PreDeflationaryPhaseBaseSubsidy uint64 + DeflationaryPhaseBaseSubsidy uint64 + + // TargetTimePerBlock is the desired amount of time to generate each + // block. + TargetTimePerBlock time.Duration + + // FinalityDuration is the duration of the finality window. + FinalityDuration time.Duration + + // TimestampDeviationTolerance is the maximum offset a block timestamp + // is allowed to be in the future before it gets delayed + TimestampDeviationTolerance int + + // DifficultyAdjustmentWindowSize is the size of window that is inspected + // to calculate the required difficulty of each block. + DifficultyAdjustmentWindowSize int + + // These fields are related to voting on consensus rule changes as + // defined by BIP0009. 
+ // + // RuleChangeActivationThreshold is the number of blocks in a threshold + // state retarget window for which a positive vote for a rule change + // must be cast in order to lock in a rule change. It should typically + // be 95% for the main network and 75% for test networks. + // + // MinerConfirmationWindow is the number of blocks in each threshold + // state retarget window. + // + // Deployments define the specific consensus rule changes to be voted + // on. + RuleChangeActivationThreshold uint64 + MinerConfirmationWindow uint64 + + // Mempool parameters + RelayNonStdTxs bool + + // AcceptUnroutable specifies whether this network accepts unroutable + // IP addresses, such as 10.0.0.0/8 + AcceptUnroutable bool + + // Human-readable prefix for Bech32 encoded addresses + Prefix util.Bech32Prefix + + // Address encoding magics + PrivateKeyID byte // First byte of a WIF private key + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks bool + + // DisableDifficultyAdjustment determines whether difficulty adjustment is disabled + DisableDifficultyAdjustment bool + + // SkipProofOfWork indicates whether proof of work should be checked. + SkipProofOfWork bool + + // MaxCoinbasePayloadLength is the maximum length in bytes allowed for a block's coinbase's payload + MaxCoinbasePayloadLength uint64 + + // MaxBlockMass is the maximum mass a block is allowed + MaxBlockMass uint64 + + // MaxBlockParents is the maximum number of blocks a block is allowed to point to + MaxBlockParents externalapi.KType + + // MassPerTxByte is the number of grams that any byte + // adds to a transaction. + MassPerTxByte uint64 + + // MassPerScriptPubKeyByte is the number of grams that any + // scriptPubKey byte adds to a transaction. + MassPerScriptPubKeyByte uint64 + + // MassPerSigOp is the number of grams that any + // signature operation adds to a transaction. 
+ MassPerSigOp uint64 + + // MergeSetSizeLimit is the maximum number of blocks in a block's merge set + MergeSetSizeLimit uint64 + + // CoinbasePayloadScriptPublicKeyMaxLength is the maximum allowed script public key in the coinbase's payload + CoinbasePayloadScriptPublicKeyMaxLength uint8 + + // PruningProofM is the 'm' constant in the pruning proof. For more details see: https://github.com/spectre-project/research/issues/3 + PruningProofM uint64 + + // DeflationaryPhaseDaaScore is the DAA score after which the monetary policy switches + // to its deflationary phase + DeflationaryPhaseDaaScore uint64 + + DisallowDirectBlocksOnTopOfGenesis bool + + // MaxBlockLevel is the maximum possible block level. + MaxBlockLevel int + + MergeDepth uint64 +} + +// NormalizeRPCServerAddress returns addr with the current network default +// port appended if there is not already a port specified. +func (p *Params) NormalizeRPCServerAddress(addr string) (string, error) { + return network.NormalizeAddress(addr, p.RPCPort) +} + +// FinalityDepth returns the finality duration represented in blocks +func (p *Params) FinalityDepth() uint64 { + return uint64(p.FinalityDuration / p.TargetTimePerBlock) +} + +// PruningDepth returns the pruning duration represented in blocks +func (p *Params) PruningDepth() uint64 { + return 2*p.FinalityDepth() + 4*p.MergeSetSizeLimit*uint64(p.K) + 2*uint64(p.K) + 2 +} + +// MainnetParams defines the network parameters for the main Spectre network. +var MainnetParams = Params{ + K: defaultGHOSTDAGK, + Name: "spectre-mainnet", + Net: appmessage.Mainnet, + RPCPort: "18110", + DefaultPort: "18111", + DNSSeeds: []string{ + // Official DNS seeders. 
+ "mainnet-dnsseed-1.spectre-network.org", + "mainnet-dnsseed-2.spectre-network.org", + }, + + // DAG parameters + GenesisBlock: &genesisBlock, + GenesisHash: genesisHash, + PowMax: mainPowMax, + BlockCoinbaseMaturity: 100, + SubsidyGenesisReward: defaultSubsidyGenesisReward, + PreDeflationaryPhaseBaseSubsidy: defaultPreDeflationaryPhaseBaseSubsidy, + DeflationaryPhaseBaseSubsidy: defaultDeflationaryPhaseBaseSubsidy, + TargetTimePerBlock: defaultTargetTimePerBlock, + FinalityDuration: defaultFinalityDuration, + DifficultyAdjustmentWindowSize: defaultDifficultyAdjustmentWindowSize, + TimestampDeviationTolerance: defaultTimestampDeviationTolerance, + + // Consensus rule change deployments. + // + // The miner confirmation window is defined as: + // target proof of work timespan / target proof of work spacing + RuleChangeActivationThreshold: 1916, // 95% of MinerConfirmationWindow + MinerConfirmationWindow: 2016, // + + // Mempool parameters + RelayNonStdTxs: false, + + // AcceptUnroutable specifies whether this network accepts unroutable + // IP addresses, such as 10.0.0.0/8 + AcceptUnroutable: false, + + // Human-readable part for Bech32 encoded addresses + Prefix: util.Bech32PrefixSpectre, + + // Address encoding magics + PrivateKeyID: 0x80, // starts with 5 (uncompressed) or K (compressed) + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, + + DisableDifficultyAdjustment: false, + + MaxCoinbasePayloadLength: defaultMaxCoinbasePayloadLength, + MaxBlockMass: defaultMaxBlockMass, + MaxBlockParents: defaultMaxBlockParents, + MassPerTxByte: defaultMassPerTxByte, + MassPerScriptPubKeyByte: defaultMassPerScriptPubKeyByte, + MassPerSigOp: defaultMassPerSigOp, + MergeSetSizeLimit: defaultMergeSetSizeLimit, + CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength, + PruningProofM: defaultPruningProofM, + DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore, + // 
DisallowDirectBlocksOnTopOfGenesis: true, + DisallowDirectBlocksOnTopOfGenesis: false, + + // This is technically 255, but we clamped it at 256 - block level of mainnet genesis + // This means that any block that has a level lower or equal to genesis will be level 0. + MaxBlockLevel: 225, + MergeDepth: defaultMergeDepth, +} + +// TestnetParams defines the network parameters for the test Spectre network. +var TestnetParams = Params{ + K: defaultGHOSTDAGK, + Name: "spectre-testnet-1", + Net: appmessage.Testnet, + RPCPort: "18210", + DefaultPort: "18211", + DNSSeeds: []string{}, // NOTE: Will be added later with privacy feature. + + // DAG parameters + GenesisBlock: &testnetGenesisBlock, + GenesisHash: testnetGenesisHash, + PowMax: testnetPowMax, + BlockCoinbaseMaturity: 100, + SubsidyGenesisReward: defaultSubsidyGenesisReward, + PreDeflationaryPhaseBaseSubsidy: defaultPreDeflationaryPhaseBaseSubsidy, + DeflationaryPhaseBaseSubsidy: defaultDeflationaryPhaseBaseSubsidy, + TargetTimePerBlock: defaultTargetTimePerBlock, + FinalityDuration: defaultFinalityDuration, + DifficultyAdjustmentWindowSize: defaultDifficultyAdjustmentWindowSize, + TimestampDeviationTolerance: defaultTimestampDeviationTolerance, + + // Consensus rule change deployments. 
+ // + // The miner confirmation window is defined as: + // target proof of work timespan / target proof of work spacing + RuleChangeActivationThreshold: 1512, // 75% of MinerConfirmationWindow + MinerConfirmationWindow: 2016, + + // Mempool parameters + RelayNonStdTxs: false, + + // AcceptUnroutable specifies whether this network accepts unroutable + // IP addresses, such as 10.0.0.0/8 + AcceptUnroutable: false, + + // Human-readable part for Bech32 encoded addresses + Prefix: util.Bech32PrefixSpectreTest, + + // Address encoding magics + PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed) + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, + + DisableDifficultyAdjustment: false, + + MaxCoinbasePayloadLength: defaultMaxCoinbasePayloadLength, + MaxBlockMass: defaultMaxBlockMass, + MaxBlockParents: defaultMaxBlockParents, + MassPerTxByte: defaultMassPerTxByte, + MassPerScriptPubKeyByte: defaultMassPerScriptPubKeyByte, + MassPerSigOp: defaultMassPerSigOp, + MergeSetSizeLimit: defaultMergeSetSizeLimit, + CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength, + PruningProofM: defaultPruningProofM, + DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore, + + MaxBlockLevel: 250, + MergeDepth: defaultMergeDepth, +} + +// SimnetParams defines the network parameters for the simulation test Spectre +// network. This network is similar to the normal test network except it is +// intended for private use within a group of individuals doing simulation +// testing. The functionality is intended to differ in that the only nodes +// which are specifically specified are used to create the network rather than +// following normal discovery rules. This is important as otherwise it would +// just turn into another public testnet. 
+var SimnetParams = Params{ + K: defaultGHOSTDAGK, + Name: "spectre-simnet", + Net: appmessage.Simnet, + RPCPort: "18510", + DefaultPort: "18511", + DNSSeeds: []string{}, // NOTE: There must NOT be any seeds. + + // DAG parameters + GenesisBlock: &simnetGenesisBlock, + GenesisHash: simnetGenesisHash, + PowMax: simnetPowMax, + BlockCoinbaseMaturity: 100, + SubsidyGenesisReward: defaultSubsidyGenesisReward, + PreDeflationaryPhaseBaseSubsidy: defaultPreDeflationaryPhaseBaseSubsidy, + DeflationaryPhaseBaseSubsidy: defaultDeflationaryPhaseBaseSubsidy, + TargetTimePerBlock: time.Millisecond, + FinalityDuration: time.Minute, + DifficultyAdjustmentWindowSize: defaultDifficultyAdjustmentWindowSize, + TimestampDeviationTolerance: defaultTimestampDeviationTolerance, + + // Consensus rule change deployments. + // + // The miner confirmation window is defined as: + // target proof of work timespan / target proof of work spacing + RuleChangeActivationThreshold: 75, // 75% of MinerConfirmationWindow + MinerConfirmationWindow: 100, + + // Mempool parameters + RelayNonStdTxs: false, + + // AcceptUnroutable specifies whether this network accepts unroutable + // IP addresses, such as 10.0.0.0/8 + AcceptUnroutable: false, + + PrivateKeyID: 0x64, // starts with 4 (uncompressed) or F (compressed) + // Human-readable part for Bech32 encoded addresses + Prefix: util.Bech32PrefixSpectreSim, + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, + + DisableDifficultyAdjustment: true, + + MaxCoinbasePayloadLength: defaultMaxCoinbasePayloadLength, + MaxBlockMass: defaultMaxBlockMass, + MaxBlockParents: defaultMaxBlockParents, + MassPerTxByte: defaultMassPerTxByte, + MassPerScriptPubKeyByte: defaultMassPerScriptPubKeyByte, + MassPerSigOp: defaultMassPerSigOp, + MergeSetSizeLimit: defaultMergeSetSizeLimit, + CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength, + PruningProofM: defaultPruningProofM, + 
DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore, + + MaxBlockLevel: 250, + MergeDepth: defaultMergeDepth, +} + +// DevnetParams defines the network parameters for the development Spectre network. +var DevnetParams = Params{ + K: defaultGHOSTDAGK, + Name: "spectre-devnet", + Net: appmessage.Devnet, + RPCPort: "18610", + DefaultPort: "18611", + DNSSeeds: []string{}, // NOTE: There must NOT be any seeds. + + // DAG parameters + GenesisBlock: &devnetGenesisBlock, + GenesisHash: devnetGenesisHash, + PowMax: devnetPowMax, + BlockCoinbaseMaturity: 100, + SubsidyGenesisReward: defaultSubsidyGenesisReward, + PreDeflationaryPhaseBaseSubsidy: defaultPreDeflationaryPhaseBaseSubsidy, + DeflationaryPhaseBaseSubsidy: defaultDeflationaryPhaseBaseSubsidy, + TargetTimePerBlock: defaultTargetTimePerBlock, + FinalityDuration: defaultFinalityDuration, + DifficultyAdjustmentWindowSize: defaultDifficultyAdjustmentWindowSize, + TimestampDeviationTolerance: defaultTimestampDeviationTolerance, + + // Consensus rule change deployments. 
+ // + // The miner confirmation window is defined as: + // target proof of work timespan / target proof of work spacing + RuleChangeActivationThreshold: 1512, // 75% of MinerConfirmationWindow + MinerConfirmationWindow: 2016, + + // Mempool parameters + RelayNonStdTxs: false, + + // AcceptUnroutable specifies whether this network accepts unroutable + // IP addresses, such as 10.0.0.0/8 + AcceptUnroutable: true, + + // Human-readable part for Bech32 encoded addresses + Prefix: util.Bech32PrefixSpectreDev, + + // Address encoding magics + PrivateKeyID: 0xef, // starts with 9 (uncompressed) or c (compressed) + + // EnableNonNativeSubnetworks enables non-native/coinbase transactions + EnableNonNativeSubnetworks: false, + + DisableDifficultyAdjustment: false, + + MaxCoinbasePayloadLength: defaultMaxCoinbasePayloadLength, + MaxBlockMass: defaultMaxBlockMass, + MaxBlockParents: defaultMaxBlockParents, + MassPerTxByte: defaultMassPerTxByte, + MassPerScriptPubKeyByte: defaultMassPerScriptPubKeyByte, + MassPerSigOp: defaultMassPerSigOp, + MergeSetSizeLimit: defaultMergeSetSizeLimit, + CoinbasePayloadScriptPublicKeyMaxLength: defaultCoinbasePayloadScriptPublicKeyMaxLength, + PruningProofM: defaultPruningProofM, + DeflationaryPhaseDaaScore: defaultDeflationaryPhaseDaaScore, + + MaxBlockLevel: 250, + MergeDepth: defaultMergeDepth, +} + +// ErrDuplicateNet describes an error where the parameters for a Spectre +// network could not be set due to the network already being a standard +// network or previously-registered into this package. +var ErrDuplicateNet = errors.New("duplicate Spectre network") + +var registeredNets = make(map[appmessage.SpectreNet]struct{}) + +// Register registers the network parameters for a Spectre network. This may +// error with ErrDuplicateNet if the network is already registered (either +// due to a previous Register call, or the network being one of the default +// networks). 
+// +// Network parameters should be registered into this package by a main package +// as early as possible. Then, library packages may lookup networks or network +// parameters based on inputs and work regardless of the network being standard +// or not. +func Register(params *Params) error { + if _, ok := registeredNets[params.Net]; ok { + return ErrDuplicateNet + } + registeredNets[params.Net] = struct{}{} + + return nil +} + +// mustRegister performs the same function as Register except it panics if there +// is an error. This should only be called from package init functions. +func mustRegister(params *Params) { + if err := Register(params); err != nil { + panic("failed to register network: " + err.Error()) + } +} + +func init() { + // Register all default networks when the package is initialized. + mustRegister(&MainnetParams) + mustRegister(&TestnetParams) + mustRegister(&SimnetParams) + mustRegister(&DevnetParams) +} diff --git a/domain/dagconfig/params_test.go b/domain/dagconfig/params_test.go new file mode 100644 index 0000000..460d4be --- /dev/null +++ b/domain/dagconfig/params_test.go @@ -0,0 +1,89 @@ +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package dagconfig + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func TestNewHashFromStr(t *testing.T) { + tests := []struct { + hexStr string + expectedHash *externalapi.DomainHash + expectedPanic bool + }{ + {"banana", nil, true}, + {"0000000000000000000000000000000000000000000000000000000000000000", + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), + false}, + {"0101010101010101010101010101010101010101010101010101010101010101", + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), + false}, + } + + for _, test := range tests { + func() { + defer func() { + err := recover() + if (err != nil) != test.expectedPanic { + t.Errorf("%s: Expected panic: %t for invalid hash, got %t", test.hexStr, test.expectedPanic, err != nil) + } + }() + + result := newHashFromStr(test.hexStr) + + if !result.Equal(test.expectedHash) { + t.Errorf("%s: Expected hash: %s, but got %s", test.hexStr, test.expectedHash, result) + } + }() + } +} + +// newHashFromStr converts the passed big-endian hex string into a externalapi.DomainHash. +// It only differs from the one available in hashes package in that it panics on an error +// since it will only be called from tests. +func newHashFromStr(hexStr string) *externalapi.DomainHash { + hash, err := externalapi.NewDomainHashFromString(hexStr) + if err != nil { + panic(err) + } + return hash +} + +// TestMustRegisterPanic ensures the mustRegister function panics when used to +// register an invalid network. +func TestMustRegisterPanic(t *testing.T) { + t.Parallel() + + // Setup a defer to catch the expected panic to ensure it actually + // paniced. 
+ defer func() { + if err := recover(); err == nil { + t.Error("mustRegister did not panic as expected") + } + }() + + // Intentionally try to register duplicate params to force a panic. + mustRegister(&MainnetParams) +} + +// TestSkipProofOfWork ensures all of the hard coded network params don't set SkipProofOfWork as true. +func TestSkipProofOfWork(t *testing.T) { + allParams := []Params{ + MainnetParams, + TestnetParams, + SimnetParams, + DevnetParams, + } + + for _, params := range allParams { + if params.SkipProofOfWork { + t.Errorf("SkipProofOfWork is enabled for %s. This option should be "+ + "used only for tests.", params.Name) + } + } +} diff --git a/domain/dagconfig/register_test.go b/domain/dagconfig/register_test.go new file mode 100644 index 0000000..2ff61f0 --- /dev/null +++ b/domain/dagconfig/register_test.go @@ -0,0 +1,95 @@ +package dagconfig_test + +import ( + "testing" + + . "github.com/spectre-project/spectred/domain/dagconfig" +) + +// Define some of the required parameters for a user-registered +// network. This is necessary to test the registration of and +// lookup of encoding magics from the network. 
+var mockNetParams = Params{ + Name: "mocknet", + Net: 1<<32 - 1, +} + +func TestRegister(t *testing.T) { + type registerTest struct { + name string + params *Params + err error + } + + tests := []struct { + name string + register []registerTest + }{ + { + name: "default networks", + register: []registerTest{ + { + name: "duplicate mainnet", + params: &MainnetParams, + err: ErrDuplicateNet, + }, + { + name: "duplicate testnet", + params: &TestnetParams, + err: ErrDuplicateNet, + }, + { + name: "duplicate simnet", + params: &SimnetParams, + err: ErrDuplicateNet, + }, + }, + }, + { + name: "register mocknet", + register: []registerTest{ + { + name: "mocknet", + params: &mockNetParams, + err: nil, + }, + }, + }, + { + name: "more duplicates", + register: []registerTest{ + { + name: "duplicate mainnet", + params: &MainnetParams, + err: ErrDuplicateNet, + }, + { + name: "duplicate testnet", + params: &TestnetParams, + err: ErrDuplicateNet, + }, + { + name: "duplicate simnet", + params: &SimnetParams, + err: ErrDuplicateNet, + }, + { + name: "duplicate mocknet", + params: &mockNetParams, + err: ErrDuplicateNet, + }, + }, + }, + } + + for _, test := range tests { + for _, network := range test.register { + err := Register(network.params) + + if err != network.err { + t.Errorf("%s:%s: Registered network with unexpected error: got %v expected %v", + network.name, network.name, err, network.err) + } + } + } +} diff --git a/domain/domain.go b/domain/domain.go new file mode 100644 index 0000000..b5da014 --- /dev/null +++ b/domain/domain.go @@ -0,0 +1,227 @@ +package domain + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/spectre-project/spectred/domain/consensusreference" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/miningmanager" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + 
"github.com/spectre-project/spectred/domain/prefixmanager" + "github.com/spectre-project/spectred/domain/prefixmanager/prefix" + infrastructuredatabase "github.com/spectre-project/spectred/infrastructure/db/database" +) + +// Domain provides a reference to the domain's external aps +type Domain interface { + MiningManager() miningmanager.MiningManager + Consensus() externalapi.Consensus + StagingConsensus() externalapi.Consensus + InitStagingConsensusWithoutGenesis() error + CommitStagingConsensus() error + DeleteStagingConsensus() error + ConsensusEventsChannel() chan externalapi.ConsensusEvent +} + +type domain struct { + miningManager miningmanager.MiningManager + consensus *externalapi.Consensus + stagingConsensus *externalapi.Consensus + stagingConsensusLock sync.RWMutex + consensusConfig *consensus.Config + db infrastructuredatabase.Database + consensusEventsChannel chan externalapi.ConsensusEvent +} + +func (d *domain) ConsensusEventsChannel() chan externalapi.ConsensusEvent { + return d.consensusEventsChannel +} + +func (d *domain) Consensus() externalapi.Consensus { + return *d.consensus +} + +func (d *domain) StagingConsensus() externalapi.Consensus { + d.stagingConsensusLock.RLock() + defer d.stagingConsensusLock.RUnlock() + return *d.stagingConsensus +} + +func (d *domain) MiningManager() miningmanager.MiningManager { + return d.miningManager +} + +func (d *domain) InitStagingConsensusWithoutGenesis() error { + cfg := *d.consensusConfig + cfg.SkipAddingGenesis = true + return d.initStagingConsensus(&cfg) +} + +func (d *domain) initStagingConsensus(cfg *consensus.Config) error { + d.stagingConsensusLock.Lock() + defer d.stagingConsensusLock.Unlock() + + _, hasInactivePrefix, err := prefixmanager.InactivePrefix(d.db) + if err != nil { + return err + } + + if hasInactivePrefix { + return errors.Errorf("cannot create staging consensus when a staging consensus already exists") + } + + activePrefix, exists, err := prefixmanager.ActivePrefix(d.db) + if err != 
nil { + return err + } + + if !exists { + return errors.Errorf("cannot create a staging consensus when there's " + + "no active consensus") + } + + inactivePrefix := activePrefix.Flip() + err = prefixmanager.SetPrefixAsInactive(d.db, inactivePrefix) + if err != nil { + return err + } + + consensusFactory := consensus.NewFactory() + + consensusInstance, shouldMigrate, err := consensusFactory.NewConsensus(cfg, d.db, inactivePrefix, d.consensusEventsChannel) + if err != nil { + return err + } + + if shouldMigrate { + return errors.Errorf("A fresh consensus should never return shouldMigrate=true") + } + + d.stagingConsensus = &consensusInstance + return nil +} + +func (d *domain) CommitStagingConsensus() error { + d.stagingConsensusLock.Lock() + defer d.stagingConsensusLock.Unlock() + + dbTx, err := d.db.Begin() + if err != nil { + return err + } + defer dbTx.RollbackUnlessClosed() + + inactivePrefix, hasInactivePrefix, err := prefixmanager.InactivePrefix(d.db) + if err != nil { + return err + } + + if !hasInactivePrefix { + return errors.Errorf("there's no inactive prefix to commit") + } + + activePrefix, exists, err := prefixmanager.ActivePrefix(dbTx) + if err != nil { + return err + } + + if !exists { + return errors.Errorf("cannot commit a staging consensus when there's " + + "no active consensus") + } + + err = prefixmanager.SetPrefixAsActive(dbTx, inactivePrefix) + if err != nil { + return err + } + + err = prefixmanager.SetPrefixAsInactive(dbTx, activePrefix) + if err != nil { + return err + } + + err = dbTx.Commit() + if err != nil { + return err + } + + // We delete anything associated with the old prefix outside + // of the transaction in order to save memory. 
+ err = prefixmanager.DeleteInactivePrefix(d.db) + if err != nil { + return err + } + + tempConsensusPointer := unsafe.Pointer(d.stagingConsensus) + consensusPointer := (*unsafe.Pointer)(unsafe.Pointer(&d.consensus)) + atomic.StorePointer(consensusPointer, tempConsensusPointer) + d.stagingConsensus = nil + return nil +} + +func (d *domain) DeleteStagingConsensus() error { + d.stagingConsensusLock.Lock() + defer d.stagingConsensusLock.Unlock() + + err := prefixmanager.DeleteInactivePrefix(d.db) + if err != nil { + return err + } + + d.stagingConsensus = nil + return nil +} + +// New instantiates a new instance of a Domain object +func New(consensusConfig *consensus.Config, mempoolConfig *mempool.Config, db infrastructuredatabase.Database) (Domain, error) { + err := prefixmanager.DeleteInactivePrefix(db) + if err != nil { + return nil, err + } + + activePrefix, exists, err := prefixmanager.ActivePrefix(db) + if err != nil { + return nil, err + } + + if !exists { + activePrefix = &prefix.Prefix{} + err = prefixmanager.SetPrefixAsActive(db, activePrefix) + if err != nil { + return nil, err + } + } + + consensusEventsChan := make(chan externalapi.ConsensusEvent, 100e3) + consensusFactory := consensus.NewFactory() + consensusInstance, shouldMigrate, err := consensusFactory.NewConsensus(consensusConfig, db, activePrefix, consensusEventsChan) + if err != nil { + return nil, err + } + + domainInstance := &domain{ + consensus: &consensusInstance, + consensusConfig: consensusConfig, + db: db, + consensusEventsChannel: consensusEventsChan, + } + + if shouldMigrate { + err := domainInstance.migrate() + if err != nil { + return nil, err + } + } + + miningManagerFactory := miningmanager.NewFactory() + + // We create a consensus wrapper because the actual consensus might change + consensusReference := consensusreference.NewConsensusReference(&domainInstance.consensus) + domainInstance.miningManager = miningManagerFactory.NewMiningManager(consensusReference, 
&consensusConfig.Params, mempoolConfig) + return domainInstance, nil +} diff --git a/domain/domain_test.go b/domain/domain_test.go new file mode 100644 index 0000000..b13046b --- /dev/null +++ b/domain/domain_test.go @@ -0,0 +1,157 @@ +package domain_test + +import ( + "fmt" + "math/big" + + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" +) + +func TestCreateStagingConsensus(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + dataDir, err := ioutil.TempDir("", fmt.Sprintf("TestCreateStagingConsensus-%s", consensusConfig.Name)) + if err != nil { + t.Fatalf("ioutil.TempDir: %+v", err) + } + defer os.RemoveAll(dataDir) + + db, err := ldb.NewLevelDB(dataDir, 8) + if err != nil { + t.Fatalf("NewLevelDB: %+v", err) + } + + domainInstance, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), db) + if err != nil { + t.Fatalf("New: %+v", err) + } + + err = domainInstance.InitStagingConsensusWithoutGenesis() + if err != nil { + t.Fatalf("InitStagingConsensusWithoutGenesis: %+v", err) + } + + err = domainInstance.InitStagingConsensusWithoutGenesis() + if !strings.Contains(err.Error(), "cannot create staging consensus when a staging consensus already exists") { + t.Fatalf("unexpected error: %+v", err) + } + + addGenesisToStagingConsensus := func() { + genesisWithTrustedData := &externalapi.BlockWithTrustedData{ + Block: consensusConfig.GenesisBlock, + DAAWindow: nil, + 
GHOSTDAGData: []*externalapi.BlockGHOSTDAGDataHashPair{ + { + GHOSTDAGData: externalapi.NewBlockGHOSTDAGData(0, big.NewInt(0), model.VirtualGenesisBlockHash, nil, nil, make(map[externalapi.DomainHash]externalapi.KType)), + Hash: consensusConfig.GenesisHash, + }, + }, + } + err = domainInstance.StagingConsensus().ValidateAndInsertBlockWithTrustedData(genesisWithTrustedData, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlockWithTrustedData: %+v", err) + } + } + + addGenesisToStagingConsensus() + + coinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{}, + ExtraData: []byte{}, + } + block, err := domainInstance.StagingConsensus().BuildBlock(coinbaseData, nil) + if err != nil { + t.Fatalf("BuildBlock: %+v", err) + } + + err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + blockHash := consensushashing.BlockHash(block) + blockInfo, err := domainInstance.StagingConsensus().GetBlockInfo(blockHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if !blockInfo.Exists { + t.Fatalf("block not found on staging consensus") + } + + blockInfo, err = domainInstance.Consensus().GetBlockInfo(blockHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if blockInfo.Exists { + t.Fatalf("a block from staging consensus was found on consensus") + } + + err = domainInstance.CommitStagingConsensus() + if err != nil { + t.Fatalf("CommitStagingConsensus: %+v", err) + } + + blockInfo, err = domainInstance.Consensus().GetBlockInfo(blockHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if !blockInfo.Exists { + t.Fatalf("a block from staging consensus was not found on consensus after commit") + } + + // Now we create a new staging consensus and check that it's deleted once we init a new domain. We also + // validate that the main consensus persisted the data from the committed temp consensus. 
+ err = domainInstance.InitStagingConsensusWithoutGenesis() + if err != nil { + t.Fatalf("InitStagingConsensusWithoutGenesis: %+v", err) + } + + addGenesisToStagingConsensus() + err = domainInstance.StagingConsensus().ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + + domainInstance2, err := domain.New(consensusConfig, mempool.DefaultConfig(&consensusConfig.Params), db) + if err != nil { + t.Fatalf("New: %+v", err) + } + + blockInfo, err = domainInstance2.Consensus().GetBlockInfo(blockHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if !blockInfo.Exists { + t.Fatalf("a block from committed staging consensus was not persisted to the active consensus") + } + + err = domainInstance2.InitStagingConsensusWithoutGenesis() + if err != nil { + t.Fatalf("InitStagingConsensusWithoutGenesis: %+v", err) + } + + blockInfo, err = domainInstance2.StagingConsensus().GetBlockInfo(blockHash) + if err != nil { + t.Fatalf("GetBlockInfo: %+v", err) + } + + if blockInfo.Exists { + t.Fatalf("block from previous temp consensus shouldn't be found on a fresh temp consensus") + } + }) +} diff --git a/domain/log.go b/domain/log.go new file mode 100644 index 0000000..7e6e8bf --- /dev/null +++ b/domain/log.go @@ -0,0 +1,7 @@ +package domain + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("DOMN") diff --git a/domain/migrate.go b/domain/migrate.go new file mode 100644 index 0000000..d91e5e4 --- /dev/null +++ b/domain/migrate.go @@ -0,0 +1,241 @@ +package domain + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (d *domain) migrate() error { + log.Infof("Starting migration") + pruningPoint, err := d.Consensus().PruningPoint() + if err != nil { + return err + } + log.Infof("Current pruning point: %s", pruningPoint) + + if 
d.consensusConfig.Params.GenesisHash.Equal(pruningPoint) { + err = d.initStagingConsensus(d.consensusConfig) + if err != nil { + return err + } + } else { + err = d.InitStagingConsensusWithoutGenesis() + if err != nil { + return err + } + + err = syncConsensuses(d.Consensus(), d.StagingConsensus()) + if err != nil { + return err + } + } + + err = d.CommitStagingConsensus() + if err != nil { + return err + } + + log.Info("Done migrating") + return nil +} + +func syncConsensuses(syncer, syncee externalapi.Consensus) error { + pruningPointProof, err := syncer.BuildPruningPointProof() + if err != nil { + return err + } + + err = syncee.ApplyPruningPointProof(pruningPointProof) + if err != nil { + return err + } + + pruningPointHeaders, err := syncer.PruningPointHeaders() + if err != nil { + return err + } + + err = syncee.ImportPruningPoints(pruningPointHeaders) + if err != nil { + return err + } + + pruningPointAndItsAnticone, err := syncer.PruningPointAndItsAnticone() + if err != nil { + return err + } + + for _, blockHash := range pruningPointAndItsAnticone { + block, found, err := syncer.GetBlock(blockHash) + if err != nil { + return err + } + + if !found { + return errors.Errorf("block %s is missing", blockHash) + } + + blockDAAWindowHashes, err := syncer.BlockDAAWindowHashes(blockHash) + if err != nil { + return err + } + + ghostdagDataBlockHashes, err := syncer.TrustedBlockAssociatedGHOSTDAGDataBlockHashes(blockHash) + if err != nil { + return err + } + + blockWithTrustedData := &externalapi.BlockWithTrustedData{ + Block: block, + DAAWindow: make([]*externalapi.TrustedDataDataDAAHeader, 0, len(blockDAAWindowHashes)), + GHOSTDAGData: make([]*externalapi.BlockGHOSTDAGDataHashPair, 0, len(ghostdagDataBlockHashes)), + } + + for i, daaBlockHash := range blockDAAWindowHashes { + trustedDataDataDAAHeader, err := syncer.TrustedDataDataDAAHeader(blockHash, daaBlockHash, uint64(i)) + if err != nil { + return err + } + blockWithTrustedData.DAAWindow = 
append(blockWithTrustedData.DAAWindow, trustedDataDataDAAHeader) + } + + for _, ghostdagDataBlockHash := range ghostdagDataBlockHashes { + data, err := syncer.TrustedGHOSTDAGData(ghostdagDataBlockHash) + if err != nil { + return err + } + blockWithTrustedData.GHOSTDAGData = append(blockWithTrustedData.GHOSTDAGData, &externalapi.BlockGHOSTDAGDataHashPair{ + Hash: ghostdagDataBlockHash, + GHOSTDAGData: data, + }) + } + + err = syncee.ValidateAndInsertBlockWithTrustedData(blockWithTrustedData, false) + if err != nil { + return err + } + } + + syncerVirtualSelectedParent, err := syncer.GetVirtualSelectedParent() + if err != nil { + return err + } + + pruningPoint, err := syncer.PruningPoint() + if err != nil { + return err + } + + missingBlocks, _, err := syncer.GetHashesBetween(pruningPoint, syncerVirtualSelectedParent, math.MaxUint64) + if err != nil { + return err + } + + syncerTips, err := syncer.Tips() + if err != nil { + return err + } + + for _, tip := range syncerTips { + if tip.Equal(syncerVirtualSelectedParent) { + continue + } + + anticone, err := syncer.GetAnticone(syncerVirtualSelectedParent, tip, 0) + if err != nil { + return err + } + + missingBlocks = append(missingBlocks, anticone...) 
+ } + + percents := 0 + for i, blocksHash := range missingBlocks { + blockInfo, err := syncee.GetBlockInfo(blocksHash) + if err != nil { + return err + } + + if blockInfo.Exists { + continue + } + + block, found, err := syncer.GetBlock(blocksHash) + if err != nil { + return err + } + + if !found { + return errors.Errorf("block %s is missing", blocksHash) + } + err = syncee.ValidateAndInsertBlock(block, false) + if err != nil { + return err + } + + newPercents := 100 * i / len(missingBlocks) + if newPercents > percents { + percents = newPercents + log.Infof("Processed %d%% of the blocks", 100*i/len(missingBlocks)) + } + } + + var fromOutpoint *externalapi.DomainOutpoint + const step = 100_000 + for { + outpointAndUTXOEntryPairs, err := syncer.GetPruningPointUTXOs(pruningPoint, fromOutpoint, step) + if err != nil { + return err + } + fromOutpoint = outpointAndUTXOEntryPairs[len(outpointAndUTXOEntryPairs)-1].Outpoint + err = syncee.AppendImportedPruningPointUTXOs(outpointAndUTXOEntryPairs) + if err != nil { + return err + } + if len(outpointAndUTXOEntryPairs) < step { + break + } + } + + // Check that ValidateAndInsertImportedPruningPoint works given the right arguments. + err = syncee.ValidateAndInsertImportedPruningPoint(pruningPoint) + if err != nil { + return err + } + + emptyCoinbase := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + + // Check that we can build a block just after importing the pruning point. 
+ _, err = syncee.BuildBlock(emptyCoinbase, nil) + if err != nil { + return err + } + + estimatedVirtualDAAScoreTarget, err := syncer.GetVirtualDAAScore() + if err != nil { + return err + } + + err = syncer.ResolveVirtual(func(virtualDAAScoreStart uint64, virtualDAAScore uint64) { + if estimatedVirtualDAAScoreTarget-virtualDAAScoreStart <= 0 { + percents = 100 + } else { + percents = int(float64(virtualDAAScore-virtualDAAScoreStart) / float64(estimatedVirtualDAAScoreTarget-virtualDAAScoreStart) * 100) + } + log.Infof("Resolving virtual. Estimated progress: %d%%", percents) + }) + if err != nil { + return err + } + + log.Infof("Resolved virtual") + + return nil +} diff --git a/domain/miningmanager/blocktemplatebuilder/blocktemplatebuilder.go b/domain/miningmanager/blocktemplatebuilder/blocktemplatebuilder.go new file mode 100644 index 0000000..fe2866e --- /dev/null +++ b/domain/miningmanager/blocktemplatebuilder/blocktemplatebuilder.go @@ -0,0 +1,219 @@ +package blocktemplatebuilder + +import ( + "math" + "sort" + + "github.com/spectre-project/spectred/domain/consensus/processes/coinbasemanager" + "github.com/spectre-project/spectred/domain/consensus/utils/merkle" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensusreference" + "github.com/spectre-project/spectred/util/mstime" + + "github.com/spectre-project/spectred/util/difficulty" + + "github.com/pkg/errors" + consensusexternalapi "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + miningmanagerapi "github.com/spectre-project/spectred/domain/miningmanager/model" +) + +type candidateTx struct { + *consensusexternalapi.DomainTransaction + txValue float64 + gasLimit uint64 + + p float64 + start float64 + end float64 + + isMarkedForDeletion bool +} + +// 
blockTemplateBuilder creates block templates for a miner to consume +type blockTemplateBuilder struct { + consensusReference consensusreference.ConsensusReference + mempool miningmanagerapi.Mempool + policy policy + + coinbasePayloadScriptPublicKeyMaxLength uint8 +} + +// New creates a new blockTemplateBuilder +func New(consensusReference consensusreference.ConsensusReference, mempool miningmanagerapi.Mempool, + blockMaxMass uint64, coinbasePayloadScriptPublicKeyMaxLength uint8) miningmanagerapi.BlockTemplateBuilder { + return &blockTemplateBuilder{ + consensusReference: consensusReference, + mempool: mempool, + policy: policy{BlockMaxMass: blockMaxMass}, + + coinbasePayloadScriptPublicKeyMaxLength: coinbasePayloadScriptPublicKeyMaxLength, + } +} + +// BuildBlockTemplate creates a block template for a miner to consume +// BuildBlockTemplate returns a new block template that is ready to be solved +// using the transactions from the passed transaction source pool and a coinbase +// that either pays to the passed address if it is not nil, or a coinbase that +// is redeemable by anyone if the passed address is nil. The nil address +// functionality is useful since there are cases such as the getblocktemplate +// RPC where external mining software is responsible for creating their own +// coinbase which will replace the one generated for the block template. Thus +// the need to have configured address can be avoided. +// +// The transactions selected and included are prioritized according to several +// factors. First, each transaction has a priority calculated based on its +// value, age of inputs, and size. Transactions which consist of larger +// amounts, older inputs, and small sizes have the highest priority. Second, a +// fee per kilobyte is calculated for each transaction. Transactions with a +// higher fee per kilobyte are preferred. Finally, the block generation related +// policy settings are all taken into account. 
+// +// Transactions which only spend outputs from other transactions already in the +// block DAG are immediately added to a priority queue which either +// prioritizes based on the priority (then fee per kilobyte) or the fee per +// kilobyte (then priority) depending on whether or not the BlockPrioritySize +// policy setting allots space for high-priority transactions. Transactions +// which spend outputs from other transactions in the source pool are added to a +// dependency map so they can be added to the priority queue once the +// transactions they depend on have been included. +// +// Once the high-priority area (if configured) has been filled with +// transactions, or the priority falls below what is considered high-priority, +// the priority queue is updated to prioritize by fees per kilobyte (then +// priority). +// +// When the fees per kilobyte drop below the TxMinFreeFee policy setting, the +// transaction will be skipped unless the BlockMinSize policy setting is +// nonzero, in which case the block will be filled with the low-fee/free +// transactions until the block size reaches that minimum size. +// +// Any transactions which would cause the block to exceed the BlockMaxMass +// policy setting, exceed the maximum allowed signature operations per block, or +// otherwise cause the block to be invalid are skipped. 
+// +// Given the above, a block generated by this function is of the following form: +// +// ----------------------------------- -- -- +// | Coinbase Transaction | | | +// |-----------------------------------| | | +// | | | | ----- policy.BlockPrioritySize +// | High-priority Transactions | | | +// | | | | +// |-----------------------------------| | -- +// | | | +// | | | +// | | |--- policy.BlockMaxMass +// | Transactions prioritized by fee | | +// | until <= policy.TxMinFreeFee | | +// | | | +// | | | +// | | | +// |-----------------------------------| | +// | Low-fee/Non high-priority (free) | | +// | transactions (while block size | | +// | <= policy.BlockMinSize) | | +// ----------------------------------- -- + +func (btb *blockTemplateBuilder) BuildBlockTemplate( + coinbaseData *consensusexternalapi.DomainCoinbaseData) (*consensusexternalapi.DomainBlockTemplate, error) { + + mempoolTransactions := btb.mempool.BlockCandidateTransactions() + candidateTxs := make([]*candidateTx, 0, len(mempoolTransactions)) + for _, tx := range mempoolTransactions { + // Calculate the tx value + gasLimit := uint64(0) + if !subnetworks.IsBuiltInOrNative(tx.SubnetworkID) { + panic("We currently don't support non native subnetworks") + } + candidateTxs = append(candidateTxs, &candidateTx{ + DomainTransaction: tx, + txValue: btb.calcTxValue(tx), + gasLimit: gasLimit, + }) + } + + // Sort the candidate txs by subnetworkID. 
+ sort.Slice(candidateTxs, func(i, j int) bool { + return subnetworks.Less(candidateTxs[i].SubnetworkID, candidateTxs[j].SubnetworkID) + }) + + log.Debugf("Considering %d transactions for inclusion to new block", + len(candidateTxs)) + + blockTxs := btb.selectTransactions(candidateTxs) + blockTemplate, err := btb.consensusReference.Consensus().BuildBlockTemplate(coinbaseData, blockTxs.selectedTxs) + + invalidTxsErr := ruleerrors.ErrInvalidTransactionsInNewBlock{} + if errors.As(err, &invalidTxsErr) { + log.Criticalf("consensusReference.Consensus().BuildBlock returned invalid txs in BuildBlockTemplate") + err = btb.mempool.RemoveInvalidTransactions(&invalidTxsErr) + if err != nil { + // mempool.RemoveInvalidTransactions might return errors in situations that are perfectly fine in this context. + // TODO: Once the mempool invariants are clear, this should be converted back `return nil, err`: + // https://github.com/spectre-project/spectred/issues/1553 + log.Criticalf("Error from mempool.RemoveInvalidTransactions: %+v", err) + } + // We can call this recursively without worry because this should almost never happen + return btb.BuildBlockTemplate(coinbaseData) + } + + if err != nil { + return nil, err + } + + log.Debugf("Created new block template (%d transactions, %d in fees, %d mass, target difficulty %064x)", + len(blockTemplate.Block.Transactions), blockTxs.totalFees, blockTxs.totalMass, difficulty.CompactToBig(blockTemplate.Block.Header.Bits())) + + return blockTemplate, nil +} + +// ModifyBlockTemplate modifies an existing block template to the requested coinbase data and updates the timestamp +func (btb *blockTemplateBuilder) ModifyBlockTemplate(newCoinbaseData *consensusexternalapi.DomainCoinbaseData, + blockTemplateToModify *consensusexternalapi.DomainBlockTemplate) (*consensusexternalapi.DomainBlockTemplate, error) { + + // The first transaction is always the coinbase transaction + coinbaseTx := 
blockTemplateToModify.Block.Transactions[transactionhelper.CoinbaseTransactionIndex] + newPayload, err := coinbasemanager.ModifyCoinbasePayload(coinbaseTx.Payload, newCoinbaseData, btb.coinbasePayloadScriptPublicKeyMaxLength) + if err != nil { + return nil, err + } + coinbaseTx.Payload = newPayload + if blockTemplateToModify.CoinbaseHasRedReward { + // The last output is always the coinbase red blocks reward + coinbaseTx.Outputs[len(coinbaseTx.Outputs)-1].ScriptPublicKey = newCoinbaseData.ScriptPublicKey + } + // Update the hash merkle root according to the modified transactions + mutableHeader := blockTemplateToModify.Block.Header.ToMutable() + // TODO: can be optimized to O(log(#transactions)) by caching the whole merkle tree in BlockTemplate and changing only the relevant path + mutableHeader.SetHashMerkleRoot(merkle.CalculateHashMerkleRoot(blockTemplateToModify.Block.Transactions)) + + newTimestamp := mstime.Now().UnixMilliseconds() + if newTimestamp >= mutableHeader.TimeInMilliseconds() { + // Only if new time stamp is later than current, update the header. Otherwise, + // we keep the previous time as built by internal consensus median time logic + mutableHeader.SetTimeInMilliseconds(newTimestamp) + } + + blockTemplateToModify.Block.Header = mutableHeader.ToImmutable() + blockTemplateToModify.CoinbaseData = newCoinbaseData + + return blockTemplateToModify, nil +} + +// calcTxValue calculates a value to be used in transaction selection. +// The higher the number the more likely it is that the transaction will be +// included in the block. 
+func (btb *blockTemplateBuilder) calcTxValue(tx *consensusexternalapi.DomainTransaction) float64 {
+	massLimit := btb.policy.BlockMaxMass
+
+	mass := tx.Mass
+	fee := tx.Fee
+	if subnetworks.IsBuiltInOrNative(tx.SubnetworkID) {
+		return float64(fee) / (float64(mass) / float64(massLimit))
+	}
+	// TODO: Replace with real gas once implemented
+	gasLimit := uint64(math.MaxUint64)
+	return float64(fee) / (float64(mass)/float64(massLimit) + float64(tx.Gas)/float64(gasLimit))
+}
diff --git a/domain/miningmanager/blocktemplatebuilder/log.go b/domain/miningmanager/blocktemplatebuilder/log.go
new file mode 100644
index 0000000..1783867
--- /dev/null
+++ b/domain/miningmanager/blocktemplatebuilder/log.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package blocktemplatebuilder
+
+import (
+	"github.com/spectre-project/spectred/infrastructure/logger"
+)
+
+var log = logger.RegisterSubSystem("BLTB")
diff --git a/domain/miningmanager/blocktemplatebuilder/policy.go b/domain/miningmanager/blocktemplatebuilder/policy.go
new file mode 100644
index 0000000..58980f8
--- /dev/null
+++ b/domain/miningmanager/blocktemplatebuilder/policy.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2014-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package blocktemplatebuilder
+
+// policy houses the policy (configuration parameters) which is used to control
+// the generation of block templates. See the documentation for
+// NewBlockTemplate for more details on how each of these parameters is used.
+type policy struct {
+	// BlockMaxMass is the maximum block mass to be used when generating a
+	// block template.
+ BlockMaxMass uint64 +} diff --git a/domain/miningmanager/blocktemplatebuilder/txselection.go b/domain/miningmanager/blocktemplatebuilder/txselection.go new file mode 100644 index 0000000..61714b1 --- /dev/null +++ b/domain/miningmanager/blocktemplatebuilder/txselection.go @@ -0,0 +1,213 @@ +package blocktemplatebuilder + +import ( + "math" + "math/rand" + "sort" + + consensusexternalapi "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" +) + +const ( + // alpha is a coefficient that defines how uniform the distribution of + // candidate transactions should be. A smaller alpha makes the distribution + // more uniform. Alpha is used when determining a candidate transaction's + // initial p value. + alpha = 3 + + // rebalanceThreshold is the percentage of candidate transactions under which + // we don't rebalance. Rebalancing is a heavy operation so we prefer to avoid + // rebalancing very often. On the other hand, if we don't rebalance often enough + // we risk having too many collisions. + // The value is derived from the max probability of collision. That is to say, + // if rebalanceThreshold is 0.95, there's a 1-in-20 chance of collision. + // See selectTxs for further details. + rebalanceThreshold = 0.95 +) + +type selectedTransactions struct { + selectedTxs []*consensusexternalapi.DomainTransaction + txMasses []uint64 + txFees []uint64 + totalMass uint64 + totalFees uint64 +} + +// selectTransactions implements a probabilistic transaction selection algorithm. +// The algorithm, roughly, is as follows: +// 1. We assign a probability to each transaction equal to: +// (candidateTx.Value^alpha) / Σ(tx.Value^alpha) +// Where the sum of the probabilities of all txs is 1. +// 2. We draw a random number in [0,1) and select a transaction accordingly. +// 3. 
If it's valid, add it to the selectedTxs and remove it from the candidates. +// 4. Continue iterating the above until we have either selected all +// available transactions or ran out of gas/block space. +// +// Note that we make two optimizations here: +// * Draw a number in [0,Σ(tx.Value^alpha)) to avoid normalization +// * Instead of removing a candidate after each iteration, mark it for deletion. +// Once the sum of probabilities of marked transactions is greater than +// rebalanceThreshold percent of the sum of probabilities of all transactions, +// rebalance. + +// selectTransactions loops over the candidate transactions +// and appends the ones that will be included in the next block into +// txsForBlockTemplates. +// See selectTxs for further details. +func (btb *blockTemplateBuilder) selectTransactions(candidateTxs []*candidateTx) selectedTransactions { + txsForBlockTemplate := selectedTransactions{ + selectedTxs: make([]*consensusexternalapi.DomainTransaction, 0, len(candidateTxs)), + txMasses: make([]uint64, 0, len(candidateTxs)), + txFees: make([]uint64, 0, len(candidateTxs)), + totalMass: 0, + totalFees: 0, + } + usedCount, usedP := 0, 0.0 + candidateTxs, totalP := rebalanceCandidates(candidateTxs, true) + gasUsageMap := make(map[consensusexternalapi.DomainSubnetworkID]uint64) + + markCandidateTxForDeletion := func(candidateTx *candidateTx) { + candidateTx.isMarkedForDeletion = true + usedCount++ + usedP += candidateTx.p + } + + selectedTxs := make([]*candidateTx, 0) + for len(candidateTxs)-usedCount > 0 { + // Rebalance the candidates if it's required + if usedP >= rebalanceThreshold*totalP { + candidateTxs, totalP = rebalanceCandidates(candidateTxs, false) + usedCount, usedP = 0, 0.0 + + // Break if we now ran out of transactions + if len(candidateTxs) == 0 { + break + } + } + + // Select a candidate tx at random + r := rand.Float64() + r *= totalP + selectedTx := findTx(candidateTxs, r) + + // If isMarkedForDeletion is set, it means we got a 
collision. + // Ignore and select another Tx. + if selectedTx.isMarkedForDeletion { + continue + } + tx := selectedTx.DomainTransaction + + // Enforce maximum transaction mass per block. Also check + // for overflow. + if txsForBlockTemplate.totalMass+selectedTx.Mass < txsForBlockTemplate.totalMass || + txsForBlockTemplate.totalMass+selectedTx.Mass > btb.policy.BlockMaxMass { + log.Tracef("Tx %s would exceed the max block mass. "+ + "As such, stopping.", consensushashing.TransactionID(tx)) + break + } + + // Enforce maximum gas per subnetwork per block. Also check + // for overflow. + if !subnetworks.IsBuiltInOrNative(tx.SubnetworkID) { + subnetworkID := tx.SubnetworkID + gasUsage, ok := gasUsageMap[subnetworkID] + if !ok { + gasUsage = 0 + } + txGas := tx.Gas + if gasUsage+txGas < gasUsage || + gasUsage+txGas > selectedTx.gasLimit { + log.Tracef("Tx %s would exceed the gas limit in "+ + "subnetwork %s. Removing all remaining txs from this "+ + "subnetwork.", + consensushashing.TransactionID(tx), subnetworkID) + for _, candidateTx := range candidateTxs { + // candidateTxs are ordered by subnetwork, so we can safely assume + // that transactions after subnetworkID will not be relevant. + if subnetworks.Less(subnetworkID, candidateTx.SubnetworkID) { + break + } + + if candidateTx.SubnetworkID == subnetworkID { + markCandidateTxForDeletion(candidateTx) + } + } + continue + } + gasUsageMap[subnetworkID] = gasUsage + txGas + } + + // Add the transaction to the result, increment counters, and + // save the masses, fees, and signature operation counts to the + // result. 
+ selectedTxs = append(selectedTxs, selectedTx) + txsForBlockTemplate.totalMass += selectedTx.Mass + txsForBlockTemplate.totalFees += selectedTx.Fee + + log.Tracef("Adding tx %s (feePerMegaGram %d)", + consensushashing.TransactionID(tx), selectedTx.Fee*1e6/selectedTx.Mass) + + markCandidateTxForDeletion(selectedTx) + } + + sort.Slice(selectedTxs, func(i, j int) bool { + return subnetworks.Less(selectedTxs[i].SubnetworkID, selectedTxs[j].SubnetworkID) + }) + for _, selectedTx := range selectedTxs { + txsForBlockTemplate.selectedTxs = append(txsForBlockTemplate.selectedTxs, selectedTx.DomainTransaction) + txsForBlockTemplate.txMasses = append(txsForBlockTemplate.txMasses, selectedTx.Mass) + txsForBlockTemplate.txFees = append(txsForBlockTemplate.txFees, selectedTx.Fee) + } + return txsForBlockTemplate +} + +func rebalanceCandidates(oldCandidateTxs []*candidateTx, isFirstRun bool) ( + candidateTxs []*candidateTx, totalP float64) { + + totalP = 0.0 + + candidateTxs = make([]*candidateTx, 0, len(oldCandidateTxs)) + for _, candidateTx := range oldCandidateTxs { + if candidateTx.isMarkedForDeletion { + continue + } + + candidateTxs = append(candidateTxs, candidateTx) + } + + for _, candidateTx := range candidateTxs { + if isFirstRun { + candidateTx.p = math.Pow(candidateTx.txValue, alpha) + } + candidateTx.start = totalP + candidateTx.end = totalP + candidateTx.p + + totalP += candidateTx.p + } + return +} + +// findTx finds the candidateTx in whose range r falls. +// For example, if we have candidateTxs with starts and ends: +// * tx1: start 0, end 100 +// * tx2: start 100, end 105 +// * tx3: start 105, end 2000 +// And r=102, then findTx will return tx2. 
+func findTx(candidateTxs []*candidateTx, r float64) *candidateTx { + min := 0 + max := len(candidateTxs) - 1 + for { + i := (min + max) / 2 + candidateTx := candidateTxs[i] + if candidateTx.end < r { + min = i + 1 + continue + } else if candidateTx.start > r { + max = i - 1 + continue + } + return candidateTx + } +} diff --git a/domain/miningmanager/factory.go b/domain/miningmanager/factory.go new file mode 100644 index 0000000..f916997 --- /dev/null +++ b/domain/miningmanager/factory.go @@ -0,0 +1,39 @@ +package miningmanager + +import ( + "sync" + "time" + + "github.com/spectre-project/spectred/domain/consensusreference" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/domain/miningmanager/blocktemplatebuilder" + mempoolpkg "github.com/spectre-project/spectred/domain/miningmanager/mempool" +) + +// Factory instantiates new mining managers +type Factory interface { + NewMiningManager(consensus consensusreference.ConsensusReference, params *dagconfig.Params, mempoolConfig *mempoolpkg.Config) MiningManager +} + +type factory struct{} + +// NewMiningManager instantiate a new mining manager +func (f *factory) NewMiningManager(consensusReference consensusreference.ConsensusReference, params *dagconfig.Params, + mempoolConfig *mempoolpkg.Config) MiningManager { + + mempool := mempoolpkg.New(mempoolConfig, consensusReference) + blockTemplateBuilder := blocktemplatebuilder.New(consensusReference, mempool, params.MaxBlockMass, params.CoinbasePayloadScriptPublicKeyMaxLength) + + return &miningManager{ + consensusReference: consensusReference, + mempool: mempool, + blockTemplateBuilder: blockTemplateBuilder, + cachingTime: time.Time{}, + cacheLock: &sync.Mutex{}, + } +} + +// NewFactory creates a new mining manager factory +func NewFactory() Factory { + return &factory{} +} diff --git a/domain/miningmanager/mempool/check_transaction_standard.go b/domain/miningmanager/mempool/check_transaction_standard.go new file mode 100644 
index 0000000..12ab147
--- /dev/null
+++ b/domain/miningmanager/mempool/check_transaction_standard.go
@@ -0,0 +1,206 @@
+package mempool
+
+import (
+	"fmt"
+
+	"github.com/spectre-project/spectred/util/txmass"
+
+	"github.com/spectre-project/spectred/domain/consensus/utils/consensushashing"
+
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensus/utils/constants"
+	"github.com/spectre-project/spectred/domain/consensus/utils/txscript"
+)
+
+const (
+	// maxStandardP2SHSigOps is the maximum number of signature operations
+	// that are considered standard in a pay-to-script-hash script.
+	maxStandardP2SHSigOps = 15
+
+	// maximumStandardSignatureScriptSize is the maximum size allowed for a
+	// transaction input signature script to be considered standard. This
+	// value allows for a 15-of-15 CHECKMULTISIG pay-to-script-hash with
+	// compressed keys.
+	//
+	// The form of the overall script is: OP_0 <15 signatures> OP_PUSHDATA2
+	// <2 bytes len> [OP_15 <15 pubkeys> OP_15 OP_CHECKMULTISIG]
+	//
+	// For the p2sh script portion, each of the 15 compressed pubkeys are
+	// 33 bytes (plus one for the OP_DATA_33 opcode), and thus it totals
+	// to (15*34)+3 = 513 bytes. Next, each of the 15 signatures is a max
+	// of 73 bytes (plus one for the OP_DATA_73 opcode). Also, there is one
+	// extra byte for the initial extra OP_0 push and 3 bytes for the
+	// OP_PUSHDATA2 needed to specify the 513 bytes for the script push.
+	// That brings the total to 1+(15*74)+3+513 = 1627. This value also
+	// adds a few extra bytes to provide a little buffer.
+	// (1 + 15*74 + 3) + (15*34 + 3) + 23 = 1650
+	maximumStandardSignatureScriptSize = 1650
+
+	// MaximumStandardTransactionMass is the maximum mass allowed for transactions that
+	// are considered standard and will therefore be relayed and considered for mining.
+ MaximumStandardTransactionMass = 100_000 +) + +func (mp *mempool) checkTransactionStandardInIsolation(transaction *externalapi.DomainTransaction) error { + // The transaction must be a currently supported version. + // + // This check is currently mirrored in consensus. + // However, in a later version of Spectre the consensus-valid transaction version range might diverge from the + // standard transaction version range, and thus the validation should happen in both levels. + if transaction.Version > mp.config.MaximumStandardTransactionVersion || + transaction.Version < mp.config.MinimumStandardTransactionVersion { + str := fmt.Sprintf("transaction version %d is not in the valid range of %d-%d", transaction.Version, + mp.config.MinimumStandardTransactionVersion, mp.config.MaximumStandardTransactionVersion) + return transactionRuleError(RejectNonstandard, str) + } + + // Since extremely large transactions with a lot of inputs can cost + // almost as much to process as the sender fees, limit the maximum + // size of a transaction. This also helps mitigate CPU exhaustion + // attacks. + if transaction.Mass > MaximumStandardTransactionMass { + str := fmt.Sprintf("transaction mass of %d is larger than max allowed size of %d", + transaction.Mass, MaximumStandardTransactionMass) + return transactionRuleError(RejectNonstandard, str) + } + + for i, input := range transaction.Inputs { + // Each transaction input signature script must not exceed the + // maximum size allowed for a standard transaction. See + // the comment on maximumStandardSignatureScriptSize for more details. 
+ signatureScriptLen := len(input.SignatureScript) + if signatureScriptLen > maximumStandardSignatureScriptSize { + str := fmt.Sprintf("transaction input %d: signature script size of %d bytes is larger than the "+ + "maximum allowed size of %d bytes", i, signatureScriptLen, maximumStandardSignatureScriptSize) + return transactionRuleError(RejectNonstandard, str) + } + } + + // None of the output public key scripts can be a non-standard script or be "dust". + for i, output := range transaction.Outputs { + if output.ScriptPublicKey.Version > constants.MaxScriptPublicKeyVersion { + return transactionRuleError(RejectNonstandard, "The version of the scriptPublicKey is higher than the known version.") + } + scriptClass := txscript.GetScriptClass(output.ScriptPublicKey.Script) + if scriptClass == txscript.NonStandardTy { + str := fmt.Sprintf("transaction output %d: non-standard script form", i) + return transactionRuleError(RejectNonstandard, str) + } + + if mp.IsTransactionOutputDust(output) { + str := fmt.Sprintf("transaction output %d: payment "+ + "of %d is dust", i, output.Value) + return transactionRuleError(RejectDust, str) + } + } + + return nil +} + +// IsTransactionOutputDust returns whether or not the passed transaction output amount +// is considered dust or not based on the configured minimum transaction relay fee. +// Dust is defined in terms of the minimum transaction relay fee. In +// particular, if the cost to the network to spend coins is more than 1/3 of the +// minimum transaction relay fee, it is considered dust. +// +// It is exported for use by transaction generators and wallets +func (mp *mempool) IsTransactionOutputDust(output *externalapi.DomainTransactionOutput) bool { + // Unspendable outputs are considered dust. + if txscript.IsUnspendable(output.ScriptPublicKey.Script) { + return true + } + + // The total serialized size consists of the output and the associated + // input script to redeem it. 
Since there is no input script + // to redeem it yet, use the minimum size of a typical input script. + // + // Pay-to-pubkey bytes breakdown: + // + // Output to pubkey (43 bytes): + // 8 value, 1 script len, 34 script [1 OP_DATA_32, + // 32 pubkey, 1 OP_CHECKSIG] + // + // Input (105 bytes): + // 36 prev outpoint, 1 script len, 64 script [1 OP_DATA_64, + // 64 sig], 4 sequence + // + // The most common scripts are pay-to-pubkey, and as per the above + // breakdown, the minimum size of a p2pk input script is 148 bytes. So + // that figure is used. + totalSerializedSize := txmass.TransactionOutputEstimatedSerializedSize(output) + 148 + + // The output is considered dust if the cost to the network to spend the + // coins is more than 1/3 of the minimum free transaction relay fee. + // mp.config.MinimumRelayTransactionFee is in sompi/KB, so multiply + // by 1000 to convert to bytes. + // + // Using the typical values for a pay-to-pubkey transaction from + // the breakdown above and the default minimum free transaction relay + // fee of 1000, this equates to values less than 546 sompi being + // considered dust. + // + // The following is equivalent to (value/totalSerializedSize) * (1/3) * 1000 + // without needing to do floating point math. + return output.Value*1000/(3*totalSerializedSize) < uint64(mp.config.MinimumRelayTransactionFee) +} + +// checkTransactionStandardInContext performs a series of checks on a transaction's +// inputs to ensure they are "standard". A standard transaction input within the +// context of this function is one whose referenced public key script is of a +// standard form and, for pay-to-script-hash, does not have more than +// maxStandardP2SHSigOps signature operations. 
+// In addition, makes sure that the transaction's fee is above the minimum for acceptance
+// into the mempool and relay
+func (mp *mempool) checkTransactionStandardInContext(transaction *externalapi.DomainTransaction) error {
+	for i, input := range transaction.Inputs {
+		// It is safe to elide existence and index checks here since
+		// they have already been checked prior to calling this
+		// function.
+		utxoEntry := input.UTXOEntry
+		originScriptPubKey := utxoEntry.ScriptPublicKey()
+		switch txscript.GetScriptClass(originScriptPubKey.Script) {
+		case txscript.ScriptHashTy:
+			numSigOps := txscript.GetPreciseSigOpCount(
+				input.SignatureScript, originScriptPubKey, true)
+			if numSigOps > maxStandardP2SHSigOps {
+				str := fmt.Sprintf("transaction input #%d has %d signature operations which is more "+
+					"than the allowed max amount of %d", i, numSigOps, maxStandardP2SHSigOps)
+				return transactionRuleError(RejectNonstandard, str)
+			}
+
+		case txscript.NonStandardTy:
+			str := fmt.Sprintf("transaction input #%d has a non-standard script form", i)
+			return transactionRuleError(RejectNonstandard, str)
+		}
+	}
+
+	minimumFee := mp.minimumRequiredTransactionRelayFee(transaction.Mass)
+	if transaction.Fee < minimumFee {
+		str := fmt.Sprintf("transaction %s has %d fees which is under the required amount of %d",
+			consensushashing.TransactionID(transaction), transaction.Fee, minimumFee)
+		return transactionRuleError(RejectInsufficientFee, str)
+	}
+
+	return nil
+}
+
+// minimumRequiredTransactionRelayFee returns the minimum transaction fee required for a
+// transaction with the passed mass to be accepted into the mempool and relayed.
+func (mp *mempool) minimumRequiredTransactionRelayFee(mass uint64) uint64 {
+	// Calculate the minimum fee for a transaction to be allowed into the
+	// mempool and relayed by scaling the base fee. MinimumRelayTransactionFee is in
+	// sompi/kg so multiply by mass (which is in grams) and divide by 1000 to get minimum sompis.
+ minimumFee := (mass * uint64(mp.config.MinimumRelayTransactionFee)) / 1000 + + if minimumFee == 0 && mp.config.MinimumRelayTransactionFee > 0 { + minimumFee = uint64(mp.config.MinimumRelayTransactionFee) + } + + // Set the minimum fee to the maximum possible value if the calculated + // fee is not in the valid range for monetary amounts. + if minimumFee > constants.MaxSompi { + minimumFee = constants.MaxSompi + } + + return minimumFee +} diff --git a/domain/miningmanager/mempool/check_transaction_standard_test.go b/domain/miningmanager/mempool/check_transaction_standard_test.go new file mode 100644 index 0000000..53ed88c --- /dev/null +++ b/domain/miningmanager/mempool/check_transaction_standard_test.go @@ -0,0 +1,352 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package mempool + +import ( + "bytes" + "math" + "testing" + + "github.com/spectre-project/spectred/domain/consensusreference" + + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + + "github.com/spectre-project/spectred/domain/consensus" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/util" +) + +func TestCalcMinRequiredTxRelayFee(t *testing.T) { + tests := []struct { + name string // test description. + size uint64 // Transaction size in bytes. + minimumRelayTransactionFee util.Amount // minimum relay transaction fee. + want uint64 // Expected fee. + }{ + { + // Ensure combination of size and fee that are less than 1000 + // produce a non-zero fee. 
+ "250 bytes with relay fee of 3", + 250, + 3, + 3, + }, + { + "100 bytes with default minimum relay fee", + 100, + defaultMinimumRelayTransactionFee, + 100, + }, + { + "max standard tx size with default minimum relay fee", + MaximumStandardTransactionMass, + defaultMinimumRelayTransactionFee, + 100000, + }, + { + "1500 bytes with 5000 relay fee", + 1500, + 5000, + 7500, + }, + { + "1500 bytes with 3000 relay fee", + 1500, + 3000, + 4500, + }, + { + "782 bytes with 5000 relay fee", + 782, + 5000, + 3910, + }, + { + "782 bytes with 3000 relay fee", + 782, + 3000, + 2346, + }, + { + "782 bytes with 2550 relay fee", + 782, + 2550, + 1994, + }, + } + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCalcMinRequiredTxRelayFee") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + for _, test := range tests { + mempoolConfig := DefaultConfig(tc.DAGParams()) + mempoolConfig.MinimumRelayTransactionFee = test.minimumRelayTransactionFee + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + mempool := New(mempoolConfig, consensusreference.NewConsensusReference(&tcAsConsensusPointer)).(*mempool) + + got := mempool.minimumRequiredTransactionRelayFee(test.size) + if got != test.want { + t.Errorf("TestCalcMinRequiredTxRelayFee test '%s' "+ + "failed: got %v want %v", test.name, got, + test.want) + } + } + }) +} + +func TestIsTransactionOutputDust(t *testing.T) { + scriptPublicKey := &externalapi.ScriptPublicKey{ + []byte{0x76, 0xa9, 0x21, 0x03, 0x2f, 0x7e, 0x43, + 0x0a, 0xa4, 0xc9, 0xd1, 0x59, 0x43, 0x7e, 0x84, 0xb9, + 0x75, 0xdc, 0x76, 0xd9, 0x00, 0x3b, 0xf0, 0x92, 0x2c, + 0xf3, 0xaa, 0x45, 0x28, 0x46, 0x4b, 0xab, 0x78, 0x0d, + 0xba, 0x5e}, 0} + + tests := []struct { + name string // test description + txOut externalapi.DomainTransactionOutput + 
minimumRelayTransactionFee util.Amount // minimum relay transaction fee. + isDust bool + }{ + { + // Any value is allowed with a zero relay fee. + "zero value with zero relay fee", + externalapi.DomainTransactionOutput{Value: 0, ScriptPublicKey: scriptPublicKey}, + 0, + false, + }, + { + // Zero value is dust with any relay fee" + "zero value with very small tx fee", + externalapi.DomainTransactionOutput{Value: 0, ScriptPublicKey: scriptPublicKey}, + 1, + true, + }, + { + "36 byte public key script with value 605", + externalapi.DomainTransactionOutput{Value: 605, ScriptPublicKey: scriptPublicKey}, + 1000, + true, + }, + { + "36 byte public key script with value 606", + externalapi.DomainTransactionOutput{Value: 606, ScriptPublicKey: scriptPublicKey}, + 1000, + false, + }, + { + // Maximum allowed value is never dust. + "max sompi amount is never dust", + externalapi.DomainTransactionOutput{Value: constants.MaxSompi, ScriptPublicKey: scriptPublicKey}, + util.Amount(1000), + false, + }, + { + // Maximum uint64 value causes overflow. + "maximum uint64 value", + externalapi.DomainTransactionOutput{Value: math.MaxUint64, ScriptPublicKey: scriptPublicKey}, + math.MaxUint64, + true, + }, + { + // Unspendable ScriptPublicKey due to an invalid public key + // script. 
+ "unspendable ScriptPublicKey", + externalapi.DomainTransactionOutput{Value: 5000, ScriptPublicKey: &externalapi.ScriptPublicKey{[]byte{0x01}, 0}}, + 0, // no relay fee + true, + }, + } + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestIsTransactionOutputDust") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + for _, test := range tests { + mempoolConfig := DefaultConfig(tc.DAGParams()) + mempoolConfig.MinimumRelayTransactionFee = test.minimumRelayTransactionFee + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + mempool := New(mempoolConfig, consensusreference.NewConsensusReference(&tcAsConsensusPointer)).(*mempool) + + res := mempool.IsTransactionOutputDust(&test.txOut) + if res != test.isDust { + t.Errorf("Dust test '%s' failed: want %v got %v", + test.name, test.isDust, res) + } + } + }) +} + +func TestCheckTransactionStandardInIsolation(t *testing.T) { + // Create some dummy, but otherwise standard, data for transactions. 
+ prevOutTxID := &externalapi.DomainTransactionID{} + dummyPrevOut := externalapi.DomainOutpoint{TransactionID: *prevOutTxID, Index: 1} + dummySigScript := bytes.Repeat([]byte{0x00}, 65) + dummyTxIn := externalapi.DomainTransactionInput{ + PreviousOutpoint: dummyPrevOut, + SignatureScript: dummySigScript, + Sequence: constants.MaxTxInSequenceNum, + } + addrHash := [32]byte{0x01} + addr, err := util.NewAddressPublicKey(addrHash[:], util.Bech32PrefixSpectreTest) + if err != nil { + t.Fatalf("NewAddressPublicKey: unexpected error: %v", err) + } + dummyScriptPublicKey, err := txscript.PayToAddrScript(addr) + if err != nil { + t.Fatalf("PayToAddrScript: unexpected error: %v", err) + } + dummyTxOut := externalapi.DomainTransactionOutput{ + Value: 100000000, // 1 SPR + ScriptPublicKey: dummyScriptPublicKey, + } + + tests := []struct { + name string + tx *externalapi.DomainTransaction + height uint64 + isStandard bool + code RejectCode + }{ + { + name: "Typical pay-to-pubkey transaction", + tx: &externalapi.DomainTransaction{Version: 0, Inputs: []*externalapi.DomainTransactionInput{&dummyTxIn}, Outputs: []*externalapi.DomainTransactionOutput{&dummyTxOut}}, + height: 300000, + isStandard: true, + }, + { + name: "Transaction version too high", + tx: &externalapi.DomainTransaction{Version: constants.MaxTransactionVersion + 1, Inputs: []*externalapi.DomainTransactionInput{&dummyTxIn}, Outputs: []*externalapi.DomainTransactionOutput{&dummyTxOut}}, + height: 300000, + isStandard: false, + code: RejectNonstandard, + }, + + { + name: "Transaction size is too large", + tx: &externalapi.DomainTransaction{Version: 0, Inputs: []*externalapi.DomainTransactionInput{&dummyTxIn}, Outputs: []*externalapi.DomainTransactionOutput{{ + Value: 0, + ScriptPublicKey: &externalapi.ScriptPublicKey{bytes.Repeat([]byte{0x00}, MaximumStandardTransactionMass+1), 0}, + }}}, + height: 300000, + isStandard: false, + code: RejectNonstandard, + }, + { + name: "Signature script size is too large", + tx: 
&externalapi.DomainTransaction{Version: 0, Inputs: []*externalapi.DomainTransactionInput{{ + PreviousOutpoint: dummyPrevOut, + SignatureScript: bytes.Repeat([]byte{0x00}, maximumStandardSignatureScriptSize+1), + Sequence: constants.MaxTxInSequenceNum, + }}, Outputs: []*externalapi.DomainTransactionOutput{&dummyTxOut}}, + height: 300000, + isStandard: false, + code: RejectNonstandard, + }, + { + name: "Valid but non standard public key script", + tx: &externalapi.DomainTransaction{Version: 0, Inputs: []*externalapi.DomainTransactionInput{&dummyTxIn}, Outputs: []*externalapi.DomainTransactionOutput{{ + Value: 100000000, + ScriptPublicKey: &externalapi.ScriptPublicKey{[]byte{txscript.OpTrue}, 0}, + }}}, + height: 300000, + isStandard: false, + code: RejectNonstandard, + }, + { //Todo : check on ScriptPublicKey type. + name: "Dust output", + tx: &externalapi.DomainTransaction{Version: 0, Inputs: []*externalapi.DomainTransactionInput{&dummyTxIn}, Outputs: []*externalapi.DomainTransactionOutput{{ + Value: 0, + ScriptPublicKey: dummyScriptPublicKey, + }}}, + height: 300000, + isStandard: false, + code: RejectDust, + }, + { + name: "Nulldata transaction", + tx: &externalapi.DomainTransaction{Version: 0, Inputs: []*externalapi.DomainTransactionInput{&dummyTxIn}, Outputs: []*externalapi.DomainTransactionOutput{{ + Value: 0, + ScriptPublicKey: &externalapi.ScriptPublicKey{[]byte{txscript.OpReturn}, 0}, + }}}, + height: 300000, + isStandard: false, + code: RejectNonstandard, + }, + } + + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestCheckTransactionStandardInIsolation") + if err != nil { + t.Fatalf("Error setting up consensus: %+v", err) + } + defer teardown(false) + + for _, test := range tests { + mempoolConfig := DefaultConfig(tc.DAGParams()) + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + 
consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + mempool := New(mempoolConfig, consensusReference).(*mempool) + + // Ensure standardness is as expected. + err := mempool.checkTransactionStandardInIsolation(test.tx) + if err == nil && test.isStandard { + // Test passes since function returned standard for a + // transaction which is intended to be standard. + continue + } + if err == nil && !test.isStandard { + t.Errorf("checkTransactionStandardInIsolation (%s): standard when "+ + "it should not be", test.name) + continue + } + if err != nil && test.isStandard { + t.Errorf("checkTransactionStandardInIsolation (%s): nonstandard "+ + "when it should not be: %v", test.name, err) + continue + } + + // Ensure error type is a TxRuleError inside of a RuleError. + var ruleErr RuleError + if !errors.As(err, &ruleErr) { + t.Errorf("checkTransactionStandardInIsolation (%s): unexpected "+ + "error type - got %T", test.name, err) + continue + } + txRuleErr, ok := ruleErr.Err.(TxRuleError) + if !ok { + t.Errorf("checkTransactionStandardInIsolation (%s): unexpected "+ + "error type - got %T", test.name, ruleErr.Err) + continue + } + + // Ensure the reject code is the expected one. 
+ if txRuleErr.RejectCode != test.code { + t.Errorf("checkTransactionStandardInIsolation (%s): unexpected "+ + "error code - got %v, want %v", test.name, + txRuleErr.RejectCode, test.code) + continue + } + } + }) +} diff --git a/domain/miningmanager/mempool/config.go b/domain/miningmanager/mempool/config.go new file mode 100644 index 0000000..fd0e540 --- /dev/null +++ b/domain/miningmanager/mempool/config.go @@ -0,0 +1,74 @@ +package mempool + +import ( + "time" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/util" + + "github.com/spectre-project/spectred/domain/dagconfig" +) + +const ( + defaultMaximumTransactionCount = 1_000_000 + + defaultTransactionExpireIntervalSeconds uint64 = 60 + defaultTransactionExpireScanIntervalSeconds uint64 = 10 + defaultOrphanExpireIntervalSeconds uint64 = 60 + defaultOrphanExpireScanIntervalSeconds uint64 = 10 + + defaultMaximumOrphanTransactionMass = 100000 + // defaultMaximumOrphanTransactionCount should remain small as long as we have recursion in + // removeOrphans when removeRedeemers = true + defaultMaximumOrphanTransactionCount = 50 + + // defaultMinimumRelayTransactionFee specifies the minimum transaction fee for a transaction to be accepted to + // the mempool and relayed. It is specified in sompi per 1kg (or 1000 grams) of transaction mass. + defaultMinimumRelayTransactionFee = util.Amount(1000) + + // Standard transaction version range might be different from what consensus accepts, therefore + // we define separate values in mempool. + // However, currently there's exactly one transaction version, so mempool accepts the same version + // as consensus. 
+ defaultMinimumStandardTransactionVersion = constants.MaxTransactionVersion + defaultMaximumStandardTransactionVersion = constants.MaxTransactionVersion +) + +// Config represents a mempool configuration +type Config struct { + MaximumTransactionCount uint64 + TransactionExpireIntervalDAAScore uint64 + TransactionExpireScanIntervalDAAScore uint64 + TransactionExpireScanIntervalSeconds uint64 + OrphanExpireIntervalDAAScore uint64 + OrphanExpireScanIntervalDAAScore uint64 + MaximumOrphanTransactionMass uint64 + MaximumOrphanTransactionCount uint64 + AcceptNonStandard bool + MaximumMassPerBlock uint64 + MinimumRelayTransactionFee util.Amount + MinimumStandardTransactionVersion uint16 + MaximumStandardTransactionVersion uint16 +} + +// DefaultConfig returns the default mempool configuration +func DefaultConfig(dagParams *dagconfig.Params) *Config { + targetBlocksPerSecond := time.Second.Seconds() / dagParams.TargetTimePerBlock.Seconds() + + return &Config{ + MaximumTransactionCount: defaultMaximumTransactionCount, + TransactionExpireIntervalDAAScore: uint64(float64(defaultTransactionExpireIntervalSeconds) / targetBlocksPerSecond), + TransactionExpireScanIntervalDAAScore: uint64(float64(defaultTransactionExpireScanIntervalSeconds) / targetBlocksPerSecond), + TransactionExpireScanIntervalSeconds: defaultTransactionExpireScanIntervalSeconds, + OrphanExpireIntervalDAAScore: uint64(float64(defaultOrphanExpireIntervalSeconds) / targetBlocksPerSecond), + OrphanExpireScanIntervalDAAScore: uint64(float64(defaultOrphanExpireScanIntervalSeconds) / targetBlocksPerSecond), + MaximumOrphanTransactionMass: defaultMaximumOrphanTransactionMass, + MaximumOrphanTransactionCount: defaultMaximumOrphanTransactionCount, + AcceptNonStandard: dagParams.RelayNonStdTxs, + MaximumMassPerBlock: dagParams.MaxBlockMass, + MinimumRelayTransactionFee: defaultMinimumRelayTransactionFee, + MinimumStandardTransactionVersion: defaultMinimumStandardTransactionVersion, + MaximumStandardTransactionVersion: 
defaultMaximumStandardTransactionVersion, + } +} diff --git a/domain/miningmanager/mempool/error.go b/domain/miningmanager/mempool/error.go new file mode 100644 index 0000000..827fa90 --- /dev/null +++ b/domain/miningmanager/mempool/error.go @@ -0,0 +1,125 @@ +// Copyright (c) 2014-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package mempool + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// RuleError identifies a rule violation. It is used to indicate that +// processing of a transaction failed due to one of the many validation +// rules. The caller can use type assertions to determine if a failure was +// specifically due to a rule violation and use the Err field to access the +// underlying error, which will be either a TxRuleError or a +// ruleerrors.RuleError. +type RuleError struct { + Err error +} + +// Error satisfies the error interface and prints human-readable errors. +func (e RuleError) Error() string { + if e.Err == nil { + return "" + } + return e.Err.Error() +} + +// Unwrap unwraps the wrapped error +func (e RuleError) Unwrap() error { + return e.Err +} + +// RejectCode represents a numeric value by which a remote peer indicates +// why a message was rejected. +type RejectCode uint8 + +// These constants define the various supported reject codes. +const ( + RejectMalformed RejectCode = 0x01 + RejectInvalid RejectCode = 0x10 + RejectObsolete RejectCode = 0x11 + RejectDuplicate RejectCode = 0x12 + RejectNotRequested RejectCode = 0x13 + RejectNonstandard RejectCode = 0x40 + RejectDust RejectCode = 0x41 + RejectInsufficientFee RejectCode = 0x42 + RejectFinality RejectCode = 0x43 + RejectDifficulty RejectCode = 0x44 + RejectImmatureSpend RejectCode = 0x45 + RejectBadOrphan RejectCode = 0x64 + RejectSpamTx RejectCode = 0x65 +) + +// Map of reject codes back strings for pretty printing. 
+var rejectCodeStrings = map[RejectCode]string{ + RejectMalformed: "REJECT_MALFORMED", + RejectInvalid: "REJECT_INVALID", + RejectObsolete: "REJECT_OBSOLETE", + RejectDuplicate: "REJECT_DUPLICATE", + RejectNonstandard: "REJECT_NON_STANDARD", + RejectDust: "REJECT_DUST", + RejectInsufficientFee: "REJECT_INSUFFICIENT_FEE", + RejectFinality: "REJECT_FINALITY", + RejectDifficulty: "REJECT_DIFFICULTY", + RejectNotRequested: "REJECT_NOT_REQUESTED", + RejectImmatureSpend: "REJECT_IMMATURE_SPEND", + RejectBadOrphan: "REJECT_BAD_ORPHAN", +} + +// String returns the RejectCode in human-readable form. +func (code RejectCode) String() string { + if s, ok := rejectCodeStrings[code]; ok { + return s + } + + return fmt.Sprintf("Unknown RejectCode (%d)", uint8(code)) +} + +// TxRuleError identifies a rule violation. It is used to indicate that +// processing of a transaction failed due to one of the many validation +// rules. The caller can use type assertions to determine if a failure was +// specifically due to a rule violation and access the ErrorCode field to +// ascertain the specific reason for the rule violation. +type TxRuleError struct { + RejectCode RejectCode // The code to send with reject messages + Description string // Human readable description of the issue +} + +// Error satisfies the error interface and prints human-readable errors. +func (e TxRuleError) Error() string { + return e.Description +} + +// transactionRuleError creates an underlying TxRuleError with the given a set of +// arguments and returns a RuleError that encapsulates it. +func transactionRuleError(c RejectCode, desc string) RuleError { + return newRuleError(TxRuleError{RejectCode: c, Description: desc}) +} + +func newRuleError(err error) RuleError { + return RuleError{ + Err: err, + } +} + +// extractRejectCode attempts to return a relevant reject code for a given error +// by examining the error for known types. It will return true if a code +// was successfully extracted. 
+func extractRejectCode(err error) (RejectCode, bool) { + // Pull the underlying error out of a RuleError. + var ruleErr RuleError + if ok := errors.As(err, &ruleErr); ok { + err = ruleErr.Err + } + + var trErr TxRuleError + if errors.As(err, &trErr) { + return trErr.RejectCode, true + } + + return RejectInvalid, false +} diff --git a/domain/miningmanager/mempool/fill_inputs_and_get_missing_parents.go b/domain/miningmanager/mempool/fill_inputs_and_get_missing_parents.go new file mode 100644 index 0000000..5ae939b --- /dev/null +++ b/domain/miningmanager/mempool/fill_inputs_and_get_missing_parents.go @@ -0,0 +1,48 @@ +package mempool + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/miningmanager/mempool/model" +) + +func (mp *mempool) fillInputsAndGetMissingParents(transaction *externalapi.DomainTransaction) ( + parents model.IDToTransactionMap, missingOutpoints []*externalapi.DomainOutpoint, err error) { + + parentsInPool := mp.transactionsPool.getParentTransactionsInPool(transaction) + + fillInputs(transaction, parentsInPool) + + err = mp.consensusReference.Consensus().ValidateTransactionAndPopulateWithConsensusData(transaction) + if err != nil { + errMissingOutpoints := ruleerrors.ErrMissingTxOut{} + if errors.As(err, &errMissingOutpoints) { + return parentsInPool, errMissingOutpoints.MissingOutpoints, nil + } + if errors.Is(err, ruleerrors.ErrImmatureSpend) { + return nil, nil, transactionRuleError( + RejectImmatureSpend, "one of the transaction inputs spends an immature UTXO") + } + if errors.As(err, &ruleerrors.RuleError{}) { + return nil, nil, newRuleError(err) + } + return nil, nil, err + } + + return parentsInPool, nil, nil +} + +func 
fillInputs(transaction *externalapi.DomainTransaction, parentsInPool model.IDToTransactionMap) { + for _, input := range transaction.Inputs { + parent, ok := parentsInPool[input.PreviousOutpoint.TransactionID] + if !ok { + continue + } + relevantOutput := parent.Transaction().Outputs[input.PreviousOutpoint.Index] + input.UTXOEntry = utxo.NewUTXOEntry(relevantOutput.Value, relevantOutput.ScriptPublicKey, + false, constants.UnacceptedDAAScore) + } +} diff --git a/domain/miningmanager/mempool/handle_new_block_transactions.go b/domain/miningmanager/mempool/handle_new_block_transactions.go new file mode 100644 index 0000000..12df4ba --- /dev/null +++ b/domain/miningmanager/mempool/handle_new_block_transactions.go @@ -0,0 +1,62 @@ +package mempool + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +func (mp *mempool) handleNewBlockTransactions(blockTransactions []*externalapi.DomainTransaction) ( + []*externalapi.DomainTransaction, error) { + + // Skip the coinbase transaction + blockTransactions = blockTransactions[transactionhelper.CoinbaseTransactionIndex+1:] + + acceptedOrphans := []*externalapi.DomainTransaction{} + for _, transaction := range blockTransactions { + transactionID := consensushashing.TransactionID(transaction) + err := mp.removeTransaction(transactionID, false) + if err != nil { + return nil, err + } + + err = mp.removeDoubleSpends(transaction) + if err != nil { + return nil, err + } + + err = mp.orphansPool.removeOrphan(transactionID, false) + if err != nil { + return nil, err + } + + acceptedOrphansFromThisTransaction, err := mp.orphansPool.processOrphansAfterAcceptedTransaction(transaction) + if err != nil { + return nil, err + } + + acceptedOrphans = append(acceptedOrphans, acceptedOrphansFromThisTransaction...) 
+ } + err := mp.orphansPool.expireOrphanTransactions() + if err != nil { + return nil, err + } + err = mp.transactionsPool.expireOldTransactions() + if err != nil { + return nil, err + } + + return acceptedOrphans, nil +} + +func (mp *mempool) removeDoubleSpends(transaction *externalapi.DomainTransaction) error { + for _, input := range transaction.Inputs { + if redeemer, ok := mp.mempoolUTXOSet.transactionByPreviousOutpoint[input.PreviousOutpoint]; ok { + err := mp.removeTransaction(redeemer.TransactionID(), true) + if err != nil { + return err + } + } + } + return nil +} diff --git a/domain/miningmanager/mempool/log.go b/domain/miningmanager/mempool/log.go new file mode 100644 index 0000000..ed41869 --- /dev/null +++ b/domain/miningmanager/mempool/log.go @@ -0,0 +1,11 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package mempool + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("TXMP") diff --git a/domain/miningmanager/mempool/mempool.go b/domain/miningmanager/mempool/mempool.go new file mode 100644 index 0000000..6dba974 --- /dev/null +++ b/domain/miningmanager/mempool/mempool.go @@ -0,0 +1,229 @@ +package mempool + +import ( + "sync" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensusreference" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + miningmanagermodel "github.com/spectre-project/spectred/domain/miningmanager/model" +) + +type mempool struct { + mtx sync.RWMutex + + config *Config + consensusReference consensusreference.ConsensusReference + + mempoolUTXOSet *mempoolUTXOSet + transactionsPool 
*transactionsPool + orphansPool *orphansPool +} + +// New constructs a new mempool +func New(config *Config, consensusReference consensusreference.ConsensusReference) miningmanagermodel.Mempool { + mp := &mempool{ + config: config, + consensusReference: consensusReference, + } + + mp.mempoolUTXOSet = newMempoolUTXOSet(mp) + mp.transactionsPool = newTransactionsPool(mp) + mp.orphansPool = newOrphansPool(mp) + + return mp +} + +func (mp *mempool) ValidateAndInsertTransaction(transaction *externalapi.DomainTransaction, isHighPriority bool, allowOrphan bool) ( + acceptedTransactions []*externalapi.DomainTransaction, err error) { + + mp.mtx.Lock() + defer mp.mtx.Unlock() + + return mp.validateAndInsertTransaction(transaction, isHighPriority, allowOrphan) +} + +func (mp *mempool) GetTransaction(transactionID *externalapi.DomainTransactionID, + includeTransactionPool bool, + includeOrphanPool bool) ( + transaction *externalapi.DomainTransaction, + isOrphan bool, + found bool) { + + mp.mtx.RLock() + defer mp.mtx.RUnlock() + + var transactionfound bool + isOrphan = false + + if includeTransactionPool { + transaction, transactionfound = mp.transactionsPool.getTransaction(transactionID, true) + isOrphan = false + } + if !transactionfound && includeOrphanPool { + transaction, transactionfound = mp.orphansPool.getOrphanTransaction(transactionID) + isOrphan = true + } + + return transaction, isOrphan, transactionfound +} + +func (mp *mempool) GetTransactionsByAddresses(includeTransactionPool bool, includeOrphanPool bool) ( + sendingInTransactionPool map[string]*externalapi.DomainTransaction, + receivingInTransactionPool map[string]*externalapi.DomainTransaction, + sendingInOrphanPool map[string]*externalapi.DomainTransaction, + receivingInOrphanPool map[string]*externalapi.DomainTransaction, + err error) { + mp.mtx.RLock() + defer mp.mtx.RUnlock() + + if includeTransactionPool { + sendingInTransactionPool, receivingInTransactionPool, err = 
mp.transactionsPool.getTransactionsByAddresses() + if err != nil { + return nil, nil, nil, nil, err + } + } + + if includeOrphanPool { + sendingInTransactionPool, receivingInOrphanPool, err = mp.orphansPool.getOrphanTransactionsByAddresses() + if err != nil { + return nil, nil, nil, nil, err + } + } + + return sendingInTransactionPool, receivingInTransactionPool, sendingInTransactionPool, receivingInOrphanPool, nil +} + +func (mp *mempool) AllTransactions(includeTransactionPool bool, includeOrphanPool bool) ( + transactionPoolTransactions []*externalapi.DomainTransaction, + orphanPoolTransactions []*externalapi.DomainTransaction) { + + mp.mtx.RLock() + defer mp.mtx.RUnlock() + + if includeTransactionPool { + transactionPoolTransactions = mp.transactionsPool.getAllTransactions() + } + + if includeOrphanPool { + orphanPoolTransactions = mp.orphansPool.getAllOrphanTransactions() + } + + return transactionPoolTransactions, orphanPoolTransactions +} + +func (mp *mempool) TransactionCount(includeTransactionPool bool, includeOrphanPool bool) int { + mp.mtx.RLock() + defer mp.mtx.RUnlock() + + transactionCount := 0 + + if includeOrphanPool { + transactionCount += mp.orphansPool.orphanTransactionCount() + } + if includeTransactionPool { + transactionCount += mp.transactionsPool.transactionCount() + } + + return transactionCount +} + +func (mp *mempool) HandleNewBlockTransactions(transactions []*externalapi.DomainTransaction) ( + acceptedOrphans []*externalapi.DomainTransaction, err error) { + + mp.mtx.Lock() + defer mp.mtx.Unlock() + + return mp.handleNewBlockTransactions(transactions) +} + +func (mp *mempool) BlockCandidateTransactions() []*externalapi.DomainTransaction { + mp.mtx.RLock() + defer mp.mtx.RUnlock() + + readyTxs := mp.transactionsPool.allReadyTransactions() + var candidateTxs []*externalapi.DomainTransaction + var spamTx *externalapi.DomainTransaction + var spamTxNewestUTXODaaScore uint64 + for _, tx := range readyTxs { + if len(tx.Outputs) > len(tx.Inputs) { 
+ hasCoinbaseInput := false + for _, input := range tx.Inputs { + if input.UTXOEntry.IsCoinbase() { + hasCoinbaseInput = true + break + } + } + + numExtraOuts := len(tx.Outputs) - len(tx.Inputs) + if !hasCoinbaseInput && numExtraOuts > 2 && tx.Fee < uint64(numExtraOuts)*constants.SompiPerSpectre { + log.Debugf("Filtered spam tx %s", consensushashing.TransactionID(tx)) + continue + } + + if hasCoinbaseInput || tx.Fee > uint64(numExtraOuts)*constants.SompiPerSpectre { + candidateTxs = append(candidateTxs, tx) + } else { + txNewestUTXODaaScore := tx.Inputs[0].UTXOEntry.BlockDAAScore() + for _, input := range tx.Inputs { + if input.UTXOEntry.BlockDAAScore() > txNewestUTXODaaScore { + txNewestUTXODaaScore = input.UTXOEntry.BlockDAAScore() + } + } + + if spamTx != nil { + if txNewestUTXODaaScore < spamTxNewestUTXODaaScore { + spamTx = tx + spamTxNewestUTXODaaScore = txNewestUTXODaaScore + } + } else { + spamTx = tx + spamTxNewestUTXODaaScore = txNewestUTXODaaScore + } + } + } else { + candidateTxs = append(candidateTxs, tx) + } + } + + if spamTx != nil { + log.Debugf("Adding spam tx candidate %s", consensushashing.TransactionID(spamTx)) + candidateTxs = append(candidateTxs, spamTx) + } + + return candidateTxs +} + +func (mp *mempool) RevalidateHighPriorityTransactions() (validTransactions []*externalapi.DomainTransaction, err error) { + mp.mtx.Lock() + defer mp.mtx.Unlock() + + return mp.revalidateHighPriorityTransactions() +} + +func (mp *mempool) RemoveInvalidTransactions(err *ruleerrors.ErrInvalidTransactionsInNewBlock) error { + mp.mtx.Lock() + defer mp.mtx.Unlock() + + for _, tx := range err.InvalidTransactions { + removeRedeemers := !errors.As(tx.Error, &ruleerrors.ErrMissingTxOut{}) + err := mp.removeTransaction(consensushashing.TransactionID(tx.Transaction), removeRedeemers) + if err != nil { + return err + } + } + + return nil +} + +func (mp *mempool) RemoveTransaction(transactionID *externalapi.DomainTransactionID, removeRedeemers bool) error { + mp.mtx.Lock() 
+ defer mp.mtx.Unlock() + + return mp.removeTransaction(transactionID, removeRedeemers) +} diff --git a/domain/miningmanager/mempool/mempool_utxo_set.go b/domain/miningmanager/mempool/mempool_utxo_set.go new file mode 100644 index 0000000..2d346be --- /dev/null +++ b/domain/miningmanager/mempool/mempool_utxo_set.go @@ -0,0 +1,82 @@ +package mempool + +import ( + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/miningmanager/mempool/model" +) + +type mempoolUTXOSet struct { + mempool *mempool + poolUnspentOutputs model.OutpointToUTXOEntryMap + transactionByPreviousOutpoint model.OutpointToTransactionMap +} + +func newMempoolUTXOSet(mp *mempool) *mempoolUTXOSet { + return &mempoolUTXOSet{ + mempool: mp, + poolUnspentOutputs: model.OutpointToUTXOEntryMap{}, + transactionByPreviousOutpoint: model.OutpointToTransactionMap{}, + } +} + +func (mpus *mempoolUTXOSet) addTransaction(transaction *model.MempoolTransaction) { + outpoint := &externalapi.DomainOutpoint{TransactionID: *transaction.TransactionID()} + + for i, input := range transaction.Transaction().Inputs { + outpoint.Index = uint32(i) + + // Delete the output this input spends, in case it was created by mempool. + // If the outpoint doesn't exist in mpus.poolUnspentOutputs - this means + // it was created in the DAG (a.k.a. in consensus). 
+ delete(mpus.poolUnspentOutputs, *outpoint) + + mpus.transactionByPreviousOutpoint[input.PreviousOutpoint] = transaction + } + + for i, output := range transaction.Transaction().Outputs { + outpoint := externalapi.DomainOutpoint{TransactionID: *transaction.TransactionID(), Index: uint32(i)} + + mpus.poolUnspentOutputs[outpoint] = + utxo.NewUTXOEntry(output.Value, output.ScriptPublicKey, false, constants.UnacceptedDAAScore) + } +} + +func (mpus *mempoolUTXOSet) removeTransaction(transaction *model.MempoolTransaction) { + for _, input := range transaction.Transaction().Inputs { + // If the transaction creating the output spent by this input is in the mempool - restore it's UTXO + if _, ok := mpus.mempool.transactionsPool.getTransaction(&input.PreviousOutpoint.TransactionID, false); ok { + mpus.poolUnspentOutputs[input.PreviousOutpoint] = input.UTXOEntry + } + delete(mpus.transactionByPreviousOutpoint, input.PreviousOutpoint) + } + + outpoint := externalapi.DomainOutpoint{TransactionID: *transaction.TransactionID()} + for i := range transaction.Transaction().Outputs { + outpoint.Index = uint32(i) + + delete(mpus.poolUnspentOutputs, outpoint) + } +} + +func (mpus *mempoolUTXOSet) checkDoubleSpends(transaction *externalapi.DomainTransaction) error { + outpoint := externalapi.DomainOutpoint{TransactionID: *consensushashing.TransactionID(transaction)} + + for i, input := range transaction.Inputs { + outpoint.Index = uint32(i) + + if existingTransaction, exists := mpus.transactionByPreviousOutpoint[input.PreviousOutpoint]; exists { + str := fmt.Sprintf("output %s already spent by transaction %s in the memory pool", + input.PreviousOutpoint, existingTransaction.TransactionID()) + return transactionRuleError(RejectDuplicate, str) + } + } + + return nil +} diff --git a/domain/miningmanager/mempool/model/map_types.go b/domain/miningmanager/mempool/model/map_types.go new file mode 100644 index 0000000..f211c39 --- /dev/null +++ b/domain/miningmanager/mempool/model/map_types.go 
@@ -0,0 +1,20 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// IDToTransactionMap maps transactionID to a MempoolTransaction +type IDToTransactionMap map[externalapi.DomainTransactionID]*MempoolTransaction + +// IDToTransactionsSliceMap maps transactionID to a slice MempoolTransaction +type IDToTransactionsSliceMap map[externalapi.DomainTransactionID][]*MempoolTransaction + +// OutpointToUTXOEntryMap maps an outpoint to a UTXOEntry +type OutpointToUTXOEntryMap map[externalapi.DomainOutpoint]externalapi.UTXOEntry + +// OutpointToTransactionMap maps an outpoint to a MempoolTransaction +type OutpointToTransactionMap map[externalapi.DomainOutpoint]*MempoolTransaction + +// ScriptPublicKeyStringToDomainTransaction maps an outpoint to a DomainTransaction +type ScriptPublicKeyStringToDomainTransaction map[string]*externalapi.DomainTransaction diff --git a/domain/miningmanager/mempool/model/mempool_transaction.go b/domain/miningmanager/mempool/model/mempool_transaction.go new file mode 100644 index 0000000..65882d9 --- /dev/null +++ b/domain/miningmanager/mempool/model/mempool_transaction.go @@ -0,0 +1,59 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// MempoolTransaction represents a transaction inside the main TransactionPool +type MempoolTransaction struct { + transaction *externalapi.DomainTransaction + parentTransactionsInPool IDToTransactionMap + isHighPriority bool + addedAtDAAScore uint64 +} + +// NewMempoolTransaction constructs a new MempoolTransaction +func NewMempoolTransaction( + transaction *externalapi.DomainTransaction, + parentTransactionsInPool IDToTransactionMap, + isHighPriority bool, + addedAtDAAScore uint64, +) *MempoolTransaction { + return &MempoolTransaction{ + transaction: transaction, + parentTransactionsInPool: parentTransactionsInPool, + 
isHighPriority: isHighPriority, + addedAtDAAScore: addedAtDAAScore, + } +} + +// TransactionID returns the ID of this MempoolTransaction +func (mt *MempoolTransaction) TransactionID() *externalapi.DomainTransactionID { + return consensushashing.TransactionID(mt.transaction) +} + +// Transaction return the DomainTransaction associated with this MempoolTransaction: +func (mt *MempoolTransaction) Transaction() *externalapi.DomainTransaction { + return mt.transaction +} + +// ParentTransactionsInPool a list of parent transactions that exist in the mempool, indexed by outpoint +func (mt *MempoolTransaction) ParentTransactionsInPool() IDToTransactionMap { + return mt.parentTransactionsInPool +} + +// RemoveParentTransactionInPool deletes a transaction from the parentTransactionsInPool set +func (mt *MempoolTransaction) RemoveParentTransactionInPool(transactionID *externalapi.DomainTransactionID) { + delete(mt.parentTransactionsInPool, *transactionID) +} + +// IsHighPriority returns whether this MempoolTransaction is a high-priority one +func (mt *MempoolTransaction) IsHighPriority() bool { + return mt.isHighPriority +} + +// AddedAtDAAScore returns the virtual DAA score at which this MempoolTransaction was added to the mempool +func (mt *MempoolTransaction) AddedAtDAAScore() uint64 { + return mt.addedAtDAAScore +} diff --git a/domain/miningmanager/mempool/model/ordered_transactions_by_fee_rate.go b/domain/miningmanager/mempool/model/ordered_transactions_by_fee_rate.go new file mode 100644 index 0000000..ef3d342 --- /dev/null +++ b/domain/miningmanager/mempool/model/ordered_transactions_by_fee_rate.go @@ -0,0 +1,91 @@ +package model + +import ( + "sort" + + "github.com/pkg/errors" +) + +// TransactionsOrderedByFeeRate represents a set of MempoolTransactions ordered by their fee / mass rate +type TransactionsOrderedByFeeRate struct { + slice []*MempoolTransaction +} + +// GetByIndex returns the transaction in the given index +func (tobf *TransactionsOrderedByFeeRate) 
GetByIndex(index int) *MempoolTransaction { + return tobf.slice[index] +} + +// Push inserts a transaction into the set, placing it in the correct place to preserve order +func (tobf *TransactionsOrderedByFeeRate) Push(transaction *MempoolTransaction) error { + index, _, err := tobf.findTransactionIndex(transaction) + if err != nil { + return err + } + + tobf.slice = append(tobf.slice[:index], + append([]*MempoolTransaction{transaction}, tobf.slice[index:]...)...) + + return nil +} + +// ErrTransactionNotFound is returned bt tobf.TransactionsOrderedByFeeRate +var ErrTransactionNotFound = errors.New("Couldn't find transaction in mp.orderedTransactionsByFeeRate") + +// Remove removes the given transaction from the set. +// Returns an error if transaction does not exist in the set, or if the given transaction does not have mass +// and fee filled in. +func (tobf *TransactionsOrderedByFeeRate) Remove(transaction *MempoolTransaction) error { + index, wasFound, err := tobf.findTransactionIndex(transaction) + if err != nil { + return err + } + + if !wasFound { + return errors.Wrapf(ErrTransactionNotFound, + "Couldn't find %s in mp.orderedTransactionsByFeeRate", transaction.TransactionID()) + } + + return tobf.RemoveAtIndex(index) +} + +// RemoveAtIndex removes the transaction at the given index. +// Returns an error in case of out-of-bounds index. +func (tobf *TransactionsOrderedByFeeRate) RemoveAtIndex(index int) error { + if index < 0 || index > len(tobf.slice)-1 { + return errors.Errorf("Index %d is out of bound of this TransactionsOrderedByFeeRate", index) + } + tobf.slice = append(tobf.slice[:index], tobf.slice[index+1:]...) + return nil +} + +// findTransactionIndex finds the given transaction inside the list of transactions ordered by fee rate. +// If the transaction was not found, will return wasFound=false and index=the index at which transaction can be inserted +// while preserving the order. 
+func (tobf *TransactionsOrderedByFeeRate) findTransactionIndex(transaction *MempoolTransaction) (index int, wasFound bool, err error) { + if transaction.Transaction().Fee == 0 || transaction.Transaction().Mass == 0 { + return 0, false, errors.Errorf("findTransactionIndex expects a transaction with " + + "populated fee and mass") + } + txID := transaction.TransactionID() + txFeeRate := float64(transaction.Transaction().Fee) / float64(transaction.Transaction().Mass) + + index = sort.Search(len(tobf.slice), func(i int) bool { + iElement := tobf.slice[i] + elementFeeRate := float64(iElement.Transaction().Fee) / float64(iElement.Transaction().Mass) + if elementFeeRate > txFeeRate { + return true + } + + if elementFeeRate == txFeeRate && txID.LessOrEqual(iElement.TransactionID()) { + return true + } + + return false + }) + + wasFound = index != len(tobf.slice) && // sort.Search returns len(tobf.slice) if nothing was found + tobf.slice[index].TransactionID().Equal(transaction.TransactionID()) + + return index, wasFound, nil +} diff --git a/domain/miningmanager/mempool/model/orphan_transaction.go b/domain/miningmanager/mempool/model/orphan_transaction.go new file mode 100644 index 0000000..b7878fa --- /dev/null +++ b/domain/miningmanager/mempool/model/orphan_transaction.go @@ -0,0 +1,46 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +// OrphanTransaction represents a transaction in the OrphanPool +type OrphanTransaction struct { + transaction *externalapi.DomainTransaction + isHighPriority bool + addedAtDAAScore uint64 +} + +// NewOrphanTransaction constructs a new OrphanTransaction +func NewOrphanTransaction( + transaction *externalapi.DomainTransaction, + isHighPriority bool, + addedAtDAAScore uint64, +) *OrphanTransaction { + return &OrphanTransaction{ + transaction: transaction, + isHighPriority: isHighPriority, + 
 addedAtDAAScore: addedAtDAAScore, + } +} + +// TransactionID returns the ID of this OrphanTransaction +func (ot *OrphanTransaction) TransactionID() *externalapi.DomainTransactionID { + return consensushashing.TransactionID(ot.transaction) +} + +// Transaction returns the DomainTransaction associated with this OrphanTransaction +func (ot *OrphanTransaction) Transaction() *externalapi.DomainTransaction { + return ot.transaction +} + +// IsHighPriority returns whether this OrphanTransaction is a high-priority one +func (ot *OrphanTransaction) IsHighPriority() bool { + return ot.isHighPriority +} + +// AddedAtDAAScore returns the virtual DAA score at which this OrphanTransaction was added to the mempool +func (ot *OrphanTransaction) AddedAtDAAScore() uint64 { + return ot.addedAtDAAScore +} diff --git a/domain/miningmanager/mempool/model/transaction.go b/domain/miningmanager/mempool/model/transaction.go new file mode 100644 index 0000000..30de2be --- /dev/null +++ b/domain/miningmanager/mempool/model/transaction.go @@ -0,0 +1,9 @@ +package model + +import "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + +// Transaction represents a generic transaction either in the mempool's main TransactionPool or OrphanPool +type Transaction interface { + TransactionID() *externalapi.DomainTransactionID + Transaction() *externalapi.DomainTransaction +} diff --git a/domain/miningmanager/mempool/orphan_pool.go b/domain/miningmanager/mempool/orphan_pool.go new file mode 100644 index 0000000..bd57c07 --- /dev/null +++ b/domain/miningmanager/mempool/orphan_pool.go @@ -0,0 +1,375 @@ +package mempool + +import ( + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/pkg/errors" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/miningmanager/mempool/model" +) + +type idToOrphanMap map[externalapi.DomainTransactionID]*model.OrphanTransaction +type previousOutpointToOrphanMap map[externalapi.DomainOutpoint]*model.OrphanTransaction + +type orphansPool struct { + mempool *mempool + allOrphans idToOrphanMap + orphansByPreviousOutpoint previousOutpointToOrphanMap + lastExpireScan uint64 +} + +func newOrphansPool(mp *mempool) *orphansPool { + return &orphansPool{ + mempool: mp, + allOrphans: idToOrphanMap{}, + orphansByPreviousOutpoint: previousOutpointToOrphanMap{}, + lastExpireScan: 0, + } +} + +func (op *orphansPool) maybeAddOrphan(transaction *externalapi.DomainTransaction, isHighPriority bool) error { + if op.mempool.config.MaximumOrphanTransactionCount == 0 { + return nil + } + + err := op.checkOrphanDuplicate(transaction) + if err != nil { + return err + } + + err = op.checkOrphanMass(transaction) + if err != nil { + return err + } + err = op.checkOrphanDoubleSpend(transaction) + if err != nil { + return err + } + + err = op.addOrphan(transaction, isHighPriority) + if err != nil { + return err + } + + err = op.limitOrphanPoolSize() + if err != nil { + return err + } + + return nil +} + +func (op *orphansPool) limitOrphanPoolSize() error { + for uint64(len(op.allOrphans)) > op.mempool.config.MaximumOrphanTransactionCount { + orphanToRemove := op.randomNonHighPriorityOrphan() + if orphanToRemove == nil { // this means all orphans are HighPriority + log.Warnf( + "Number of high-priority transactions in orphanPool (%d) is higher than maximum allowed (%d)", + len(op.allOrphans), + op.mempool.config.MaximumOrphanTransactionCount) + break + } + + // Don't remove redeemers in the case of a random eviction since the evicted transaction is + // not invalid, therefore it's redeemers are as good as any orphan that just arrived. 
+ err := op.removeOrphan(orphanToRemove.TransactionID(), false) + if err != nil { + return err + } + } + return nil +} + +func (op *orphansPool) checkOrphanMass(transaction *externalapi.DomainTransaction) error { + if transaction.Mass > op.mempool.config.MaximumOrphanTransactionMass { + str := fmt.Sprintf("orphan transaction size of %d bytes is "+ + "larger than max allowed size of %d bytes", + transaction.Mass, op.mempool.config.MaximumOrphanTransactionMass) + return transactionRuleError(RejectBadOrphan, str) + } + return nil +} + +func (op *orphansPool) checkOrphanDuplicate(transaction *externalapi.DomainTransaction) error { + if _, ok := op.allOrphans[*consensushashing.TransactionID(transaction)]; ok { + str := fmt.Sprintf("Orphan transacion %s is already in the orphan pool", + consensushashing.TransactionID(transaction)) + return transactionRuleError(RejectDuplicate, str) + } + return nil +} + +func (op *orphansPool) checkOrphanDoubleSpend(transaction *externalapi.DomainTransaction) error { + for _, input := range transaction.Inputs { + if doubleSpendOrphan, ok := op.orphansByPreviousOutpoint[input.PreviousOutpoint]; ok { + str := fmt.Sprintf("Orphan transacion %s is double spending an input from already existing orphan %s", + consensushashing.TransactionID(transaction), doubleSpendOrphan.TransactionID()) + return transactionRuleError(RejectDuplicate, str) + } + } + + return nil +} + +func (op *orphansPool) addOrphan(transaction *externalapi.DomainTransaction, isHighPriority bool) error { + virtualDAAScore, err := op.mempool.consensusReference.Consensus().GetVirtualDAAScore() + if err != nil { + return err + } + orphanTransaction := model.NewOrphanTransaction(transaction, isHighPriority, virtualDAAScore) + + op.allOrphans[*orphanTransaction.TransactionID()] = orphanTransaction + for _, input := range transaction.Inputs { + op.orphansByPreviousOutpoint[input.PreviousOutpoint] = orphanTransaction + } + + return nil +} + +func (op *orphansPool) 
processOrphansAfterAcceptedTransaction(acceptedTransaction *externalapi.DomainTransaction) ( + acceptedOrphans []*externalapi.DomainTransaction, err error) { + + acceptedOrphans = []*externalapi.DomainTransaction{} + queue := []*externalapi.DomainTransaction{acceptedTransaction} + + for len(queue) > 0 { + var current *externalapi.DomainTransaction + current, queue = queue[0], queue[1:] + + currentTransactionID := consensushashing.TransactionID(current) + outpoint := externalapi.DomainOutpoint{TransactionID: *currentTransactionID} + for i, output := range current.Outputs { + outpoint.Index = uint32(i) + orphan, ok := op.orphansByPreviousOutpoint[outpoint] + if !ok { + continue + } + for _, input := range orphan.Transaction().Inputs { + if input.PreviousOutpoint.Equal(&outpoint) && input.UTXOEntry == nil { + input.UTXOEntry = utxo.NewUTXOEntry(output.Value, output.ScriptPublicKey, false, + constants.UnacceptedDAAScore) + break + } + } + if countUnfilledInputs(orphan) == 0 { + err := op.unorphanTransaction(orphan) + if err != nil { + if errors.As(err, &RuleError{}) { + log.Infof("Failed to unorphan transaction %s due to rule error: %s", + currentTransactionID, err) + continue + } + return nil, err + } + acceptedOrphans = append(acceptedOrphans, orphan.Transaction().Clone()) //these pointers leave the mempool, hence the clone + } + } + } + + return acceptedOrphans, nil +} + +func countUnfilledInputs(orphan *model.OrphanTransaction) int { + unfilledInputs := 0 + for _, input := range orphan.Transaction().Inputs { + if input.UTXOEntry == nil { + unfilledInputs++ + } + } + return unfilledInputs +} + +func (op *orphansPool) unorphanTransaction(transaction *model.OrphanTransaction) error { + err := op.removeOrphan(transaction.TransactionID(), false) + if err != nil { + return err + } + + err = op.mempool.consensusReference.Consensus().ValidateTransactionAndPopulateWithConsensusData(transaction.Transaction()) + if err != nil { + if errors.Is(err, ruleerrors.ErrImmatureSpend) 
{ + return transactionRuleError(RejectImmatureSpend, "one of the transaction inputs spends an immature UTXO") + } + if errors.As(err, &ruleerrors.RuleError{}) { + return newRuleError(err) + } + return err + } + + err = op.mempool.validateTransactionInContext(transaction.Transaction()) + if err != nil { + return err + } + + virtualDAAScore, err := op.mempool.consensusReference.Consensus().GetVirtualDAAScore() + if err != nil { + return err + } + mempoolTransaction := model.NewMempoolTransaction( + transaction.Transaction(), + op.mempool.transactionsPool.getParentTransactionsInPool(transaction.Transaction()), + false, + virtualDAAScore, + ) + err = op.mempool.transactionsPool.addMempoolTransaction(mempoolTransaction) + if err != nil { + return err + } + + return nil +} + +func (op *orphansPool) removeOrphan(orphanTransactionID *externalapi.DomainTransactionID, removeRedeemers bool) error { + orphanTransaction, ok := op.allOrphans[*orphanTransactionID] + if !ok { + return nil + } + + delete(op.allOrphans, *orphanTransactionID) + + for i, input := range orphanTransaction.Transaction().Inputs { + if _, ok := op.orphansByPreviousOutpoint[input.PreviousOutpoint]; !ok { + return errors.Errorf("Input No. 
%d of %s (%s) doesn't exist in orphansByPreviousOutpoint", + i, orphanTransactionID, input.PreviousOutpoint) + } + delete(op.orphansByPreviousOutpoint, input.PreviousOutpoint) + } + + if removeRedeemers { + err := op.removeRedeemersOf(orphanTransaction) + if err != nil { + return err + } + } + + return nil +} + +func (op *orphansPool) removeRedeemersOf(transaction model.Transaction) error { + outpoint := externalapi.DomainOutpoint{TransactionID: *transaction.TransactionID()} + for i := range transaction.Transaction().Outputs { + outpoint.Index = uint32(i) + if orphan, ok := op.orphansByPreviousOutpoint[outpoint]; ok { + // Recursive call is bound by size of orphan pool (which is very small) + err := op.removeOrphan(orphan.TransactionID(), true) + if err != nil { + return err + } + } + } + return nil +} + +func (op *orphansPool) expireOrphanTransactions() error { + virtualDAAScore, err := op.mempool.consensusReference.Consensus().GetVirtualDAAScore() + if err != nil { + return err + } + + if virtualDAAScore-op.lastExpireScan < op.mempool.config.OrphanExpireScanIntervalDAAScore { + return nil + } + + for _, orphanTransaction := range op.allOrphans { + // Never expire high priority transactions + if orphanTransaction.IsHighPriority() { + continue + } + + // Remove all transactions whose addedAtDAAScore is older then TransactionExpireIntervalDAAScore + if virtualDAAScore-orphanTransaction.AddedAtDAAScore() > op.mempool.config.OrphanExpireIntervalDAAScore { + err = op.removeOrphan(orphanTransaction.TransactionID(), false) + if err != nil { + return err + } + } + } + + op.lastExpireScan = virtualDAAScore + return nil +} + +func (op *orphansPool) updateOrphansAfterTransactionRemoved( + removedTransaction *model.MempoolTransaction, removeRedeemers bool) error { + + if removeRedeemers { + return op.removeRedeemersOf(removedTransaction) + } + + outpoint := externalapi.DomainOutpoint{TransactionID: *removedTransaction.TransactionID()} + for i := range 
removedTransaction.Transaction().Outputs { + outpoint.Index = uint32(i) + if orphan, ok := op.orphansByPreviousOutpoint[outpoint]; ok { + for _, input := range orphan.Transaction().Inputs { + if input.PreviousOutpoint.TransactionID.Equal(removedTransaction.TransactionID()) { + input.UTXOEntry = nil + } + } + } + } + + return nil +} + +func (op *orphansPool) randomNonHighPriorityOrphan() *model.OrphanTransaction { + for _, orphan := range op.allOrphans { + if !orphan.IsHighPriority() { + return orphan + } + } + + return nil +} + +func (op *orphansPool) getOrphanTransaction(transactionID *externalapi.DomainTransactionID) (*externalapi.DomainTransaction, bool) { + if orphanTransaction, ok := op.allOrphans[*transactionID]; ok { + return orphanTransaction.Transaction().Clone(), true //this pointer leaves the mempool, hence we clone. + } + return nil, false +} + +func (op *orphansPool) getOrphanTransactionsByAddresses() ( + sending model.ScriptPublicKeyStringToDomainTransaction, + receiving model.ScriptPublicKeyStringToDomainTransaction, + err error) { + sending = make(model.ScriptPublicKeyStringToDomainTransaction) + receiving = make(model.ScriptPublicKeyStringToDomainTransaction, op.orphanTransactionCount()) + var transaction *externalapi.DomainTransaction + for _, mempoolTransaction := range op.allOrphans { + transaction = mempoolTransaction.Transaction().Clone() //these pointers leave the mempool, hence we clone. + for _, input := range transaction.Inputs { + if input.UTXOEntry == nil { //this is not a bug, but a valid state of orphan transactions with missing outpoints. 
+ continue + } + + sending[input.UTXOEntry.ScriptPublicKey().String()] = transaction + } + for _, output := range transaction.Outputs { + receiving[output.ScriptPublicKey.String()] = transaction + + } + } + return sending, receiving, nil +} + +func (op *orphansPool) getAllOrphanTransactions() []*externalapi.DomainTransaction { + allOrphanTransactions := make([]*externalapi.DomainTransaction, len(op.allOrphans)) + i := 0 + for _, mempoolTransaction := range op.allOrphans { + allOrphanTransactions[i] = mempoolTransaction.Transaction().Clone() //these pointers leave the mempool, hence we clone. + i++ + } + return allOrphanTransactions +} + +func (op *orphansPool) orphanTransactionCount() int { + return len(op.allOrphans) +} diff --git a/domain/miningmanager/mempool/remove_transaction.go b/domain/miningmanager/mempool/remove_transaction.go new file mode 100644 index 0000000..d935423 --- /dev/null +++ b/domain/miningmanager/mempool/remove_transaction.go @@ -0,0 +1,59 @@ +package mempool + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/miningmanager/mempool/model" +) + +func (mp *mempool) removeTransaction(transactionID *externalapi.DomainTransactionID, removeRedeemers bool) error { + if _, ok := mp.orphansPool.allOrphans[*transactionID]; ok { + return mp.orphansPool.removeOrphan(transactionID, true) + } + + mempoolTransaction, ok := mp.transactionsPool.allTransactions[*transactionID] + if !ok { + return nil + } + + transactionsToRemove := []*model.MempoolTransaction{mempoolTransaction} + redeemers := mp.transactionsPool.getRedeemers(mempoolTransaction) + if removeRedeemers { + transactionsToRemove = append(transactionsToRemove, redeemers...) 
+ } else { + for _, redeemer := range redeemers { + redeemer.RemoveParentTransactionInPool(transactionID) + } + } + + for _, transactionToRemove := range transactionsToRemove { + err := mp.removeTransactionFromSets(transactionToRemove, removeRedeemers) + if err != nil { + return err + } + } + + if removeRedeemers { + err := mp.orphansPool.removeRedeemersOf(mempoolTransaction) + if err != nil { + return err + } + } + + return nil +} + +func (mp *mempool) removeTransactionFromSets(mempoolTransaction *model.MempoolTransaction, removeRedeemers bool) error { + mp.mempoolUTXOSet.removeTransaction(mempoolTransaction) + + err := mp.transactionsPool.removeTransaction(mempoolTransaction) + if err != nil { + return err + } + + err = mp.orphansPool.updateOrphansAfterTransactionRemoved(mempoolTransaction, removeRedeemers) + if err != nil { + return err + } + + return nil +} diff --git a/domain/miningmanager/mempool/revalidate_high_priority_transactions.go b/domain/miningmanager/mempool/revalidate_high_priority_transactions.go new file mode 100644 index 0000000..ad9db3e --- /dev/null +++ b/domain/miningmanager/mempool/revalidate_high_priority_transactions.go @@ -0,0 +1,115 @@ +package mempool + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/miningmanager/mempool/model" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func (mp *mempool) revalidateHighPriorityTransactions() ([]*externalapi.DomainTransaction, error) { + type txNode struct { + children map[externalapi.DomainTransactionID]struct{} + nonVisitedParents int + tx *model.MempoolTransaction + visited bool + } + + onEnd := logger.LogAndMeasureExecutionTime(log, "revalidateHighPriorityTransactions") + defer onEnd() + + // We revalidate transactions in topological order in case there are dependencies between them + + // Naturally transactions point to their dependencies, but since we want to start processing the dependencies 
+ // first, we build the opposite DAG. We initially fill `queue` with transactions with no dependencies. + txDAG := make(map[externalapi.DomainTransactionID]*txNode) + + maybeAddNode := func(txID externalapi.DomainTransactionID) *txNode { + if node, ok := txDAG[txID]; ok { + return node + } + + node := &txNode{ + children: make(map[externalapi.DomainTransactionID]struct{}), + nonVisitedParents: 0, + tx: mp.transactionsPool.highPriorityTransactions[txID], + } + txDAG[txID] = node + return node + } + + queue := make([]*txNode, 0, len(mp.transactionsPool.highPriorityTransactions)) + for id, transaction := range mp.transactionsPool.highPriorityTransactions { + node := maybeAddNode(id) + + parents := make(map[externalapi.DomainTransactionID]struct{}) + for _, input := range transaction.Transaction().Inputs { + if _, ok := mp.transactionsPool.highPriorityTransactions[input.PreviousOutpoint.TransactionID]; !ok { + continue + } + + parents[input.PreviousOutpoint.TransactionID] = struct{}{} // To avoid duplicate parents, we first add it to a set and then count it + maybeAddNode(input.PreviousOutpoint.TransactionID).children[id] = struct{}{} + } + node.nonVisitedParents = len(parents) + + if node.nonVisitedParents == 0 { + queue = append(queue, node) + } + } + + validTransactions := []*externalapi.DomainTransaction{} + // Now we iterate the DAG in topological order using BFS + for len(queue) > 0 { + var node *txNode + node, queue = queue[0], queue[1:] + + if node.visited { + continue + } + node.visited = true + + transaction := node.tx + isValid, err := mp.revalidateTransaction(transaction) + if err != nil { + return nil, err + } + for child := range node.children { + childNode := txDAG[child] + childNode.nonVisitedParents-- + if childNode.nonVisitedParents == 0 { + queue = append(queue, txDAG[child]) + } + } + + if isValid { + validTransactions = append(validTransactions, transaction.Transaction().Clone()) + } + } + + return validTransactions, nil +} + +func (mp *mempool) 
revalidateTransaction(transaction *model.MempoolTransaction) (isValid bool, err error) { + clearInputs(transaction) + + _, missingParents, err := mp.fillInputsAndGetMissingParents(transaction.Transaction()) + if err != nil { + return false, err + } + if len(missingParents) > 0 { + log.Debugf("Removing transaction %s, it failed revalidation", transaction.TransactionID()) + err := mp.removeTransaction(transaction.TransactionID(), false) + if err != nil { + return false, err + } + return false, nil + } + + return true, nil +} + +func clearInputs(transaction *model.MempoolTransaction) { + for _, input := range transaction.Transaction().Inputs { + input.UTXOEntry = nil + } +} diff --git a/domain/miningmanager/mempool/transactions_pool.go b/domain/miningmanager/mempool/transactions_pool.go new file mode 100644 index 0000000..c423ed2 --- /dev/null +++ b/domain/miningmanager/mempool/transactions_pool.go @@ -0,0 +1,252 @@ +package mempool + +import ( + "time" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/miningmanager/mempool/model" +) + +type transactionsPool struct { + mempool *mempool + allTransactions model.IDToTransactionMap + highPriorityTransactions model.IDToTransactionMap + chainedTransactionsByParentID model.IDToTransactionsSliceMap + transactionsOrderedByFeeRate model.TransactionsOrderedByFeeRate + lastExpireScanDAAScore uint64 + lastExpireScanTime time.Time +} + +func newTransactionsPool(mp *mempool) *transactionsPool { + return &transactionsPool{ + mempool: mp, + allTransactions: model.IDToTransactionMap{}, + highPriorityTransactions: model.IDToTransactionMap{}, + chainedTransactionsByParentID: model.IDToTransactionsSliceMap{}, + transactionsOrderedByFeeRate: model.TransactionsOrderedByFeeRate{}, + lastExpireScanDAAScore: 0, + lastExpireScanTime: time.Now(), + } +} + +func (tp 
*transactionsPool) addTransaction(transaction *externalapi.DomainTransaction, + parentTransactionsInPool model.IDToTransactionMap, isHighPriority bool) (*model.MempoolTransaction, error) { + + virtualDAAScore, err := tp.mempool.consensusReference.Consensus().GetVirtualDAAScore() + if err != nil { + return nil, err + } + + mempoolTransaction := model.NewMempoolTransaction( + transaction, parentTransactionsInPool, isHighPriority, virtualDAAScore) + + err = tp.addMempoolTransaction(mempoolTransaction) + if err != nil { + return nil, err + } + + return mempoolTransaction, nil +} + +func (tp *transactionsPool) addMempoolTransaction(transaction *model.MempoolTransaction) error { + tp.allTransactions[*transaction.TransactionID()] = transaction + + for _, parentTransactionInPool := range transaction.ParentTransactionsInPool() { + parentTransactionID := *parentTransactionInPool.TransactionID() + if tp.chainedTransactionsByParentID[parentTransactionID] == nil { + tp.chainedTransactionsByParentID[parentTransactionID] = []*model.MempoolTransaction{} + } + tp.chainedTransactionsByParentID[parentTransactionID] = + append(tp.chainedTransactionsByParentID[parentTransactionID], transaction) + } + + tp.mempool.mempoolUTXOSet.addTransaction(transaction) + + err := tp.transactionsOrderedByFeeRate.Push(transaction) + if err != nil { + return err + } + + if transaction.IsHighPriority() { + tp.highPriorityTransactions[*transaction.TransactionID()] = transaction + } + + return nil +} + +func (tp *transactionsPool) removeTransaction(transaction *model.MempoolTransaction) error { + delete(tp.allTransactions, *transaction.TransactionID()) + + err := tp.transactionsOrderedByFeeRate.Remove(transaction) + if err != nil { + if errors.Is(err, model.ErrTransactionNotFound) { + log.Errorf("Transaction %s not found in tp.transactionsOrderedByFeeRate. 
This should never happen but sometime does", + transaction.TransactionID()) + } else { + return err + } + } + + delete(tp.highPriorityTransactions, *transaction.TransactionID()) + + delete(tp.chainedTransactionsByParentID, *transaction.TransactionID()) + + return nil +} + +func (tp *transactionsPool) expireOldTransactions() error { + virtualDAAScore, err := tp.mempool.consensusReference.Consensus().GetVirtualDAAScore() + if err != nil { + return err + } + + if virtualDAAScore-tp.lastExpireScanDAAScore < tp.mempool.config.TransactionExpireScanIntervalDAAScore || + time.Since(tp.lastExpireScanTime).Seconds() < float64(tp.mempool.config.TransactionExpireScanIntervalSeconds) { + return nil + } + + for _, mempoolTransaction := range tp.allTransactions { + // Never expire high priority transactions + if mempoolTransaction.IsHighPriority() { + continue + } + + // Remove all transactions whose addedAtDAAScore is older then TransactionExpireIntervalDAAScore + daaScoreSinceAdded := virtualDAAScore - mempoolTransaction.AddedAtDAAScore() + if daaScoreSinceAdded > tp.mempool.config.TransactionExpireIntervalDAAScore { + log.Debugf("Removing transaction %s, because it expired. DAAScore moved by %d, expire interval: %d", + mempoolTransaction.TransactionID(), daaScoreSinceAdded, tp.mempool.config.TransactionExpireIntervalDAAScore) + err = tp.mempool.removeTransaction(mempoolTransaction.TransactionID(), true) + if err != nil { + return err + } + } + } + + tp.lastExpireScanDAAScore = virtualDAAScore + tp.lastExpireScanTime = time.Now() + return nil +} + +func (tp *transactionsPool) allReadyTransactions() []*externalapi.DomainTransaction { + result := []*externalapi.DomainTransaction{} + + for _, mempoolTransaction := range tp.allTransactions { + if len(mempoolTransaction.ParentTransactionsInPool()) == 0 { + result = append(result, mempoolTransaction.Transaction().Clone()) //this pointer leaves the mempool, and gets its utxo set to nil, hence we clone. 
+ } + } + + return result +} + +func (tp *transactionsPool) getParentTransactionsInPool( + transaction *externalapi.DomainTransaction) model.IDToTransactionMap { + + parentsTransactionsInPool := model.IDToTransactionMap{} + + for _, input := range transaction.Inputs { + if transaction, ok := tp.allTransactions[input.PreviousOutpoint.TransactionID]; ok { + parentsTransactionsInPool[*transaction.TransactionID()] = transaction + } + } + + return parentsTransactionsInPool +} + +func (tp *transactionsPool) getRedeemers(transaction *model.MempoolTransaction) []*model.MempoolTransaction { + stack := []*model.MempoolTransaction{transaction} + redeemers := []*model.MempoolTransaction{} + for len(stack) > 0 { + var current *model.MempoolTransaction + last := len(stack) - 1 + current, stack = stack[last], stack[:last] + + for _, redeemerTransaction := range tp.chainedTransactionsByParentID[*current.TransactionID()] { + stack = append(stack, redeemerTransaction) + redeemers = append(redeemers, redeemerTransaction) + } + } + return redeemers +} + +func (tp *transactionsPool) limitTransactionCount() error { + currentIndex := 0 + + for uint64(len(tp.allTransactions)) > tp.mempool.config.MaximumTransactionCount { + var transactionToRemove *model.MempoolTransaction + for { + transactionToRemove = tp.transactionsOrderedByFeeRate.GetByIndex(currentIndex) + if !transactionToRemove.IsHighPriority() { + break + } + currentIndex++ + if currentIndex >= len(tp.allTransactions) { + log.Warnf( + "Number of high-priority transactions in mempool (%d) is higher than maximum allowed (%d)", + len(tp.allTransactions), tp.mempool.config.MaximumTransactionCount) + return nil + } + } + + log.Debugf("Removing transaction %s, because mempoolTransaction count (%d) exceeded the limit (%d)", + transactionToRemove.TransactionID(), len(tp.allTransactions), tp.mempool.config.MaximumTransactionCount) + err := tp.mempool.removeTransaction(transactionToRemove.TransactionID(), true) + if err != nil { + return 
err + } + if currentIndex >= len(tp.allTransactions) { + break + } + } + return nil +} + +func (tp *transactionsPool) getTransaction(transactionID *externalapi.DomainTransactionID, clone bool) (*externalapi.DomainTransaction, bool) { + if mempoolTransaction, ok := tp.allTransactions[*transactionID]; ok { + if clone { + return mempoolTransaction.Transaction().Clone(), true //this pointer leaves the mempool, hence we clone. + } + return mempoolTransaction.Transaction(), true + } + return nil, false +} + +func (tp *transactionsPool) getTransactionsByAddresses() ( + sending model.ScriptPublicKeyStringToDomainTransaction, + receiving model.ScriptPublicKeyStringToDomainTransaction, + err error) { + sending = make(model.ScriptPublicKeyStringToDomainTransaction, tp.transactionCount()) + receiving = make(model.ScriptPublicKeyStringToDomainTransaction, tp.transactionCount()) + var transaction *externalapi.DomainTransaction + for _, mempoolTransaction := range tp.allTransactions { + transaction = mempoolTransaction.Transaction().Clone() //this pointer leaves the mempool, hence we clone. + for _, input := range transaction.Inputs { + if input.UTXOEntry == nil { + return nil, nil, errors.Errorf("Mempool transaction %s is missing an UTXOEntry. This should be fixed, and not happen", consensushashing.TransactionID(transaction).String()) + } + sending[input.UTXOEntry.ScriptPublicKey().String()] = transaction + } + for _, output := range transaction.Outputs { + receiving[output.ScriptPublicKey.String()] = transaction + } + } + return sending, receiving, nil +} + +func (tp *transactionsPool) getAllTransactions() []*externalapi.DomainTransaction { + allTransactions := make([]*externalapi.DomainTransaction, len(tp.allTransactions)) + i := 0 + for _, mempoolTransaction := range tp.allTransactions { + allTransactions[i] = mempoolTransaction.Transaction().Clone() //this pointer leaves the mempool, hence we clone. 
+ i++ + } + return allTransactions +} + +func (tp *transactionsPool) transactionCount() int { + return len(tp.allTransactions) +} diff --git a/domain/miningmanager/mempool/validate_and_insert_transaction.go b/domain/miningmanager/mempool/validate_and_insert_transaction.go new file mode 100644 index 0000000..28bc571 --- /dev/null +++ b/domain/miningmanager/mempool/validate_and_insert_transaction.go @@ -0,0 +1,65 @@ +package mempool + +import ( + "fmt" + + "github.com/spectre-project/spectred/infrastructure/logger" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +func (mp *mempool) validateAndInsertTransaction(transaction *externalapi.DomainTransaction, isHighPriority bool, + allowOrphan bool) (acceptedTransactions []*externalapi.DomainTransaction, err error) { + + onEnd := logger.LogAndMeasureExecutionTime(log, + fmt.Sprintf("validateAndInsertTransaction %s", consensushashing.TransactionID(transaction))) + defer onEnd() + + // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. 
+ mp.consensusReference.Consensus().PopulateMass(transaction) + + err = mp.validateTransactionPreUTXOEntry(transaction) + if err != nil { + return nil, err + } + + parentsInPool, missingOutpoints, err := mp.fillInputsAndGetMissingParents(transaction) + if err != nil { + return nil, err + } + + if len(missingOutpoints) > 0 { + if !allowOrphan { + str := fmt.Sprintf("Transaction %s is an orphan, where allowOrphan = false", + consensushashing.TransactionID(transaction)) + return nil, transactionRuleError(RejectBadOrphan, str) + } + + return nil, mp.orphansPool.maybeAddOrphan(transaction, isHighPriority) + } + + err = mp.validateTransactionInContext(transaction) + if err != nil { + return nil, err + } + + mempoolTransaction, err := mp.transactionsPool.addTransaction(transaction, parentsInPool, isHighPriority) + if err != nil { + return nil, err + } + + acceptedOrphans, err := mp.orphansPool.processOrphansAfterAcceptedTransaction(mempoolTransaction.Transaction()) + if err != nil { + return nil, err + } + + acceptedTransactions = append([]*externalapi.DomainTransaction{transaction.Clone()}, acceptedOrphans...) //these pointer leave the mempool, hence we clone. 
+ + err = mp.transactionsPool.limitTransactionCount() + if err != nil { + return nil, err + } + + return acceptedTransactions, nil +} diff --git a/domain/miningmanager/mempool/validate_transaction.go b/domain/miningmanager/mempool/validate_transaction.go new file mode 100644 index 0000000..867c172 --- /dev/null +++ b/domain/miningmanager/mempool/validate_transaction.go @@ -0,0 +1,80 @@ +package mempool + +import ( + "fmt" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +func (mp *mempool) validateTransactionPreUTXOEntry(transaction *externalapi.DomainTransaction) error { + err := mp.validateTransactionInIsolation(transaction) + if err != nil { + return err + } + + if err := mp.mempoolUTXOSet.checkDoubleSpends(transaction); err != nil { + return err + } + return nil +} + +func (mp *mempool) validateTransactionInIsolation(transaction *externalapi.DomainTransaction) error { + transactionID := consensushashing.TransactionID(transaction) + if _, ok := mp.transactionsPool.allTransactions[*transactionID]; ok { + return transactionRuleError(RejectDuplicate, + fmt.Sprintf("transaction %s is already in the mempool", transactionID)) + } + + if !mp.config.AcceptNonStandard { + if err := mp.checkTransactionStandardInIsolation(transaction); err != nil { + // Attempt to extract a reject code from the error so + // it can be retained. When not possible, fall back to + // a non standard error. 
+ rejectCode, found := extractRejectCode(err) + if !found { + rejectCode = RejectNonstandard + } + str := fmt.Sprintf("transaction %s is not standard: %s", transactionID, err) + return transactionRuleError(rejectCode, str) + } + } + + return nil +} + +func (mp *mempool) validateTransactionInContext(transaction *externalapi.DomainTransaction) error { + hasCoinbaseInput := false + for _, input := range transaction.Inputs { + if input.UTXOEntry.IsCoinbase() { + hasCoinbaseInput = true + break + } + } + + numExtraOuts := len(transaction.Outputs) - len(transaction.Inputs) + if !hasCoinbaseInput && numExtraOuts > 2 && transaction.Fee < uint64(numExtraOuts)*constants.SompiPerSpectre { + log.Warnf("Rejected spam tx %s from mempool (%d outputs)", consensushashing.TransactionID(transaction), len(transaction.Outputs)) + return transactionRuleError(RejectSpamTx, fmt.Sprintf("Rejected spam tx %s from mempool", consensushashing.TransactionID(transaction))) + } + + if !mp.config.AcceptNonStandard { + err := mp.checkTransactionStandardInContext(transaction) + if err != nil { + // Attempt to extract a reject code from the error so + // it can be retained. When not possible, fall back to + // a non standard error. 
+				rejectCode, found := extractRejectCode(err)
+				if !found {
+					rejectCode = RejectNonstandard
+				}
+				str := fmt.Sprintf("transaction inputs %s are not standard: %s",
+					consensushashing.TransactionID(transaction), err)
+				return transactionRuleError(rejectCode, str)
+		}
+	}
+
+	return nil
+}
diff --git a/domain/miningmanager/miningmanager.go b/domain/miningmanager/miningmanager.go
new file mode 100644
index 0000000..5cbfa8a
--- /dev/null
+++ b/domain/miningmanager/miningmanager.go
@@ -0,0 +1,156 @@
+package miningmanager
+
+import (
+	"sync"
+	"time"
+
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+	"github.com/spectre-project/spectred/domain/consensusreference"
+	miningmanagermodel "github.com/spectre-project/spectred/domain/miningmanager/model"
+)
+
+// MiningManager creates block templates for mining as well as maintaining
+// known transactions that have not yet been added to any block
+type MiningManager interface {
+	GetBlockTemplate(coinbaseData *externalapi.DomainCoinbaseData) (block *externalapi.DomainBlock, isNearlySynced bool, err error)
+	ClearBlockTemplate()
+	GetBlockTemplateBuilder() miningmanagermodel.BlockTemplateBuilder
+	GetTransaction(transactionID *externalapi.DomainTransactionID, includeTransactionPool bool, includeOrphanPool bool) (
+		transactionPoolTransaction *externalapi.DomainTransaction,
+		isOrphan bool,
+		found bool)
+	GetTransactionsByAddresses(includeTransactionPool bool, includeOrphanPool bool) (
+		sendingInTransactionPool map[string]*externalapi.DomainTransaction,
+		receivingInTransactionPool map[string]*externalapi.DomainTransaction,
+		sendingInOrphanPool map[string]*externalapi.DomainTransaction,
+		receivingInOrphanPool map[string]*externalapi.DomainTransaction,
+		err error)
+	AllTransactions(includeTransactionPool bool, includeOrphanPool bool) (
+		transactionPoolTransactions []*externalapi.DomainTransaction,
+		orphanPoolTransactions []*externalapi.DomainTransaction)
+	TransactionCount(includeTransactionPool bool, includeOrphanPool bool) int
+	HandleNewBlockTransactions(txs []*externalapi.DomainTransaction) ([]*externalapi.DomainTransaction, error)
+	ValidateAndInsertTransaction(transaction *externalapi.DomainTransaction, isHighPriority bool, allowOrphan bool) (
+		acceptedTransactions []*externalapi.DomainTransaction, err error)
+	RevalidateHighPriorityTransactions() (validTransactions []*externalapi.DomainTransaction, err error)
+}
+
+type miningManager struct {
+	consensusReference   consensusreference.ConsensusReference
+	mempool              miningmanagermodel.Mempool
+	blockTemplateBuilder miningmanagermodel.BlockTemplateBuilder
+	cachedBlockTemplate  *externalapi.DomainBlockTemplate
+	cachingTime          time.Time
+	cacheLock            *sync.Mutex
+}
+
+// GetBlockTemplate obtains a block template for a miner to consume
+func (mm *miningManager) GetBlockTemplate(coinbaseData *externalapi.DomainCoinbaseData) (block *externalapi.DomainBlock, isNearlySynced bool, err error) {
+	mm.cacheLock.Lock()
+	immutableCachedTemplate := mm.getImmutableCachedTemplate()
+	// We first try and use a cached template
+	if immutableCachedTemplate != nil {
+		mm.cacheLock.Unlock()
+		if immutableCachedTemplate.CoinbaseData.Equal(coinbaseData) {
+			return immutableCachedTemplate.Block, immutableCachedTemplate.IsNearlySynced, nil
+		}
+		// Coinbase data is new -- make the minimum changes required
+		// Note we first clone the block template since it is modified by the call
+		modifiedBlockTemplate, err := mm.blockTemplateBuilder.ModifyBlockTemplate(coinbaseData, immutableCachedTemplate.Clone())
+		if err != nil {
+			return nil, false, err
+		}
+
+		// No point in updating cache since we have no reason to believe this coinbase will be used more
+		// than the previous one, and we want to maintain the original template caching time
+		return modifiedBlockTemplate.Block, modifiedBlockTemplate.IsNearlySynced, nil
+	}
+	defer mm.cacheLock.Unlock()
+	// No relevant cache, build a template
+	blockTemplate, err := mm.blockTemplateBuilder.BuildBlockTemplate(coinbaseData)
+	if err != nil {
+		return nil, false, err
+	}
+	// Cache the built template
+	mm.setImmutableCachedTemplate(blockTemplate)
+	return blockTemplate.Block, blockTemplate.IsNearlySynced, nil
+}
+
+func (mm *miningManager) ClearBlockTemplate() {
+	mm.cacheLock.Lock()
+	mm.cachingTime = time.Time{}
+	mm.cachedBlockTemplate = nil
+	mm.cacheLock.Unlock()
+}
+
+func (mm *miningManager) getImmutableCachedTemplate() *externalapi.DomainBlockTemplate {
+	if time.Since(mm.cachingTime) > time.Second {
+		// No point in cache optimizations if queries are more than a second apart -- we prefer rechecking the mempool.
+		// Full explanation: On the one hand this is a sub-millisecond optimization, so there is no harm in doing the full block building
+		// every ~1 second. Additionally, we would like to refresh the mempool access even if virtual info was
+		// unmodified for a while. All in all, caching for max 1 second is a good compromise.
+ mm.cachedBlockTemplate = nil + } + return mm.cachedBlockTemplate +} + +func (mm *miningManager) setImmutableCachedTemplate(blockTemplate *externalapi.DomainBlockTemplate) { + mm.cachingTime = time.Now() + mm.cachedBlockTemplate = blockTemplate +} + +func (mm *miningManager) GetBlockTemplateBuilder() miningmanagermodel.BlockTemplateBuilder { + return mm.blockTemplateBuilder +} + +// HandleNewBlockTransactions handles the transactions for a new block that was just added to the DAG +func (mm *miningManager) HandleNewBlockTransactions(txs []*externalapi.DomainTransaction) ([]*externalapi.DomainTransaction, error) { + return mm.mempool.HandleNewBlockTransactions(txs) +} + +// ValidateAndInsertTransaction validates the given transaction, and +// adds it to the set of known transactions that have not yet been +// added to any block +func (mm *miningManager) ValidateAndInsertTransaction(transaction *externalapi.DomainTransaction, + isHighPriority bool, allowOrphan bool) (acceptedTransactions []*externalapi.DomainTransaction, err error) { + + return mm.mempool.ValidateAndInsertTransaction(transaction, isHighPriority, allowOrphan) +} + +func (mm *miningManager) GetTransaction( + transactionID *externalapi.DomainTransactionID, + includeTransactionPool bool, + includeOrphanPool bool) ( + transactionPoolTransaction *externalapi.DomainTransaction, + isOrphan bool, + found bool) { + + return mm.mempool.GetTransaction(transactionID, includeTransactionPool, includeOrphanPool) +} + +func (mm *miningManager) AllTransactions(includeTransactionPool bool, includeOrphanPool bool) ( + transactionPoolTransactions []*externalapi.DomainTransaction, + orphanPoolTransactions []*externalapi.DomainTransaction) { + + return mm.mempool.AllTransactions(includeTransactionPool, includeOrphanPool) +} + +func (mm *miningManager) GetTransactionsByAddresses(includeTransactionPool bool, includeOrphanPool bool) ( + sendingInTransactionPool map[string]*externalapi.DomainTransaction, + 
receivingInTransactionPool map[string]*externalapi.DomainTransaction, + sendingInOrphanPool map[string]*externalapi.DomainTransaction, + receivingInOrphanPool map[string]*externalapi.DomainTransaction, + err error) { + + return mm.mempool.GetTransactionsByAddresses(includeTransactionPool, includeOrphanPool) +} + +func (mm *miningManager) TransactionCount(includeTransactionPool bool, includeOrphanPool bool) int { + return mm.mempool.TransactionCount(includeTransactionPool, includeOrphanPool) +} + +func (mm *miningManager) RevalidateHighPriorityTransactions() ( + validTransactions []*externalapi.DomainTransaction, err error) { + + return mm.mempool.RevalidateHighPriorityTransactions() +} diff --git a/domain/miningmanager/miningmanager_test.go b/domain/miningmanager/miningmanager_test.go new file mode 100644 index 0000000..bd104c9 --- /dev/null +++ b/domain/miningmanager/miningmanager_test.go @@ -0,0 +1,1056 @@ +package miningmanager_test + +import ( + "reflect" + "strings" + "testing" + + "github.com/spectre-project/spectred/cmd/spectrewallet/libspectrewallet" + "github.com/spectre-project/spectred/domain/consensusreference" + "github.com/spectre-project/spectred/domain/miningmanager/model" + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/version" + + "github.com/spectre-project/spectred/domain/miningmanager/mempool" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + 
"github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/miningmanager" +) + +// TestValidateAndInsertTransaction verifies that valid transactions were successfully inserted into the mempool. +func TestValidateAndInsertTransaction(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertTransaction") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + transactionsToInsert := make([]*externalapi.DomainTransaction, 10) + for i := range transactionsToInsert { + transactionsToInsert[i] = createTransactionWithUTXOEntry(t, i, 0) + _, err = miningManager.ValidateAndInsertTransaction(transactionsToInsert[i], false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + } + // The UTXOEntry was filled manually for those transactions, so the transactions won't be considered orphans. + // Therefore, all the transactions expected to be contained in the mempool. 
+ transactionsFromMempool, _ := miningManager.AllTransactions(true, false) + if len(transactionsToInsert) != len(transactionsFromMempool) { + t.Fatalf("Wrong number of transactions in mempool: expected: %d, got: %d", len(transactionsToInsert), len(transactionsFromMempool)) + } + for _, transactionToInsert := range transactionsToInsert { + if !contains(transactionToInsert, transactionsFromMempool) { + t.Fatalf("Missing transaction %s in the mempool", consensushashing.TransactionID(transactionToInsert)) + } + } + + // The parent's transaction was inserted by consensus(AddBlock), and we want to verify that + // the transaction is not considered an orphan and inserted into the mempool. + transactionNotAnOrphan, err := createChildAndParentTxsAndAddParentToConsensus(tc) + if err != nil { + t.Fatalf("Error in createParentAndChildrenTransaction: %v", err) + } + _, err = miningManager.ValidateAndInsertTransaction(transactionNotAnOrphan, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + transactionsFromMempool, _ = miningManager.AllTransactions(true, false) + if !contains(transactionNotAnOrphan, transactionsFromMempool) { + t.Fatalf("Missing transaction %s in the mempool", consensushashing.TransactionID(transactionNotAnOrphan)) + } + }) +} + +func TestImmatureSpend(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertTransaction") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, 
mempool.DefaultConfig(&consensusConfig.Params)) + tx := createTransactionWithUTXOEntry(t, 0, consensusConfig.GenesisBlock.Header.DAAScore()) + _, err = miningManager.ValidateAndInsertTransaction(tx, false, false) + txRuleError := &mempool.TxRuleError{} + if !errors.As(err, txRuleError) || txRuleError.RejectCode != mempool.RejectImmatureSpend { + t.Fatalf("Unexpected error %+v", err) + } + transactionsFromMempool, _ := miningManager.AllTransactions(true, false) + if contains(tx, transactionsFromMempool) { + t.Fatalf("Mempool contains a transaction with immature coinbase") + } + }) +} + +// TestInsertDoubleTransactionsToMempool verifies that an attempt to insert a transaction +// more than once into the mempool will result in raising an appropriate error. +func TestInsertDoubleTransactionsToMempool(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestInsertDoubleTransactionsToMempool") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + transaction := createTransactionWithUTXOEntry(t, 0, 0) + _, err = miningManager.ValidateAndInsertTransaction(transaction, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + _, err = miningManager.ValidateAndInsertTransaction(transaction, false, true) + if err == nil || !strings.Contains(err.Error(), "is already in the mempool") { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + }) 
+} + +// TestDoubleSpendInMempool verifies that an attempt to insert a transaction double-spending +// another transaction already in the mempool will result in raising an appropriate error. +func TestDoubleSpendInMempool(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDoubleSpendInMempool") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + transaction, err := createChildAndParentTxsAndAddParentToConsensus(tc) + if err != nil { + t.Fatalf("Error creating transaction: %+v", err) + } + _, err = miningManager.ValidateAndInsertTransaction(transaction, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + + doubleSpendingTransaction := transaction.Clone() + doubleSpendingTransaction.ID = nil + doubleSpendingTransaction.Outputs[0].Value-- // do some minor change so that txID is different + + _, err = miningManager.ValidateAndInsertTransaction(doubleSpendingTransaction, false, true) + if err == nil || !strings.Contains(err.Error(), "already spent by transaction") { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + }) +} + +// TestHandleNewBlockTransactions verifies that all the transactions in the block were successfully removed from the mempool. 
+func TestHandleNewBlockTransactions(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestHandleNewBlockTransactions") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + transactionsToInsert := make([]*externalapi.DomainTransaction, 10) + for i := range transactionsToInsert { + transaction := createTransactionWithUTXOEntry(t, i, 0) + transactionsToInsert[i] = transaction + _, err = miningManager.ValidateAndInsertTransaction(transaction, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + } + + const partialLength = 3 + blockWithFirstPartOfTheTransactions := append([]*externalapi.DomainTransaction{nil}, transactionsToInsert[0:partialLength]...) + blockWithRestOfTheTransactions := append([]*externalapi.DomainTransaction{nil}, transactionsToInsert[partialLength:]...) 
+ _, err = miningManager.HandleNewBlockTransactions(blockWithFirstPartOfTheTransactions) + if err != nil { + t.Fatalf("HandleNewBlockTransactions: %v", err) + } + mempoolTransactions, _ := miningManager.AllTransactions(true, false) + for _, removedTransaction := range blockWithFirstPartOfTheTransactions { + if contains(removedTransaction, mempoolTransactions) { + t.Fatalf("This transaction shouldnt be in mempool: %s", consensushashing.TransactionID(removedTransaction)) + } + } + + // There are no chained/double-spends transactions, and hence it is expected that all the other + // transactions, will still be included in the mempool. + mempoolTransactions, _ = miningManager.AllTransactions(true, false) + for _, transaction := range blockWithRestOfTheTransactions[transactionhelper.CoinbaseTransactionIndex+1:] { + if !contains(transaction, mempoolTransactions) { + t.Fatalf("This transaction %s should be in mempool.", consensushashing.TransactionID(transaction)) + } + } + // Handle all the other transactions. + _, err = miningManager.HandleNewBlockTransactions(blockWithRestOfTheTransactions) + if err != nil { + t.Fatalf("HandleNewBlockTransactions: %v", err) + } + mempoolTransactions, _ = miningManager.AllTransactions(true, false) + if len(mempoolTransactions) != 0 { + blockIDs := domainBlocksToBlockIds(mempoolTransactions) + t.Fatalf("The mempool contains unexpected transactions: %s", blockIDs) + } + }) +} + +func domainBlocksToBlockIds(blocks []*externalapi.DomainTransaction) []*externalapi.DomainTransactionID { + blockIDs := make([]*externalapi.DomainTransactionID, len(blocks)) + for i := range blockIDs { + blockIDs[i] = consensushashing.TransactionID(blocks[i]) + } + return blockIDs +} + +// TestDoubleSpendWithBlock verifies that any transactions which are now double spends as a result of the block's new transactions +// will be removed from the mempool. 
+func TestDoubleSpendWithBlock(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDoubleSpendWithBlock") + if err != nil { + t.Fatalf("Failed setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + transactionInTheMempool := createTransactionWithUTXOEntry(t, 0, 0) + _, err = miningManager.ValidateAndInsertTransaction(transactionInTheMempool, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + doubleSpendTransactionInTheBlock := createTransactionWithUTXOEntry(t, 0, 0) + doubleSpendTransactionInTheBlock.Inputs[0].PreviousOutpoint = transactionInTheMempool.Inputs[0].PreviousOutpoint + blockTransactions := []*externalapi.DomainTransaction{nil, doubleSpendTransactionInTheBlock} + _, err = miningManager.HandleNewBlockTransactions(blockTransactions) + if err != nil { + t.Fatalf("HandleNewBlockTransactions: %v", err) + } + mempoolTransactions, _ := miningManager.AllTransactions(true, false) + if contains(transactionInTheMempool, mempoolTransactions) { + t.Fatalf("The transaction %s, shouldn't be in the mempool, since at least one "+ + "output was already spent.", consensushashing.TransactionID(transactionInTheMempool)) + } + }) +} + +// TestOrphanTransactions verifies that a transaction could be a part of a new block template, only if it's not an orphan. 
+func TestOrphanTransactions(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestOrphanTransactions") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + // Before each parent transaction, We will add two blocks by consensus in order to fund the parent transactions. + parentTransactions, childTransactions, err := createArraysOfParentAndChildrenTransactions(tc) + if err != nil { + t.Fatalf("Error in createArraysOfParentAndChildrenTransactions: %v", err) + } + for _, orphanTransaction := range childTransactions { + _, err = miningManager.ValidateAndInsertTransaction(orphanTransaction, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + } + transactionsMempool, _ := miningManager.AllTransactions(true, false) + for _, transaction := range transactionsMempool { + if contains(transaction, childTransactions) { + t.Fatalf("Error: an orphan transaction is exist in the mempool") + } + } + + block, _, err := miningManager.GetBlockTemplate(&externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + ExtraData: nil}) + if err != nil { + t.Fatalf("Failed get a block template: %v", err) + } + for _, transactionFromBlock := range block.Transactions[1:] { + for _, orphanTransaction := range childTransactions { + if consensushashing.TransactionID(transactionFromBlock) == 
consensushashing.TransactionID(orphanTransaction) { + t.Fatalf("Tranasaction with unknown parents is exist in a block that was built from GetTemplate option.") + } + } + } + tips, err := tc.Tips() + if err != nil { + t.Fatalf("Tips: %v.", err) + } + blockParentsTransactionsHash, _, err := tc.AddBlock(tips, nil, parentTransactions) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + _, _, err = tc.AddBlock([]*externalapi.DomainHash{blockParentsTransactionsHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + blockParentsTransactions, _, err := tc.GetBlock(blockParentsTransactionsHash) + if err != nil { + t.Fatalf("GetBlock: %v", err) + } + _, err = miningManager.HandleNewBlockTransactions(blockParentsTransactions.Transactions) + if err != nil { + t.Fatalf("HandleNewBlockTransactions: %+v", err) + } + transactionsMempool, _ = miningManager.AllTransactions(true, false) + if len(transactionsMempool) != len(childTransactions) { + t.Fatalf("Expected %d transactions in the mempool but got %d", len(childTransactions), len(transactionsMempool)) + } + + for _, transaction := range transactionsMempool { + if !contains(transaction, childTransactions) { + t.Fatalf("Error: the transaction %s, should be in the mempool since its not "+ + "oprhan anymore.", consensushashing.TransactionID(transaction)) + } + } + block, _, err = miningManager.GetBlockTemplate(&externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + ExtraData: nil}) + if err != nil { + t.Fatalf("GetBlockTemplate: %v", err) + } + for _, transactionFromBlock := range block.Transactions[1:] { + isContained := false + for _, childTransaction := range childTransactions { + if *consensushashing.TransactionID(transactionFromBlock) == *consensushashing.TransactionID(childTransaction) { + isContained = true + break + } + } + if !isContained { + t.Fatalf("Error: Unknown Transaction %s in a block.", 
consensushashing.TransactionID(transactionFromBlock)) + } + } + }) +} + +func TestHighPriorityTransactions(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestDoubleSpendWithBlock") + if err != nil { + t.Fatalf("Failed setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params) + mempoolConfig.MaximumTransactionCount = 1 + mempoolConfig.MaximumOrphanTransactionCount = 1 + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempoolConfig) + + // Create 3 pairs of transaction parent-and-child pairs: 1 low priority and 2 high priority + lowPriorityParentTransaction, lowPriorityChildTransaction, err := createParentAndChildrenTransactions(tc) + if err != nil { + t.Fatalf("error creating low-priority transaction pair: %+v", err) + } + firstHighPriorityParentTransaction, firstHighPriorityChildTransaction, err := createParentAndChildrenTransactions(tc) + if err != nil { + t.Fatalf("error creating first high-priority transaction pair: %+v", err) + } + secondHighPriorityParentTransaction, secondHighPriorityChildTransaction, err := createParentAndChildrenTransactions(tc) + if err != nil { + t.Fatalf("error creating second high-priority transaction pair: %+v", err) + } + + // Submit all the children, make sure the 2 highPriority ones remain in the orphan pool + _, err = miningManager.ValidateAndInsertTransaction(lowPriorityChildTransaction, false, true) + if err != nil { + t.Fatalf("error submitting low-priority transaction: %+v", err) + } + _, err = 
miningManager.ValidateAndInsertTransaction(firstHighPriorityChildTransaction, true, true) + if err != nil { + t.Fatalf("error submitting first high-priority transaction: %+v", err) + } + _, err = miningManager.ValidateAndInsertTransaction(secondHighPriorityChildTransaction, true, true) + if err != nil { + t.Fatalf("error submitting second high-priority transaction: %+v", err) + } + // There's no API to check what stayed in the orphan pool, but we'll find it out when we begin to unorphan + + // Submit all the parents. + // Low priority transaction will only accept the parent, since the child was evicted from orphanPool + lowPriorityAcceptedTransactions, err := + miningManager.ValidateAndInsertTransaction(lowPriorityParentTransaction, false, true) + if err != nil { + t.Fatalf("error submitting low-priority transaction: %+v", err) + } + expectedLowPriorityAcceptedTransactions := []*externalapi.DomainTransaction{lowPriorityParentTransaction} + if !reflect.DeepEqual(lowPriorityAcceptedTransactions, expectedLowPriorityAcceptedTransactions) { + t.Errorf("Expected only lowPriorityParent (%v) to be in lowPriorityAcceptedTransactions, but got %v", + consensushashing.TransactionIDs(expectedLowPriorityAcceptedTransactions), + consensushashing.TransactionIDs(lowPriorityAcceptedTransactions)) + } + + // Both high priority transactions should accept parent and child + + // Insert firstHighPriorityParentTransaction + firstHighPriorityAcceptedTransactions, err := + miningManager.ValidateAndInsertTransaction(firstHighPriorityParentTransaction, true, true) + if err != nil { + t.Fatalf("error submitting first high-priority transaction: %+v", err) + } + expectedFirstHighPriorityAcceptedTransactions := + []*externalapi.DomainTransaction{firstHighPriorityParentTransaction, firstHighPriorityChildTransaction} + if !reflect.DeepEqual(firstHighPriorityAcceptedTransactions, expectedFirstHighPriorityAcceptedTransactions) { + t.Errorf( + "Expected both firstHighPriority transaction (%v) to be 
in firstHighPriorityAcceptedTransactions, but got %v", + consensushashing.TransactionIDs(firstHighPriorityAcceptedTransactions), + consensushashing.TransactionIDs(expectedFirstHighPriorityAcceptedTransactions)) + } + // Insert secondHighPriorityParentTransaction + secondHighPriorityAcceptedTransactions, err := + miningManager.ValidateAndInsertTransaction(secondHighPriorityParentTransaction, true, true) + if err != nil { + t.Fatalf("error submitting second high-priority transaction: %+v", err) + } + expectedSecondHighPriorityAcceptedTransactions := + []*externalapi.DomainTransaction{secondHighPriorityParentTransaction, secondHighPriorityChildTransaction} + if !reflect.DeepEqual(secondHighPriorityAcceptedTransactions, expectedSecondHighPriorityAcceptedTransactions) { + t.Errorf( + "Expected both secondHighPriority transaction (%v) to be in secondHighPriorityAcceptedTransactions, but got %v", + consensushashing.TransactionIDs(secondHighPriorityAcceptedTransactions), + consensushashing.TransactionIDs(expectedSecondHighPriorityAcceptedTransactions)) + } + }) +} + +func TestRevalidateHighPriorityTransactions(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestRevalidateHighPriorityTransactions") + if err != nil { + t.Fatalf("Failed setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params) + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempoolConfig) + + // Create two valid transactions that double-spend each other (childTransaction1, 
childTransaction2) + parentTransaction, childTransaction1, err := createParentAndChildrenTransactions(tc) + if err != nil { + t.Fatalf("Error creating parentTransaction and childTransaction1: %+v", err) + } + tips, err := tc.Tips() + if err != nil { + t.Fatalf("Error getting tips: %+v", err) + } + + fundingBlock, _, err := tc.AddBlock(tips, nil, []*externalapi.DomainTransaction{parentTransaction}) + if err != nil { + t.Fatalf("Error getting function block: %+v", err) + } + + childTransaction2 := childTransaction1.Clone() + childTransaction2.Outputs[0].Value-- // decrement value to change id + + // Mine 1 block confirming childTransaction1 and 2 blocks confirming childTransaction2, so that + // childTransaction2 is accepted + tip1, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock}, nil, + []*externalapi.DomainTransaction{childTransaction1}) + if err != nil { + t.Fatalf("Error adding tip1: %+v", err) + } + tip2, _, err := tc.AddBlock([]*externalapi.DomainHash{fundingBlock}, nil, + []*externalapi.DomainTransaction{childTransaction2}) + if err != nil { + t.Fatalf("Error adding tip2: %+v", err) + } + _, _, err = tc.AddBlock([]*externalapi.DomainHash{tip2}, nil, nil) + if err != nil { + t.Fatalf("Error mining on top of tip2: %+v", err) + } + + // Add to the mempool a transaction that spends childTransaction2 (as high priority) + spendingTransaction, err := testutils.CreateTransaction(childTransaction2, 1000) + if err != nil { + t.Fatalf("Error creating spendingTransaction: %+v", err) + } + _, err = miningManager.ValidateAndInsertTransaction(spendingTransaction, true, false) + if err != nil { + t.Fatalf("Error inserting spendingTransaction: %+v", err) + } + + // Revalidate, to make sure spendingTransaction is still valid + validTransactions, err := miningManager.RevalidateHighPriorityTransactions() + if err != nil { + t.Fatalf("Error from first RevalidateHighPriorityTransactions: %+v", err) + } + if len(validTransactions) != 1 || 
!validTransactions[0].Equal(spendingTransaction) { + t.Fatalf("Expected to have spendingTransaction as only validTransaction returned from "+ + "RevalidateHighPriorityTransactions, but got %v instead", validTransactions) + } + + // Mine 2 more blocks on top of tip1, to re-org out childTransaction1, thus making spendingTransaction invalid + for i := 0; i < 2; i++ { + tip1, _, err = tc.AddBlock([]*externalapi.DomainHash{tip1}, nil, nil) + if err != nil { + t.Fatalf("Error mining on top of tip1: %+v", err) + } + } + + // Make sure spendingTransaction is still in mempool + mempoolTransactions, _ := miningManager.AllTransactions(true, false) + if len(mempoolTransactions) != 1 || !mempoolTransactions[0].Equal(spendingTransaction) { + t.Fatalf("Expected to have spendingTransaction as only validTransaction returned from "+ + "RevalidateHighPriorityTransactions, but got %v instead", validTransactions) + } + + // Revalidate again, this time validTransactions should be empty + validTransactions, err = miningManager.RevalidateHighPriorityTransactions() + if err != nil { + t.Fatalf("Error from first RevalidateHighPriorityTransactions: %+v", err) + } + if len(validTransactions) != 0 { + t.Fatalf("Expected to have empty validTransactions, but got %v instead", validTransactions) + } + // And also AllTransactions should be empty as well + mempoolTransactions, _ = miningManager.AllTransactions(true, false) + if len(mempoolTransactions) != 0 { + t.Fatalf("Expected to have empty allTransactions, but got %v instead", mempoolTransactions) + } + }) +} + +func TestRevalidateHighPriorityTransactionsWithChain(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestRevalidateHighPriorityTransactions") + if err != nil { + t.Fatalf("Failed setting up TestConsensus: %+v", err) + } + defer 
teardown(false) + + miningFactory := miningmanager.NewFactory() + mempoolConfig := mempool.DefaultConfig(&consensusConfig.Params) + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempoolConfig) + + const chainSize = 10 + chain, err := createTxChain(tc, chainSize) + if err != nil { + t.Fatal(err) + } + + _, err = miningManager.ValidateAndInsertTransaction(chain[0], true, false) + if err != nil { + t.Fatal(err) + } + + blockHash, _, err := tc.AddBlockOnTips(nil, []*externalapi.DomainTransaction{chain[0].Clone()}) + if err != nil { + t.Fatal(err) + } + + block, _, err := tc.GetBlock(blockHash) + if err != nil { + t.Fatal(err) + } + + _, err = miningManager.HandleNewBlockTransactions(block.Transactions) + if err != nil { + t.Fatal(err) + } + + for _, transaction := range chain[1:] { + _, err = miningManager.ValidateAndInsertTransaction(transaction, true, false) + if err != nil { + t.Fatal(err) + } + } + + _, _, err = tc.AddBlockOnTips(nil, []*externalapi.DomainTransaction{chain[1].Clone()}) + if err != nil { + t.Fatal(err) + } + + revalidated, err := miningManager.RevalidateHighPriorityTransactions() + if err != nil { + t.Fatal(err) + } + + if len(revalidated) != chainSize-2 { + t.Fatalf("expected %d transactions to revalidate but instead only %d revalidated", chainSize-2, len(revalidated)) + } + }) +} + +// TestModifyBlockTemplate verifies that modifying a block template changes coinbase data correctly. 
+func TestModifyBlockTemplate(t *testing.T) { + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + consensusConfig.BlockCoinbaseMaturity = 0 + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(consensusConfig, "TestModifyBlockTemplate") + if err != nil { + t.Fatalf("Error setting up TestConsensus: %+v", err) + } + defer teardown(false) + + miningFactory := miningmanager.NewFactory() + tcAsConsensus := tc.(externalapi.Consensus) + tcAsConsensusPointer := &tcAsConsensus + consensusReference := consensusreference.NewConsensusReference(&tcAsConsensusPointer) + miningManager := miningFactory.NewMiningManager(consensusReference, &consensusConfig.Params, mempool.DefaultConfig(&consensusConfig.Params)) + + // Create some complex transactions. Logic taken from TestOrphanTransactions + + // Before each parent transaction, We will add two blocks by consensus in order to fund the parent transactions. + parentTransactions, childTransactions, err := createArraysOfParentAndChildrenTransactions(tc) + if err != nil { + t.Fatalf("Error in createArraysOfParentAndChildrenTransactions: %v", err) + } + for _, orphanTransaction := range childTransactions { + _, err = miningManager.ValidateAndInsertTransaction(orphanTransaction, false, true) + if err != nil { + t.Fatalf("ValidateAndInsertTransaction: %v", err) + } + } + transactionsMempool, _ := miningManager.AllTransactions(true, false) + for _, transaction := range transactionsMempool { + if contains(transaction, childTransactions) { + t.Fatalf("Error: an orphan transaction is exist in the mempool") + } + } + + emptyCoinbaseData := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + ExtraData: nil} + block, _, err := miningManager.GetBlockTemplate(emptyCoinbaseData) + if err != nil { + t.Fatalf("Failed get a block template: %v", err) + } + + for _, transactionFromBlock := range block.Transactions[1:] { + for _, 
orphanTransaction := range childTransactions { + if consensushashing.TransactionID(transactionFromBlock) == consensushashing.TransactionID(orphanTransaction) { + t.Fatalf("Tranasaction with unknown parents is exist in a block that was built from GetTemplate option.") + } + } + } + + // Run the purpose of this test, compare modified block templates + sweepCompareModifiedTemplateToBuilt(t, consensusConfig, miningManager.GetBlockTemplateBuilder()) + + // Create some more complex blocks and transactions. Logic taken from TestOrphanTransactions + tips, err := tc.Tips() + if err != nil { + t.Fatalf("Tips: %v.", err) + } + blockParentsTransactionsHash, _, err := tc.AddBlock(tips, nil, parentTransactions) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + _, _, err = tc.AddBlock([]*externalapi.DomainHash{blockParentsTransactionsHash}, nil, nil) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + blockParentsTransactions, _, err := tc.GetBlock(blockParentsTransactionsHash) + if err != nil { + t.Fatalf("GetBlock: %v", err) + } + _, err = miningManager.HandleNewBlockTransactions(blockParentsTransactions.Transactions) + if err != nil { + t.Fatalf("HandleNewBlockTransactions: %+v", err) + } + transactionsMempool, _ = miningManager.AllTransactions(true, false) + if len(transactionsMempool) != len(childTransactions) { + t.Fatalf("Expected %d transactions in the mempool but got %d", len(childTransactions), len(transactionsMempool)) + } + + for _, transaction := range transactionsMempool { + if !contains(transaction, childTransactions) { + t.Fatalf("Error: the transaction %s, should be in the mempool since its not "+ + "oprhan anymore.", consensushashing.TransactionID(transaction)) + } + } + block, _, err = miningManager.GetBlockTemplate(emptyCoinbaseData) + if err != nil { + t.Fatalf("GetBlockTemplate: %v", err) + } + + for _, transactionFromBlock := range block.Transactions[1:] { + isContained := false + for _, childTransaction := range childTransactions { + if 
*consensushashing.TransactionID(transactionFromBlock) == *consensushashing.TransactionID(childTransaction) { + isContained = true + break + } + } + if !isContained { + t.Fatalf("Error: Unknown Transaction %s in a block.", consensushashing.TransactionID(transactionFromBlock)) + } + } + + // Run the purpose of this test, compare modified block templates + sweepCompareModifiedTemplateToBuilt(t, consensusConfig, miningManager.GetBlockTemplateBuilder()) + + // Create a real coinbase to use + coinbaseUsual, err := generateNewCoinbase(consensusConfig.Prefix, opUsual) + if err != nil { + t.Fatalf("Generate coinbase: %v.", err) + } + var emptyTransactions []*externalapi.DomainTransaction + + // Create interesting DAG structures and rerun the template comparisons + tips, err = tc.Tips() + if err != nil { + t.Fatalf("Tips: %v.", err) + } + // Create a fork + _, _, err = tc.AddBlock(tips[:1], coinbaseUsual, emptyTransactions) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + chainTip, _, err := tc.AddBlock(tips[:1], coinbaseUsual, emptyTransactions) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + + sweepCompareModifiedTemplateToBuilt(t, consensusConfig, miningManager.GetBlockTemplateBuilder()) + + // Create some blue blocks + for i := externalapi.KType(0); i < consensusConfig.K-2; i++ { + chainTip, _, err = tc.AddBlock([]*externalapi.DomainHash{chainTip}, coinbaseUsual, emptyTransactions) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + } + + sweepCompareModifiedTemplateToBuilt(t, consensusConfig, miningManager.GetBlockTemplateBuilder()) + + // Mine more such that we have a merged red + for i := externalapi.KType(0); i < consensusConfig.K; i++ { + chainTip, _, err = tc.AddBlock([]*externalapi.DomainHash{chainTip}, coinbaseUsual, emptyTransactions) + if err != nil { + t.Fatalf("AddBlock: %v", err) + } + } + blockTemplate, err := miningManager.GetBlockTemplateBuilder().BuildBlockTemplate(emptyCoinbaseData) + if err != nil { + t.Fatalf("BuildBlockTemplate: 
%v", err) + } + if !blockTemplate.CoinbaseHasRedReward { + t.Fatalf("Expected block template to have red reward") + } + + sweepCompareModifiedTemplateToBuilt(t, consensusConfig, miningManager.GetBlockTemplateBuilder()) + }) +} + +func sweepCompareModifiedTemplateToBuilt( + t *testing.T, consensusConfig *consensus.Config, builder model.BlockTemplateBuilder) { + for i := 0; i < 4; i++ { + // Run a few times to get more randomness + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opUsual, opUsual) + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opECDSA, opECDSA) + } + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opTrue, opUsual) + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opUsual, opTrue) + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opECDSA, opUsual) + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opUsual, opECDSA) + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opEmpty, opUsual) + compareModifiedTemplateToBuilt(t, consensusConfig, builder, opUsual, opEmpty) +} + +type opType uint8 + +const ( + opUsual opType = iota + opECDSA + opTrue + opEmpty +) + +func compareModifiedTemplateToBuilt( + t *testing.T, consensusConfig *consensus.Config, builder model.BlockTemplateBuilder, + firstCoinbaseOp, secondCoinbaseOp opType) { + coinbase1, err := generateNewCoinbase(consensusConfig.Params.Prefix, firstCoinbaseOp) + if err != nil { + t.Fatalf("Failed to generate new coinbase: %v", err) + } + coinbase2, err := generateNewCoinbase(consensusConfig.Params.Prefix, secondCoinbaseOp) + if err != nil { + t.Fatalf("Failed to generate new coinbase: %v", err) + } + + // Build a fresh template for coinbase2 as a reference + expectedTemplate, err := builder.BuildBlockTemplate(coinbase2) + if err != nil { + t.Fatalf("Failed to build block template: %v", err) + } + // Modify to coinbase1 + modifiedTemplate, err := builder.ModifyBlockTemplate(coinbase1, expectedTemplate.Clone()) + if err != 
nil { + t.Fatalf("Failed to modify block template: %v", err) + } + // And modify back to coinbase2 + modifiedTemplate, err = builder.ModifyBlockTemplate(coinbase2, modifiedTemplate.Clone()) + if err != nil { + t.Fatalf("Failed to modify block template: %v", err) + } + + // Make sure timestamps are equal before comparing the hash + mutableHeader := modifiedTemplate.Block.Header.ToMutable() + mutableHeader.SetTimeInMilliseconds(expectedTemplate.Block.Header.TimeInMilliseconds()) + modifiedTemplate.Block.Header = mutableHeader.ToImmutable() + + // Assert hashes are equal + expectedTemplateHash := consensushashing.BlockHash(expectedTemplate.Block) + modifiedTemplateHash := consensushashing.BlockHash(modifiedTemplate.Block) + if !expectedTemplateHash.Equal(modifiedTemplateHash) { + t.Fatalf("Expected block hashes %s, %s to be equal", expectedTemplateHash, modifiedTemplateHash) + } +} + +func generateNewCoinbase(addressPrefix util.Bech32Prefix, op opType) (*externalapi.DomainCoinbaseData, error) { + if op == opTrue { + scriptPublicKey, _ := testutils.OpTrueScript() + return &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version()), + }, nil + } + if op == opEmpty { + return &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{Script: nil, Version: 0}, + ExtraData: nil, + }, nil + } + _, publicKey, err := libspectrewallet.CreateKeyPair(op == opECDSA) + if err != nil { + return nil, err + } + var address string + if op == opECDSA { + addressPublicKeyECDSA, err := util.NewAddressPublicKeyECDSA(publicKey, addressPrefix) + if err != nil { + return nil, err + } + address = addressPublicKeyECDSA.EncodeAddress() + } else { + addressPublicKey, err := util.NewAddressPublicKey(publicKey, addressPrefix) + if err != nil { + return nil, err + } + address = addressPublicKey.EncodeAddress() + } + payAddress, err := util.DecodeAddress(address, addressPrefix) + if err != nil { + return nil, err + } + 
scriptPublicKey, err := txscript.PayToAddrScript(payAddress) + if err != nil { + return nil, err + } + return &externalapi.DomainCoinbaseData{ + ScriptPublicKey: scriptPublicKey, ExtraData: []byte(version.Version()), + }, nil +} + +func createTransactionWithUTXOEntry(t *testing.T, i int, daaScore uint64) *externalapi.DomainTransaction { + prevOutTxID := externalapi.DomainTransactionID{} + prevOutPoint := externalapi.DomainOutpoint{TransactionID: prevOutTxID, Index: uint32(i)} + scriptPublicKey, redeemScript := testutils.OpTrueScript() + signatureScript, err := txscript.PayToScriptHashSignatureScript(redeemScript, nil) + if err != nil { + t.Fatalf("PayToScriptHashSignatureScript: %v", err) + } + txInput := externalapi.DomainTransactionInput{ + PreviousOutpoint: prevOutPoint, + SignatureScript: signatureScript, + Sequence: constants.MaxTxInSequenceNum, + UTXOEntry: utxo.NewUTXOEntry( + 100000000, // 1 SPR + scriptPublicKey, + true, + daaScore), + } + txOut := externalapi.DomainTransactionOutput{ + Value: 10000, + ScriptPublicKey: scriptPublicKey, + } + tx := externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: []*externalapi.DomainTransactionInput{&txInput}, + Outputs: []*externalapi.DomainTransactionOutput{&txOut}, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Fee: 289, + Mass: 1, + LockTime: 0} + + return &tx +} + +func createArraysOfParentAndChildrenTransactions(tc testapi.TestConsensus) ([]*externalapi.DomainTransaction, + []*externalapi.DomainTransaction, error) { + + const numOfTransactions = 5 + transactions := make([]*externalapi.DomainTransaction, numOfTransactions) + parentTransactions := make([]*externalapi.DomainTransaction, len(transactions)) + var err error + for i := range transactions { + parentTransactions[i], transactions[i], err = createParentAndChildrenTransactions(tc) + if err != nil { + return nil, nil, err + } + } + return parentTransactions, transactions, nil +} + +func 
createParentAndChildrenTransactions(tc testapi.TestConsensus) (txParent *externalapi.DomainTransaction, + txChild *externalapi.DomainTransaction, err error) { + + chain, err := createTxChain(tc, 2) + if err != nil { + return nil, nil, err + } + + return chain[0], chain[1], nil +} + +func createTxChain(tc testapi.TestConsensus, numTxs int) ([]*externalapi.DomainTransaction, error) { + + // We will add two blocks by consensus before the parent transactions, in order to fund the parent transactions. + tips, err := tc.Tips() + if err != nil { + return nil, err + } + + _, _, err = tc.AddBlock(tips, nil, nil) + if err != nil { + return nil, errors.Wrapf(err, "AddBlock: %v", err) + } + + tips, err = tc.Tips() + if err != nil { + return nil, err + } + + fundingBlockHashForParent, _, err := tc.AddBlock(tips, nil, nil) + if err != nil { + return nil, errors.Wrap(err, "AddBlock: ") + } + fundingBlockForParent, _, err := tc.GetBlock(fundingBlockHashForParent) + if err != nil { + return nil, errors.Wrap(err, "GetBlock: ") + } + fundingTransactionForParent := fundingBlockForParent.Transactions[transactionhelper.CoinbaseTransactionIndex] + + transactions := make([]*externalapi.DomainTransaction, numTxs) + transactions[0], err = testutils.CreateTransaction(fundingTransactionForParent, 1000) + if err != nil { + return nil, err + } + + txParent := transactions[0] + for i := 1; i < numTxs; i++ { + transactions[i], err = testutils.CreateTransaction(txParent, 1000) + if err != nil { + return nil, err + } + + txParent = transactions[i] + } + return transactions, nil +} + +func createChildAndParentTxsAndAddParentToConsensus(tc testapi.TestConsensus) (*externalapi.DomainTransaction, error) { + firstBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{tc.DAGParams().GenesisHash}, nil, nil) + if err != nil { + return nil, errors.Wrapf(err, "AddBlock: %v", err) + } + ParentBlockHash, _, err := tc.AddBlock([]*externalapi.DomainHash{firstBlockHash}, nil, nil) + if err != nil { + return 
nil, errors.Wrap(err, "AddBlock: ") + } + ParentBlock, _, err := tc.GetBlock(ParentBlockHash) + if err != nil { + return nil, errors.Wrap(err, "GetBlock: ") + } + parentTransaction := ParentBlock.Transactions[transactionhelper.CoinbaseTransactionIndex] + txChild, err := testutils.CreateTransaction(parentTransaction, 1000) + if err != nil { + return nil, err + } + return txChild, nil +} + +func contains(transaction *externalapi.DomainTransaction, transactions []*externalapi.DomainTransaction) bool { + for _, candidateTransaction := range transactions { + if candidateTransaction.Equal(transaction) { + return true + } + } + return false +} diff --git a/domain/miningmanager/model/interface_blocktemplatebuilder.go b/domain/miningmanager/model/interface_blocktemplatebuilder.go new file mode 100644 index 0000000..9921338 --- /dev/null +++ b/domain/miningmanager/model/interface_blocktemplatebuilder.go @@ -0,0 +1,12 @@ +package model + +import ( + consensusexternalapi "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// BlockTemplateBuilder builds block templates for miners to consume +type BlockTemplateBuilder interface { + BuildBlockTemplate(coinbaseData *consensusexternalapi.DomainCoinbaseData) (*consensusexternalapi.DomainBlockTemplate, error) + ModifyBlockTemplate(newCoinbaseData *consensusexternalapi.DomainCoinbaseData, + blockTemplateToModify *consensusexternalapi.DomainBlockTemplate) (*consensusexternalapi.DomainBlockTemplate, error) +} diff --git a/domain/miningmanager/model/interface_mempool.go b/domain/miningmanager/model/interface_mempool.go new file mode 100644 index 0000000..c7a7ac5 --- /dev/null +++ b/domain/miningmanager/model/interface_mempool.go @@ -0,0 +1,43 @@ +package model + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/ruleerrors" +) + +// Mempool maintains a set of known transactions that +// are intended to be mined into new 
blocks +type Mempool interface { + HandleNewBlockTransactions(txs []*externalapi.DomainTransaction) ([]*externalapi.DomainTransaction, error) + BlockCandidateTransactions() []*externalapi.DomainTransaction + ValidateAndInsertTransaction(transaction *externalapi.DomainTransaction, isHighPriority bool, allowOrphan bool) ( + acceptedTransactions []*externalapi.DomainTransaction, err error) + RemoveInvalidTransactions(err *ruleerrors.ErrInvalidTransactionsInNewBlock) error + GetTransaction( + transactionID *externalapi.DomainTransactionID, + includeTransactionPool bool, + includeOrphanPool bool, + ) ( + transactionPoolTransaction *externalapi.DomainTransaction, + isOrphan bool, + found bool) + GetTransactionsByAddresses( + includeTransactionPool bool, + includeOrphanPool bool) ( + sendingInTransactionPool map[string]*externalapi.DomainTransaction, + receivingInTransactionPool map[string]*externalapi.DomainTransaction, + sendingInOrphanPool map[string]*externalapi.DomainTransaction, + receivingInOrphanPool map[string]*externalapi.DomainTransaction, + err error) + AllTransactions( + includeTransactionPool bool, + includeOrphanPool bool, + ) ( + transactionPoolTransactions []*externalapi.DomainTransaction, + orphanPoolTransactions []*externalapi.DomainTransaction) + TransactionCount( + includeTransactionPool bool, + includeOrphanPool bool) int + RevalidateHighPriorityTransactions() (validTransactions []*externalapi.DomainTransaction, err error) + IsTransactionOutputDust(output *externalapi.DomainTransactionOutput) bool +} diff --git a/domain/prefixmanager/log.go b/domain/prefixmanager/log.go new file mode 100644 index 0000000..8084487 --- /dev/null +++ b/domain/prefixmanager/log.go @@ -0,0 +1,9 @@ +package prefixmanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("PRFX") +var spawn = panics.GoroutineWrapperFunc(log) diff --git 
a/domain/prefixmanager/prefix.go b/domain/prefixmanager/prefix.go new file mode 100644 index 0000000..35d5ac8 --- /dev/null +++ b/domain/prefixmanager/prefix.go @@ -0,0 +1,112 @@ +package prefixmanager + +import ( + "github.com/spectre-project/spectred/domain/prefixmanager/prefix" + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +var activePrefixKey = database.MakeBucket(nil).Key([]byte("active-prefix")) +var inactivePrefixKey = database.MakeBucket(nil).Key([]byte("inactive-prefix")) + +// ActivePrefix returns the current active database prefix, and whether it exists +func ActivePrefix(dataAccessor database.DataAccessor) (*prefix.Prefix, bool, error) { + prefixBytes, err := dataAccessor.Get(activePrefixKey) + if database.IsNotFoundError(err) { + return nil, false, nil + } + + if err != nil { + return nil, false, err + } + + prefix, err := prefix.Deserialize(prefixBytes) + if err != nil { + return nil, false, err + } + + return prefix, true, nil +} + +// InactivePrefix returns the current inactive database prefix, and whether it exists +func InactivePrefix(dataAccessor database.DataAccessor) (*prefix.Prefix, bool, error) { + prefixBytes, err := dataAccessor.Get(inactivePrefixKey) + if database.IsNotFoundError(err) { + return nil, false, nil + } + + if err != nil { + return nil, false, err + } + + prefix, err := prefix.Deserialize(prefixBytes) + if err != nil { + return nil, false, err + } + + return prefix, true, nil +} + +// DeleteInactivePrefix deletes all data associated with the inactive database prefix, including itself. 
+func DeleteInactivePrefix(db database.Database) error { + prefixBytes, err := db.Get(inactivePrefixKey) + if database.IsNotFoundError(err) { + return nil + } + + if err != nil { + return err + } + + prefix, err := prefix.Deserialize(prefixBytes) + if err != nil { + return err + } + + err = deletePrefix(db, prefix) + if err != nil { + return err + } + + err = db.Delete(inactivePrefixKey) + if err != nil { + return err + } + + log.Infof("Compacting database after prefix delete") + return db.Compact() +} + +func deletePrefix(dataAccessor database.DataAccessor, prefix *prefix.Prefix) error { + log.Infof("Deleting database prefix %x", prefix) + prefixBucket := database.MakeBucket(prefix.Serialize()) + cursor, err := dataAccessor.Cursor(prefixBucket) + if err != nil { + return err + } + + defer cursor.Close() + + for ok := cursor.First(); ok; ok = cursor.Next() { + key, err := cursor.Key() + if err != nil { + return err + } + + err = dataAccessor.Delete(key) + if err != nil { + return err + } + } + + return nil +} + +// SetPrefixAsActive sets the given prefix as the active prefix +func SetPrefixAsActive(dataAccessor database.DataAccessor, prefix *prefix.Prefix) error { + return dataAccessor.Put(activePrefixKey, prefix.Serialize()) +} + +// SetPrefixAsInactive sets the given prefix as the inactive prefix +func SetPrefixAsInactive(dataAccessor database.DataAccessor, prefix *prefix.Prefix) error { + return dataAccessor.Put(inactivePrefixKey, prefix.Serialize()) +} diff --git a/domain/prefixmanager/prefix/prefix.go b/domain/prefixmanager/prefix/prefix.go new file mode 100644 index 0000000..348ad1a --- /dev/null +++ b/domain/prefixmanager/prefix/prefix.go @@ -0,0 +1,46 @@ +package prefix + +import "github.com/pkg/errors" + +const ( + prefixZero byte = 0 + prefixOne byte = 1 +) + +// Prefix is a database prefix that is used to manage more than one database at once. 
+type Prefix struct { + value byte +} + +// Serialize serializes the prefix into a byte slice +func (p *Prefix) Serialize() []byte { + return []byte{p.value} +} + +// Equal returns whether p equals to other +func (p *Prefix) Equal(other *Prefix) bool { + return p.value == other.value +} + +// Flip returns the opposite of the current prefix +func (p *Prefix) Flip() *Prefix { + value := prefixZero + if p.value == prefixZero { + value = prefixOne + } + + return &Prefix{value: value} +} + +// Deserialize deserializes a prefix from a byte slice +func Deserialize(prefixBytes []byte) (*Prefix, error) { + if len(prefixBytes) > 1 { + return nil, errors.Errorf("invalid length %d for prefix", len(prefixBytes)) + } + + if prefixBytes[0] != prefixZero && prefixBytes[0] != prefixOne { + return nil, errors.Errorf("invalid prefix %x", prefixBytes) + } + + return &Prefix{value: prefixBytes[0]}, nil +} diff --git a/domain/utxoindex/log.go b/domain/utxoindex/log.go new file mode 100644 index 0000000..44f8274 --- /dev/null +++ b/domain/utxoindex/log.go @@ -0,0 +1,11 @@ +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package utxoindex + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("UTIN") diff --git a/domain/utxoindex/model.go b/domain/utxoindex/model.go new file mode 100644 index 0000000..eb8f043 --- /dev/null +++ b/domain/utxoindex/model.go @@ -0,0 +1,23 @@ +package utxoindex + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +// ScriptPublicKeyString is a script public key represented as a string +// We use this type rather than just a byte slice because Go maps don't +// support slices as keys. 
See: UTXOChanges +type ScriptPublicKeyString string + +// UTXOOutpointEntryPairs is a map between UTXO outpoints to UTXO entries +type UTXOOutpointEntryPairs map[externalapi.DomainOutpoint]externalapi.UTXOEntry + +// UTXOOutpoints is a set of UTXO outpoints +type UTXOOutpoints map[externalapi.DomainOutpoint]interface{} + +// UTXOChanges is the set of changes made to the UTXO index after +// a successful update +type UTXOChanges struct { + Added map[ScriptPublicKeyString]UTXOOutpointEntryPairs + Removed map[ScriptPublicKeyString]UTXOOutpointEntryPairs +} diff --git a/domain/utxoindex/serialization.go b/domain/utxoindex/serialization.go new file mode 100644 index 0000000..ba63a52 --- /dev/null +++ b/domain/utxoindex/serialization.go @@ -0,0 +1,73 @@ +package utxoindex + +import ( + "encoding/binary" + "io" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database/serialization" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "google.golang.org/protobuf/proto" +) + +func serializeOutpoint(outpoint *externalapi.DomainOutpoint) ([]byte, error) { + dbOutpoint := serialization.DomainOutpointToDbOutpoint(outpoint) + return proto.Marshal(dbOutpoint) +} + +func deserializeOutpoint(serializedOutpoint []byte) (*externalapi.DomainOutpoint, error) { + var dbOutpoint serialization.DbOutpoint + err := proto.Unmarshal(serializedOutpoint, &dbOutpoint) + if err != nil { + return nil, err + } + return serialization.DbOutpointToDomainOutpoint(&dbOutpoint) +} + +func serializeUTXOEntry(utxoEntry externalapi.UTXOEntry) ([]byte, error) { + dbUTXOEntry := serialization.UTXOEntryToDBUTXOEntry(utxoEntry) + return proto.Marshal(dbUTXOEntry) +} + +func deserializeUTXOEntry(serializedUTXOEntry []byte) (externalapi.UTXOEntry, error) { + var dbUTXOEntry serialization.DbUtxoEntry + err := proto.Unmarshal(serializedUTXOEntry, &dbUTXOEntry) + if err != nil { + return nil, err + } + return 
serialization.DBUTXOEntryToUTXOEntry(&dbUTXOEntry) +} + +const hashesLengthSize = 8 + +func serializeHashes(hashes []*externalapi.DomainHash) []byte { + serializedHashes := make([]byte, hashesLengthSize+externalapi.DomainHashSize*len(hashes)) + binary.LittleEndian.PutUint64(serializedHashes[:hashesLengthSize], uint64(len(hashes))) + for i, hash := range hashes { + start := hashesLengthSize + externalapi.DomainHashSize*i + end := start + externalapi.DomainHashSize + copy(serializedHashes[start:end], hash.ByteSlice()) + } + return serializedHashes +} + +func deserializeHashes(serializedHashes []byte) ([]*externalapi.DomainHash, error) { + length := binary.LittleEndian.Uint64(serializedHashes[:hashesLengthSize]) + hashes := make([]*externalapi.DomainHash, length) + for i := uint64(0); i < length; i++ { + start := hashesLengthSize + externalapi.DomainHashSize*i + end := start + externalapi.DomainHashSize + + if end > uint64(len(serializedHashes)) { + return nil, errors.Wrapf(io.ErrUnexpectedEOF, "unexpected EOF while deserializing hashes") + } + + var err error + hashes[i], err = externalapi.NewDomainHashFromByteSlice(serializedHashes[start:end]) + if err != nil { + return nil, err + } + } + + return hashes, nil +} diff --git a/domain/utxoindex/serialization_test.go b/domain/utxoindex/serialization_test.go new file mode 100644 index 0000000..64bb59f --- /dev/null +++ b/domain/utxoindex/serialization_test.go @@ -0,0 +1,45 @@ +package utxoindex + +import ( + "encoding/binary" + "io" + "math/rand" + "testing" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func Test_serializeHashes(t *testing.T) { + r := rand.New(rand.NewSource(0)) + + for length := 0; length < 32; length++ { + hashes := make([]*externalapi.DomainHash, length) + for i := range hashes { + var hashBytes [32]byte + r.Read(hashBytes[:]) + hashes[i] = externalapi.NewDomainHashFromByteArray(&hashBytes) + } + result, err := 
deserializeHashes(serializeHashes(hashes)) + if err != nil { + t.Fatalf("Failed deserializing hashes: %v", err) + } + if !externalapi.HashesEqual(hashes, result) { + t.Fatalf("Expected \n %s \n==\n %s\n", hashes, result) + } + } +} + +func Test_deserializeHashesFailure(t *testing.T) { + hashes := []*externalapi.DomainHash{ + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{1}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{2}), + externalapi.NewDomainHashFromByteArray(&[externalapi.DomainHashSize]byte{3}), + } + serialized := serializeHashes(hashes) + binary.LittleEndian.PutUint64(serialized[:8], uint64(len(hashes)+1)) + _, err := deserializeHashes(serialized) + if !errors.Is(err, io.ErrUnexpectedEOF) { + t.Fatalf("Expected error to be EOF, instead got: %v", err) + } +} diff --git a/domain/utxoindex/store.go b/domain/utxoindex/store.go new file mode 100644 index 0000000..e3ea6a8 --- /dev/null +++ b/domain/utxoindex/store.go @@ -0,0 +1,433 @@ +package utxoindex + +import ( + "encoding/binary" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/database/binaryserialization" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var utxoIndexBucket = database.MakeBucket([]byte("utxo-index")) +var virtualParentsKey = database.MakeBucket([]byte("")).Key([]byte("utxo-index-virtual-parents")) +var circulatingSupplyKey = database.MakeBucket([]byte("")).Key([]byte("utxo-index-circulating-supply")) + +type utxoIndexStore struct { + database database.Database + toAdd map[ScriptPublicKeyString]UTXOOutpointEntryPairs + toRemove map[ScriptPublicKeyString]UTXOOutpointEntryPairs + + virtualParents []*externalapi.DomainHash +} + +func newUTXOIndexStore(database database.Database) *utxoIndexStore { + return &utxoIndexStore{ + database: database, + 
toAdd: make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs), + toRemove: make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs), + } +} + +func (uis *utxoIndexStore) add(scriptPublicKey *externalapi.ScriptPublicKey, outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry) error { + + key := ScriptPublicKeyString(scriptPublicKey.String()) + log.Tracef("Adding outpoint %s:%d to scriptPublicKey %s", + outpoint.TransactionID, outpoint.Index, key) + + // If the outpoint exists in `toRemove` simply remove it from there and return + if toRemoveOutpointsOfKey, ok := uis.toRemove[key]; ok { + if _, ok := toRemoveOutpointsOfKey[*outpoint]; ok { + log.Tracef("Outpoint %s:%d exists in `toRemove`. Deleting it from there", + outpoint.TransactionID, outpoint.Index) + delete(toRemoveOutpointsOfKey, *outpoint) + return nil + } + } + + // Create a UTXOOutpointEntryPairs entry in `toAdd` if it doesn't exist + if _, ok := uis.toAdd[key]; !ok { + log.Tracef("Creating key %s in `toAdd`", key) + uis.toAdd[key] = make(UTXOOutpointEntryPairs) + } + + // Return an error if the outpoint already exists in `toAdd` + toAddPairsOfKey := uis.toAdd[key] + if _, ok := toAddPairsOfKey[*outpoint]; ok { + return errors.Errorf("cannot add outpoint %s because it's being added already", outpoint) + } + + toAddPairsOfKey[*outpoint] = utxoEntry + + log.Tracef("Added outpoint %s:%d to scriptPublicKey %s", + outpoint.TransactionID, outpoint.Index, key) + return nil +} + +func (uis *utxoIndexStore) remove(scriptPublicKey *externalapi.ScriptPublicKey, outpoint *externalapi.DomainOutpoint, utxoEntry externalapi.UTXOEntry) error { + key := ScriptPublicKeyString(scriptPublicKey.String()) + log.Tracef("Removing outpoint %s:%d from scriptPublicKey %s", + outpoint.TransactionID, outpoint.Index, key) + + // If the outpoint exists in `toAdd` simply remove it from there and return + if toAddPairsOfKey, ok := uis.toAdd[key]; ok { + if _, ok := toAddPairsOfKey[*outpoint]; ok { + log.Tracef("Outpoint %s:%d 
exists in `toAdd`. Deleting it from there", + outpoint.TransactionID, outpoint.Index) + delete(toAddPairsOfKey, *outpoint) + return nil + } + } + + // Create a UTXOOutpointEntryPair in `toRemove` if it doesn't exist + if _, ok := uis.toRemove[key]; !ok { + log.Tracef("Creating key %s in `toRemove`", key) + uis.toRemove[key] = make(UTXOOutpointEntryPairs) + } + + // Return an error if the outpoint already exists in `toRemove` + toRemovePairsOfKey := uis.toRemove[key] + if _, ok := toRemovePairsOfKey[*outpoint]; ok { + return errors.Errorf("cannot remove outpoint %s because it's being removed already", outpoint) + } + + toRemovePairsOfKey[*outpoint] = utxoEntry + + log.Tracef("Removed outpoint %s:%d from scriptPublicKey %s", + outpoint.TransactionID, outpoint.Index, key) + return nil +} + +func (uis *utxoIndexStore) updateVirtualParents(virtualParents []*externalapi.DomainHash) { + uis.virtualParents = virtualParents +} + +func (uis *utxoIndexStore) discard() { + uis.toAdd = make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs) + uis.toRemove = make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs) + uis.virtualParents = nil +} + +func (uis *utxoIndexStore) commit() error { + onEnd := logger.LogAndMeasureExecutionTime(log, "utxoIndexStore.commit") + defer onEnd() + + dbTransaction, err := uis.database.Begin() + if err != nil { + return err + } + defer dbTransaction.RollbackUnlessClosed() + + toRemoveSompiSupply := uint64(0) + + for scriptPublicKeyString, toRemoveUTXOOutpointEntryPairs := range uis.toRemove { + scriptPublicKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString)) + bucket := uis.bucketForScriptPublicKey(scriptPublicKey) + for outpointToRemove, utxoEntryToRemove := range toRemoveUTXOOutpointEntryPairs { + key, err := uis.convertOutpointToKey(bucket, &outpointToRemove) + if err != nil { + return err + } + err = dbTransaction.Delete(key) + if err != nil { + return err + } + toRemoveSompiSupply = toRemoveSompiSupply + 
utxoEntryToRemove.Amount() + } + } + + toAddSompiSupply := uint64(0) + + for scriptPublicKeyString, toAddUTXOOutpointEntryPairs := range uis.toAdd { + scriptPublicKey := externalapi.NewScriptPublicKeyFromString(string(scriptPublicKeyString)) + bucket := uis.bucketForScriptPublicKey(scriptPublicKey) + for outpointToAdd, utxoEntryToAdd := range toAddUTXOOutpointEntryPairs { + key, err := uis.convertOutpointToKey(bucket, &outpointToAdd) + if err != nil { + return err + } + serializedUTXOEntry, err := serializeUTXOEntry(utxoEntryToAdd) + if err != nil { + return err + } + err = dbTransaction.Put(key, serializedUTXOEntry) + if err != nil { + return err + } + toAddSompiSupply = toAddSompiSupply + utxoEntryToAdd.Amount() + } + } + + serializeParentHashes := serializeHashes(uis.virtualParents) + err = dbTransaction.Put(virtualParentsKey, serializeParentHashes) + if err != nil { + return err + } + + err = uis.updateCirculatingSompiSupply(dbTransaction, toAddSompiSupply, toRemoveSompiSupply) + if err != nil { + return err + } + + err = dbTransaction.Commit() + if err != nil { + return err + } + + uis.discard() + return nil +} + +func (uis *utxoIndexStore) addAndCommitOutpointsWithoutTransaction(utxoPairs []*externalapi.OutpointAndUTXOEntryPair) error { + toAddSompiSupply := uint64(0) + for _, pair := range utxoPairs { + bucket := uis.bucketForScriptPublicKey(pair.UTXOEntry.ScriptPublicKey()) + key, err := uis.convertOutpointToKey(bucket, pair.Outpoint) + if err != nil { + return err + } + + serializedUTXOEntry, err := serializeUTXOEntry(pair.UTXOEntry) + if err != nil { + return err + } + + err = uis.database.Put(key, serializedUTXOEntry) + if err != nil { + return err + } + toAddSompiSupply = toAddSompiSupply + pair.UTXOEntry.Amount() + } + + err := uis.updateCirculatingSompiSupplyWithoutTransaction(toAddSompiSupply, uint64(0)) + if err != nil { + return err + } + + return nil +} + +func (uis *utxoIndexStore) updateAndCommitVirtualParentsWithoutTransaction(virtualParents 
[]*externalapi.DomainHash) error { + serializeParentHashes := serializeHashes(virtualParents) + return uis.database.Put(virtualParentsKey, serializeParentHashes) +} + +func (uis *utxoIndexStore) bucketForScriptPublicKey(scriptPublicKey *externalapi.ScriptPublicKey) *database.Bucket { + var scriptPublicKeyBytes = make([]byte, 2+len(scriptPublicKey.Script)) // uint16 + binary.LittleEndian.PutUint16(scriptPublicKeyBytes[:2], scriptPublicKey.Version) + copy(scriptPublicKeyBytes[2:], scriptPublicKey.Script) + return utxoIndexBucket.Bucket(scriptPublicKeyBytes) +} + +func (uis *utxoIndexStore) convertOutpointToKey(bucket *database.Bucket, outpoint *externalapi.DomainOutpoint) (*database.Key, error) { + serializedOutpoint, err := serializeOutpoint(outpoint) + if err != nil { + return nil, err + } + return bucket.Key(serializedOutpoint), nil +} + +func (uis *utxoIndexStore) convertKeyToOutpoint(key *database.Key) (*externalapi.DomainOutpoint, error) { + serializedOutpoint := key.Suffix() + return deserializeOutpoint(serializedOutpoint) +} + +func (uis *utxoIndexStore) stagedData() ( + toAdd map[ScriptPublicKeyString]UTXOOutpointEntryPairs, + toRemove map[ScriptPublicKeyString]UTXOOutpointEntryPairs, + virtualParents []*externalapi.DomainHash) { + + toAddClone := make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs, len(uis.toAdd)) + for scriptPublicKeyString, toAddUTXOOutpointEntryPairs := range uis.toAdd { + toAddUTXOOutpointEntryPairsClone := make(UTXOOutpointEntryPairs, len(toAddUTXOOutpointEntryPairs)) + for outpoint, utxoEntry := range toAddUTXOOutpointEntryPairs { + toAddUTXOOutpointEntryPairsClone[outpoint] = utxoEntry + } + toAddClone[scriptPublicKeyString] = toAddUTXOOutpointEntryPairsClone + } + + toRemoveClone := make(map[ScriptPublicKeyString]UTXOOutpointEntryPairs, len(uis.toRemove)) + for scriptPublicKeyString, toRemoveUTXOOutpointEntryPairs := range uis.toRemove { + toRemoveUTXOOutpointEntryPairsClone := make(UTXOOutpointEntryPairs, 
len(toRemoveUTXOOutpointEntryPairs)) + for outpoint, utxoEntry := range toRemoveUTXOOutpointEntryPairs { + toRemoveUTXOOutpointEntryPairsClone[outpoint] = utxoEntry + } + toRemoveClone[scriptPublicKeyString] = toRemoveUTXOOutpointEntryPairsClone + } + + return toAddClone, toRemoveClone, uis.virtualParents +} + +func (uis *utxoIndexStore) isAnythingStaged() bool { + return len(uis.toAdd) > 0 || len(uis.toRemove) > 0 +} + +func (uis *utxoIndexStore) getUTXOOutpointEntryPairs(scriptPublicKey *externalapi.ScriptPublicKey) (UTXOOutpointEntryPairs, error) { + if uis.isAnythingStaged() { + return nil, errors.Errorf("cannot get utxo outpoint entry pairs while staging isn't empty") + } + + bucket := uis.bucketForScriptPublicKey(scriptPublicKey) + cursor, err := uis.database.Cursor(bucket) + if err != nil { + return nil, err + } + defer cursor.Close() + utxoOutpointEntryPairs := make(UTXOOutpointEntryPairs) + for cursor.Next() { + key, err := cursor.Key() + if err != nil { + return nil, err + } + outpoint, err := uis.convertKeyToOutpoint(key) + if err != nil { + return nil, err + } + serializedUTXOEntry, err := cursor.Value() + if err != nil { + return nil, err + } + utxoEntry, err := deserializeUTXOEntry(serializedUTXOEntry) + if err != nil { + return nil, err + } + utxoOutpointEntryPairs[*outpoint] = utxoEntry + } + return utxoOutpointEntryPairs, nil +} + +func (uis *utxoIndexStore) getVirtualParents() ([]*externalapi.DomainHash, error) { + if uis.isAnythingStaged() { + return nil, errors.Errorf("cannot get the virtual parents while staging isn't empty") + } + + serializedHashes, err := uis.database.Get(virtualParentsKey) + if err != nil { + return nil, err + } + + return deserializeHashes(serializedHashes) +} + +func (uis *utxoIndexStore) deleteAll() error { + // First we delete the virtual parents, so if anything goes wrong, the UTXO index will be marked as "not synced" + // and will be reset. 
+ err := uis.database.Delete(virtualParentsKey) + if err != nil { + return err + } + + err = uis.database.Delete(circulatingSupplyKey) + if err != nil { + return err + } + + cursor, err := uis.database.Cursor(utxoIndexBucket) + if err != nil { + return err + } + defer cursor.Close() + for cursor.Next() { + key, err := cursor.Key() + if err != nil { + return err + } + + err = uis.database.Delete(key) + if err != nil { + return err + } + } + + return nil +} + +func (uis *utxoIndexStore) initializeCirculatingSompiSupply() error { + + cursor, err := uis.database.Cursor(utxoIndexBucket) + if err != nil { + return err + } + defer cursor.Close() + + circulatingSompiSupplyInDatabase := uint64(0) + for cursor.Next() { + serializedUTXOEntry, err := cursor.Value() + if err != nil { + return err + } + utxoEntry, err := deserializeUTXOEntry(serializedUTXOEntry) + if err != nil { + return err + } + + circulatingSompiSupplyInDatabase = circulatingSompiSupplyInDatabase + utxoEntry.Amount() + } + + err = uis.database.Put( + circulatingSupplyKey, + binaryserialization.SerializeUint64(circulatingSompiSupplyInDatabase), + ) + + if err != nil { + return err + } + + return nil +} + +func (uis *utxoIndexStore) updateCirculatingSompiSupply(dbTransaction database.Transaction, toAddSompiSupply uint64, toRemoveSompiSupply uint64) error { + if toAddSompiSupply != toRemoveSompiSupply { + circulatingSupplyBytes, err := dbTransaction.Get(circulatingSupplyKey) + if err != nil { + return err + } + + circulatingSupply, err := binaryserialization.DeserializeUint64(circulatingSupplyBytes) + if err != nil { + return err + } + err = dbTransaction.Put( + circulatingSupplyKey, + binaryserialization.SerializeUint64(circulatingSupply+toAddSompiSupply-toRemoveSompiSupply), + ) + if err != nil { + return err + } + } + return nil +} + +func (uis *utxoIndexStore) updateCirculatingSompiSupplyWithoutTransaction(toAddSompiSupply uint64, toRemoveSompiSupply uint64) error { + if toAddSompiSupply != 
toRemoveSompiSupply { + circulatingSupplyBytes, err := uis.database.Get(circulatingSupplyKey) + if err != nil { + return err + } + + circulatingSupply, err := binaryserialization.DeserializeUint64(circulatingSupplyBytes) + if err != nil { + return err + } + err = uis.database.Put( + circulatingSupplyKey, + binaryserialization.SerializeUint64(circulatingSupply+toAddSompiSupply-toRemoveSompiSupply), + ) + if err != nil { + return err + } + } + return nil +} + +func (uis *utxoIndexStore) getCirculatingSompiSupply() (uint64, error) { + if uis.isAnythingStaged() { + return 0, errors.Errorf("cannot get circulatingSupply while staging isn't empty") + } + circulatingSupply, err := uis.database.Get(circulatingSupplyKey) + if err != nil { + return 0, err + } + return binaryserialization.DeserializeUint64(circulatingSupply) +} diff --git a/domain/utxoindex/utxoindex.go b/domain/utxoindex/utxoindex.go new file mode 100644 index 0000000..2502d68 --- /dev/null +++ b/domain/utxoindex/utxoindex.go @@ -0,0 +1,210 @@ +package utxoindex + +import ( + "sync" + + "github.com/spectre-project/spectred/domain" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// UTXOIndex maintains an index between transaction scriptPublicKeys +// and UTXOs +type UTXOIndex struct { + domain domain.Domain + store *utxoIndexStore + + mutex sync.Mutex +} + +// New creates a new UTXO index. +// +// NOTE: While this is called no new blocks can be added to the consensus. +func New(domain domain.Domain, database database.Database) (*UTXOIndex, error) { + utxoIndex := &UTXOIndex{ + domain: domain, + store: newUTXOIndexStore(database), + } + isSynced, err := utxoIndex.isSynced() + if err != nil { + return nil, err + } + + ///Has check is for migration to circulating supply, can be removed eventually. 
+ hasCirculatingSupplyKey, err := utxoIndex.store.database.Has(circulatingSupplyKey) + if err != nil { + return nil, err + } + + if !isSynced || !hasCirculatingSupplyKey { + + err := utxoIndex.Reset() + if err != nil { + return nil, err + } + } + + return utxoIndex, nil +} + +// Reset deletes the whole UTXO index and resyncs it from consensus. +func (ui *UTXOIndex) Reset() error { + ui.mutex.Lock() + defer ui.mutex.Unlock() + + log.Infof("Starting UTXO index reset") + + err := ui.store.deleteAll() + if err != nil { + return err + } + + virtualInfo, err := ui.domain.Consensus().GetVirtualInfo() + if err != nil { + return err + } + + err = ui.store.initializeCirculatingSompiSupply() //At this point the database is empty, so the sole purpose of this call is to initialize the circulating supply key + if err != nil { + return err + } + + var fromOutpoint *externalapi.DomainOutpoint + for { + const step = 1000 + virtualUTXOs, err := ui.domain.Consensus().GetVirtualUTXOs(virtualInfo.ParentHashes, fromOutpoint, step) + if err != nil { + return err + } + + err = ui.store.addAndCommitOutpointsWithoutTransaction(virtualUTXOs) + if err != nil { + return err + } + + if len(virtualUTXOs) < step { + break + } + + fromOutpoint = virtualUTXOs[len(virtualUTXOs)-1].Outpoint + } + + // This has to be done last to mark that the reset went smoothly and no reset has to be called next time. 
+ err = ui.store.updateAndCommitVirtualParentsWithoutTransaction(virtualInfo.ParentHashes) + if err != nil { + return err + } + + log.Infof("Finished UTXO index reset") + return nil +} + +func (ui *UTXOIndex) isSynced() (bool, error) { + utxoIndexVirtualParents, err := ui.store.getVirtualParents() + if err != nil { + if database.IsNotFoundError(err) { + return false, nil + } + return false, err + } + + virtualInfo, err := ui.domain.Consensus().GetVirtualInfo() + if err != nil { + return false, err + } + + return externalapi.HashesEqual(virtualInfo.ParentHashes, utxoIndexVirtualParents), nil +} + +// Update updates the UTXO index with the given DAG selected parent chain changes +func (ui *UTXOIndex) Update(virtualChangeSet *externalapi.VirtualChangeSet) (*UTXOChanges, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "UTXOIndex.Update") + defer onEnd() + + ui.mutex.Lock() + defer ui.mutex.Unlock() + + log.Tracef("Updating UTXO index with VirtualUTXODiff: %+v", virtualChangeSet.VirtualUTXODiff) + err := ui.removeUTXOs(virtualChangeSet.VirtualUTXODiff.ToRemove()) + if err != nil { + return nil, err + } + + err = ui.addUTXOs(virtualChangeSet.VirtualUTXODiff.ToAdd()) + if err != nil { + return nil, err + } + + ui.store.updateVirtualParents(virtualChangeSet.VirtualParents) + + added, removed, _ := ui.store.stagedData() + utxoIndexChanges := &UTXOChanges{ + Added: added, + Removed: removed, + } + + err = ui.store.commit() + if err != nil { + return nil, err + } + + log.Tracef("UTXO index updated with the UTXOChanged: %+v", utxoIndexChanges) + return utxoIndexChanges, nil +} + +func (ui *UTXOIndex) addUTXOs(toAdd externalapi.UTXOCollection) error { + iterator := toAdd.Iterator() + defer iterator.Close() + for ok := iterator.First(); ok; ok = iterator.Next() { + outpoint, entry, err := iterator.Get() + if err != nil { + return err + } + + log.Tracef("Adding outpoint %s to UTXO index", outpoint) + err = ui.store.add(entry.ScriptPublicKey(), outpoint, entry) + if err 
!= nil { + return err + } + } + return nil +} + +func (ui *UTXOIndex) removeUTXOs(toRemove externalapi.UTXOCollection) error { + iterator := toRemove.Iterator() + defer iterator.Close() + for ok := iterator.First(); ok; ok = iterator.Next() { + outpoint, entry, err := iterator.Get() + if err != nil { + return err + } + + log.Tracef("Removing outpoint %s from UTXO index", outpoint) + err = ui.store.remove(entry.ScriptPublicKey(), outpoint, entry) + if err != nil { + return err + } + } + return nil +} + +// UTXOs returns all the UTXOs for the given scriptPublicKey +func (ui *UTXOIndex) UTXOs(scriptPublicKey *externalapi.ScriptPublicKey) (UTXOOutpointEntryPairs, error) { + onEnd := logger.LogAndMeasureExecutionTime(log, "UTXOIndex.UTXOs") + defer onEnd() + + ui.mutex.Lock() + defer ui.mutex.Unlock() + + return ui.store.getUTXOOutpointEntryPairs(scriptPublicKey) +} + +// GetCirculatingSompiSupply returns the current circulating supply of sompis in the network +func (ui *UTXOIndex) GetCirculatingSompiSupply() (uint64, error) { + + ui.mutex.Lock() + defer ui.mutex.Unlock() + + return ui.store.getCirculatingSompiSupply() +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..1b86d9e --- /dev/null +++ b/go.mod @@ -0,0 +1,35 @@ +module github.com/spectre-project/spectred + +go 1.18 + +require ( + github.com/btcsuite/btcutil v1.0.2 + github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd + github.com/btcsuite/winsvc v1.0.0 + github.com/davecgh/go-spew v1.1.1 + github.com/gofrs/flock v0.8.1 + github.com/golang/protobuf v1.5.3 + github.com/jessevdk/go-flags v1.5.0 + github.com/jrick/logrotate v1.0.0 + github.com/spectre-project/go-muhash v0.0.1 + github.com/spectre-project/go-secp256k1 v0.0.1 + github.com/pkg/errors v0.9.1 + github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d + github.com/tyler-smith/go-bip39 v1.1.0 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/term v0.17.0 + 
google.golang.org/grpc v1.61.1 + google.golang.org/protobuf v1.32.0 +) + +require ( + github.com/golang/snappy v0.0.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + golang.org/x/net v0.21.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect + gopkg.in/yaml.v2 v2.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..71c3be1 --- /dev/null +++ b/go.sum @@ -0,0 +1,215 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= 
+github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp 
v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/spectre-project/go-muhash v0.0.1 h1:UxikIJ+NL/9TVqvqexVBoQkkrnmTzSeiJocMA48R764= +github.com/spectre-project/go-muhash v0.0.1/go.mod h1:KprvUX7HcxTrhCY21bSRyufUKB2LRdX3syPOpVAbSJI= +github.com/spectre-project/go-secp256k1 v0.0.1 h1:dWznplP1UtUFajy9HGqzywb7v7mHHZ4WVEZMwoqJ0+s= +github.com/spectre-project/go-secp256k1 v0.0.1/go.mod h1:fuNi550cMAuNz6YnhmnH0zJ8Ro6OZiKhpItEUWE7HCQ= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= 
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= +github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= +github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210317152858-513c2a44f670/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd h1:zVFyTKZN/Q7mNRWSs1GOYnHM9NiFSJ54YVRsD0rNWT4= +golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto 
v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/infrastructure/config/config.go b/infrastructure/config/config.go new file mode 100644 index 0000000..830663f --- /dev/null +++ b/infrastructure/config/config.go @@ -0,0 +1,597 @@ +// Copyright 
(c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package config + +import ( + // _ "embed" is necessary for the go:embed feature. + _ "embed" + "fmt" + "net" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/btcsuite/go-socks/socks" + "github.com/jessevdk/go-flags" + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/util/network" + "github.com/spectre-project/spectred/version" +) + +const ( + defaultConfigFilename = "spectred.conf" + defaultLogLevel = "info" + defaultLogDirname = "logs" + defaultLogFilename = "spectred.log" + defaultErrLogFilename = "spectred_err.log" + defaultTargetOutboundPeers = 8 + defaultMaxInboundPeers = 117 + defaultBanDuration = time.Hour * 24 + defaultBanThreshold = 100 + //DefaultConnectTimeout is the default connection timeout when dialing + DefaultConnectTimeout = time.Second * 30 + //DefaultMaxRPCClients is the default max number of RPC clients + DefaultMaxRPCClients = 128 + defaultMaxRPCWebsockets = 25 + defaultMaxRPCConcurrentReqs = 20 + defaultBlockMaxMass = 10_000_000 + blockMaxMassMin = 1000 + blockMaxMassMax = 10_000_000 + defaultMinRelayTxFee = 1e-5 // 1 sompi per byte + defaultMaxOrphanTransactions = 100 + //DefaultMaxOrphanTxSize is the default maximum size for an orphan transaction + DefaultMaxOrphanTxSize = 100_000 + defaultSigCacheMaxSize = 100_000 + sampleConfigFilename = "sample-spectred.conf" + defaultMaxUTXOCacheSize = 5_000_000_000 + defaultProtocolVersion = 5 +) + +var ( + // DefaultAppDir is the default home directory for spectred. 
+ DefaultAppDir = util.AppDir("spectred", false) + + defaultConfigFile = filepath.Join(DefaultAppDir, defaultConfigFilename) + defaultDataDir = filepath.Join(DefaultAppDir) + defaultRPCKeyFile = filepath.Join(DefaultAppDir, "rpc.key") + defaultRPCCertFile = filepath.Join(DefaultAppDir, "rpc.cert") +) + +//go:embed sample-spectred.conf +var sampleConfig string + +// RunServiceCommand is only set to a real function on Windows. It is used +// to parse and execute service commands specified via the -s flag. +var RunServiceCommand func(string) error + +// Flags defines the configuration options for spectred. +// +// See loadConfig for details on the configuration load process. +type Flags struct { + ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` + ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"` + AppDir string `short:"b" long:"appdir" description:"Directory to store data"` + LogDir string `long:"logdir" description:"Directory to log output."` + AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"` + ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"` + DisableListen bool `long:"nolisten" description:"Disable listening for incoming connections -- NOTE: Listening is automatically disabled if the --connect or --proxy options are used without also specifying listen interfaces via --listen"` + Listeners []string `long:"listen" description:"Add an interface/port to listen for connections (default all interfaces port: 18111, testnet: 18211)"` + TargetOutboundPeers int `long:"outpeers" description:"Target number of outbound peers"` + MaxInboundPeers int `long:"maxinpeers" description:"Max number of inbound peers"` + EnableBanning bool `long:"enablebanning" description:"Enable banning of misbehaving peers"` + BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. 
Valid time units are {s, m, h}. Minimum 1 second"` + BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."` + Whitelists []string `long:"whitelist" description:"Add an IP network or IP that will not be banned. (eg. 192.168.1.0/24 or ::1)"` + RPCListeners []string `long:"rpclisten" description:"Add an interface/port to listen for RPC connections (default port: 18110, testnet: 18210)"` + RPCCert string `long:"rpccert" description:"File containing the certificate file"` + RPCKey string `long:"rpckey" description:"File containing the certificate key"` + RPCMaxClients int `long:"rpcmaxclients" description:"Max number of RPC clients for standard connections"` + RPCMaxWebsockets int `long:"rpcmaxwebsockets" description:"Max number of RPC websocket connections"` + RPCMaxConcurrentReqs int `long:"rpcmaxconcurrentreqs" description:"Max number of concurrent RPC requests that may be processed concurrently"` + DisableRPC bool `long:"norpc" description:"Disable built-in RPC server"` + SafeRPC bool `long:"saferpc" description:"Disable RPC commands which affect the state of the node"` + DisableDNSSeed bool `long:"nodnsseed" description:"Disable DNS seeding for peers"` + DNSSeed string `long:"dnsseed" description:"Override DNS seeds with specified hostname (Only 1 hostname allowed)"` + GRPCSeed string `long:"grpcseed" description:"Hostname of gRPC server for seeding peers"` + ExternalIPs []string `long:"externalip" description:"Add an ip to the list of local addresses we claim to listen on to peers"` + Proxy string `long:"proxy" description:"Connect via SOCKS5 proxy (eg. 
127.0.0.1:9050)"` + ProxyUser string `long:"proxyuser" description:"Username for proxy server"` + ProxyPass string `long:"proxypass" default-mask:"-" description:"Password for proxy server"` + DbType string `long:"dbtype" description:"Database backend to use for the Block DAG"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + LogLevel string `short:"d" long:"loglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify =,=,... to set the log level for individual subsystems -- Use show to list available subsystems"` + Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"` + MinRelayTxFee float64 `long:"minrelaytxfee" description:"The minimum transaction fee in SPR/kB to be considered a non-zero fee."` + MaxOrphanTxs uint64 `long:"maxorphantx" description:"Max number of orphan transactions to keep in memory"` + BlockMaxMass uint64 `long:"blockmaxmass" description:"Maximum transaction mass to be used when creating a block"` + UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."` + NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"` + SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"` + BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."` + RelayNonStd bool `long:"relaynonstd" description:"Relay non-standard transactions regardless of the default settings for the active network."` + RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."` + ResetDatabase bool `long:"reset-db" description:"Reset database before starting node. 
It's needed when switching between subnetworks."` + MaxUTXOCacheSize uint64 `long:"maxutxocachesize" description:"Max size of loaded UTXO into ram from the disk in bytes"` + UTXOIndex bool `long:"utxoindex" description:"Enable the UTXO index"` + IsArchivalNode bool `long:"archival" description:"Run as an archival node: don't delete old block data when moving the pruning point (Warning: heavy disk usage)'"` + AllowSubmitBlockWhenNotSynced bool `long:"allow-submit-block-when-not-synced" hidden:"true" description:"Allow the node to accept blocks from RPC while not synced (this flag is mainly used for testing)"` + EnableSanityCheckPruningUTXOSet bool `long:"enable-sanity-check-pruning-utxo" hidden:"true" description:"When moving the pruning point - check that the utxo set matches the utxo commitment"` + ProtocolVersion uint32 `long:"protocol-version" description:"Use non default p2p protocol version"` + NetworkFlags + ServiceOptions *ServiceOptions +} + +// Config defines the configuration options for spectred. +// +// See loadConfig for details on the configuration load process. +type Config struct { + *Flags + Lookup func(string) ([]net.IP, error) + Dial func(string, string, time.Duration) (net.Conn, error) + MiningAddrs []util.Address + MinRelayTxFee util.Amount + Whitelists []*net.IPNet + SubnetworkID *externalapi.DomainSubnetworkID // nil in full nodes +} + +// ServiceOptions defines the configuration options for the daemon as a service on +// Windows. +type ServiceOptions struct { + ServiceCommand string `short:"s" long:"service" description:"Service command {install, remove, start, stop}"` +} + +// cleanAndExpandPath expands environment variables and leading ~ in the +// passed path, cleans the result, and returns it. +func cleanAndExpandPath(path string) string { + // Expand initial ~ to OS specific home directory. 
+ if strings.HasPrefix(path, "~") { + homeDir := filepath.Dir(DefaultAppDir) + path = strings.Replace(path, "~", homeDir, 1) + } + + // NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%, + // but they variables can still be expanded via POSIX-style $VARIABLE. + return filepath.Clean(os.ExpandEnv(path)) +} + +// newConfigParser returns a new command line flags parser. +func newConfigParser(cfgFlags *Flags, options flags.Options) *flags.Parser { + parser := flags.NewParser(cfgFlags, options) + if runtime.GOOS == "windows" { + parser.AddGroup("Service Options", "Service Options", cfgFlags.ServiceOptions) + } + return parser +} + +func defaultFlags() *Flags { + return &Flags{ + ConfigFile: defaultConfigFile, + LogLevel: defaultLogLevel, + TargetOutboundPeers: defaultTargetOutboundPeers, + MaxInboundPeers: defaultMaxInboundPeers, + BanDuration: defaultBanDuration, + BanThreshold: defaultBanThreshold, + RPCMaxClients: DefaultMaxRPCClients, + RPCMaxWebsockets: defaultMaxRPCWebsockets, + RPCMaxConcurrentReqs: defaultMaxRPCConcurrentReqs, + AppDir: defaultDataDir, + RPCKey: defaultRPCKeyFile, + RPCCert: defaultRPCCertFile, + BlockMaxMass: defaultBlockMaxMass, + MaxOrphanTxs: defaultMaxOrphanTransactions, + SigCacheMaxSize: defaultSigCacheMaxSize, + MinRelayTxFee: defaultMinRelayTxFee, + MaxUTXOCacheSize: defaultMaxUTXOCacheSize, + ServiceOptions: &ServiceOptions{}, + ProtocolVersion: defaultProtocolVersion, + } +} + +// DefaultConfig returns the default spectred configuration +func DefaultConfig() *Config { + config := &Config{Flags: defaultFlags()} + config.NetworkFlags.ActiveNetParams = &dagconfig.MainnetParams + return config +} + +// LoadConfig initializes and parses the config using a config file and command +// line options. +// +// The configuration proceeds as follows: +// 1. Start with a default config with sane settings +// 2. Pre-parse the command line to check for an alternative config file +// 3. 
Load configuration file overwriting defaults with any specified options +// 4. Parse CLI options and overwrite/add any specified options +// +// The above results in spectred functioning properly without any config settings +// while still allowing the user to override settings with config files and +// command line options. Command line options always take precedence. +func LoadConfig() (*Config, error) { + cfgFlags := defaultFlags() + + // Pre-parse the command line options to see if an alternative config + // file or the version flag was specified. Any errors aside from the + // help message error can be ignored here since they will be caught by + // the final parse below. + preCfg := cfgFlags + preParser := newConfigParser(preCfg, flags.HelpFlag) + _, err := preParser.Parse() + if err != nil { + var flagsErr *flags.Error + if ok := errors.As(err, &flagsErr); ok && flagsErr.Type == flags.ErrHelp { + return nil, err + } + } + + appName := filepath.Base(os.Args[0]) + appName = strings.TrimSuffix(appName, filepath.Ext(appName)) + usageMessage := fmt.Sprintf("Use %s -h to show usage", appName) + + // Show the version and exit if the version flag was specified. + if preCfg.ShowVersion { + fmt.Println(appName, "version", version.Version()) + os.Exit(0) + } + + // Load additional config from file. 
+ var configFileError error + parser := newConfigParser(cfgFlags, flags.Default) + cfg := &Config{ + Flags: cfgFlags, + } + if !preCfg.Simnet || preCfg.ConfigFile != defaultConfigFile { + if _, err := os.Stat(preCfg.ConfigFile); os.IsNotExist(err) { + err := createDefaultConfigFile(preCfg.ConfigFile) + if err != nil { + return nil, errors.Wrap(err, "Error creating a default config file") + } + } + + err := flags.NewIniParser(parser).ParseFile(preCfg.ConfigFile) + if err != nil { + if pErr := &(os.PathError{}); !errors.As(err, &pErr) { + return nil, errors.Wrapf(err, "Error parsing config file: %s\n\n%s", err, usageMessage) + } + configFileError = err + } + } + + // Parse command line options again to ensure they take precedence. + _, err = parser.Parse() + if err != nil { + var flagsErr *flags.Error + if ok := errors.As(err, &flagsErr); !ok || flagsErr.Type != flags.ErrHelp { + return nil, errors.Wrapf(err, "Error parsing command line arguments: %s\n\n%s", err, usageMessage) + } + return nil, err + } + + // Create the home directory if it doesn't already exist. + funcName := "loadConfig" + err = os.MkdirAll(DefaultAppDir, 0700) + if err != nil { + // Show a nicer error message if it's because a symlink is + // linked to a directory that does not exist (probably because + // it's not mounted). + var e *os.PathError + if ok := errors.As(err, &e); ok && os.IsExist(err) { + if link, lerr := os.Readlink(e.Path); lerr == nil { + str := "is symlink %s -> %s mounted?" + err = errors.Errorf(str, e.Path, link) + } + } + + str := "%s: Failed to create home directory: %s" + err := errors.Errorf(str, funcName, err) + return nil, err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return nil, err + } + + // Set the default policy for relaying non-standard transactions + // according to the default of the active network. The set + // configuration value takes precedence over the default value for the + // selected network. 
+ relayNonStd := cfg.NetParams().RelayNonStdTxs + switch { + case cfg.RelayNonStd && cfg.RejectNonStd: + str := "%s: rejectnonstd and relaynonstd cannot be used " + + "together -- choose only one" + err := errors.Errorf(str, funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + case cfg.RejectNonStd: + relayNonStd = false + case cfg.RelayNonStd: + relayNonStd = true + } + cfg.RelayNonStd = relayNonStd + + cfg.AppDir = cleanAndExpandPath(cfg.AppDir) + // Append the network type to the app directory so it is "namespaced" + // per network. + // All data is specific to a network, so namespacing the data directory + // means each individual piece of serialized data does not have to + // worry about changing names per network and such. + cfg.AppDir = filepath.Join(cfg.AppDir, cfg.NetParams().Name) + + // Logs directory is usually under the home directory, unless otherwise specified + if cfg.LogDir == "" { + cfg.LogDir = filepath.Join(cfg.AppDir, defaultLogDirname) + } + cfg.LogDir = cleanAndExpandPath(cfg.LogDir) + + // Special show command to list supported subsystems and exit. + if cfg.LogLevel == "show" { + fmt.Println("Supported subsystems", logger.SupportedSubsystems()) + os.Exit(0) + } + + // Initialize log rotation. After log rotation has been initialized, the + // logger variables may be used. + logger.InitLog(filepath.Join(cfg.LogDir, defaultLogFilename), filepath.Join(cfg.LogDir, defaultErrLogFilename)) + + // Parse, validate, and set debug log level(s). 
+ if err := logger.ParseAndSetLogLevels(cfg.LogLevel); err != nil { + err := errors.Errorf("%s: %s", funcName, err.Error()) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Validate profile port number + if cfg.Profile != "" { + profilePort, err := strconv.Atoi(cfg.Profile) + if err != nil || profilePort < 1024 || profilePort > 65535 { + str := "%s: The profile port must be between 1024 and 65535" + err := errors.Errorf(str, funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + } + + // Don't allow ban durations that are too short. + if cfg.BanDuration < time.Second { + str := "%s: The banduration option may not be less than 1s -- parsed [%s]" + err := errors.Errorf(str, funcName, cfg.BanDuration) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Validate any given whitelisted IP addresses and networks. + if len(cfg.Whitelists) > 0 { + var ip net.IP + cfg.Whitelists = make([]*net.IPNet, 0, len(cfg.Flags.Whitelists)) + + for _, addr := range cfg.Flags.Whitelists { + _, ipnet, err := net.ParseCIDR(addr) + if err != nil { + ip = net.ParseIP(addr) + if ip == nil { + str := "%s: The whitelist value of '%s' is invalid" + err = errors.Errorf(str, funcName, addr) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + var bits int + if ip.To4() == nil { + // IPv6 + bits = 128 + } else { + bits = 32 + } + ipnet = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(bits, bits), + } + } + cfg.Whitelists = append(cfg.Whitelists, ipnet) + } + } + + // --addPeer and --connect do not mix. 
+ if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 { + str := "%s: the --addpeer and --connect options can not be " + + "mixed" + err := errors.Errorf(str, funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // --proxy or --connect without --listen disables listening. + if (cfg.Proxy != "" || len(cfg.ConnectPeers) > 0) && + len(cfg.Listeners) == 0 { + cfg.DisableListen = true + } + + // ConnectPeers means no DNS seeding and no outbound peers + if len(cfg.ConnectPeers) > 0 { + cfg.DisableDNSSeed = true + cfg.TargetOutboundPeers = 0 + } + + // Add the default listener if none were specified. The default + // listener is all addresses on the listen port for the network + // we are to connect to. + if len(cfg.Listeners) == 0 { + cfg.Listeners = []string{ + net.JoinHostPort("", cfg.NetParams().DefaultPort), + } + } + + if cfg.DisableRPC { + log.Infof("RPC service is disabled") + } + + // Add the default RPC listener if none were specified. The default + // RPC listener is all addresses on the RPC listen port for the + // network we are to connect to. + if !cfg.DisableRPC && len(cfg.RPCListeners) == 0 { + cfg.RPCListeners = []string{ + net.JoinHostPort("", cfg.NetParams().RPCPort), + } + } + + if cfg.RPCMaxConcurrentReqs < 0 { + str := "%s: The rpcmaxwebsocketconcurrentrequests option may " + + "not be less than 0 -- parsed [%d]" + err := errors.Errorf(str, funcName, cfg.RPCMaxConcurrentReqs) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Validate the the minrelaytxfee. + cfg.MinRelayTxFee, err = util.NewAmount(cfg.Flags.MinRelayTxFee) + if err != nil { + str := "%s: invalid minrelaytxfee: %s" + err := errors.Errorf(str, funcName, err) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Disallow 0 and negative min tx fees. 
+ if cfg.MinRelayTxFee == 0 { + str := "%s: The minrelaytxfee option must be greater than 0 -- parsed [%d]" + err := errors.Errorf(str, funcName, cfg.MinRelayTxFee) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Limit the max block mass to a sane value. + if cfg.BlockMaxMass < blockMaxMassMin || cfg.BlockMaxMass > + blockMaxMassMax { + + str := "%s: The blockmaxmass option must be in between %d " + + "and %d -- parsed [%d]" + err := errors.Errorf(str, funcName, blockMaxMassMin, + blockMaxMassMax, cfg.BlockMaxMass) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Look for illegal characters in the user agent comments. + for _, uaComment := range cfg.UserAgentComments { + if strings.ContainsAny(uaComment, "/:()") { + err := errors.Errorf("%s: The following characters must not "+ + "appear in user agent comments: '/', ':', '(', ')'", + funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + } + + // Add default port to all listener addresses if needed and remove + // duplicate addresses. + cfg.Listeners, err = network.NormalizeAddresses(cfg.Listeners, + cfg.NetParams().DefaultPort) + if err != nil { + return nil, err + } + + // Add default port to all rpc listener addresses if needed and remove + // duplicate addresses. + cfg.RPCListeners, err = network.NormalizeAddresses(cfg.RPCListeners, + cfg.NetParams().RPCPort) + if err != nil { + return nil, err + } + + // Disallow --addpeer and --connect used together + if len(cfg.AddPeers) > 0 && len(cfg.ConnectPeers) > 0 { + str := "%s: --addpeer and --connect can not be used together" + err := errors.Errorf(str, funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + // Add default port to all added peer addresses if needed and remove + // duplicate addresses. 
+ cfg.AddPeers, err = network.NormalizeAddresses(cfg.AddPeers, + cfg.NetParams().DefaultPort) + if err != nil { + return nil, err + } + + cfg.ConnectPeers, err = network.NormalizeAddresses(cfg.ConnectPeers, + cfg.NetParams().DefaultPort) + if err != nil { + return nil, err + } + + // Setup dial and DNS resolution (lookup) functions depending on the + // specified options. The default is to use the standard + // net.DialTimeout function as well as the system DNS resolver. When a + // proxy is specified, the dial function is set to the proxy specific + // dial function. + cfg.Dial = net.DialTimeout + cfg.Lookup = net.LookupIP + if cfg.Proxy != "" { + _, _, err := net.SplitHostPort(cfg.Proxy) + if err != nil { + str := "%s: Proxy address '%s' is invalid: %s" + err := errors.Errorf(str, funcName, cfg.Proxy, err) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, err + } + + proxy := &socks.Proxy{ + Addr: cfg.Proxy, + Username: cfg.ProxyUser, + Password: cfg.ProxyPass, + } + cfg.Dial = proxy.DialTimeout + } + + // Warn about missing config file only after all other configuration is + // done. This prevents the warning on help messages and invalid + // options. Note this should go directly before the return. + if configFileError != nil { + log.Warnf("%s", configFileError) + } + return cfg, nil +} + +// createDefaultConfig copies the file sample-spectred.conf to the given destination path, +// and populates it with some randomly generated RPC username and password. 
+func createDefaultConfigFile(destinationPath string) error { + // Create the destination directory if it does not exists + err := os.MkdirAll(filepath.Dir(destinationPath), 0700) + if err != nil { + return err + } + + dest, err := os.OpenFile(destinationPath, + os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer dest.Close() + + _, err = dest.WriteString(sampleConfig) + + return err +} diff --git a/infrastructure/config/config_test.go b/infrastructure/config/config_test.go new file mode 100644 index 0000000..7f12643 --- /dev/null +++ b/infrastructure/config/config_test.go @@ -0,0 +1,72 @@ +package config + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func TestCreateDefaultConfigFile(t *testing.T) { + // find out where the sample config lives + _, path, _, ok := runtime.Caller(0) + if !ok { + t.Fatalf("Failed finding config file path") + } + sampleConfigFile := filepath.Join(filepath.Dir(path), "sample-spectred.conf") + + // Setup a temporary directory + tmpDir, err := ioutil.TempDir("", "spectred") + if err != nil { + t.Fatalf("Failed creating a temporary directory: %v", err) + } + testpath := filepath.Join(tmpDir, "test.conf") + + // copy config file to location of spectred binary + data, err := ioutil.ReadFile(sampleConfigFile) + if err != nil { + t.Fatalf("Failed reading sample config file: %v", err) + } + appPath, err := filepath.Abs(filepath.Dir(os.Args[0])) + if err != nil { + t.Fatalf("Failed obtaining app path: %v", err) + } + tmpConfigFile := filepath.Join(appPath, "sample-spectred.conf") + err = ioutil.WriteFile(tmpConfigFile, data, 0644) + if err != nil { + t.Fatalf("Failed copying sample config file: %v", err) + } + + // Clean-up + defer func() { + os.Remove(testpath) + os.Remove(tmpConfigFile) + os.Remove(tmpDir) + }() + + err = 
createDefaultConfigFile(testpath) + if err != nil { + t.Fatalf("Failed to create a default config file: %v", err) + } +} + +// TestConstants makes sure that all constants hard-coded into the help text were not modified. +func TestConstants(t *testing.T) { + zero := externalapi.DomainSubnetworkID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + if subnetworks.SubnetworkIDNative != zero { + t.Errorf("subnetworks.SubnetworkIDNative value was changed from 0, therefore you probably need to update the help text for SubnetworkID") + } + one := externalapi.DomainSubnetworkID{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + if subnetworks.SubnetworkIDCoinbase != one { + t.Errorf("subnetworks.SubnetworkIDCoinbase value was changed from 1, therefore you probably need to update the help text for SubnetworkID") + } + two := externalapi.DomainSubnetworkID{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + if subnetworks.SubnetworkIDRegistry != two { + t.Errorf("subnetworks.SubnetworkIDRegistry value was changed from 2, therefore you probably need to update the help text for SubnetworkID") + } +} diff --git a/infrastructure/config/log.go b/infrastructure/config/log.go new file mode 100644 index 0000000..8476f20 --- /dev/null +++ b/infrastructure/config/log.go @@ -0,0 +1,11 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package config + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("CNFG") diff --git a/infrastructure/config/network.go b/infrastructure/config/network.go new file mode 100644 index 0000000..7e38d82 --- /dev/null +++ b/infrastructure/config/network.go @@ -0,0 +1,218 @@ +package config + +import ( + "encoding/json" + "fmt" + "math/big" + "os" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/jessevdk/go-flags" + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util/difficulty" +) + +// NetworkFlags holds the network configuration, that is which network is selected. +type NetworkFlags struct { + Testnet bool `long:"testnet" description:"Use the test network"` + Simnet bool `long:"simnet" description:"Use the simulation test network"` + Devnet bool `long:"devnet" description:"Use the development test network"` + OverrideDAGParamsFile string `long:"override-dag-params-file" description:"Overrides DAG params (allowed only on devnet)"` + + ActiveNetParams *dagconfig.Params +} + +type overrideDAGParamsConfig struct { + K *externalapi.KType `json:"k"` + MaxBlockParents *externalapi.KType `json:"maxBlockParents"` + MergeSetSizeLimit *uint64 `json:"mergeSetSizeLimit"` + MaxBlockMass *uint64 `json:"maxBlockMass"` + MaxCoinbasePayloadLength *uint64 `json:"maxCoinbasePayloadLength"` + MassPerTxByte *uint64 `json:"massPerTxByte"` + MassPerScriptPubKeyByte *uint64 `json:"massPerScriptPubKeyByte"` + MassPerSigOp *uint64 `json:"massPerSigOp"` + CoinbasePayloadScriptPublicKeyMaxLength *uint8 `json:"coinbasePayloadScriptPublicKeyMaxLength"` + PowMax *string `json:"powMax"` + BlockCoinbaseMaturity *uint64 `json:"blockCoinbaseMaturity"` + SubsidyGenesisReward *uint64 `json:"subsidyGenesisReward"` + SubsidyPastRewardMultiplier *float64 `json:"subsidyPastRewardMultiplier"` + 
SubsidyMergeSetRewardMultiplier *float64 `json:"subsidyMergeSetRewardMultiplier"` + TargetTimePerBlockInMilliSeconds *int64 `json:"targetTimePerBlockInMilliSeconds"` + FinalityDuration *int64 `json:"finalityDuration"` + TimestampDeviationTolerance *int `json:"timestampDeviationTolerance"` + DifficultyAdjustmentWindowSize *int `json:"difficultyAdjustmentWindowSize"` + RelayNonStdTxs *bool `json:"relayNonStdTxs"` + AcceptUnroutable *bool `json:"acceptUnroutable"` + EnableNonNativeSubnetworks *bool `json:"enableNonNativeSubnetworks"` + DisableDifficultyAdjustment *bool `json:"disableDifficultyAdjustment"` + SkipProofOfWork *bool `json:"skipProofOfWork"` + HardForkOmitGenesisFromParentsDAAScore *uint64 `json:"hardForkOmitGenesisFromParentsDaaScore"` +} + +// ResolveNetwork parses the network command line argument and sets NetParams accordingly. +// It returns error if more than one network was selected, nil otherwise. +func (networkFlags *NetworkFlags) ResolveNetwork(parser *flags.Parser) error { + //NetParams holds the selected network parameters. Default value is main-net. + networkFlags.ActiveNetParams = &dagconfig.MainnetParams + // Multiple networks can't be selected simultaneously. + numNets := 0 + // default net is main net + // Count number of network flags passed; assign active network params + // while we're at it + if networkFlags.Testnet { + numNets++ + networkFlags.ActiveNetParams = &dagconfig.TestnetParams + } + if networkFlags.Simnet { + numNets++ + networkFlags.ActiveNetParams = &dagconfig.SimnetParams + } + if networkFlags.Devnet { + numNets++ + networkFlags.ActiveNetParams = &dagconfig.DevnetParams + } + if numNets > 1 { + message := "Multiple networks parameters (testnet, simnet, devnet, etc.) cannot be used" + + "together. 
Please choose only one network" + err := errors.Errorf(message) + fmt.Fprintln(os.Stderr, err) + parser.WriteHelp(os.Stderr) + return err + } + + err := networkFlags.overrideDAGParams() + if err != nil { + return err + } + + return nil +} + +// NetParams returns the ActiveNetParams +func (networkFlags *NetworkFlags) NetParams() *dagconfig.Params { + return networkFlags.ActiveNetParams +} + +func (networkFlags *NetworkFlags) overrideDAGParams() error { + + if networkFlags.OverrideDAGParamsFile == "" { + return nil + } + + if !networkFlags.Devnet { + return errors.Errorf("override-dag-params-file is allowed only when using devnet") + } + + overrideDAGParamsFile, err := os.Open(networkFlags.OverrideDAGParamsFile) + if err != nil { + return err + } + defer overrideDAGParamsFile.Close() + + decoder := json.NewDecoder(overrideDAGParamsFile) + config := &overrideDAGParamsConfig{} + err = decoder.Decode(config) + if err != nil { + return err + } + + if config.K != nil { + networkFlags.ActiveNetParams.K = *config.K + } + + if config.MaxBlockParents != nil { + networkFlags.ActiveNetParams.MaxBlockParents = *config.MaxBlockParents + } + + if config.MergeSetSizeLimit != nil { + networkFlags.ActiveNetParams.MergeSetSizeLimit = *config.MergeSetSizeLimit + } + + if config.MaxBlockMass != nil { + networkFlags.ActiveNetParams.MaxBlockMass = *config.MaxBlockMass + } + + if config.MaxCoinbasePayloadLength != nil { + networkFlags.ActiveNetParams.MaxCoinbasePayloadLength = *config.MaxCoinbasePayloadLength + } + + if config.MassPerTxByte != nil { + networkFlags.ActiveNetParams.MassPerTxByte = *config.MassPerTxByte + } + + if config.MassPerScriptPubKeyByte != nil { + networkFlags.ActiveNetParams.MassPerScriptPubKeyByte = *config.MassPerScriptPubKeyByte + } + + if config.MassPerSigOp != nil { + networkFlags.ActiveNetParams.MassPerSigOp = *config.MassPerSigOp + } + + if config.CoinbasePayloadScriptPublicKeyMaxLength != nil { + 
networkFlags.ActiveNetParams.CoinbasePayloadScriptPublicKeyMaxLength = *config.CoinbasePayloadScriptPublicKeyMaxLength
+	}
+
+	if config.PowMax != nil {
+		powMax, ok := big.NewInt(0).SetString(*config.PowMax, 16)
+		if !ok {
+			return errors.Errorf("couldn't convert %s to big int", *config.PowMax)
+		}
+
+		genesisTarget := difficulty.CompactToBig(networkFlags.ActiveNetParams.GenesisBlock.Header.Bits())
+		// Genesis is only valid when its target does not exceed powMax (the easiest
+		// allowed target), so reject overrides where powMax < genesisTarget. The
+		// previous check (`> 0`) contradicted the error message below.
+		if powMax.Cmp(genesisTarget) < 0 {
+			return errors.Errorf("powMax (%s) is smaller than genesis's target (%s)", powMax.Text(16),
+				genesisTarget.Text(16))
+		}
+		networkFlags.ActiveNetParams.PowMax = powMax
+	}
+
+	if config.BlockCoinbaseMaturity != nil {
+		networkFlags.ActiveNetParams.BlockCoinbaseMaturity = *config.BlockCoinbaseMaturity
+	}
+
+	if config.SubsidyGenesisReward != nil {
+		networkFlags.ActiveNetParams.SubsidyGenesisReward = *config.SubsidyGenesisReward
+	}
+
+	if config.TargetTimePerBlockInMilliSeconds != nil {
+		networkFlags.ActiveNetParams.TargetTimePerBlock = time.Duration(*config.TargetTimePerBlockInMilliSeconds) *
+			time.Millisecond
+	}
+
+	if config.FinalityDuration != nil {
+		networkFlags.ActiveNetParams.FinalityDuration = time.Duration(*config.FinalityDuration) * time.Millisecond
+	}
+
+	if config.TimestampDeviationTolerance != nil {
+		networkFlags.ActiveNetParams.TimestampDeviationTolerance = *config.TimestampDeviationTolerance
+	}
+
+	if config.DifficultyAdjustmentWindowSize != nil {
+		networkFlags.ActiveNetParams.DifficultyAdjustmentWindowSize = *config.DifficultyAdjustmentWindowSize
+	}
+
+	if config.TimestampDeviationTolerance != nil {
+		networkFlags.ActiveNetParams.TimestampDeviationTolerance = *config.TimestampDeviationTolerance
+	}
+
+	if config.RelayNonStdTxs != nil {
+		networkFlags.ActiveNetParams.RelayNonStdTxs = *config.RelayNonStdTxs
+	}
+
+	if config.AcceptUnroutable != nil {
+		networkFlags.ActiveNetParams.AcceptUnroutable = *config.AcceptUnroutable
+	}
+
+	if config.EnableNonNativeSubnetworks != nil {
networkFlags.ActiveNetParams.EnableNonNativeSubnetworks = *config.EnableNonNativeSubnetworks + } + + if config.SkipProofOfWork != nil { + networkFlags.ActiveNetParams.SkipProofOfWork = *config.SkipProofOfWork + } + + return nil +} diff --git a/infrastructure/config/sample-spectred.conf b/infrastructure/config/sample-spectred.conf new file mode 100644 index 0000000..91ecbb9 --- /dev/null +++ b/infrastructure/config/sample-spectred.conf @@ -0,0 +1,234 @@ +[Application Options] + +; ------------------------------------------------------------------------------ +; Data settings +; ------------------------------------------------------------------------------ + +; The directory to store data such as the block DAG and peer addresses. The +; block DAG takes several GB, so this location must have a lot of free space. +; The default is ~/.spectred/data on POSIX OSes, $LOCALAPPDATA/Spectred/data on Windows, +; ~/Library/Application Support/Spectred/data on Mac OS, and $home/spectred/data on +; Plan9. Environment variables are expanded so they may be used. NOTE: Windows +; environment variables are typically %VARIABLE%, but they must be accessed with +; $VARIABLE here. Also, ~ is expanded to $LOCALAPPDATA on Windows. +; datadir=~/.spectred/data + + +; ------------------------------------------------------------------------------ +; Network settings +; ------------------------------------------------------------------------------ + +; Use testnet. +; testnet=1 + +; Connect via a SOCKS5 proxy. NOTE: Specifying a proxy will disable listening +; for incoming connections unless listen addresses are provided via the 'listen' +; option. +; proxy=127.0.0.1:9050 +; proxyuser= +; proxypass= + +; Use Universal Plug and Play (UPnP) to automatically open the listen port +; and obtain the external IP address from supported devices. NOTE: This option +; will have no effect if external IP addresses are specified. +; upnp=1 + +; Specify the external IP addresses your node is listening on. 
One address per +; line. spectred will not contact 3rd-party sites to obtain external ip addresses. +; This means if you are behind NAT, your node will not be able to advertise a +; reachable address unless you specify it here or enable the 'upnp' option (and +; have a supported device). +; externalip=1.2.3.4 +; externalip=2002::1234 + +; ****************************************************************************** +; Summary of 'addpeer' versus 'connect'. +; +; Only one of the following two options, 'addpeer' and 'connect', may be +; specified. Both allow you to specify peers that you want to stay connected +; with, but the behavior is slightly different. By default, spectred will query DNS +; to find peers to connect to, so unless you have a specific reason such as +; those described below, you probably won't need to modify anything here. +; +; 'addpeer' does not prevent connections to other peers discovered from +; the peers you are connected to and also lets the remote peers know you are +; available so they can notify other peers they can to connect to you. This +; option might be useful if you are having problems finding a node for some +; reason (perhaps due to a firewall). +; +; 'connect', on the other hand, will ONLY connect to the specified peers and +; no others. It also disables listening (unless you explicitly set listen +; addresses via the 'listen' option) and DNS seeding, so you will not be +; advertised as an available peer to the peers you connect to and won't accept +; connections from any other peers. So, the 'connect' option effectively allows +; you to only connect to "trusted" peers. +; ****************************************************************************** + +; Add persistent peers to connect to as desired. One peer per line. +; You may specify each IP address with or without a port. The default port will +; be added automatically if one is not specified here. 
+; addpeer=192.168.1.1 +; addpeer=10.0.0.2:18111 +; addpeer=fe80::1 +; addpeer=[fe80::2]:18111 + +; Add persistent peers that you ONLY want to connect to as desired. One peer +; per line. You may specify each IP address with or without a port. The +; default port will be added automatically if one is not specified here. +; NOTE: Specifying this option has other side effects as described above in +; the 'addpeer' versus 'connect' summary section. +; connect=192.168.1.1 +; connect=10.0.0.2:18111 +; connect=fe80::1 +; connect=[fe80::2]:18111 + +; Maximum number of inbound and outbound peers. +; maxinpeers=125 + +; Enable banning of misbehaving peers. +; enablebanning=1 + +; Maximum allowed ban score before disconnecting and banning misbehaving peers. +; banthreshold=100 + +; How long to ban misbehaving peers. Valid time units are {s, m, h}. +; Minimum 1s. +; banduration=24h +; banduration=11h30m15s + +; Add whitelisted IP networks and IPs. Connected peers whose IP matches a +; whitelist will not have their ban score increased. +; whitelist=127.0.0.1 +; whitelist=::1 +; whitelist=192.168.0.0/24 +; whitelist=fd00::/16 + +; Disable DNS seeding for peers. By default, when spectred starts, it will use +; DNS to query for available peers to connect with. +; nodnsseed=1 + +; Specify the interfaces to listen on. One listen address per line. +; NOTE: The default port is modified by some options such as 'testnet', so it is +; recommended to not specify a port and allow a proper default to be chosen +; unless you have a specific reason to do otherwise. 
+; All interfaces on default port (this is the default): +; listen= +; All ipv4 interfaces on default port: +; listen=0.0.0.0 +; All ipv6 interfaces on default port: +; listen=:: +; All interfaces on port 18111: +; listen=:18111 +; All ipv4 interfaces on port 18111: +; listen=0.0.0.0:18111 +; All ipv6 interfaces on port 18111: +; listen=[::]:18111 +; Only ipv4 localhost on port 8333: +; listen=127.0.0.1:8333 +; Only ipv6 localhost on port 8333: +; listen=[::1]:8333 +; Only ipv4 localhost on non-standard port 8336: +; listen=127.0.0.1:8336 +; All interfaces on non-standard port 8336: +; listen=:8336 +; All ipv4 interfaces on non-standard port 8336: +; listen=0.0.0.0:8336 +; All ipv6 interfaces on non-standard port 8336: +; listen=[::]:8336 + +; Disable listening for incoming connections. This will override all listeners. +; nolisten=1 + +; Disable peer bloom filtering. See BIP0111. +; nopeerbloomfilters=1 + +; Add comments to the user agent that is advertised to peers. +; Must not include characters '/', ':', '(' and ')'. +; uacomment= + +; ------------------------------------------------------------------------------ +; RPC server options - The following options control the built-in RPC server +; which is used to control and query information from a running spectred process. +; ------------------------------------------------------------------------------ + +; Specify the interfaces for the RPC server listen on. One listen address per +; line. NOTE: The default port is modified by some options such as 'testnet', +; so it is recommended to not specify a port and allow a proper default to be +; chosen unless you have a specific reason to do otherwise. By default, the +; RPC server will only listen on localhost for IPv4 and IPv6. 
+; All interfaces on default port: +; rpclisten= +; All ipv4 interfaces on default port: +; rpclisten=0.0.0.0 +; All ipv6 interfaces on default port: +; rpclisten=:: +; All interfaces on port 18110: +; rpclisten=:18110 +; All ipv4 interfaces on port 18110: +; rpclisten=0.0.0.0:18110 +; All ipv6 interfaces on port 18110: +; rpclisten=[::]:18110 +; Only ipv4 localhost on port 18110: +; rpclisten=127.0.0.1:18110 +; Only ipv6 localhost on port 18110: +; rpclisten=[::1]:18110 +; Only ipv4 localhost on non-standard port 8337: +; rpclisten=127.0.0.1:8337 +; All interfaces on non-standard port 8337: +; rpclisten=:8337 +; All ipv4 interfaces on non-standard port 8337: +; rpclisten=0.0.0.0:8337 +; All ipv6 interfaces on non-standard port 8337: +; rpclisten=[::]:8337 + +; Specify the maximum number of concurrent RPC clients for standard connections. +; rpcmaxclients=10 + +; Use the following setting to disable the RPC server. +; norpc=1 + + +; ------------------------------------------------------------------------------ +; Mempool Settings - The following options +; ------------------------------------------------------------------------------ + +; Set the minimum transaction fee to be considered a non-zero fee, +; minrelaytxfee=0.00001 + +; Limit orphan transaction pool to 100 transactions. +; maxorphantx=100 + +; Do not accept transactions from remote peers. +; blocksonly=1 + +; Relay non-standard transactions regardless of default network settings. +; relaynonstd=1 + +; Reject non-standard transactions regardless of default network settings. +; rejectnonstd=1 + + +; ------------------------------------------------------------------------------ +; Signature Verification Cache +; ------------------------------------------------------------------------------ + +; Limit the signature cache to a max of 50000 entries. 
+; sigcachemaxsize=50000
+
+
+; ------------------------------------------------------------------------------
+; Debug
+; ------------------------------------------------------------------------------
+
+; Debug logging level.
+; Valid levels are {trace, debug, info, warn, error, critical}
+; You may also specify <subsystem>=<level>,<subsystem2>=<level>,... to set
+; log level for individual subsystems. Use spectred --loglevel=show to list
+; available subsystems.
+; loglevel=info
+
+; The port used to listen for HTTP profile requests. The profile server will
+; be disabled if this option is not specified. The profile information can be
+; accessed at http://localhost:<profileport>/debug/pprof once running.
+; profile=6061
+
diff --git a/infrastructure/db/database/README.md b/infrastructure/db/database/README.md
new file mode 100644
index 0000000..45f9408
--- /dev/null
+++ b/infrastructure/db/database/README.md
@@ -0,0 +1,41 @@
+# Database
+
+[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/spectre-project/spectred/database)
+
+Package database provides a database for spectred.
+
+## Overview
+
+This package provides a database layer to store and retrieve data in
+a simple and efficient manner.
+
+The current backend is ffldb, which makes use of leveldb, flat files,
+and strict checksums in key areas to ensure data integrity.
+
+Implementors of additional backends are required to implement the
+following interfaces:
+
+## DataAccessor
+
+This defines the common interface by which data gets accessed in a
+generic spectred database. Both the Database and the Transaction
+interfaces (see below) implement it.
+
+## Database
+
+This defines the interface of a database that can begin transactions
+and close itself.
+
+## Transaction
+
+This defines the interface of a generic spectred database transaction.
+ +Note: Transactions provide data consistency over the state of the +database as it was when the transaction started. There is NO guarantee +that if one puts data into the transaction then it will be available +to get within the same transaction. + +## Cursor + +This iterates over database entries given some bucket. diff --git a/infrastructure/db/database/common_test.go b/infrastructure/db/database/common_test.go new file mode 100644 index 0000000..aef4dbf --- /dev/null +++ b/infrastructure/db/database/common_test.go @@ -0,0 +1,85 @@ +package database_test + +import ( + "fmt" + "io/ioutil" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" +) + +type databasePrepareFunc func(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) + +// databasePrepareFuncs is a set of functions, in which each function +// prepares a separate database type for testing. +// See testForAllDatabaseTypes for further details. +var databasePrepareFuncs = []databasePrepareFunc{ + prepareLDBForTest, +} + +func prepareLDBForTest(t *testing.T, testName string) (db database.Database, name string, teardownFunc func()) { + // Create a temp db to run tests against + path, err := ioutil.TempDir("", testName) + if err != nil { + t.Fatalf("%s: TempDir unexpectedly "+ + "failed: %s", testName, err) + } + db, err = ldb.NewLevelDB(path, 8) + if err != nil { + t.Fatalf("%s: Open unexpectedly "+ + "failed: %s", testName, err) + } + teardownFunc = func() { + err = db.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + } + return db, "ldb", teardownFunc +} + +// testForAllDatabaseTypes runs the given testFunc for every database +// type defined in databasePrepareFuncs. This is to make sure that +// all supported database types adhere to the assumptions defined in +// the interfaces in this package. 
+func testForAllDatabaseTypes(t *testing.T, testName string, + testFunc func(t *testing.T, db database.Database, testName string)) { + + for _, prepareDatabase := range databasePrepareFuncs { + func() { + db, dbType, teardownFunc := prepareDatabase(t, testName) + defer teardownFunc() + + testName := fmt.Sprintf("%s: %s", dbType, testName) + testFunc(t, db, testName) + }() + } +} + +type keyValuePair struct { + key *database.Key + value []byte +} + +func populateDatabaseForTest(t *testing.T, db database.Database, testName string) []keyValuePair { + // Prepare a list of key/value pairs + entries := make([]keyValuePair, 10) + for i := 0; i < 10; i++ { + key := database.MakeBucket(nil).Key([]byte(fmt.Sprintf("key%d", i))) + value := []byte("value") + entries[i] = keyValuePair{key: key, value: value} + } + + // Put the pairs into the database + for _, entry := range entries { + err := db.Put(entry.key, entry.value) + if err != nil { + t.Fatalf("%s: Put unexpectedly "+ + "failed: %s", testName, err) + } + } + + return entries +} diff --git a/infrastructure/db/database/cursor.go b/infrastructure/db/database/cursor.go new file mode 100644 index 0000000..0727f47 --- /dev/null +++ b/infrastructure/db/database/cursor.go @@ -0,0 +1,30 @@ +package database + +// Cursor iterates over database entries given some bucket. +type Cursor interface { + // Next moves the iterator to the next key/value pair. It returns whether the + // iterator is exhausted. Panics if the cursor is closed. + Next() bool + + // First moves the iterator to the first key/value pair. It returns false if + // such a pair does not exist. Panics if the cursor is closed. + First() bool + + // Seek moves the iterator to the first key/value pair whose key is greater + // than or equal to the given key. It returns ErrNotFound if such pair does not + // exist. + Seek(key *Key) error + + // Key returns the key of the current key/value pair, or ErrNotFound if done. 
+ // The caller should not modify the contents of the returned key, and + // its contents may change on the next call to Next. + Key() (*Key, error) + + // Value returns the value of the current key/value pair, or ErrNotFound if done. + // The caller should not modify the contents of the returned slice, and its + // contents may change on the next call to Next. + Value() ([]byte, error) + + // Close releases associated resources. + Close() error +} diff --git a/infrastructure/db/database/cursor_test.go b/infrastructure/db/database/cursor_test.go new file mode 100644 index 0000000..772fbd9 --- /dev/null +++ b/infrastructure/db/database/cursor_test.go @@ -0,0 +1,346 @@ +// All tests within this file should call testForAllDatabaseTypes +// over the actual test. This is to make sure that all supported +// database types adhere to the assumptions defined in the +// interfaces in this package. + +package database_test + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func prepareCursorForTest(t *testing.T, db database.Database, testName string) database.Cursor { + cursor, err := db.Cursor(database.MakeBucket(nil)) + if err != nil { + t.Fatalf("%s: Cursor unexpectedly "+ + "failed: %s", testName, err) + } + + return cursor +} + +func recoverFromClosedCursorPanic(t *testing.T, testName string) { + panicErr := recover() + if panicErr == nil { + t.Fatalf("%s: cursor unexpectedly "+ + "didn't panic after being closed", testName) + } + expectedPanicErr := "closed cursor" + if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) { + t.Fatalf("%s: cursor panicked "+ + "with wrong message. 
Want: %v, got: %s", + testName, expectedPanicErr, panicErr) + } +} + +func TestCursorNext(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorNext", testCursorNext) +} + +func testCursorNext(t *testing.T, db database.Database, testName string) { + entries := populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Make sure that all the entries exist in the cursor, in their + // correct order + for _, entry := range entries { + hasNext := cursor.Next() + if !hasNext { + t.Fatalf("%s: cursor unexpectedly "+ + "done", testName) + } + cursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(cursorKey, entry.key) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, entry.key, cursorKey) + } + cursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(cursorValue, entry.value) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. Want: %s, got: %s", testName, entry.value, cursorValue) + } + } + + // The cursor should now be exhausted. Make sure Next now + // returns false + hasNext := cursor.Next() + if hasNext { + t.Fatalf("%s: cursor unexpectedly "+ + "not done", testName) + } + + // Rewind the cursor and close it + cursor.First() + err := cursor.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + + // Call Next on the cursor. This time it should panic + // because it's closed. 
+ func() { + defer recoverFromClosedCursorPanic(t, testName) + cursor.Next() + }() +} + +func TestCursorFirst(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorFirst", testCursorFirst) +} + +func testCursorFirst(t *testing.T, db database.Database, testName string) { + entries := populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Make sure that First returns true when the cursor is not empty + exists := cursor.First() + if !exists { + t.Fatalf("%s: Cursor unexpectedly "+ + "returned false", testName) + } + + // Make sure that the first key and value are as expected + firstEntryKey := entries[0].key + firstCursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(firstCursorKey, firstEntryKey) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, firstEntryKey, firstCursorKey) + } + firstEntryValue := entries[0].value + firstCursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(firstCursorValue, firstEntryValue) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. 
Want: %s, got: %s", testName, firstEntryValue, firstCursorValue) + } + + // Exhaust the cursor + for cursor.Next() { + // Do nothing + } + + // Call first again and make sure it still returns true + exists = cursor.First() + if !exists { + t.Fatalf("%s: First unexpectedly "+ + "returned false", testName) + } + + // Call next and make sure it returns true as well + exists = cursor.Next() + if !exists { + t.Fatalf("%s: Next unexpectedly "+ + "returned false", testName) + } + + // Remove all the entries from the database + for _, entry := range entries { + err := db.Delete(entry.key) + if err != nil { + t.Fatalf("%s: Delete unexpectedly "+ + "failed: %s", testName, err) + } + } + + // Create a new cursor over an empty dataset + cursor = prepareCursorForTest(t, db, testName) + + // Make sure that First returns false when the cursor is empty + exists = cursor.First() + if exists { + t.Fatalf("%s: Cursor unexpectedly "+ + "returned true", testName) + } +} + +func TestCursorSeek(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorSeek", testCursorSeek) +} + +func testCursorSeek(t *testing.T, db database.Database, testName string) { + entries := populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Seek to the fourth entry and make sure it exists + fourthEntry := entries[3] + err := cursor.Seek(fourthEntry.key) + if err != nil { + t.Fatalf("%s: Cursor unexpectedly "+ + "failed: %s", testName, err) + } + + // Make sure that the key and value are as expected + fourthEntryKey := entries[3].key + fourthCursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(fourthCursorKey, fourthEntryKey) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. 
Want: %s, got: %s", testName, fourthEntryKey, fourthCursorKey) + } + fourthEntryValue := entries[3].value + fourthCursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(fourthCursorValue, fourthEntryValue) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. Want: %s, got: %s", testName, fourthEntryValue, fourthCursorValue) + } + + // Call Next and make sure that we are now on the fifth entry + exists := cursor.Next() + if !exists { + t.Fatalf("%s: Next unexpectedly "+ + "returned false", testName) + } + fifthEntryKey := entries[4].key + fifthCursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key unexpectedly "+ + "failed: %s", testName, err) + } + if !reflect.DeepEqual(fifthCursorKey, fifthEntryKey) { + t.Fatalf("%s: Cursor returned "+ + "wrong key. Want: %s, got: %s", testName, fifthEntryKey, fifthCursorKey) + } + fifthEntryValue := entries[4].value + fifthCursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value unexpectedly "+ + "failed: %s", testName, err) + } + if !bytes.Equal(fifthCursorValue, fifthEntryValue) { + t.Fatalf("%s: Cursor returned "+ + "wrong value. 
Want: %s, got: %s", testName, fifthEntryValue, fifthCursorValue) + } + + // Seek to a value that doesn't exist and make sure that + // the returned error is ErrNotFound + err = cursor.Seek(database.MakeBucket(nil).Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("%s: Seek unexpectedly "+ + "succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Seek returned "+ + "wrong error: %s", testName, err) + } +} + +func TestCursorCloseErrors(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorCloseErrors", testCursorCloseErrors) +} + +func testCursorCloseErrors(t *testing.T, db database.Database, testName string) { + populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Close the cursor + err := cursor.Close() + if err != nil { + t.Fatalf("%s: Close "+ + "unexpectedly failed: %s", testName, err) + } + + tests := []struct { + name string + function func() error + }{ + { + name: "Seek", + function: func() error { + return cursor.Seek(database.MakeBucket(nil).Key([]byte{})) + }, + }, + { + name: "Key", + function: func() error { + _, err := cursor.Key() + return err + }, + }, + { + name: "Value", + function: func() error { + _, err := cursor.Value() + return err + }, + }, + { + name: "Close", + function: func() error { + return cursor.Close() + }, + }, + } + + for _, test := range tests { + expectedErrContainsString := "closed cursor" + + // Make sure that the test function returns a "closed cursor" error + err = test.function() + if err == nil { + t.Fatalf("%s: %s "+ + "unexpectedly succeeded", testName, test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("%s: %s "+ + "returned wrong error. 
Want: %s, got: %s", + testName, test.name, expectedErrContainsString, err) + } + } +} + +func TestCursorCloseFirstAndNext(t *testing.T) { + testForAllDatabaseTypes(t, "TestCursorCloseFirstAndNext", testCursorCloseFirstAndNext) +} + +func testCursorCloseFirstAndNext(t *testing.T, db database.Database, testName string) { + populateDatabaseForTest(t, db, testName) + cursor := prepareCursorForTest(t, db, testName) + + // Close the cursor + err := cursor.Close() + if err != nil { + t.Fatalf("%s: Close "+ + "unexpectedly failed: %s", testName, err) + } + + // We expect First to panic + func() { + defer recoverFromClosedCursorPanic(t, testName) + cursor.First() + }() + + // We expect Next to panic + func() { + defer recoverFromClosedCursorPanic(t, testName) + cursor.Next() + }() +} diff --git a/infrastructure/db/database/dataaccessor.go b/infrastructure/db/database/dataaccessor.go new file mode 100644 index 0000000..7cf8491 --- /dev/null +++ b/infrastructure/db/database/dataaccessor.go @@ -0,0 +1,24 @@ +package database + +// DataAccessor defines the common interface by which data gets +// accessed in a generic spectred database. +type DataAccessor interface { + // Put sets the value for the given key. It overwrites + // any previous value for that key. + Put(key *Key, value []byte) error + + // Get gets the value for the given key. It returns + // ErrNotFound if the given key does not exist. + Get(key *Key) ([]byte, error) + + // Has returns true if the database does contains the + // given key. + Has(key *Key) (bool, error) + + // Delete deletes the value for the given key. Will not + // return an error if the key doesn't exist. + Delete(key *Key) error + + // Cursor begins a new cursor over the given bucket. 
+ Cursor(bucket *Bucket) (Cursor, error) +} diff --git a/infrastructure/db/database/database.go b/infrastructure/db/database/database.go new file mode 100644 index 0000000..93770ba --- /dev/null +++ b/infrastructure/db/database/database.go @@ -0,0 +1,22 @@ +package database + +// Database defines the interface of a database that can begin +// transactions and close itself. +// +// Important: This is not part of the DataAccessor interface +// because the Transaction interface includes it. Were we to +// merge Database with DataAccessor, implementors of the +// Transaction interface would be forced to implement methods +// such as Begin and Close, which is undesirable. +type Database interface { + DataAccessor + + // Begin begins a new database transaction. + Begin() (Transaction, error) + + // Compact compacts the database instance. + Compact() error + + // Close closes the database. + Close() error +} diff --git a/infrastructure/db/database/database_test.go b/infrastructure/db/database/database_test.go new file mode 100644 index 0000000..8e84d80 --- /dev/null +++ b/infrastructure/db/database/database_test.go @@ -0,0 +1,169 @@ +// All tests within this file should call testForAllDatabaseTypes +// over the actual test. This is to make sure that all supported +// database types adhere to the assumptions defined in the +// interfaces in this package. 
+ +package database_test + +import ( + "bytes" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func TestDatabasePut(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabasePut", testDatabasePut) +} + +func testDatabasePut(t *testing.T, db database.Database, testName string) { + // Put value1 into the database + key := database.MakeBucket(nil).Key([]byte("key")) + value1 := []byte("value1") + err := db.Put(key, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value is value1 + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value1) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value1), string(returnedValue)) + } + + // Put value2 into the database with the same key + value2 := []byte("value2") + err = db.Put(key, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value is value2 + returnedValue, err = db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value2) { + t.Fatalf("%s: Get "+ + "returned wrong value. 
Want: %s, got: %s", + testName, string(value2), string(returnedValue)) + } +} + +func TestDatabaseGet(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseGet", testDatabaseGet) +} + +func testDatabaseGet(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Get the value back and make sure it's the same one + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value), string(returnedValue)) + } + + // Try getting a non-existent value and make sure + // the returned error is ErrNotFound + _, err = db.Get(database.MakeBucket(nil).Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } +} + +func TestDatabaseHas(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseHas", testDatabaseHas) +} + +func testDatabaseHas(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that Has returns true for the value we just put + exists, err := db.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if !exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value does not exist", testName) + } + + // Make sure that Has returns false for a non-existent value + exists, err = 
db.Has(database.MakeBucket(nil).Key([]byte("doesn't exist"))) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } +} + +func TestDatabaseDelete(t *testing.T) { + testForAllDatabaseTypes(t, "TestDatabaseDelete", testDatabaseDelete) +} + +func testDatabaseDelete(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Delete the value + err = db.Delete(key) + if err != nil { + t.Fatalf("%s: Delete "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that Has returns false for the deleted value + exists, err := db.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } +} diff --git a/infrastructure/db/database/doc.go b/infrastructure/db/database/doc.go new file mode 100644 index 0000000..09db0bf --- /dev/null +++ b/infrastructure/db/database/doc.go @@ -0,0 +1,34 @@ +/* +Package database provides a database for spectred. + +# Overview + +This package provides a database layer to store and retrieve data in a simple +and efficient manner. + +The current backend is ffldb, which makes use of leveldb, flat files, and strict +checksums in key areas to ensure data integrity. + +Implementors of additional backends are required to implement the following interfaces: + +# DataAccessor + +This defines the common interface by which data gets accessed in a generic spectred +database. Both the Database and the Transaction interfaces (see below) implement it. + +# Database + +This defines the interface of a database that can begin transactions and close itself. 
+ +# Transaction + +This defines the interface of a generic spectred database transaction. +Note: transactions provide data consistency over the state of the database as it was +when the transaction started. There is NO guarantee that if one puts data into the +transaction then it will be available to get within the same transaction. + +# Cursor + +This iterates over database entries given some bucket. +*/ +package database diff --git a/infrastructure/db/database/errors.go b/infrastructure/db/database/errors.go new file mode 100644 index 0000000..d341ce3 --- /dev/null +++ b/infrastructure/db/database/errors.go @@ -0,0 +1,12 @@ +package database + +import "errors" + +// ErrNotFound denotes that the requested item was not +// found in the database. +var ErrNotFound = errors.New("not found") + +// IsNotFoundError checks whether an error is an ErrNotFound. +func IsNotFoundError(err error) bool { + return errors.Is(err, ErrNotFound) +} diff --git a/infrastructure/db/database/keys.go b/infrastructure/db/database/keys.go new file mode 100644 index 0000000..e7450a2 --- /dev/null +++ b/infrastructure/db/database/keys.go @@ -0,0 +1,80 @@ +package database + +import ( + "encoding/hex" +) + +var bucketSeparator = byte('/') + +// Key is a helper type meant to combine prefix +// and suffix into a single database key. +type Key struct { + bucket *Bucket + suffix []byte +} + +// Bytes returns the full key bytes that are consisted +// from the bucket path concatenated to the suffix. +func (k *Key) Bytes() []byte { + bucketPath := k.bucket.Path() + keyBytes := make([]byte, len(bucketPath)+len(k.suffix)) + copy(keyBytes, bucketPath) + copy(keyBytes[len(bucketPath):], k.suffix) + return keyBytes +} + +func (k *Key) String() string { + return hex.EncodeToString(k.Bytes()) +} + +// Bucket returns the key bucket. +func (k *Key) Bucket() *Bucket { + return k.bucket +} + +// Suffix returns the key suffix. 
+func (k *Key) Suffix() []byte { + return k.suffix +} + +// newKey returns a new key composed +// of the given bucket and suffix +func newKey(bucket *Bucket, suffix []byte) *Key { + return &Key{bucket: bucket, suffix: suffix} +} + +// Bucket is a helper type meant to combine buckets +// and sub-buckets that can be used to create database +// keys and prefix-based cursors. +type Bucket struct { + path []byte +} + +// MakeBucket creates a new Bucket using the given path +// of buckets. +func MakeBucket(path []byte) *Bucket { + if len(path) > 0 && path[len(path)-1] != bucketSeparator { + path = append(path, bucketSeparator) + } + return &Bucket{path: path} +} + +// Bucket returns the sub-bucket of the current bucket +// defined by bucketBytes. +func (b *Bucket) Bucket(bucketBytes []byte) *Bucket { + newPath := make([]byte, 0, len(b.path)+len(bucketBytes)+1) // +1 for the separator in MakeBucket + newPath = append(newPath, b.path...) + newPath = append(newPath, bucketBytes...) + return MakeBucket(newPath) +} + +// Key returns a key in the current bucket with the +// given suffix. +func (b *Bucket) Key(suffix []byte) *Key { + return newKey(b, suffix) +} + +// Path returns the full path of the current bucket. 
+func (b *Bucket) Path() []byte { + return b.path +} diff --git a/infrastructure/db/database/keys_test.go b/infrastructure/db/database/keys_test.go new file mode 100644 index 0000000..87f9566 --- /dev/null +++ b/infrastructure/db/database/keys_test.go @@ -0,0 +1,87 @@ +package database + +import ( + "bytes" + "reflect" + "testing" +) + +func makeBucketJoin(path ...[]byte) *Bucket { + return MakeBucket(bytes.Join(path, []byte{bucketSeparator})) +} + +func TestBucketPath(t *testing.T) { + tests := []struct { + bucketByteSlices [][]byte + expectedPath []byte + }{ + { + bucketByteSlices: [][]byte{[]byte("hello")}, + expectedPath: []byte("hello/"), + }, + { + bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")}, + expectedPath: []byte("hello/world/"), + }, + } + + for _, test := range tests { + // Build a result using the MakeBucket function alone + resultKey := makeBucketJoin(test.bucketByteSlices...).Path() + if !reflect.DeepEqual(resultKey, test.expectedPath) { + t.Errorf("TestBucketPath: got wrong path using MakeBucket. "+ + "Want: %s, got: %s", string(test.expectedPath), string(resultKey)) + } + + // Build a result using sub-Bucket calls + bucket := MakeBucket(nil) + for _, bucketBytes := range test.bucketByteSlices { + bucket = bucket.Bucket(bucketBytes) + } + resultKey = bucket.Path() + if !reflect.DeepEqual(resultKey, test.expectedPath) { + t.Errorf("TestBucketPath: got wrong path using sub-Bucket "+ + "calls. 
Want: %s, got: %s", string(test.expectedPath), string(resultKey)) + } + } +} + +func TestBucketKey(t *testing.T) { + tests := []struct { + bucketByteSlices [][]byte + key []byte + expectedKeyBytes []byte + expectedKey *Key + }{ + { + bucketByteSlices: [][]byte{[]byte("hello")}, + key: []byte("test"), + expectedKeyBytes: []byte("hello/test"), + expectedKey: &Key{ + bucket: MakeBucket([]byte("hello")), + suffix: []byte("test"), + }, + }, + { + bucketByteSlices: [][]byte{[]byte("hello"), []byte("world")}, + key: []byte("test"), + expectedKeyBytes: []byte("hello/world/test"), + expectedKey: &Key{ + bucket: makeBucketJoin([]byte("hello"), []byte("world")), + suffix: []byte("test"), + }, + }, + } + + for _, test := range tests { + resultKey := makeBucketJoin(test.bucketByteSlices...).Key(test.key) + if !reflect.DeepEqual(resultKey, test.expectedKey) { + t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s", + test.expectedKeyBytes, resultKey) + } + if !bytes.Equal(resultKey.Bytes(), test.expectedKeyBytes) { + t.Errorf("TestBucketKey: got wrong key bytes. Want: %s, got: %s", + test.expectedKeyBytes, resultKey.Bytes()) + } + } +} diff --git a/infrastructure/db/database/ldb/cursor.go b/infrastructure/db/database/ldb/cursor.go new file mode 100644 index 0000000..0e65a33 --- /dev/null +++ b/infrastructure/db/database/ldb/cursor.go @@ -0,0 +1,113 @@ +package ldb + +import ( + "bytes" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// LevelDBCursor is a thin wrapper around native leveldb iterators. +type LevelDBCursor struct { + ldbIterator iterator.Iterator + bucket *database.Bucket + + isClosed bool +} + +// Cursor begins a new cursor over the given prefix. 
+func (db *LevelDB) Cursor(bucket *database.Bucket) (database.Cursor, error) { + ldbIterator := db.ldb.NewIterator(util.BytesPrefix(bucket.Path()), nil) + + return &LevelDBCursor{ + ldbIterator: ldbIterator, + bucket: bucket, + isClosed: false, + }, nil +} + +// Next moves the iterator to the next key/value pair. It returns whether the +// iterator is exhausted. Panics if the cursor is closed. +func (c *LevelDBCursor) Next() bool { + if c.isClosed { + panic("cannot call next on a closed cursor") + } + return c.ldbIterator.Next() +} + +// First moves the iterator to the first key/value pair. It returns false if +// such a pair does not exist. Panics if the cursor is closed. +func (c *LevelDBCursor) First() bool { + if c.isClosed { + panic("cannot call first on a closed cursor") + } + return c.ldbIterator.First() +} + +// Seek moves the iterator to the first key/value pair whose key is greater +// than or equal to the given key. It returns ErrNotFound if such pair does not +// exist. +func (c *LevelDBCursor) Seek(key *database.Key) error { + if c.isClosed { + return errors.New("cannot seek a closed cursor") + } + + found := c.ldbIterator.Seek(key.Bytes()) + if !found { + return errors.Wrapf(database.ErrNotFound, "key %s not found", key) + } + + // Use c.ldbIterator.Key because c.Key removes the prefix from the key + currentKey := c.ldbIterator.Key() + if currentKey == nil || !bytes.Equal(currentKey, key.Bytes()) { + return errors.Wrapf(database.ErrNotFound, "key %s not found", key) + } + + return nil +} + +// Key returns the key of the current key/value pair, or ErrNotFound if done. +// Note that the key is trimmed to not include the prefix the cursor was opened +// with. The caller should not modify the contents of the returned slice, and +// its contents may change on the next call to Next. 
+func (c *LevelDBCursor) Key() (*database.Key, error) { + if c.isClosed { + return nil, errors.New("cannot get the key of a closed cursor") + } + fullKeyPath := c.ldbIterator.Key() + if fullKeyPath == nil { + return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+ + "key of an exhausted cursor") + } + suffix := bytes.TrimPrefix(fullKeyPath, c.bucket.Path()) + return c.bucket.Key(suffix), nil +} + +// Value returns the value of the current key/value pair, or ErrNotFound if done. +// The caller should not modify the contents of the returned slice, and its +// contents may change on the next call to Next. +func (c *LevelDBCursor) Value() ([]byte, error) { + if c.isClosed { + return nil, errors.New("cannot get the value of a closed cursor") + } + value := c.ldbIterator.Value() + if value == nil { + return nil, errors.Wrapf(database.ErrNotFound, "cannot get the "+ + "value of an exhausted cursor") + } + return value, nil +} + +// Close releases associated resources. +func (c *LevelDBCursor) Close() error { + if c.isClosed { + return errors.New("cannot close an already closed cursor") + } + c.isClosed = true + c.ldbIterator.Release() + c.ldbIterator = nil + c.bucket = nil + return nil +} diff --git a/infrastructure/db/database/ldb/cursor_test.go b/infrastructure/db/database/ldb/cursor_test.go new file mode 100644 index 0000000..e592ab0 --- /dev/null +++ b/infrastructure/db/database/ldb/cursor_test.go @@ -0,0 +1,259 @@ +package ldb + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func validateCurrentCursorKeyAndValue(t *testing.T, testName string, cursor database.Cursor, + expectedKey *database.Key, expectedValue []byte) { + + cursorKey, err := cursor.Key() + if err != nil { + t.Fatalf("%s: Key "+ + "unexpectedly failed: %s", testName, err) + } + if !reflect.DeepEqual(cursorKey, expectedKey) { + t.Fatalf("%s: Key "+ + "returned wrong key. 
Want: %s, got: %s", + testName, string(expectedKey.Bytes()), string(cursorKey.Bytes())) + } + cursorValue, err := cursor.Value() + if err != nil { + t.Fatalf("%s: Value "+ + "unexpectedly failed for key %s: %s", + testName, cursorKey, err) + } + if !bytes.Equal(cursorValue, expectedValue) { + t.Fatalf("%s: Value "+ + "returned wrong value for key %s. Want: %s, got: %s", + testName, cursorKey, string(expectedValue), string(cursorValue)) + } +} + +func recoverFromClosedCursorPanic(t *testing.T, testName string) { + panicErr := recover() + if panicErr == nil { + t.Fatalf("%s: cursor unexpectedly "+ + "didn't panic after being closed", testName) + } + expectedPanicErr := "closed cursor" + if !strings.Contains(fmt.Sprintf("%v", panicErr), expectedPanicErr) { + t.Fatalf("%s: cursor panicked "+ + "with wrong message. Want: %v, got: %s", + testName, expectedPanicErr, panicErr) + } +} + +// TestCursorSanity validates typical cursor usage, including +// opening a cursor over some existing data, seeking back +// and forth over that data, and getting some keys/values out +// of the cursor. 
+func TestCursorSanity(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorSanity") + defer teardownFunc() + + // Write some data to the database + bucket := database.MakeBucket([]byte("bucket")) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%d", i) + value := fmt.Sprintf("value%d", i) + err := ldb.Put(bucket.Key([]byte(key)), []byte(value)) + if err != nil { + t.Fatalf("TestCursorSanity: Put "+ + "unexpectedly failed: %s", err) + } + } + + // Open a new cursor + cursor, err := ldb.Cursor(bucket) + if err != nil { + t.Fatalf("TestCursorSanity: ldb.Cursor "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := cursor.Close() + if err != nil { + t.Fatalf("TestCursorSanity: Close "+ + "unexpectedly failed: %s", err) + } + }() + + // Seek to first key and make sure its key and value are correct + hasNext := cursor.First() + if !hasNext { + t.Fatalf("TestCursorSanity: First " + + "unexpectedly returned non-existance") + } + expectedKey := bucket.Key([]byte("key0")) + expectedValue := []byte("value0") + validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue) + + // Seek to a non-existant key + err = cursor.Seek(database.MakeBucket(nil).Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("TestCursorSanity: Seek " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestCursorSanity: Seek "+ + "returned wrong error: %s", err) + } + + // Seek to the last key + err = cursor.Seek(bucket.Key([]byte("key9"))) + if err != nil { + t.Fatalf("TestCursorSanity: Seek "+ + "unexpectedly failed: %s", err) + } + expectedKey = bucket.Key([]byte("key9")) + expectedValue = []byte("value9") + validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue) + + // Call Next to get to the end of the cursor. This should + // return false to signify that there are no items after that. + // Key and Value calls should return ErrNotFound. 
+ hasNext = cursor.Next() + if hasNext { + t.Fatalf("TestCursorSanity: Next " + + "after last value is unexpectedly not done") + } + _, err = cursor.Key() + if err == nil { + t.Fatalf("TestCursorSanity: Key " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestCursorSanity: Key "+ + "returned wrong error: %s", err) + } + _, err = cursor.Value() + if err == nil { + t.Fatalf("TestCursorSanity: Value " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestCursorSanity: Value "+ + "returned wrong error: %s", err) + } +} + +func TestCursorCloseErrors(t *testing.T) { + tests := []struct { + name string + + // function is the LevelDBCursor function that we're + // verifying returns an error after the cursor had + // been closed. + function func(dbTx database.Cursor) error + }{ + { + name: "Seek", + function: func(cursor database.Cursor) error { + return cursor.Seek(database.MakeBucket(nil).Key([]byte{})) + }, + }, + { + name: "Key", + function: func(cursor database.Cursor) error { + _, err := cursor.Key() + return err + }, + }, + { + name: "Value", + function: func(cursor database.Cursor) error { + _, err := cursor.Value() + return err + }, + }, + { + name: "Close", + function: func(cursor database.Cursor) error { + return cursor.Close() + }, + }, + } + + for _, test := range tests { + func() { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorCloseErrors") + defer teardownFunc() + + // Open a new cursor + cursor, err := ldb.Cursor(database.MakeBucket(nil)) + if err != nil { + t.Fatalf("TestCursorCloseErrors: ldb.Cursor "+ + "unexpectedly failed: %s", err) + } + + // Close the cursor + err = cursor.Close() + if err != nil { + t.Fatalf("TestCursorCloseErrors: Close "+ + "unexpectedly failed: %s", err) + } + + expectedErrContainsString := "closed cursor" + + // Make sure that the test function returns a "closed transaction" error + err = test.function(cursor) + if err == nil { + 
t.Fatalf("TestCursorCloseErrors: %s "+ + "unexpectedly succeeded", test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("TestCursorCloseErrors: %s "+ + "returned wrong error. Want: %s, got: %s", + test.name, expectedErrContainsString, err) + } + }() + } +} + +func TestCursorCloseFirstAndNext(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestCursorCloseFirstAndNext") + defer teardownFunc() + + // Write some data to the database + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%d", i) + value := fmt.Sprintf("value%d", i) + err := ldb.Put(database.MakeBucket([]byte("bucket")).Key([]byte(key)), []byte(value)) + if err != nil { + t.Fatalf("TestCursorCloseFirstAndNext: Put "+ + "unexpectedly failed: %s", err) + } + } + + // Open a new cursor + cursor, err := ldb.Cursor(database.MakeBucket([]byte("bucket"))) + if err != nil { + t.Fatalf("TestCursorCloseFirstAndNext: ldb.Cursor "+ + "unexpectedly failed: %s", err) + } + + // Close the cursor + err = cursor.Close() + if err != nil { + t.Fatalf("TestCursorCloseFirstAndNext: Close "+ + "unexpectedly failed: %s", err) + } + + // We expect First to panic + func() { + defer recoverFromClosedCursorPanic(t, "TestCursorCloseFirstAndNext") + cursor.First() + }() + + // We expect Next to panic + func() { + defer recoverFromClosedCursorPanic(t, "TestCursorCloseFirstAndNext") + cursor.Next() + }() +} diff --git a/infrastructure/db/database/ldb/leveldb.go b/infrastructure/db/database/ldb/leveldb.go new file mode 100644 index 0000000..c52957c --- /dev/null +++ b/infrastructure/db/database/ldb/leveldb.go @@ -0,0 +1,99 @@ +package ldb + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/syndtr/goleveldb/leveldb" + ldbErrors "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +// LevelDB defines a thin wrapper around leveldb. 
+type LevelDB struct { + ldb *leveldb.DB +} + +// NewLevelDB opens a leveldb instance defined by the given path. +func NewLevelDB(path string, cacheSizeMiB int) (*LevelDB, error) { + // Open leveldb. If it doesn't exist, create it. + options := Options() + options.BlockCacheCapacity = cacheSizeMiB * opt.MiB + options.WriteBuffer = (cacheSizeMiB * opt.MiB) / 2 + ldb, err := leveldb.OpenFile(path, &options) + + // If the database is corrupted, attempt to recover. + if _, corrupted := err.(*ldbErrors.ErrCorrupted); corrupted { + log.Warnf("LevelDB corruption detected for path %s: %s", + path, err) + var recoverErr error + ldb, recoverErr = leveldb.RecoverFile(path, nil) + if recoverErr != nil { + return nil, errors.Wrapf(err, "failed recovering from "+ + "database corruption: %s", recoverErr) + } + log.Warnf("LevelDB recovered from corruption for path %s", + path) + } + + // If the database cannot be opened for any other + // reason, return the error as-is. + if err != nil { + return nil, errors.WithStack(err) + } + + db := &LevelDB{ + ldb: ldb, + } + return db, nil +} + +// Compact compacts the leveldb instance. +func (db *LevelDB) Compact() error { + err := db.ldb.CompactRange(util.Range{Start: nil, Limit: nil}) + return errors.WithStack(err) +} + +// Close closes the leveldb instance. +func (db *LevelDB) Close() error { + err := db.ldb.Close() + return errors.WithStack(err) +} + +// Put sets the value for the given key. It overwrites +// any previous value for that key. +func (db *LevelDB) Put(key *database.Key, value []byte) error { + err := db.ldb.Put(key.Bytes(), value, nil) + return errors.WithStack(err) +} + +// Get gets the value for the given key. It returns +// ErrNotFound if the given key does not exist. 
+func (db *LevelDB) Get(key *database.Key) ([]byte, error) { + data, err := db.ldb.Get(key.Bytes(), nil) + if err != nil { + if errors.Is(err, leveldb.ErrNotFound) { + return nil, errors.Wrapf(database.ErrNotFound, + "key %s not found", key) + } + return nil, errors.WithStack(err) + } + return data, nil +} + +// Has returns true if the database does contains the +// given key. +func (db *LevelDB) Has(key *database.Key) (bool, error) { + exists, err := db.ldb.Has(key.Bytes(), nil) + if err != nil { + return false, errors.WithStack(err) + } + return exists, nil +} + +// Delete deletes the value for the given key. Will not +// return an error if the key doesn't exist. +func (db *LevelDB) Delete(key *database.Key) error { + err := db.ldb.Delete(key.Bytes(), nil) + return errors.WithStack(err) +} diff --git a/infrastructure/db/database/ldb/leveldb_test.go b/infrastructure/db/database/ldb/leveldb_test.go new file mode 100644 index 0000000..9633a62 --- /dev/null +++ b/infrastructure/db/database/ldb/leveldb_test.go @@ -0,0 +1,153 @@ +package ldb + +import ( + "io/ioutil" + "reflect" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func prepareDatabaseForTest(t *testing.T, testName string) (ldb *LevelDB, teardownFunc func()) { + // Create a temp db to run tests against + path, err := ioutil.TempDir("", testName) + if err != nil { + t.Fatalf("%s: TempDir unexpectedly "+ + "failed: %s", testName, err) + } + ldb, err = NewLevelDB(path, 8) + if err != nil { + t.Fatalf("%s: NewLevelDB unexpectedly "+ + "failed: %s", testName, err) + } + teardownFunc = func() { + err = ldb.Close() + if err != nil { + t.Fatalf("%s: Close unexpectedly "+ + "failed: %s", testName, err) + } + } + return ldb, teardownFunc +} + +func TestLevelDBSanity(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestLevelDBSanity") + defer teardownFunc() + + // Put something into the db + key := database.MakeBucket(nil).Key([]byte("key")) + putData := 
[]byte("Hello world!") + err := ldb.Put(key, putData) + if err != nil { + t.Fatalf("TestLevelDBSanity: Put returned "+ + "unexpected error: %s", err) + } + + // Get from the key previously put to + getData, err := ldb.Get(key) + if err != nil { + t.Fatalf("TestLevelDBSanity: Get returned "+ + "unexpected error: %s", err) + } + + // Make sure that the put data and the get data are equal + if !reflect.DeepEqual(getData, putData) { + t.Fatalf("TestLevelDBSanity: get data and "+ + "put data are not equal. Put: %s, got: %s", + string(putData), string(getData)) + } +} + +func TestLevelDBTransactionSanity(t *testing.T) { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestLevelDBTransactionSanity") + defer teardownFunc() + + // Case 1. Write in tx and then read directly from the DB + // Begin a new transaction + tx, err := ldb.Begin() + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Begin "+ + "unexpectedly failed: %s", err) + } + + // Put something into the transaction + key := database.MakeBucket(nil).Key([]byte("key")) + putData := []byte("Hello world!") + err = tx.Put(key, putData) + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Put "+ + "returned unexpected error: %s", err) + } + + // Get from the key previously put to. Since the tx is not + // yet committed, this should return ErrNotFound. + _, err = ldb.Get(key) + if err == nil { + t.Fatalf("TestLevelDBTransactionSanity: Get " + + "unexpectedly succeeded") + } + if !database.IsNotFoundError(err) { + t.Fatalf("TestLevelDBTransactionSanity: Get "+ + "returned wrong error: %s", err) + } + + // Commit the transaction + err = tx.Commit() + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Commit "+ + "returned unexpected error: %s", err) + } + + // Get from the key previously put to. Now that the tx was + // committed, this should succeed. 
+ getData, err := ldb.Get(key) + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Get "+ + "returned unexpected error: %s", err) + } + + // Make sure that the put data and the get data are equal + if !reflect.DeepEqual(getData, putData) { + t.Fatalf("TestLevelDBTransactionSanity: get "+ + "data and put data are not equal. Put: %s, got: %s", + string(putData), string(getData)) + } + + // Case 2. Write directly to the DB and then read from a tx + // Put something into the db + key = database.MakeBucket(nil).Key([]byte("key2")) + putData = []byte("Goodbye world!") + err = ldb.Put(key, putData) + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Put "+ + "returned unexpected error: %s", err) + } + + // Begin a new transaction + tx, err = ldb.Begin() + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Begin "+ + "unexpectedly failed: %s", err) + } + + // Get from the key previously put to + getData, err = tx.Get(key) + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: Get "+ + "returned unexpected error: %s", err) + } + + // Make sure that the put data and the get data are equal + if !reflect.DeepEqual(getData, putData) { + t.Fatalf("TestLevelDBTransactionSanity: get "+ + "data and put data are not equal. 
Put: %s, got: %s", + string(putData), string(getData)) + } + + // Rollback the transaction + err = tx.Rollback() + if err != nil { + t.Fatalf("TestLevelDBTransactionSanity: rollback "+ + "returned unexpected error: %s", err) + } +} diff --git a/infrastructure/db/database/ldb/log.go b/infrastructure/db/database/ldb/log.go new file mode 100644 index 0000000..b89adf1 --- /dev/null +++ b/infrastructure/db/database/ldb/log.go @@ -0,0 +1,7 @@ +package ldb + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("SXDB") diff --git a/infrastructure/db/database/ldb/options.go b/infrastructure/db/database/ldb/options.go new file mode 100644 index 0000000..d5f830d --- /dev/null +++ b/infrastructure/db/database/ldb/options.go @@ -0,0 +1,13 @@ +package ldb + +import "github.com/syndtr/goleveldb/leveldb/opt" + +// Options is a function that returns a leveldb +// opt.Options struct for opening a database. +func Options() opt.Options { + return opt.Options{ + Compression: opt.NoCompression, + DisableSeeksCompaction: true, + NoSync: true, + } +} diff --git a/infrastructure/db/database/ldb/transaction.go b/infrastructure/db/database/ldb/transaction.go new file mode 100644 index 0000000..acbfd11 --- /dev/null +++ b/infrastructure/db/database/ldb/transaction.go @@ -0,0 +1,115 @@ +package ldb + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/syndtr/goleveldb/leveldb" +) + +// LevelDBTransaction is a thin wrapper around native leveldb +// batches. It supports both get and put. +// +// Note that reads are done from the Database directly, so if another transaction changed the data, +// you will read the new data, and not the one from the time the transaction was opened/ +// +// Note: As it's currently implemented, if one puts data into the transaction +// then it will not be available to get within the same transaction. 
+type LevelDBTransaction struct { + db *LevelDB + batch *leveldb.Batch + isClosed bool +} + +// Begin begins a new transaction. +func (db *LevelDB) Begin() (database.Transaction, error) { + batch := new(leveldb.Batch) + + transaction := &LevelDBTransaction{ + db: db, + batch: batch, + isClosed: false, + } + return transaction, nil +} + +// Commit commits whatever changes were made to the database +// within this transaction. +func (tx *LevelDBTransaction) Commit() error { + if tx.isClosed { + return errors.New("cannot commit a closed transaction") + } + + tx.isClosed = true + return errors.WithStack(tx.db.ldb.Write(tx.batch, nil)) +} + +// Rollback rolls back whatever changes were made to the +// database within this transaction. +func (tx *LevelDBTransaction) Rollback() error { + if tx.isClosed { + return errors.New("cannot rollback a closed transaction") + } + + tx.isClosed = true + tx.batch.Reset() + return nil +} + +// RollbackUnlessClosed rolls back changes that were made to +// the database within the transaction, unless the transaction +// had already been closed using either Rollback or Commit. +func (tx *LevelDBTransaction) RollbackUnlessClosed() error { + if tx.isClosed { + return nil + } + return tx.Rollback() +} + +// Put sets the value for the given key. It overwrites +// any previous value for that key. +func (tx *LevelDBTransaction) Put(key *database.Key, value []byte) error { + if tx.isClosed { + return errors.New("cannot put into a closed transaction") + } + + tx.batch.Put(key.Bytes(), value) + return nil +} + +// Get gets the value for the given key. It returns +// ErrNotFound if the given key does not exist. +func (tx *LevelDBTransaction) Get(key *database.Key) ([]byte, error) { + if tx.isClosed { + return nil, errors.New("cannot get from a closed transaction") + } + return tx.db.Get(key) +} + +// Has returns true if the database does contains the +// given key. 
+func (tx *LevelDBTransaction) Has(key *database.Key) (bool, error) { + if tx.isClosed { + return false, errors.New("cannot has from a closed transaction") + } + return tx.db.Has(key) +} + +// Delete deletes the value for the given key. Will not +// return an error if the key doesn't exist. +func (tx *LevelDBTransaction) Delete(key *database.Key) error { + if tx.isClosed { + return errors.New("cannot delete from a closed transaction") + } + + tx.batch.Delete(key.Bytes()) + return nil +} + +// Cursor begins a new cursor over the given bucket. +func (tx *LevelDBTransaction) Cursor(bucket *database.Bucket) (database.Cursor, error) { + if tx.isClosed { + return nil, errors.New("cannot open a cursor from a closed transaction") + } + + return tx.db.Cursor(bucket) +} diff --git a/infrastructure/db/database/ldb/transaction_test.go b/infrastructure/db/database/ldb/transaction_test.go new file mode 100644 index 0000000..c61b291 --- /dev/null +++ b/infrastructure/db/database/ldb/transaction_test.go @@ -0,0 +1,147 @@ +package ldb + +import ( + "strings" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func TestTransactionCloseErrors(t *testing.T) { + tests := []struct { + name string + + // function is the LevelDBTransaction function that + // we're verifying whether it returns an error after + // the transaction had been closed. 
+ function func(dbTx *LevelDBTransaction) error + shouldReturnError bool + }{ + { + name: "Put", + function: func(dbTx *LevelDBTransaction) error { + return dbTx.Put(database.MakeBucket(nil).Key([]byte("key")), []byte("value")) + }, + shouldReturnError: true, + }, + { + name: "Get", + function: func(dbTx *LevelDBTransaction) error { + _, err := dbTx.Get(database.MakeBucket(nil).Key([]byte("key"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Has", + function: func(dbTx *LevelDBTransaction) error { + _, err := dbTx.Has(database.MakeBucket(nil).Key([]byte("key"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Delete", + function: func(dbTx *LevelDBTransaction) error { + return dbTx.Delete(database.MakeBucket(nil).Key([]byte("key"))) + }, + shouldReturnError: true, + }, + { + name: "Cursor", + function: func(dbTx *LevelDBTransaction) error { + _, err := dbTx.Cursor(database.MakeBucket([]byte("bucket"))) + return err + }, + shouldReturnError: true, + }, + { + name: "Rollback", + function: (*LevelDBTransaction).Rollback, + shouldReturnError: true, + }, + { + name: "Commit", + function: (*LevelDBTransaction).Commit, + shouldReturnError: true, + }, + { + name: "RollbackUnlessClosed", + function: (*LevelDBTransaction).RollbackUnlessClosed, + shouldReturnError: false, + }, + } + + for _, test := range tests { + func() { + ldb, teardownFunc := prepareDatabaseForTest(t, "TestTransactionCloseErrors") + defer teardownFunc() + + // Begin a new transaction to test Commit + commitTx, err := ldb.Begin() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := commitTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Commit the Commit test transaction + err = commitTx.Commit() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Commit "+ + "unexpectedly failed: 
%s", err) + } + + // Begin a new transaction to test Rollback + rollbackTx, err := ldb.Begin() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Begin "+ + "unexpectedly failed: %s", err) + } + defer func() { + err := rollbackTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: RollbackUnlessClosed "+ + "unexpectedly failed: %s", err) + } + }() + + // Rollback the Rollback test transaction + err = rollbackTx.Rollback() + if err != nil { + t.Fatalf("TestTransactionCloseErrors: Rollback "+ + "unexpectedly failed: %s", err) + } + + expectedErrContainsString := "closed transaction" + + // Make sure that the test function returns a "closed transaction" error + // for both the commitTx and the rollbackTx + for _, closedTx := range []database.Transaction{commitTx, rollbackTx} { + err = test.function(closedTx.(*LevelDBTransaction)) + if test.shouldReturnError { + if err == nil { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "unexpectedly succeeded", test.name) + } + if !strings.Contains(err.Error(), expectedErrContainsString) { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "returned wrong error. Want: %s, got: %s", + test.name, expectedErrContainsString, err) + } + } else { + if err != nil { + t.Fatalf("TestTransactionCloseErrors: %s "+ + "unexpectedly failed: %s", test.name, err) + } + } + } + }() + } +} diff --git a/infrastructure/db/database/transaction.go b/infrastructure/db/database/transaction.go new file mode 100644 index 0000000..407d5dd --- /dev/null +++ b/infrastructure/db/database/transaction.go @@ -0,0 +1,25 @@ +package database + +// Transaction defines the interface of a generic spectred database +// transaction. +// +// Note: Transactions provide data consistency over the state of +// the database as it was when the transaction started. There is +// NO guarantee that if one puts data into the transaction then +// it will be available to get within the same transaction. 
+type Transaction interface { + DataAccessor + + // Rollback rolls back whatever changes were made to the + // database within this transaction. + Rollback() error + + // Commit commits whatever changes were made to the database + // within this transaction. + Commit() error + + // RollbackUnlessClosed rolls back changes that were made to + // the database within the transaction, unless the transaction + // had already been closed using either Rollback or Commit. + RollbackUnlessClosed() error +} diff --git a/infrastructure/db/database/transaction_test.go b/infrastructure/db/database/transaction_test.go new file mode 100644 index 0000000..800e5ee --- /dev/null +++ b/infrastructure/db/database/transaction_test.go @@ -0,0 +1,496 @@ +// All tests within this file should call testForAllDatabaseTypes +// over the actual test. This is to make sure that all supported +// database types adhere to the assumptions defined in the +// interfaces in this package. + +package database_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/spectre-project/spectred/infrastructure/db/database" +) + +func TestTransactionPut(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionPut", testTransactionPut) +} + +func testTransactionPut(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put value1 into the transaction + key := database.MakeBucket(nil).Key([]byte("key")) + value1 := []byte("value1") + err = dbTx.Put(key, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Put value2 into the transaction with the same key + value2 := []byte("value2") + err = dbTx.Put(key, value2) + if err != nil { + 
t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Commit the transaction + err = dbTx.Commit() + if err != nil { + t.Fatalf("%s: Commit "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value is value2 + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value2) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value2), string(returnedValue)) + } +} + +func TestTransactionGet(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionGet", testTransactionGet) +} + +func testTransactionGet(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key1 := database.MakeBucket(nil).Key([]byte("key1")) + value1 := []byte("value1") + err := db.Put(key1, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Get the value back and make sure it's the same one + returnedValue, err := dbTx.Get(key1) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value1) { + t.Fatalf("%s: Get "+ + "returned wrong value. 
Want: %s, got: %s", + testName, string(value1), string(returnedValue)) + } + + // Try getting a non-existent value and make sure + // the returned error is ErrNotFound + _, err = dbTx.Get(database.MakeBucket(nil).Key([]byte("doesn't exist"))) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } + + // Put a new value into the database outside of the transaction + key2 := database.MakeBucket(nil).Key([]byte("key2")) + value2 := []byte("value2") + err = db.Put(key2, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the new value exists inside the transaction + newValue2, err := dbTx.Get(key2) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %v", testName, err) + } + if !bytes.Equal(value2, newValue2) { + t.Fatalf("Expected %x and %x to be the same", value2, newValue2) + } + + // Put a new value into the transaction + key3 := database.MakeBucket(nil).Key([]byte("key3")) + value3 := []byte("value3") + err = dbTx.Put(key3, value3) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the new value doesn't exist outside the transaction + _, err = db.Get(key3) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error: %s", testName, err) + } +} + +func TestTransactionHas(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionHas", testTransactionHas) +} + +func testTransactionHas(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key1 := database.MakeBucket(nil).Key([]byte("key1")) + value1 := []byte("value1") + err := db.Put(key1, value1) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Begin a 
new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Make sure that Has returns true for the value we just put + exists, err := dbTx.Has(key1) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if !exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value does not exist", testName) + } + + // Make sure that Has returns false for a non-existent value + exists, err = dbTx.Has(database.MakeBucket(nil).Key([]byte("doesn't exist"))) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } + + // Put a new value into the database outside of the transaction + key2 := database.MakeBucket(nil).Key([]byte("key2")) + value2 := []byte("value2") + err = db.Put(key2, value2) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the new value exists inside the transaction + exists, err = dbTx.Has(key2) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if !exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value doesn't exists", testName) + } +} + +func TestTransactionDelete(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionDelete", testTransactionDelete) +} + +func testTransactionDelete(t *testing.T, db database.Database, testName string) { + // Put a value into the database + key := database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err := db.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Begin two new transactions + dbTx1, err := db.Begin() 
+ if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + dbTx2, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx1.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + err = dbTx2.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Delete the value in the first transaction + err = dbTx1.Delete(key) + if err != nil { + t.Fatalf("%s: Delete "+ + "unexpectedly failed: %s", testName, err) + } + + // Commit the first transaction + err = dbTx1.Commit() + if err != nil { + t.Fatalf("%s: Commit "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that Has returns false for the deleted value + exists, err := db.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } + + // Make sure that the second transaction is also affected + exists, err = dbTx2.Has(key) + if err != nil { + t.Fatalf("%s: Has "+ + "unexpectedly failed: %s", testName, err) + } + if exists { + t.Fatalf("%s: Has "+ + "unexpectedly returned that the value exists", testName) + } +} + +func TestTransactionCommit(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionCommit", testTransactionCommit) +} + +func testTransactionCommit(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put a value into the transaction + key := 
database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err = dbTx.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Commit the transaction + err = dbTx.Commit() + if err != nil { + t.Fatalf("%s: Commit "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value exists and is as expected + returnedValue, err := db.Get(key) + if err != nil { + t.Fatalf("%s: Get "+ + "unexpectedly failed: %s", testName, err) + } + if !bytes.Equal(returnedValue, value) { + t.Fatalf("%s: Get "+ + "returned wrong value. Want: %s, got: %s", + testName, string(value), string(returnedValue)) + } + + // Make sure that further operations on the transaction return an error + _, err = dbTx.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + expectedError := "closed transaction" + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("%s: Get "+ + "returned wrong error. 
Want: %s, got: %s", + testName, expectedError, err) + } +} + +func TestTransactionRollback(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionRollback", testTransactionRollback) +} + +func testTransactionRollback(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put a value into the transaction + key := database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err = dbTx.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // Rollback the transaction + err = dbTx.Rollback() + if err != nil { + t.Fatalf("%s: Rollback "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value did not get added to the database + _, err = db.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error", testName) + } + + // Make sure that further operations on the transaction return an error + _, err = dbTx.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + expectedError := "closed transaction" + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("%s: Get "+ + "returned wrong error. 
Want: %s, got: %s", + testName, expectedError, err) + } +} + +func TestTransactionRollbackUnlessClosed(t *testing.T) { + testForAllDatabaseTypes(t, "TestTransactionRollbackUnlessClosed", testTransactionRollbackUnlessClosed) +} + +func testTransactionRollbackUnlessClosed(t *testing.T, db database.Database, testName string) { + // Begin a new transaction + dbTx, err := db.Begin() + if err != nil { + t.Fatalf("%s: Begin "+ + "unexpectedly failed: %s", testName, err) + } + defer func() { + err := dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + }() + + // Put a value into the transaction + key := database.MakeBucket(nil).Key([]byte("key")) + value := []byte("value") + err = dbTx.Put(key, value) + if err != nil { + t.Fatalf("%s: Put "+ + "unexpectedly failed: %s", testName, err) + } + + // RollbackUnlessClosed the transaction + err = dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } + + // Make sure that the returned value did not get added to the database + _, err = db.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + if !database.IsNotFoundError(err) { + t.Fatalf("%s: Get "+ + "returned wrong error", testName) + } + + // Make sure that further operations on the transaction return an error + _, err = dbTx.Get(key) + if err == nil { + t.Fatalf("%s: Get "+ + "unexpectedly succeeded", testName) + } + expectedError := "closed transaction" + if !strings.Contains(err.Error(), expectedError) { + t.Fatalf("%s: Get "+ + "returned wrong error. 
Want: %s, got: %s", + testName, expectedError, err) + } + + // Make sure that further calls to RollbackUnlessClosed don't return an error + err = dbTx.RollbackUnlessClosed() + if err != nil { + t.Fatalf("%s: RollbackUnlessClosed "+ + "unexpectedly failed: %s", testName, err) + } +} diff --git a/infrastructure/logger/backend.go b/infrastructure/logger/backend.go new file mode 100644 index 0000000..cb5574c --- /dev/null +++ b/infrastructure/logger/backend.go @@ -0,0 +1,191 @@ +package logger + +import ( + "fmt" + "github.com/jrick/logrotate/rotator" + "github.com/pkg/errors" + "io" + "os" + "path/filepath" + "runtime/debug" + "strings" + "sync" + "sync/atomic" +) + +const normalLogSize = 512 + +// defaultFlags specifies changes to the default logger behavior. It is set +// during package init and configured using the LOGFLAGS environment variable. +// New logger backends can override these default flags using WithFlags. +// We're using this instead of `init()` function because variables are initialized before init functions, +// and this variable is used inside other variable intializations, so runs before them +var defaultFlags = getDefaultFlags() + +// Flags to modify Backend's behavior. +const ( + // LogFlagLongFile modifies the logger output to include full path and line number + // of the logging callsite, e.g. /a/b/c/main.go:123. + LogFlagLongFile uint32 = 1 << iota + + // LogFlagShortFile modifies the logger output to include filename and line number + // of the logging callsite, e.g. main.go:123. takes precedence over LogFlagLongFile. + LogFlagShortFile +) + +// Read logger flags from the LOGFLAGS environment variable. Multiple flags can +// be set at once, separated by commas. 
+func getDefaultFlags() (flags uint32) { + for _, f := range strings.Split(os.Getenv("LOGFLAGS"), ",") { + switch f { + case "longfile": + flags |= LogFlagLongFile + case "shortfile": + flags |= LogFlagShortFile + } + } + return +} + +const logsBuffer = 0 + +// Backend is a logging backend. Subsystems created from the backend write to +// the backend's Writer. Backend provides atomic writes to the Writer from all +// subsystems. +type Backend struct { + flag uint32 + isRunning uint32 + writers []logWriter + writeChan chan logEntry + syncClose sync.Mutex // used to sync that the logger finished writing everything +} + +// NewBackendWithFlags configures a Backend to use the specified flags rather than using +// the package's defaults as determined through the LOGFLAGS environment +// variable. +func NewBackendWithFlags(flags uint32) *Backend { + return &Backend{flag: flags, writeChan: make(chan logEntry, logsBuffer)} +} + +// NewBackend creates a new logger backend. +func NewBackend() *Backend { + return NewBackendWithFlags(defaultFlags) +} + +const ( + defaultThresholdKB = 100 * 1000 // 100 MB logs by default. + defaultMaxRolls = 8 // keep 8 last logs by default. +) + +type logWriter interface { + io.WriteCloser + LogLevel() Level +} + +type logWriterWrap struct { + io.WriteCloser + logLevel Level +} + +func (lw logWriterWrap) LogLevel() Level { + return lw.logLevel +} + +// AddLogFile adds a file which the log will write into on a certain +// log level with the default log rotation settings. It'll create the file if it doesn't exist. +func (b *Backend) AddLogFile(logFile string, logLevel Level) error { + return b.AddLogFileWithCustomRotator(logFile, logLevel, defaultThresholdKB, defaultMaxRolls) +} + +// AddLogWriter adds a type implementing io.WriteCloser which the log will write into on a certain +// log level with the default log rotation settings. It'll create the file if it doesn't exist. 
+func (b *Backend) AddLogWriter(logWriter io.WriteCloser, logLevel Level) error { + if b.IsRunning() { + return errors.New("The logger is already running") + } + b.writers = append(b.writers, logWriterWrap{ + WriteCloser: logWriter, + logLevel: logLevel, + }) + return nil +} + +// AddLogFileWithCustomRotator adds a file which the log will write into on a certain +// log level, with the specified log rotation settings. +// It'll create the file if it doesn't exist. +func (b *Backend) AddLogFileWithCustomRotator(logFile string, logLevel Level, thresholdKB int64, maxRolls int) error { + if b.IsRunning() { + return errors.New("The logger is already running") + } + logDir, _ := filepath.Split(logFile) + // if the logDir is empty then `logFile` is in the cwd and there's no need to create any directory. + if logDir != "" { + err := os.MkdirAll(logDir, 0700) + if err != nil { + return errors.Errorf("failed to create log directory: %+v", err) + } + } + r, err := rotator.New(logFile, thresholdKB, false, maxRolls) + if err != nil { + return errors.Errorf("failed to create file rotator: %s", err) + } + b.writers = append(b.writers, logWriterWrap{ + WriteCloser: r, + logLevel: logLevel, + }) + return nil +} + +// Run launches the logger backend in a separate go-routine. should only be called once. 
+func (b *Backend) Run() error { + if !atomic.CompareAndSwapUint32(&b.isRunning, 0, 1) { + return errors.New("The logger is already running") + } + go func() { + defer func() { + if err := recover(); err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Fatal error in logger.Backend goroutine: %+v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "Goroutine stacktrace: %s\n", debug.Stack()) + } + }() + b.runBlocking() + }() + return nil +} + +func (b *Backend) runBlocking() { + defer atomic.StoreUint32(&b.isRunning, 0) + b.syncClose.Lock() + defer b.syncClose.Unlock() + + for log := range b.writeChan { + for _, writer := range b.writers { + if log.level >= writer.LogLevel() { + _, _ = writer.Write(log.log) + } + } + } +} + +// IsRunning returns true if backend.Run() has been called and false if it hasn't. +func (b *Backend) IsRunning() bool { + return atomic.LoadUint32(&b.isRunning) != 0 +} + +// Close finalizes all log rotators for this backend +func (b *Backend) Close() { + close(b.writeChan) + // Wait for it to finish writing using the syncClose mutex. + b.syncClose.Lock() + defer b.syncClose.Unlock() + for _, writer := range b.writers { + _ = writer.Close() + } +} + +// Logger returns a new logger for a particular subsystem that writes to the +// Backend b. A tag describes the subsystem and is included in all log +// messages. The logger uses the info verbosity level by default. +func (b *Backend) Logger(subsystemTag string) *Logger { + return &Logger{LevelOff, subsystemTag, b, b.writeChan} +} diff --git a/infrastructure/logger/doc.go b/infrastructure/logger/doc.go new file mode 100644 index 0000000..9c53461 --- /dev/null +++ b/infrastructure/logger/doc.go @@ -0,0 +1,23 @@ +/* +Package logger defines an interface and default implementation for subsystem +logging. + +Log level verbosity may be modified at runtime for each individual subsystem +logger. + +The default implementation in this package must be created by the Backend type. 
+Backends can write to any io.Writer, including multi-writers created by +io.MultiWriter. Multi-writers allow log output to be written to many writers, +including standard output and log files. + +Optional logging behavior can be specified by using the LOGFLAGS environment +variable and overridden per-Backend by using the WithFlags call option. Multiple +LOGFLAGS options can be specified, separated by commas. The following options +are recognized: + + longfile: Include the full filepath and line number in all log messages + + shortfile: Include the filename and line number in all log messages. + Overrides longfile. +*/ +package logger diff --git a/infrastructure/logger/level.go b/infrastructure/logger/level.go new file mode 100644 index 0000000..35e4d8f --- /dev/null +++ b/infrastructure/logger/level.go @@ -0,0 +1,54 @@ +package logger + +import "strings" + +// Level is the level at which a logger is configured. All messages sent +// to a level which is below the current level are filtered. +type Level uint32 + +// Level constants. +const ( + LevelTrace Level = iota + LevelDebug + LevelInfo + LevelWarn + LevelError + LevelCritical + LevelOff +) + +// levelStrs defines the human-readable names for each logging level. +var levelStrs = [...]string{"TRC", "DBG", "INF", "WRN", "ERR", "CRT", "OFF"} + +// LevelFromString returns a level based on the input string s. If the input +// can't be interpreted as a valid log level, the info level and false is +// returned. 
+func LevelFromString(s string) (l Level, ok bool) { + switch strings.ToLower(s) { + case "trace", "trc": + return LevelTrace, true + case "debug", "dbg": + return LevelDebug, true + case "info", "inf": + return LevelInfo, true + case "warn", "wrn": + return LevelWarn, true + case "error", "err": + return LevelError, true + case "critical", "crt": + return LevelCritical, true + case "off": + return LevelOff, true + default: + return LevelInfo, false + } +} + +// String returns the tag of the logger used in log messages, or "OFF" if +// the level will not produce any log output. +func (l Level) String() string { + if l >= LevelOff { + return "OFF" + } + return levelStrs[l] +} diff --git a/infrastructure/logger/logger.go b/infrastructure/logger/logger.go new file mode 100644 index 0000000..bd6758a --- /dev/null +++ b/infrastructure/logger/logger.go @@ -0,0 +1,182 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Copyright (c) 2017 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package logger + +import ( + "fmt" + "os" + "sort" + "strings" + "sync" + + "github.com/pkg/errors" +) + +// Loggers per subsystem. A single backend logger is created and all subsytem +// loggers created from it will write to the backend. When adding new +// subsystems, add the subsystem logger variable here and to the +// subsystemLoggers map. +// +// Loggers can not be used before the log rotator has been initialized with a +// log file. This must be performed early during application startup by calling +// InitLog. +var ( + // BackendLog is the logging backend used to create all subsystem loggers. + BackendLog = NewBackend() + + // subsystemLoggers maps each subsystem identifier to its associated logger. 
+ subsystemLoggers = make(map[string]*Logger) + subsystemLoggersMutex sync.Mutex +) + +// RegisterSubSystem Registers a new subsystem logger, should be called in a global variable, +// returns the existing one if the subsystem is already registered +func RegisterSubSystem(subsystem string) *Logger { + subsystemLoggersMutex.Lock() + defer subsystemLoggersMutex.Unlock() + logger, exists := subsystemLoggers[subsystem] + if !exists { + logger = BackendLog.Logger(subsystem) + subsystemLoggers[subsystem] = logger + } + return logger +} + +// InitLogStdout attaches stdout to the backend log and starts the logger. +func InitLogStdout(logLevel Level) { + err := BackendLog.AddLogWriter(os.Stdout, logLevel) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %s", LevelWarn, err) + os.Exit(1) + } + + err = BackendLog.Run() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err) + os.Exit(1) + } +} + +// InitLog attaches log file and error log file to the backend log. +func InitLog(logFile, errLogFile string) { + // 280 MB (MB=1000^2 bytes) + err := BackendLog.AddLogFileWithCustomRotator(logFile, LevelTrace, 1000*280, 64) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", logFile, LevelTrace, err) + os.Exit(1) + } + err = BackendLog.AddLogFile(errLogFile, LevelWarn) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %s", errLogFile, LevelWarn, err) + os.Exit(1) + } + + InitLogStdout(LevelInfo) +} + +// SetLogLevel sets the logging level for provided subsystem. Invalid +// subsystems are ignored. Uninitialized subsystems are dynamically created as +// needed. 
+func SetLogLevel(subsystemID string, logLevel string) error { + subsystemLoggersMutex.Lock() + defer subsystemLoggersMutex.Unlock() + logger, ok := subsystemLoggers[subsystemID] + if !ok { + return errors.Errorf("'%s' Isn't a valid subsystem", subsystemID) + } + level, ok := LevelFromString(logLevel) + if !ok { + return errors.Errorf("'%s' Isn't a valid log level", logLevel) + } + + logger.SetLevel(level) + return nil +} + +// SetLogLevelsString the same as SetLogLevels but also parses the level from a string +func SetLogLevelsString(logLevel string) error { + level, ok := LevelFromString(logLevel) + if !ok { + return errors.Errorf("'%s' Isn't a valid log level", logLevel) + } + SetLogLevels(level) + return nil +} + +// SetLogLevels sets the log level for all subsystem loggers to the passed +// level. It also dynamically creates the subsystem loggers as needed, so it +// can be used to initialize the logging system. +func SetLogLevels(logLevel Level) { + subsystemLoggersMutex.Lock() + defer subsystemLoggersMutex.Unlock() + // Configure all sub-systems with the new logging level. Dynamically + // create loggers as needed. + for _, logger := range subsystemLoggers { + logger.SetLevel(logLevel) + } +} + +// SupportedSubsystems returns a sorted slice of the supported subsystems for +// logging purposes. +func SupportedSubsystems() []string { + subsystemLoggersMutex.Lock() + defer subsystemLoggersMutex.Unlock() + // Convert the subsystemLoggers map keys to a slice. + subsystems := make([]string, 0, len(subsystemLoggers)) + for subsysID := range subsystemLoggers { + subsystems = append(subsystems, subsysID) + } + + // Sort the subsystems for stable display. 
+ sort.Strings(subsystems) + return subsystems +} + +func getSubsystem(tag string) (logger *Logger, ok bool) { + subsystemLoggersMutex.Lock() + defer subsystemLoggersMutex.Unlock() + logger, ok = subsystemLoggers[tag] + return +} + +// ParseAndSetLogLevels attempts to parse the specified debug level and set +// the levels accordingly. An appropriate error is returned if anything is +// invalid. +func ParseAndSetLogLevels(logLevel string) error { + // When the specified string doesn't have any delimters, treat it as + // the log level for all subsystems. + if !strings.Contains(logLevel, ",") && !strings.Contains(logLevel, "=") { + // Validate and change the logging level for all subsystems. + return SetLogLevelsString(logLevel) + } + + // Split the specified string into subsystem/level pairs while detecting + // issues and update the log levels accordingly. + for _, logLevelPair := range strings.Split(logLevel, ",") { + if !strings.Contains(logLevelPair, "=") { + str := "The specified debug level contains an invalid " + + "subsystem/level pair [%s]" + return errors.Errorf(str, logLevelPair) + } + + // Extract the specified subsystem and log level. + fields := strings.Split(logLevelPair, "=") + subsysID, logLevel := fields[0], fields[1] + + // Validate subsystem. + if _, exists := getSubsystem(subsysID); !exists { + str := "The specified subsystem [%s] is invalid -- " + + "supported subsytems %s" + return errors.Errorf(str, subsysID, strings.Join(SupportedSubsystems(), ", ")) + } + + err := SetLogLevel(subsysID, logLevel) + if err != nil { + return err + } + } + return nil +} diff --git a/infrastructure/logger/logs.go b/infrastructure/logger/logs.go new file mode 100644 index 0000000..809f9ba --- /dev/null +++ b/infrastructure/logger/logs.go @@ -0,0 +1,293 @@ +// Copyright (c) 2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. +// +// Copyright (c) 2009 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package logger + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sync/atomic" + + "github.com/spectre-project/spectred/util/mstime" +) + +// Logger is a subsystem logger for a Backend. 
+type Logger struct { + lvl Level // atomic + tag string + b *Backend + writeChan chan<- logEntry +} + +type logEntry struct { + log []byte + level Level +} + +// Trace formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelTrace. +func (l *Logger) Trace(args ...interface{}) { + l.Write(LevelTrace, args...) +} + +// Tracef formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelTrace. +func (l *Logger) Tracef(format string, args ...interface{}) { + l.Writef(LevelTrace, format, args...) +} + +// Debug formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelDebug. +func (l *Logger) Debug(args ...interface{}) { + l.Write(LevelDebug, args...) +} + +// Debugf formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelDebug. +func (l *Logger) Debugf(format string, args ...interface{}) { + l.Writef(LevelDebug, format, args...) +} + +// Info formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelInfo. +func (l *Logger) Info(args ...interface{}) { + l.Write(LevelInfo, args...) +} + +// Infof formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelInfo. +func (l *Logger) Infof(format string, args ...interface{}) { + l.Writef(LevelInfo, format, args...) +} + +// Warn formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelWarn. +func (l *Logger) Warn(args ...interface{}) { + l.Write(LevelWarn, args...) +} + +// Warnf formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelWarn. +func (l *Logger) Warnf(format string, args ...interface{}) { + l.Writef(LevelWarn, format, args...) 
+} + +// Error formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelError. +func (l *Logger) Error(args ...interface{}) { + l.Write(LevelError, args...) +} + +// Errorf formats message according to format specifier, prepends the prefix as +// necessary, and writes to log with LevelError. +func (l *Logger) Errorf(format string, args ...interface{}) { + l.Writef(LevelError, format, args...) +} + +// Critical formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with LevelCritical. +func (l *Logger) Critical(args ...interface{}) { + l.Write(LevelCritical, args...) +} + +// Criticalf formats message according to format specifier, prepends the prefix +// as necessary, and writes to log with LevelCritical. +func (l *Logger) Criticalf(format string, args ...interface{}) { + l.Writef(LevelCritical, format, args...) +} + +// Write formats message using the default formats for its operands, prepends +// the prefix as necessary, and writes to log with the given logLevel. +func (l *Logger) Write(logLevel Level, args ...interface{}) { + lvl := l.Level() + if lvl <= logLevel { + l.print(logLevel, l.tag, args...) + } +} + +// Writef formats message according to format specifier, prepends the prefix +// as necessary, and writes to log with the given logLevel. +func (l *Logger) Writef(logLevel Level, format string, args ...interface{}) { + lvl := l.Level() + if lvl <= logLevel { + l.printf(logLevel, l.tag, format, args...) + } +} + +// Level returns the current logging level +func (l *Logger) Level() Level { + return Level(atomic.LoadUint32((*uint32)(&l.lvl))) +} + +// SetLevel changes the logging level to the passed level. 
+func (l *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&l.lvl), uint32(level)) +} + +// Backend returns the log backend +func (l *Logger) Backend() *Backend { + return l.b +} + +// printf outputs a log message to the writer associated with the backend after +// creating a prefix for the given level and tag according to the formatHeader +// function and formatting the provided arguments according to the given format +// specifier. +func (l *Logger) printf(lvl Level, tag string, format string, args ...interface{}) { + t := mstime.Now() // get as early as possible + + var file string + var line int + if l.b.flag&(LogFlagShortFile|LogFlagLongFile) != 0 { + file, line = callsite(l.b.flag) + } + + buf := make([]byte, 0, normalLogSize) + + formatHeader(&buf, t, lvl.String(), tag, file, line) + bytesBuf := bytes.NewBuffer(buf) + _, _ = fmt.Fprintf(bytesBuf, format, args...) + bytesBuf.WriteByte('\n') + + if !l.b.IsRunning() { + _, _ = fmt.Fprintf(os.Stderr, bytesBuf.String()) + panic("Writing to the logger when it's not running") + } + l.writeChan <- logEntry{bytesBuf.Bytes(), lvl} +} + +// print outputs a log message to the writer associated with the backend after +// creating a prefix for the given level and tag according to the formatHeader +// function and formatting the provided arguments using the default formatting +// rules. +func (l *Logger) print(lvl Level, tag string, args ...interface{}) { + if atomic.LoadUint32(&l.b.isRunning) == 0 { + panic("printing log without initializing") + } + t := mstime.Now() // get as early as possible + + var file string + var line int + if l.b.flag&(LogFlagShortFile|LogFlagLongFile) != 0 { + file, line = callsite(l.b.flag) + } + + buf := make([]byte, 0, normalLogSize) + formatHeader(&buf, t, lvl.String(), tag, file, line) + bytesBuf := bytes.NewBuffer(buf) + _, _ = fmt.Fprintln(bytesBuf, args...) 
+ + if !l.b.IsRunning() { + panic("Writing to the logger when it's not running") + } + l.writeChan <- logEntry{bytesBuf.Bytes(), lvl} +} + +// From stdlib log package. +// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding. +func itoa(buf *[]byte, i int, wid int) { + // Assemble decimal in reverse order. + var b [20]byte + bp := len(b) - 1 + for i >= 10 || wid > 1 { + wid-- + q := i / 10 + b[bp] = byte('0' + i - q*10) + bp-- + i = q + } + // i < 10 + b[bp] = byte('0' + i) + *buf = append(*buf, b[bp:]...) +} + +// Appends a header in the default format 'YYYY-MM-DD hh:mm:ss.sss [LVL] TAG: '. +// If either of the LogFlagShortFile or LogFlagLongFile flags are specified, the file named +// and line number are included after the tag and before the final colon. +func formatHeader(buf *[]byte, t mstime.Time, lvl, tag string, file string, line int) { + year, month, day := t.Date() + hour, min, sec := t.Clock() + ms := t.Millisecond() + + itoa(buf, year, 4) + *buf = append(*buf, '-') + itoa(buf, int(month), 2) + *buf = append(*buf, '-') + itoa(buf, day, 2) + *buf = append(*buf, ' ') + itoa(buf, hour, 2) + *buf = append(*buf, ':') + itoa(buf, min, 2) + *buf = append(*buf, ':') + itoa(buf, sec, 2) + *buf = append(*buf, '.') + itoa(buf, ms, 3) + *buf = append(*buf, " ["...) + *buf = append(*buf, lvl...) + *buf = append(*buf, "] "...) + *buf = append(*buf, tag...) + if file != "" { + *buf = append(*buf, ' ') + *buf = append(*buf, file...) + *buf = append(*buf, ':') + itoa(buf, line, -1) + } + *buf = append(*buf, ": "...) +} + +// calldepth is the call depth of the callsite function relative to the +// caller of the subsystem logger. It is used to recover the filename and line +// number of the logging call if either the short or long file flags are +// specified. +const calldepth = 4 + +// callsite returns the file name and line number of the callsite to the +// subsystem logger. 
+func callsite(flag uint32) (string, int) { + _, file, line, ok := runtime.Caller(calldepth) + if !ok { + return "???", 0 + } + if flag&LogFlagShortFile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if os.IsPathSeparator(file[i]) { + short = file[i+1:] + break + } + } + file = short + } + return file, line +} diff --git a/infrastructure/logger/utils.go b/infrastructure/logger/utils.go new file mode 100644 index 0000000..a1fbd63 --- /dev/null +++ b/infrastructure/logger/utils.go @@ -0,0 +1,44 @@ +package logger + +import ( + "fmt" + "runtime" + "time" +) + +// LogAndMeasureExecutionTime logs that `functionName` has +// started. The user is expected to defer `onEnd`, which +// will then log that the function has ended, as well as +// the time duration the function had ran. +func LogAndMeasureExecutionTime(log *Logger, functionName string) (onEnd func()) { + start := time.Now() + log.Tracef("%s start", functionName) + return func() { + log.Tracef("%s end. Took: %s", functionName, time.Since(start)) + } +} + +// LogMemoryStats logs memory stats for `functionName` +func LogMemoryStats(log *Logger, functionName string) { + log.Debug(NewLogClosure(func() string { + stats := runtime.MemStats{} + runtime.ReadMemStats(&stats) + return fmt.Sprintf("%s: used memory: %d bytes, total: %d bytes", functionName, + stats.Alloc, stats.HeapIdle-stats.HeapReleased+stats.HeapInuse) + })) +} + +// LogClosure is a closure that can be printed with %s to be used to +// generate expensive-to-create data for a detailed log level and avoid doing +// the work if the data isn't printed. +type LogClosure func() string + +func (c LogClosure) String() string { + return c() +} + +// NewLogClosure casts a function to a LogClosure. +// See LogClosure for details. 
+func NewLogClosure(c func() string) LogClosure { + return c +} diff --git a/infrastructure/network/addressmanager/addressmanager.go b/infrastructure/network/addressmanager/addressmanager.go new file mode 100644 index 0000000..4bc879f --- /dev/null +++ b/infrastructure/network/addressmanager/addressmanager.go @@ -0,0 +1,300 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package addressmanager + +import ( + "net" + "sync" + "time" + + "github.com/spectre-project/spectred/infrastructure/db/database" + "github.com/spectre-project/spectred/util/mstime" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +const ( + maxAddresses = 4096 + connectionFailedCountForRemove = 4 +) + +// addressRandomizer is the interface for the randomizer needed for the AddressManager. +type addressRandomizer interface { + RandomAddresses(addresses []*address, count int) []*appmessage.NetAddress +} + +// addressKey represents a pair of IP and port, the IP is always in V6 representation +type addressKey struct { + port uint16 + address ipv6 +} + +type address struct { + netAddress *appmessage.NetAddress + connectionFailedCount uint64 +} + +type ipv6 [net.IPv6len]byte + +func (i ipv6) equal(other ipv6) bool { + return i == other +} + +// ErrAddressNotFound is an error returned from some functions when a +// given address is not found in the address manager +var ErrAddressNotFound = errors.New("address not found") + +// NetAddressKey returns a key of the ip address to use it in maps. +func netAddressKey(netAddress *appmessage.NetAddress) addressKey { + key := addressKey{port: netAddress.Port} + // all IPv4 can be represented as IPv6. + copy(key.address[:], netAddress.IP.To16()) + return key +} + +// AddressManager provides a concurrency safe address manager for caching potential +// peers on the Spectre network. 
+type AddressManager struct { + store *addressStore + localAddresses *localAddressManager + mutex sync.Mutex + cfg *Config + random addressRandomizer +} + +// New returns a new Spectre address manager. +func New(cfg *Config, database database.Database) (*AddressManager, error) { + addressStore, err := newAddressStore(database) + if err != nil { + return nil, err + } + localAddresses, err := newLocalAddressManager(cfg) + if err != nil { + return nil, err + } + + return &AddressManager{ + store: addressStore, + localAddresses: localAddresses, + random: NewAddressRandomize(connectionFailedCountForRemove), + cfg: cfg, + }, nil +} + +func (am *AddressManager) addAddressNoLock(netAddress *appmessage.NetAddress) error { + if !IsRoutable(netAddress, am.cfg.AcceptUnroutable) { + return nil + } + + key := netAddressKey(netAddress) + // We mark `connectionFailedCount` as 0 only after first success + address := &address{netAddress: netAddress, connectionFailedCount: 1} + err := am.store.add(key, address) + if err != nil { + return err + } + + if am.store.notBannedCount() > maxAddresses { + allAddresses := am.store.getAllNotBanned() + + maxConnectionFailedCount := uint64(0) + toRemove := allAddresses[0] + for _, address := range allAddresses[1:] { + if address.connectionFailedCount > maxConnectionFailedCount { + maxConnectionFailedCount = address.connectionFailedCount + toRemove = address + } + } + + err := am.removeAddressNoLock(toRemove.netAddress) + if err != nil { + return err + } + } + return nil +} + +func (am *AddressManager) removeAddressNoLock(address *appmessage.NetAddress) error { + key := netAddressKey(address) + return am.store.remove(key) +} + +// AddAddress adds address to the address manager +func (am *AddressManager) AddAddress(address *appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + return am.addAddressNoLock(address) +} + +// AddAddresses adds addresses to the address manager +func (am *AddressManager) AddAddresses(addresses 
...*appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + for _, address := range addresses { + err := am.addAddressNoLock(address) + if err != nil { + return err + } + } + return nil +} + +// RemoveAddress removes addresses from the address manager +func (am *AddressManager) RemoveAddress(address *appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + return am.removeAddressNoLock(address) +} + +// MarkConnectionFailure notifies the address manager that the given address +// has failed to connect +func (am *AddressManager) MarkConnectionFailure(address *appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + key := netAddressKey(address) + entry, ok := am.store.getNotBanned(key) + if !ok { + return errors.Errorf("address %s is not registered with the address manager", address.TCPAddress()) + } + entry.connectionFailedCount = entry.connectionFailedCount + 1 + + if entry.connectionFailedCount >= connectionFailedCountForRemove { + log.Debugf("Address %s has failed %d connection attempts - removing from address manager", + address, entry.connectionFailedCount) + return am.store.remove(key) + } + return am.store.updateNotBanned(key, entry) +} + +// MarkConnectionSuccess notifies the address manager that the given address +// has successfully connected +func (am *AddressManager) MarkConnectionSuccess(address *appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + key := netAddressKey(address) + entry, ok := am.store.getNotBanned(key) + if !ok { + return errors.Errorf("address %s is not registered with the address manager", address.TCPAddress()) + } + entry.connectionFailedCount = 0 + return am.store.updateNotBanned(key, entry) +} + +// Addresses returns all addresses +func (am *AddressManager) Addresses() []*appmessage.NetAddress { + am.mutex.Lock() + defer am.mutex.Unlock() + + return am.store.getAllNotBannedNetAddresses() +} + +// BannedAddresses returns all banned 
addresses +func (am *AddressManager) BannedAddresses() []*appmessage.NetAddress { + am.mutex.Lock() + defer am.mutex.Unlock() + + return am.store.getAllBannedNetAddresses() +} + +// notBannedAddressesWithException returns all not banned addresses with excpetion +func (am *AddressManager) notBannedAddressesWithException(exceptions []*appmessage.NetAddress) []*address { + am.mutex.Lock() + defer am.mutex.Unlock() + + return am.store.getAllNotBannedNetAddressesWithout(exceptions) +} + +// RandomAddresses returns count addresses at random that aren't banned and aren't in exceptions +func (am *AddressManager) RandomAddresses(count int, exceptions []*appmessage.NetAddress) []*appmessage.NetAddress { + validAddresses := am.notBannedAddressesWithException(exceptions) + return am.random.RandomAddresses(validAddresses, count) +} + +// BestLocalAddress returns the most appropriate local address to use +// for the given remote address. +func (am *AddressManager) BestLocalAddress(remoteAddress *appmessage.NetAddress) *appmessage.NetAddress { + return am.localAddresses.bestLocalAddress(remoteAddress) +} + +// Ban marks the given address as banned +func (am *AddressManager) Ban(addressToBan *appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + keyToBan := netAddressKey(addressToBan) + keysToDelete := make([]addressKey, 0) + for _, address := range am.store.getAllNotBannedNetAddresses() { + key := netAddressKey(address) + if key.address.equal(keyToBan.address) { + keysToDelete = append(keysToDelete, key) + } + } + for _, key := range keysToDelete { + err := am.store.remove(key) + if err != nil { + return err + } + } + + address := &address{netAddress: addressToBan} + return am.store.addBanned(keyToBan, address) +} + +// Unban unmarks the given address as banned +func (am *AddressManager) Unban(address *appmessage.NetAddress) error { + am.mutex.Lock() + defer am.mutex.Unlock() + + key := netAddressKey(address) + if !am.store.isBanned(key) { + return 
errors.Wrapf(ErrAddressNotFound, "address %s "+ + "is not registered with the address manager as banned", address.TCPAddress()) + } + + return am.store.removeBanned(key) +} + +// IsBanned returns true if the given address is marked as banned +func (am *AddressManager) IsBanned(address *appmessage.NetAddress) (bool, error) { + am.mutex.Lock() + defer am.mutex.Unlock() + + key := netAddressKey(address) + err := am.unbanIfOldEnough(key) + if err != nil { + return false, err + } + if !am.store.isBanned(key) { + if !am.store.isNotBanned(key) { + return false, errors.Wrapf(ErrAddressNotFound, "address %s "+ + "is not registered with the address manager", address.TCPAddress()) + } + return false, nil + } + + return true, nil +} + +func (am *AddressManager) unbanIfOldEnough(key addressKey) error { + address, ok := am.store.getBanned(key) + if !ok { + return nil + } + + const maxBanTime = 24 * time.Hour + if mstime.Since(address.netAddress.Timestamp) > maxBanTime { + err := am.store.removeBanned(key) + if err != nil { + return err + } + } + return nil +} diff --git a/infrastructure/network/addressmanager/addressmanager_test.go b/infrastructure/network/addressmanager/addressmanager_test.go new file mode 100644 index 0000000..787ba13 --- /dev/null +++ b/infrastructure/network/addressmanager/addressmanager_test.go @@ -0,0 +1,374 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package addressmanager + +import ( + "net" + "reflect" + "testing" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" + "github.com/spectre-project/spectred/util/mstime" +) + +func newAddressManagerForTest(t *testing.T, testName string) (addressManager *AddressManager, teardown func()) { + cfg := config.DefaultConfig() + + datadir := t.TempDir() + database, err := ldb.NewLevelDB(datadir, 8) + if err != nil { + t.Fatalf("%s: could not create a database: %s", testName, err) + } + + addressManager, err = New(NewConfig(cfg), database) + if err != nil { + t.Fatalf("%s: error creating address manager: %s", testName, err) + } + + return addressManager, func() { + database.Close() + } +} + +func TestBestLocalAddress(t *testing.T) { + localAddrs := []appmessage.NetAddress{ + {IP: net.ParseIP("192.168.0.100")}, + {IP: net.ParseIP("::1")}, + {IP: net.ParseIP("fe80::1")}, + {IP: net.ParseIP("2001:470::1")}, + } + + var tests = []struct { + remoteAddr appmessage.NetAddress + want0 appmessage.NetAddress + want1 appmessage.NetAddress + want2 appmessage.NetAddress + want3 appmessage.NetAddress + }{ + { + // Remote connection from public IPv4 + appmessage.NetAddress{IP: net.ParseIP("204.124.8.1")}, + appmessage.NetAddress{IP: net.IPv4zero}, + appmessage.NetAddress{IP: net.IPv4zero}, + appmessage.NetAddress{IP: net.ParseIP("204.124.8.100")}, + appmessage.NetAddress{IP: net.ParseIP("fd87:d87e:eb43:25::1")}, + }, + { + // Remote connection from private IPv4 + appmessage.NetAddress{IP: net.ParseIP("172.16.0.254")}, + appmessage.NetAddress{IP: net.IPv4zero}, + appmessage.NetAddress{IP: net.IPv4zero}, + appmessage.NetAddress{IP: net.IPv4zero}, + appmessage.NetAddress{IP: net.IPv4zero}, + }, + { + // Remote connection from public IPv6 + appmessage.NetAddress{IP: net.ParseIP("2602:100:abcd::102")}, + appmessage.NetAddress{IP: net.IPv6zero}, + 
appmessage.NetAddress{IP: net.ParseIP("2001:470::1")}, + appmessage.NetAddress{IP: net.ParseIP("2001:470::1")}, + appmessage.NetAddress{IP: net.ParseIP("2001:470::1")}, + }, + } + + amgr, teardown := newAddressManagerForTest(t, "TestGetBestLocalAddress") + defer teardown() + + // Test against default when there's no address + for x, test := range tests { + got := amgr.BestLocalAddress(&test.remoteAddr) + if !test.want0.IP.Equal(got.IP) { + t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s", + x, test.remoteAddr.IP, test.want1.IP, got.IP) + continue + } + } + + for _, localAddr := range localAddrs { + amgr.localAddresses.addLocalNetAddress(&localAddr, InterfacePrio) + } + + // Test against want1 + for x, test := range tests { + got := amgr.BestLocalAddress(&test.remoteAddr) + if !test.want1.IP.Equal(got.IP) { + t.Errorf("TestGetBestLocalAddress test1 #%d failed for remote address %s: want %s got %s", + x, test.remoteAddr.IP, test.want1.IP, got.IP) + continue + } + } + + // Add a public IP to the list of local addresses. 
+ localAddr := appmessage.NetAddress{IP: net.ParseIP("204.124.8.100")} + amgr.localAddresses.addLocalNetAddress(&localAddr, InterfacePrio) + + // Test against want2 + for x, test := range tests { + got := amgr.BestLocalAddress(&test.remoteAddr) + if !test.want2.IP.Equal(got.IP) { + t.Errorf("TestGetBestLocalAddress test2 #%d failed for remote address %s: want %s got %s", + x, test.remoteAddr.IP, test.want2.IP, got.IP) + continue + } + } +} + +func TestAddressManager(t *testing.T) { + addressManager, teardown := newAddressManagerForTest(t, "TestAddressManager") + defer teardown() + + testAddress1 := &appmessage.NetAddress{IP: net.ParseIP("1.2.3.4"), Timestamp: mstime.Now()} + testAddress2 := &appmessage.NetAddress{IP: net.ParseIP("5.6.8.8"), Timestamp: mstime.Now()} + testAddress3 := &appmessage.NetAddress{IP: net.ParseIP("9.0.1.2"), Timestamp: mstime.Now()} + testAddresses := []*appmessage.NetAddress{testAddress1, testAddress2, testAddress3} + + // Add a few addresses + err := addressManager.AddAddresses(testAddresses...) + if err != nil { + t.Fatalf("AddAddresses() failed: %s", err) + } + + // Make sure that all the addresses are returned + addresses := addressManager.Addresses() + if len(testAddresses) != len(addresses) { + t.Fatalf("Unexpected amount of addresses returned from Addresses. 
"+ + "Want: %d, got: %d", len(testAddresses), len(addresses)) + } + for _, testAddress := range testAddresses { + found := false + for _, address := range addresses { + if reflect.DeepEqual(testAddress, address) { + found = true + break + } + } + if !found { + t.Fatalf("Address %s not returned from Addresses().", testAddress.IP) + } + } + + // Remove an address + addressToRemove := testAddress2 + err = addressManager.RemoveAddress(addressToRemove) + if err != nil { + t.Fatalf("RemoveAddress() failed: %s", err) + } + + // Make sure that the removed address is not returned + addresses = addressManager.Addresses() + if len(addresses) != len(testAddresses)-1 { + t.Fatalf("Unexpected amount of addresses returned from Addresses(). "+ + "Want: %d, got: %d", len(addresses), len(testAddresses)-1) + } + for _, address := range addresses { + if reflect.DeepEqual(addressToRemove, address) { + t.Fatalf("Removed addresses %s returned from Addresses()", addressToRemove.IP) + } + } + + // Add that address back + err = addressManager.AddAddress(addressToRemove) + if err != nil { + t.Fatalf("AddAddress() failed: %s", err) + } + + // Ban a different address + addressToBan := testAddress3 + err = addressManager.Ban(addressToBan) + if err != nil { + t.Fatalf("Ban() failed: %s", err) + } + + // Make sure that the banned address is not returned + addresses = addressManager.Addresses() + if len(addresses) != len(testAddresses)-1 { + t.Fatalf("Unexpected amount of addresses returned from Addresses(). 
"+ + "Want: %d, got: %d", len(addresses), len(testAddresses)-1) + } + for _, address := range addresses { + if reflect.DeepEqual(addressToBan, address) { + t.Fatalf("Banned addresses %s returned from Addresses()", addressToBan.IP) + } + } + + // Check that the address is banned + isBanned, err := addressManager.IsBanned(addressToBan) + if err != nil { + t.Fatalf("IsBanned() failed: %s", err) + } + if !isBanned { + t.Fatalf("Adderss %s is unexpectedly not banned", addressToBan.IP) + } + + // Check that BannedAddresses() returns the banned address + bannedAddresses := addressManager.BannedAddresses() + if len(bannedAddresses) != 1 { + t.Fatalf("Unexpected amount of addresses returned from BannedAddresses(). "+ + "Want: %d, got: %d", 1, len(bannedAddresses)) + } + if !reflect.DeepEqual(addressToBan, bannedAddresses[0]) { + t.Fatalf("Banned address %s not returned from BannedAddresses()", addressToBan.IP) + } + + // Unban the address + err = addressManager.Unban(addressToBan) + if err != nil { + t.Fatalf("Unban() failed: %s", err) + } + + // Check that BannedAddresses() not longer returns the banned address + bannedAddresses = addressManager.BannedAddresses() + if len(bannedAddresses) != 0 { + t.Fatalf("Unexpected amount of addresses returned from BannedAddresses(). 
"+ + "Want: %d, got: %d", 0, len(bannedAddresses)) + } +} + +func TestRestoreAddressManager(t *testing.T) { + cfg := config.DefaultConfig() + + // Create an empty database + datadir := t.TempDir() + database, err := ldb.NewLevelDB(datadir, 8) + if err != nil { + t.Fatalf("Could not create a database: %s", err) + } + defer database.Close() + + // Create an addressManager with the empty database + addressManager, err := New(NewConfig(cfg), database) + if err != nil { + t.Fatalf("Error creating address manager: %s", err) + } + + testAddress1 := &appmessage.NetAddress{IP: net.ParseIP("1.2.3.4"), Timestamp: mstime.Now()} + testAddress2 := &appmessage.NetAddress{IP: net.ParseIP("5.6.8.8"), Timestamp: mstime.Now()} + testAddress3 := &appmessage.NetAddress{IP: net.ParseIP("9.0.1.2"), Timestamp: mstime.Now()} + testAddresses := []*appmessage.NetAddress{testAddress1, testAddress2, testAddress3} + + // Add some addresses + err = addressManager.AddAddresses(testAddresses...) + if err != nil { + t.Fatalf("AddAddresses() failed: %s", err) + } + + // Ban one of the addresses + addressToBan := testAddress1 + err = addressManager.Ban(addressToBan) + if err != nil { + t.Fatalf("Ban() failed: %s", err) + } + + // Close the database + err = database.Close() + if err != nil { + t.Fatalf("Close() failed: %s", err) + } + + // Reopen the database + database, err = ldb.NewLevelDB(datadir, 8) + if err != nil { + t.Fatalf("Could not create a database: %s", err) + } + defer database.Close() + + // Recreate an addressManager with a the previous database + addressManager, err = New(NewConfig(cfg), database) + if err != nil { + t.Fatalf("Error creating address manager: %s", err) + } + + // Make sure that Addresses() returns the correct addresses + addresses := addressManager.Addresses() + if len(addresses) != len(testAddresses)-1 { + t.Fatalf("Unexpected amount of addresses returned from Addresses(). 
"+ + "Want: %d, got: %d", len(addresses), len(testAddresses)-1) + } + for _, address := range addresses { + if reflect.DeepEqual(addressToBan, address) { + t.Fatalf("Banned addresses %s returned from Addresses()", addressToBan.IP) + } + } + + // Make sure that BannedAddresses() returns the correct addresses + bannedAddresses := addressManager.BannedAddresses() + if len(bannedAddresses) != 1 { + t.Fatalf("Unexpected amount of addresses returned from BannedAddresses(). "+ + "Want: %d, got: %d", 1, len(bannedAddresses)) + } + if !reflect.DeepEqual(addressToBan, bannedAddresses[0]) { + t.Fatalf("Banned address %s not returned from BannedAddresses()", addressToBan.IP) + } +} + +func TestOverfillAddressManager(t *testing.T) { + addressManager, teardown := newAddressManagerForTest(t, "TestAddressManager") + defer teardown() + + generateTestAddresses := func(amount int) []*appmessage.NetAddress { + testAddresses := make([]*appmessage.NetAddress, 0, amount) + for i := byte(0); i < 128; i++ { + for j := byte(0); j < 128; j++ { + testAddress := &appmessage.NetAddress{IP: net.IP{1, 2, i, j}, Timestamp: mstime.Now()} + testAddresses = append(testAddresses, testAddress) + if len(testAddresses) == amount { + break + } + } + if len(testAddresses) == amount { + break + } + } + return testAddresses + } + + // Add a single test address to the address manager + testAddress := &appmessage.NetAddress{IP: net.IP{5, 6, 0, 0}, Timestamp: mstime.Now()} + err := addressManager.AddAddress(testAddress) + if err != nil { + t.Fatalf("AddAddress: %s", err) + } + + // Add `maxAddresses-1` addresses to the address manager + addresses := generateTestAddresses(maxAddresses - 1) + err = addressManager.AddAddresses(addresses...) + if err != nil { + t.Fatalf("AddAddresses: %s", err) + } + + // Make sure that it now contains exactly `maxAddresses` entries + returnedAddresses := addressManager.Addresses() + if len(returnedAddresses) != maxAddresses { + t.Fatalf("Unexpected address amount. 
Want: %d, got: %d", maxAddresses, len(returnedAddresses)) + } + + // Mark the first test address as a connection failure + err = addressManager.MarkConnectionFailure(testAddress) + if err != nil { + t.Fatalf("MarkConnectionFailure: %s", err) + } + + // Add one more address to the address manager + err = addressManager.AddAddress(&appmessage.NetAddress{IP: net.IP{7, 8, 0, 0}, Timestamp: mstime.Now()}) + if err != nil { + t.Fatalf("AddAddress: %s", err) + } + + // Make sure that it now still contains exactly `maxAddresses` entries + returnedAddresses = addressManager.Addresses() + if len(returnedAddresses) != maxAddresses { + t.Fatalf("Unexpected address amount. Want: %d, got: %d", maxAddresses, len(returnedAddresses)) + } + + // Make sure that the first address is no longer in the + // connection manager + for _, address := range returnedAddresses { + if address.IP.Equal(testAddress.IP) { + t.Fatalf("Unexpectedly found testAddress returned addresses") + } + } +} diff --git a/infrastructure/network/addressmanager/addressrandomize.go b/infrastructure/network/addressmanager/addressrandomize.go new file mode 100644 index 0000000..a5f1e21 --- /dev/null +++ b/infrastructure/network/addressmanager/addressrandomize.go @@ -0,0 +1,63 @@ +package addressmanager + +import ( + "math" + "math/rand" + "time" + + "github.com/spectre-project/spectred/app/appmessage" +) + +// AddressRandomize implement addressRandomizer interface +type AddressRandomize struct { + random *rand.Rand + maxFailedCount uint64 +} + +// NewAddressRandomize returns a new RandomizeAddress. 
+func NewAddressRandomize(maxFailedCount uint64) *AddressRandomize { + return &AddressRandomize{ + random: rand.New(rand.NewSource(time.Now().UnixNano())), + maxFailedCount: maxFailedCount, + } +} + +// weightedRand is a help function which returns a random index in the +// range [0, len(weights)-1] with probability weighted by `weights` +func weightedRand(weights []float32) int { + sum := float32(0) + for _, weight := range weights { + sum += weight + } + randPoint := rand.Float32() + scanPoint := float32(0) + for i, weight := range weights { + normalizedWeight := weight / sum + scanPoint += normalizedWeight + if randPoint <= scanPoint { + return i + } + } + return len(weights) - 1 +} + +// RandomAddresses returns count addresses at random from input list +func (amc *AddressRandomize) RandomAddresses(addresses []*address, count int) []*appmessage.NetAddress { + if len(addresses) < count { + count = len(addresses) + } + weights := make([]float32, 0, len(addresses)) + for _, addr := range addresses { + weights = append(weights, float32(math.Pow(64, float64(amc.maxFailedCount-addr.connectionFailedCount)))) + } + result := make([]*appmessage.NetAddress, 0, count) + for count > 0 { + i := weightedRand(weights) + result = append(result, addresses[i].netAddress) + // Zero entry i to avoid re-selection + weights[i] = 0 + // Update count + count-- + } + return result +} diff --git a/infrastructure/network/addressmanager/config.go b/infrastructure/network/addressmanager/config.go new file mode 100644 index 0000000..d64eb22 --- /dev/null +++ b/infrastructure/network/addressmanager/config.go @@ -0,0 +1,27 @@ +package addressmanager + +import ( + "net" + + "github.com/spectre-project/spectred/infrastructure/config" +) + +// Config is a descriptor which specifies the AddressManager instance configuration. 
+type Config struct {
+	AcceptUnroutable bool
+	DefaultPort      string
+	ExternalIPs      []string
+	Listeners        []string
+	Lookup           func(string) ([]net.IP, error)
+}
+
+// NewConfig returns a new address manager Config.
+func NewConfig(cfg *config.Config) *Config {
+	return &Config{
+		AcceptUnroutable: cfg.NetParams().AcceptUnroutable,
+		DefaultPort:      cfg.NetParams().DefaultPort,
+		ExternalIPs:      cfg.ExternalIPs,
+		Listeners:        cfg.Listeners,
+		Lookup:           cfg.Lookup,
+	}
+}
diff --git a/infrastructure/network/addressmanager/doc.go b/infrastructure/network/addressmanager/doc.go
new file mode 100644
index 0000000..9146e47
--- /dev/null
+++ b/infrastructure/network/addressmanager/doc.go
@@ -0,0 +1,34 @@
+/*
+Package addressmanager implements concurrency safe Spectre address manager.
+
+# Address Manager Overview
+
+In order to maintain the peer-to-peer Spectre network, there needs to be a source
+of addresses to connect to as nodes come and go. The Spectre protocol provides
+the getaddr and addr messages to allow peers to communicate known addresses with
+each other. However, there needs to be a mechanism to store those results and
+select peers from them. It is also important to note that remote peers can't
+be trusted to send valid peers nor attempt to provide you with only peers they
+control with malicious intent.
+
+With that in mind, this package provides a concurrency safe address manager for
+caching and selecting peers in a non-deterministic manner. The general idea is
+the caller adds addresses to the address manager and notifies it when addresses
+are connected, known good, and attempted. The caller also requests addresses as
+it needs them.
+
+The address manager internally segregates the addresses into groups and
+non-deterministically selects groups in a cryptographically random manner. This
+reduces the chances multiple addresses from the same nets are selected which
+generally helps provide greater peer diversity, and perhaps more importantly,
+drastically reduces the chances an attacker is able to coerce your peer into
+only connecting to nodes they control.
+
+The address manager also understands routability and tries hard to only return
+routable addresses. In addition, it uses the information provided by the caller
+about connected, known good, and attempted addresses to periodically purge
+peers which no longer appear to be good peers as well as bias the selection
+toward known good peers. The general idea is to make a best effort at only
+providing usable addresses.
+*/
+package addressmanager
diff --git a/infrastructure/network/addressmanager/localaddressmanager.go b/infrastructure/network/addressmanager/localaddressmanager.go
new file mode 100644
index 0000000..2ea98f4
--- /dev/null
+++ b/infrastructure/network/addressmanager/localaddressmanager.go
@@ -0,0 +1,399 @@
+package addressmanager
+
+import (
+	"net"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+)
+
+// AddressPriority type is used to describe the hierarchy of local address
+// discovery methods.
+type AddressPriority int
+
+const (
+	// InterfacePrio signifies the address is on a local interface
+	InterfacePrio AddressPriority = iota
+
+	// BoundPrio signifies the address has been explicitly bound to.
+	BoundPrio
+
+	// UpnpPrio signifies the address was obtained from UPnP.
+	UpnpPrio
+
+	// HTTPPrio signifies the address was obtained from an external HTTP service.
+	HTTPPrio
+
+	// ManualPrio signifies the address was provided by --externalip.
+ ManualPrio +) + +type localAddress struct { + netAddress *appmessage.NetAddress + score AddressPriority +} + +type localAddressManager struct { + localAddresses map[addressKey]*localAddress + lookupFunc func(string) ([]net.IP, error) + cfg *Config + mutex sync.Mutex +} + +func newLocalAddressManager(cfg *Config) (*localAddressManager, error) { + localAddressManager := localAddressManager{ + localAddresses: map[addressKey]*localAddress{}, + cfg: cfg, + lookupFunc: cfg.Lookup, + } + + err := localAddressManager.initListeners() + if err != nil { + return nil, err + } + + return &localAddressManager, nil +} + +// addLocalNetAddress adds netAddress to the list of known local addresses to advertise +// with the given priority. +func (lam *localAddressManager) addLocalNetAddress(netAddress *appmessage.NetAddress, priority AddressPriority) error { + if !IsRoutable(netAddress, lam.cfg.AcceptUnroutable) { + return errors.Errorf("address %s is not routable", netAddress.IP) + } + + lam.mutex.Lock() + defer lam.mutex.Unlock() + + addressKey := netAddressKey(netAddress) + address, ok := lam.localAddresses[addressKey] + if !ok || address.score < priority { + if ok { + address.score = priority + 1 + } else { + lam.localAddresses[addressKey] = &localAddress{ + netAddress: netAddress, + score: priority, + } + } + } + return nil +} + +// bestLocalAddress returns the most appropriate local address to use +// for the given remote address. 
+func (lam *localAddressManager) bestLocalAddress(remoteAddress *appmessage.NetAddress) *appmessage.NetAddress { + lam.mutex.Lock() + defer lam.mutex.Unlock() + + bestReach := 0 + var bestScore AddressPriority + var bestAddress *appmessage.NetAddress + for _, localAddress := range lam.localAddresses { + reach := reachabilityFrom(localAddress.netAddress, remoteAddress, lam.cfg.AcceptUnroutable) + if reach > bestReach || + (reach == bestReach && localAddress.score > bestScore) { + bestReach = reach + bestScore = localAddress.score + bestAddress = localAddress.netAddress + } + } + + if bestAddress == nil { + // Send something unroutable if nothing suitable. + var ip net.IP + if !IsIPv4(remoteAddress) { + ip = net.IPv6zero + } else { + ip = net.IPv4zero + } + bestAddress = appmessage.NewNetAddressIPPort(ip, 0) + } + + return bestAddress +} + +// addLocalAddress adds an address that this node is listening on to the +// address manager so that it may be relayed to peers. +func (lam *localAddressManager) addLocalAddress(addr string) error { + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return err + } + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return err + } + + if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() { + // If bound to unspecified address, advertise all local interfaces + addrs, err := net.InterfaceAddrs() + if err != nil { + return err + } + + for _, addr := range addrs { + ifaceIP, _, err := net.ParseCIDR(addr.String()) + if err != nil { + continue + } + + // If bound to 0.0.0.0, do not add IPv6 interfaces and if bound to + // ::, do not add IPv4 interfaces. 
+ if (ip.To4() == nil) != (ifaceIP.To4() == nil) { + continue + } + + netAddr := appmessage.NewNetAddressIPPort(ifaceIP, uint16(port)) + lam.addLocalNetAddress(netAddr, BoundPrio) + } + } else { + netAddr, err := lam.hostToNetAddress(host, uint16(port)) + if err != nil { + return err + } + + lam.addLocalNetAddress(netAddr, BoundPrio) + } + + return nil +} + +// initListeners initializes the configured net listeners and adds any bound +// addresses to the address manager +func (lam *localAddressManager) initListeners() error { + if len(lam.cfg.ExternalIPs) != 0 { + defaultPort, err := strconv.ParseUint(lam.cfg.DefaultPort, 10, 16) + if err != nil { + log.Errorf("Can not parse default port %s for active DAG: %s", + lam.cfg.DefaultPort, err) + return err + } + + for _, sip := range lam.cfg.ExternalIPs { + eport := uint16(defaultPort) + host, portstr, err := net.SplitHostPort(sip) + if err != nil { + // no port, use default. + host = sip + } else { + port, err := strconv.ParseUint(portstr, 10, 16) + if err != nil { + log.Warnf("Can not parse port from %s for "+ + "externalip: %s", sip, err) + continue + } + eport = uint16(port) + } + na, err := lam.hostToNetAddress(host, eport) + if err != nil { + log.Warnf("Not adding %s as externalip: %s", sip, err) + continue + } + + err = lam.addLocalNetAddress(na, ManualPrio) + if err != nil { + log.Warnf("Skipping specified external IP: %s", err) + } + } + } else { + // Listen for TCP connections at the configured addresses + netAddrs, err := parseListeners(lam.cfg.Listeners) + if err != nil { + return err + } + + // Add bound addresses to address manager to be advertised to peers. 
+ for _, addr := range netAddrs { + listener, err := net.Listen(addr.Network(), addr.String()) + if err != nil { + log.Warnf("Can't listen on %s: %s", addr, err) + continue + } + addr := listener.Addr().String() + err = listener.Close() + if err != nil { + return err + } + err = lam.addLocalAddress(addr) + if err != nil { + log.Warnf("Skipping bound address %s: %s", addr, err) + } + } + } + + return nil +} + +// hostToNetAddress returns a netaddress given a host address. If +// the host is not an IP address it will be resolved. +func (lam *localAddressManager) hostToNetAddress(host string, port uint16) (*appmessage.NetAddress, error) { + ip := net.ParseIP(host) + if ip == nil { + ips, err := lam.lookupFunc(host) + if err != nil { + return nil, err + } + if len(ips) == 0 { + return nil, errors.Errorf("no addresses found for %s", host) + } + ip = ips[0] + } + + return appmessage.NewNetAddressIPPort(ip, port), nil +} + +// parseListeners determines whether each listen address is IPv4 and IPv6 and +// returns a slice of appropriate net.Addrs to listen on with TCP. It also +// properly detects addresses which apply to "all interfaces" and adds the +// address as both IPv4 and IPv6. +func parseListeners(addrs []string) ([]net.Addr, error) { + netAddrs := make([]net.Addr, 0, len(addrs)*2) + for _, addr := range addrs { + host, _, err := net.SplitHostPort(addr) + if err != nil { + // Shouldn't happen due to already being normalized. + return nil, err + } + + // Empty host or host of * on plan9 is both IPv4 and IPv6. + if host == "" || (host == "*" && runtime.GOOS == "plan9") { + netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr}) + netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr}) + continue + } + + // Strip IPv6 zone id if present since net.ParseIP does not + // handle it. + zoneIndex := strings.LastIndex(host, "%") + if zoneIndex > 0 { + host = host[:zoneIndex] + } + + // Parse the IP. 
+ ip := net.ParseIP(host) + if ip == nil { + hostAddrs, err := net.LookupHost(host) + if err != nil { + return nil, err + } + ip = net.ParseIP(hostAddrs[0]) + if ip == nil { + return nil, errors.Errorf("Cannot resolve IP address for host '%s'", host) + } + } + + // To4 returns nil when the IP is not an IPv4 address, so use + // this determine the address type. + if ip.To4() == nil { + netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr}) + } else { + netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr}) + } + } + return netAddrs, nil +} + +// reachabilityFrom returns the relative reachability of the provided local +// address to the provided remote address. +func reachabilityFrom(localAddress, remoteAddress *appmessage.NetAddress, acceptUnroutable bool) int { + const ( + Unreachable = 0 + Default = iota + Teredo + Ipv6Weak + Ipv4 + Ipv6Strong + Private + ) + + IsRoutable := func(na *appmessage.NetAddress) bool { + if acceptUnroutable { + return !IsLocal(na) + } + + return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) || + IsRFC3927(na) || IsRFC4862(na) || IsRFC3849(na) || + IsRFC4843(na) || IsRFC5737(na) || IsRFC6598(na) || + IsLocal(na) || (IsRFC4193(na))) + } + + if !IsRoutable(remoteAddress) { + return Unreachable + } + + if IsRFC4380(remoteAddress) { + if !IsRoutable(localAddress) { + return Default + } + + if IsRFC4380(localAddress) { + return Teredo + } + + if IsIPv4(localAddress) { + return Ipv4 + } + + return Ipv6Weak + } + + if IsIPv4(remoteAddress) { + if IsRoutable(localAddress) && IsIPv4(localAddress) { + return Ipv4 + } + return Unreachable + } + + /* ipv6 */ + var tunnelled bool + // Is our v6 is tunnelled? 
+ if IsRFC3964(localAddress) || IsRFC6052(localAddress) || IsRFC6145(localAddress) { + tunnelled = true + } + + if !IsRoutable(localAddress) { + return Default + } + + if IsRFC4380(localAddress) { + return Teredo + } + + if IsIPv4(localAddress) { + return Ipv4 + } + + if tunnelled { + // only prioritise ipv6 if we aren't tunnelling it. + return Ipv6Weak + } + + return Ipv6Strong +} + +// simpleAddr implements the net.Addr interface with two struct fields +type simpleAddr struct { + net, addr string +} + +// String returns the address. +// +// This is part of the net.Addr interface. +func (a simpleAddr) String() string { + return a.addr +} + +// Network returns the network. +// +// This is part of the net.Addr interface. +func (a simpleAddr) Network() string { + return a.net +} + +// Ensure simpleAddr implements the net.Addr interface. +var _ net.Addr = simpleAddr{} diff --git a/infrastructure/network/addressmanager/log.go b/infrastructure/network/addressmanager/log.go new file mode 100644 index 0000000..fd78942 --- /dev/null +++ b/infrastructure/network/addressmanager/log.go @@ -0,0 +1,11 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package addressmanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("ADXR") diff --git a/infrastructure/network/addressmanager/network.go b/infrastructure/network/addressmanager/network.go new file mode 100644 index 0000000..8f9f1a0 --- /dev/null +++ b/infrastructure/network/addressmanager/network.go @@ -0,0 +1,265 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+
+package addressmanager
+
+import (
+	"net"
+
+	"github.com/spectre-project/spectred/app/appmessage"
+)
+
+var (
+	// rfc1918Nets specifies the IPv4 private address blocks as defined
+	// by RFC1918 (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16).
+	rfc1918Nets = []net.IPNet{
+		ipNet("10.0.0.0", 8, 32),
+		ipNet("172.16.0.0", 12, 32),
+		ipNet("192.168.0.0", 16, 32),
+	}
+
+	// rfc2544Net specifies the IPv4 block as defined by RFC2544
+	// (198.18.0.0/15)
+	rfc2544Net = ipNet("198.18.0.0", 15, 32)
+
+	// rfc3849Net specifies the IPv6 documentation address block as defined
+	// by RFC3849 (2001:DB8::/32).
+	rfc3849Net = ipNet("2001:DB8::", 32, 128)
+
+	// rfc3927Net specifies the IPv4 auto configuration address block as
+	// defined by RFC3927 (169.254.0.0/16).
+	rfc3927Net = ipNet("169.254.0.0", 16, 32)
+
+	// rfc3964Net specifies the IPv6 to IPv4 encapsulation address block as
+	// defined by RFC3964 (2002::/16).
+	rfc3964Net = ipNet("2002::", 16, 128)
+
+	// rfc4193Net specifies the IPv6 unique local address block as defined
+	// by RFC4193 (FC00::/7).
+	rfc4193Net = ipNet("FC00::", 7, 128)
+
+	// rfc4380Net specifies the IPv6 teredo tunneling over UDP address block
+	// as defined by RFC4380 (2001::/32).
+	rfc4380Net = ipNet("2001::", 32, 128)
+
+	// rfc4843Net specifies the IPv6 ORCHID address block as defined by
+	// RFC4843 (2001:10::/28).
+	rfc4843Net = ipNet("2001:10::", 28, 128)
+
+	// rfc4862Net specifies the IPv6 stateless address autoconfiguration
+	// address block as defined by RFC4862 (FE80::/64).
+	rfc4862Net = ipNet("FE80::", 64, 128)
+
+	// rfc5737Net specifies the IPv4 documentation address blocks as defined
+	// by RFC5737 (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24)
+	rfc5737Net = []net.IPNet{
+		ipNet("192.0.2.0", 24, 32),
+		ipNet("198.51.100.0", 24, 32),
+		ipNet("203.0.113.0", 24, 32),
+	}
+
+	// rfc6052Net specifies the IPv6 well-known prefix address block as
+	// defined by RFC6052 (64:FF9B::/96).
+	rfc6052Net = ipNet("64:FF9B::", 96, 128)
+
+	// rfc6145Net specifies the IPv6 to IPv4 translated address range as
+	// defined by RFC6145 (::FFFF:0:0:0/96).
+	rfc6145Net = ipNet("::FFFF:0:0:0", 96, 128)
+
+	// rfc6598Net specifies the IPv4 block as defined by RFC6598 (100.64.0.0/10)
+	rfc6598Net = ipNet("100.64.0.0", 10, 32)
+
+	// zero4Net defines the IPv4 address block for addresses starting with 0
+	// (0.0.0.0/8).
+	zero4Net = ipNet("0.0.0.0", 8, 32)
+
+	// heNet defines the Hurricane Electric IPv6 address block.
+	heNet = ipNet("2001:470::", 32, 128)
+)
+
+const (
+	// GetAddressesMax is the most addresses that we will send in response
+	// to a getAddress (in practice the most addresses we will return from a
+	// call to AddressCache()).
+	GetAddressesMax = 2500
+)
+
+// ipNet returns a net.IPNet struct given the passed IP address string, number
+// of one bits to include at the start of the mask, and the total number of bits
+// for the mask.
+func ipNet(ip string, ones, bits int) net.IPNet {
+	return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
+}
+
+// IsIPv4 returns whether or not the given address is an IPv4 address.
+func IsIPv4(na *appmessage.NetAddress) bool {
+	return na.IP.To4() != nil
+}
+
+// IsLocal returns whether or not the given address is a local address.
+func IsLocal(na *appmessage.NetAddress) bool {
+	return na.IP.IsLoopback() || zero4Net.Contains(na.IP)
+}
+
+// IsRFC1918 returns whether or not the passed address is part of the IPv4
+// private network address space as defined by RFC1918 (10.0.0.0/8,
+// 172.16.0.0/12, or 192.168.0.0/16).
+func IsRFC1918(na *appmessage.NetAddress) bool { + for _, rfc := range rfc1918Nets { + if rfc.Contains(na.IP) { + return true + } + } + return false +} + +// IsRFC2544 returns whether or not the passed address is part of the IPv4 +// address space as defined by RFC2544 (198.18.0.0/15) +func IsRFC2544(na *appmessage.NetAddress) bool { + return rfc2544Net.Contains(na.IP) +} + +// IsRFC3849 returns whether or not the passed address is part of the IPv6 +// documentation range as defined by RFC3849 (2001:DB8::/32). +func IsRFC3849(na *appmessage.NetAddress) bool { + return rfc3849Net.Contains(na.IP) +} + +// IsRFC3927 returns whether or not the passed address is part of the IPv4 +// autoconfiguration range as defined by RFC3927 (169.254.0.0/16). +func IsRFC3927(na *appmessage.NetAddress) bool { + return rfc3927Net.Contains(na.IP) +} + +// IsRFC3964 returns whether or not the passed address is part of the IPv6 to +// IPv4 encapsulation range as defined by RFC3964 (2002::/16). +func IsRFC3964(na *appmessage.NetAddress) bool { + return rfc3964Net.Contains(na.IP) +} + +// IsRFC4193 returns whether or not the passed address is part of the IPv6 +// unique local range as defined by RFC4193 (FC00::/7). +func IsRFC4193(na *appmessage.NetAddress) bool { + return rfc4193Net.Contains(na.IP) +} + +// IsRFC4380 returns whether or not the passed address is part of the IPv6 +// teredo tunneling over UDP range as defined by RFC4380 (2001::/32). +func IsRFC4380(na *appmessage.NetAddress) bool { + return rfc4380Net.Contains(na.IP) +} + +// IsRFC4843 returns whether or not the passed address is part of the IPv6 +// ORCHID range as defined by RFC4843 (2001:10::/28). +func IsRFC4843(na *appmessage.NetAddress) bool { + return rfc4843Net.Contains(na.IP) +} + +// IsRFC4862 returns whether or not the passed address is part of the IPv6 +// stateless address autoconfiguration range as defined by RFC4862 (FE80::/64). 
+func IsRFC4862(na *appmessage.NetAddress) bool { + return rfc4862Net.Contains(na.IP) +} + +// IsRFC5737 returns whether or not the passed address is part of the IPv4 +// documentation address space as defined by RFC5737 (192.0.2.0/24, +// 198.51.100.0/24, 203.0.113.0/24) +func IsRFC5737(na *appmessage.NetAddress) bool { + for _, rfc := range rfc5737Net { + if rfc.Contains(na.IP) { + return true + } + } + + return false +} + +// IsRFC6052 returns whether or not the passed address is part of the IPv6 +// well-known prefix range as defined by RFC6052 (64:FF9B::/96). +func IsRFC6052(na *appmessage.NetAddress) bool { + return rfc6052Net.Contains(na.IP) +} + +// IsRFC6145 returns whether or not the passed address is part of the IPv6 to +// IPv4 translated address range as defined by RFC6145 (::FFFF:0:0:0/96). +func IsRFC6145(na *appmessage.NetAddress) bool { + return rfc6145Net.Contains(na.IP) +} + +// IsRFC6598 returns whether or not the passed address is part of the IPv4 +// shared address space specified by RFC6598 (100.64.0.0/10) +func IsRFC6598(na *appmessage.NetAddress) bool { + return rfc6598Net.Contains(na.IP) +} + +// IsValid returns whether or not the passed address is valid. The address is +// considered invalid under the following circumstances: +// IPv4: It is either a zero or all bits set address. +// IPv6: It is either a zero or RFC3849 documentation address. +func IsValid(na *appmessage.NetAddress) bool { + // IsUnspecified returns if address is 0, so only all bits set, and + // RFC3849 need to be explicitly checked. + return na.IP != nil && !(na.IP.IsUnspecified() || + na.IP.Equal(net.IPv4bcast)) +} + +// IsRoutable returns whether or not the passed address is routable over +// the public internet. This is true as long as the address is valid and is not +// in any reserved ranges. 
+func IsRoutable(na *appmessage.NetAddress, acceptUnroutable bool) bool {
+	// When unroutable addresses are acceptable, only local addresses are
+	// excluded.
+	if acceptUnroutable {
+		return !IsLocal(na)
+	}
+
+	return IsValid(na) && !(IsRFC1918(na) || IsRFC2544(na) ||
+		IsRFC3927(na) || IsRFC4862(na) || IsRFC3849(na) ||
+		IsRFC4843(na) || IsRFC5737(na) || IsRFC6598(na) ||
+		IsLocal(na) || (IsRFC4193(na)))
+}
+
+// GroupKey returns a string representing the network group an address is part
+// of. This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
+// "local" for a local address, and the string "unroutable" for an unroutable
+// address.
+func (am *AddressManager) GroupKey(na *appmessage.NetAddress) string {
+	if IsLocal(na) {
+		return "local"
+	}
+	if !IsRoutable(na, am.cfg.AcceptUnroutable) {
+		return "unroutable"
+	}
+	if IsIPv4(na) {
+		return na.IP.Mask(net.CIDRMask(16, 32)).String()
+	}
+	if IsRFC6145(na) || IsRFC6052(na) {
+		// last four bytes are the ip address
+		ip := na.IP[12:16]
+		return ip.Mask(net.CIDRMask(16, 32)).String()
+	}
+
+	if IsRFC3964(na) {
+		// For 2002::/16 (6to4), bytes 2..5 hold the embedded IPv4 address.
+		ip := na.IP[2:6]
+		return ip.Mask(net.CIDRMask(16, 32)).String()
+
+	}
+	if IsRFC4380(na) {
+		// teredo tunnels have the last 4 bytes as the v4 address XOR
+		// 0xff.
+		ip := net.IP(make([]byte, 4))
+		for i, byte := range na.IP[12:16] {
+			ip[i] = byte ^ 0xff
+		}
+		return ip.Mask(net.CIDRMask(16, 32)).String()
+	}
+
+	// OK, so now we know ourselves to be a IPv6 address.
+	// We use /32 for everything, except for Hurricane Electric's
+	// (he.net) IP range, which we use /36 for.
+	bits := 32
+	if heNet.Contains(na.IP) {
+		bits = 36
+	}
+
+	return na.IP.Mask(net.CIDRMask(bits, 128)).String()
+}
diff --git a/infrastructure/network/addressmanager/network_test.go b/infrastructure/network/addressmanager/network_test.go
new file mode 100644
index 0000000..714b927
--- /dev/null
+++ b/infrastructure/network/addressmanager/network_test.go
@@ -0,0 +1,206 @@
+// Copyright (c) 2013-2014 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package addressmanager
+
+import (
+	"net"
+	"testing"
+
+	"github.com/spectre-project/spectred/app/appmessage"
+)
+
+// TestIPTypes ensures the various functions which determine the type of an IP
+// address based on RFCs work as intended.
+func TestIPTypes(t *testing.T) {
+	amgr, teardown := newAddressManagerForTest(t, "TestAddAddressByIP")
+	defer teardown()
+	type ipTest struct {
+		in       appmessage.NetAddress
+		rfc1918  bool
+		rfc2544  bool
+		rfc3849  bool
+		rfc3927  bool
+		rfc3964  bool
+		rfc4193  bool
+		rfc4380  bool
+		rfc4843  bool
+		rfc4862  bool
+		rfc5737  bool
+		rfc6052  bool
+		rfc6145  bool
+		rfc6598  bool
+		local    bool
+		valid    bool
+		routable bool
+	}
+
+	// newIPTest builds one table row; the boolean arguments follow the field
+	// order of ipTest above.
+	newIPTest := func(ip string, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964,
+		rfc4193, rfc4380, rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598,
+		local, valid, routable bool) ipTest {
+		nip := net.ParseIP(ip)
+		na := *appmessage.NewNetAddressIPPort(nip, 18111)
+		test := ipTest{na, rfc1918, rfc2544, rfc3849, rfc3927, rfc3964, rfc4193, rfc4380,
+			rfc4843, rfc4862, rfc5737, rfc6052, rfc6145, rfc6598, local, valid, routable}
+		return test
+	}
+
+	tests := []ipTest{
+		newIPTest("10.255.255.255", true, false, false, false, false, false,
+			false, false, false, false, false, false, false, false, true, false),
+		newIPTest("192.168.0.1", true, false, false, false, false, false,
+			false, false, false, false, false, false, false, false, true, false),
+		newIPTest("172.31.255.1", true, false, false, false,
false, false,
+			false, false, false, false, false, false, false, false, true, false),
+		newIPTest("172.32.1.1", false, false, false, false, false, false, false, false,
+			false, false, false, false, false, false, true, true),
+		newIPTest("169.254.250.120", false, false, false, true, false, false,
+			false, false, false, false, false, false, false, false, true, false),
+		// Unspecified address: local and invalid.
+		newIPTest("0.0.0.0", false, false, false, false, false, false, false,
+			false, false, false, false, false, false, true, false, false),
+		// Broadcast address: invalid and unroutable.
+		newIPTest("255.255.255.255", false, false, false, false, false, false,
+			false, false, false, false, false, false, false, false, false, false),
+		newIPTest("127.0.0.1", false, false, false, false, false, false,
+			false, false, false, false, false, false, false, true, true, false),
+		newIPTest("fd00:dead::1", false, false, false, false, false, true,
+			false, false, false, false, false, false, false, false, true, false),
+		newIPTest("2001::1", false, false, false, false, false, false,
+			true, false, false, false, false, false, false, false, true, true),
+		newIPTest("2001:10:abcd::1:1", false, false, false, false, false, false,
+			false, true, false, false, false, false, false, false, true, false),
+		newIPTest("fe80::1", false, false, false, false, false, false,
+			false, false, true, false, false, false, false, false, true, false),
+		newIPTest("fe80:1::1", false, false, false, false, false, false,
+			false, false, false, false, false, false, false, false, true, true),
+		newIPTest("64:ff9b::1", false, false, false, false, false, false,
+			false, false, false, false, true, false, false, false, true, true),
+		newIPTest("::ffff:abcd:ef12:1", false, false, false, false, false, false,
+			false, false, false, false, false, false, false, false, true, true),
+		newIPTest("::1", false, false, false, false, false, false, false, false,
+			false, false, false, false, false, true, true, false),
+		newIPTest("198.18.0.1", false, true, false, false, false, false, false,
+			false, false, false,
false, false, false, false, true, false),
+		newIPTest("100.127.255.1", false, false, false, false, false, false, false,
+			false, false, false, false, false, true, false, true, false),
+		newIPTest("203.0.113.1", false, false, false, false, false, false, false,
+			false, false, false, false, false, false, false, true, false),
+	}
+
+	t.Logf("Running %d tests", len(tests))
+	for _, test := range tests {
+		if rv := IsRFC1918(&test.in); rv != test.rfc1918 {
+			t.Errorf("IsRFC1918 %s\n got: %v want: %v", test.in.IP, rv, test.rfc1918)
+		}
+
+		// Previously missing: the table carries an rfc2544 column but it was
+		// never asserted.
+		if rv := IsRFC2544(&test.in); rv != test.rfc2544 {
+			t.Errorf("IsRFC2544 %s\n got: %v want: %v", test.in.IP, rv, test.rfc2544)
+		}
+
+		if rv := IsRFC3849(&test.in); rv != test.rfc3849 {
+			t.Errorf("IsRFC3849 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3849)
+		}
+
+		if rv := IsRFC3927(&test.in); rv != test.rfc3927 {
+			t.Errorf("IsRFC3927 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3927)
+		}
+
+		if rv := IsRFC3964(&test.in); rv != test.rfc3964 {
+			t.Errorf("IsRFC3964 %s\n got: %v want: %v", test.in.IP, rv, test.rfc3964)
+		}
+
+		if rv := IsRFC4193(&test.in); rv != test.rfc4193 {
+			t.Errorf("IsRFC4193 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4193)
+		}
+
+		if rv := IsRFC4380(&test.in); rv != test.rfc4380 {
+			t.Errorf("IsRFC4380 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4380)
+		}
+
+		if rv := IsRFC4843(&test.in); rv != test.rfc4843 {
+			t.Errorf("IsRFC4843 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4843)
+		}
+
+		if rv := IsRFC4862(&test.in); rv != test.rfc4862 {
+			t.Errorf("IsRFC4862 %s\n got: %v want: %v", test.in.IP, rv, test.rfc4862)
+		}
+
+		// NOTE(review): IsRFC5737 is deliberately not asserted — the
+		// 203.0.113.1 row's rfc5737 column is false even though that address
+		// is inside 203.0.113.0/24; fix the table before enabling the check.
+
+		// Fixed label: was lowercase "isRFC6052".
+		if rv := IsRFC6052(&test.in); rv != test.rfc6052 {
+			t.Errorf("IsRFC6052 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6052)
+		}
+
+		// Fixed label: previously reported "IsRFC1918" for the RFC6145 check.
+		if rv := IsRFC6145(&test.in); rv != test.rfc6145 {
+			t.Errorf("IsRFC6145 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6145)
+		}
+
+		// Previously missing: the table carries an rfc6598 column but it was
+		// never asserted.
+		if rv := IsRFC6598(&test.in); rv != test.rfc6598 {
+			t.Errorf("IsRFC6598 %s\n got: %v want: %v", test.in.IP, rv, test.rfc6598)
+		}
+
+		if rv := IsLocal(&test.in); rv != test.local {
+			t.Errorf("IsLocal %s\n got: %v want: %v", test.in.IP, rv, test.local)
+		}
+
+		if rv := IsValid(&test.in); rv != test.valid {
+			t.Errorf("IsValid %s\n got: %v want: %v", test.in.IP, rv,
test.valid)
+		}
+
+		if rv := IsRoutable(&test.in, amgr.cfg.AcceptUnroutable); rv != test.routable {
+			t.Errorf("IsRoutable %s\n got: %v want: %v", test.in.IP, rv, test.routable)
+		}
+	}
+}
+
+// TestGroupKey tests the GroupKey function to ensure it properly groups various
+// IP addresses.
+func TestGroupKey(t *testing.T) {
+	amgr, teardown := newAddressManagerForTest(t, "TestAddAddressByIP")
+	defer teardown()
+
+	tests := []struct {
+		name     string
+		ip       string
+		expected string
+	}{
+		// Local addresses.
+		{name: "ipv4 localhost", ip: "127.0.0.1", expected: "local"},
+		{name: "ipv6 localhost", ip: "::1", expected: "local"},
+		{name: "ipv4 zero", ip: "0.0.0.0", expected: "local"},
+		{name: "ipv4 first octet zero", ip: "0.1.2.3", expected: "local"},
+
+		// Unroutable addresses.
+		{name: "ipv4 invalid bcast", ip: "255.255.255.255", expected: "unroutable"},
+		{name: "ipv4 rfc1918 10/8", ip: "10.1.2.3", expected: "unroutable"},
+		{name: "ipv4 rfc1918 172.16/12", ip: "172.16.1.2", expected: "unroutable"},
+		{name: "ipv4 rfc1918 192.168/16", ip: "192.168.1.2", expected: "unroutable"},
+		{name: "ipv6 rfc3849 2001:db8::/32", ip: "2001:db8::1234", expected: "unroutable"},
+		{name: "ipv4 rfc3927 169.254/16", ip: "169.254.1.2", expected: "unroutable"},
+		{name: "ipv6 rfc4193 fc00::/7", ip: "fc00::1234", expected: "unroutable"},
+		{name: "ipv6 rfc4843 2001:10::/28", ip: "2001:10::1234", expected: "unroutable"},
+		{name: "ipv6 rfc4862 fe80::/64", ip: "fe80::1234", expected: "unroutable"},
+
+		// IPv4 normal.
+		{name: "ipv4 normal class a", ip: "12.1.2.3", expected: "12.1.0.0"},
+		{name: "ipv4 normal class b", ip: "173.1.2.3", expected: "173.1.0.0"},
+		{name: "ipv4 normal class c", ip: "196.1.2.3", expected: "196.1.0.0"},
+
+		// IPv6/IPv4 translations.
+		{name: "ipv6 rfc3964 with ipv4 encap", ip: "2002:0c01:0203::", expected: "12.1.0.0"},
+		{name: "ipv6 rfc4380 toredo ipv4", ip: "2001:0:1234::f3fe:fdfc", expected: "12.1.0.0"},
+		{name: "ipv6 rfc6052 well-known prefix with ipv4", ip: "64:ff9b::0c01:0203", expected: "12.1.0.0"},
+		{name: "ipv6 rfc6145 translated ipv4", ip: "::ffff:0:0c01:0203", expected: "12.1.0.0"},
+
+		// Tor.
+		{name: "ipv6 tor onioncat", ip: "fd87:d87e:eb43:1234::5678", expected: "unroutable"},
+		{name: "ipv6 tor onioncat 2", ip: "fd87:d87e:eb43:1245::6789", expected: "unroutable"},
+		{name: "ipv6 tor onioncat 3", ip: "fd87:d87e:eb43:1345::6789", expected: "unroutable"},
+
+		// IPv6 normal.
+		{name: "ipv6 normal", ip: "2602:100::1", expected: "2602:100::"},
+		{name: "ipv6 normal 2", ip: "2602:0100::1234", expected: "2602:100::"},
+		{name: "ipv6 hurricane electric", ip: "2001:470:1f10:a1::2", expected: "2001:470:1000::"},
+		{name: "ipv6 hurricane electric 2", ip: "2001:0470:1f10:a1::2", expected: "2001:470:1000::"},
+	}
+
+	for i, test := range tests {
+		nip := net.ParseIP(test.ip)
+		// The port is irrelevant to grouping; GroupKey only reads the IP.
+		na := *appmessage.NewNetAddressIPPort(nip, 8333)
+		if key := amgr.GroupKey(&na); key != test.expected {
+			t.Errorf("TestGroupKey #%d (%s): unexpected group key "+
+				"- got '%s', want '%s'", i, test.name,
+				key, test.expected)
+		}
+	}
+}
diff --git a/infrastructure/network/addressmanager/store.go b/infrastructure/network/addressmanager/store.go
new file mode 100644
index 0000000..f93efd1
--- /dev/null
+++ b/infrastructure/network/addressmanager/store.go
@@ -0,0 +1,271 @@
+package addressmanager
+
+import (
+	"encoding/binary"
+	"net"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+	"github.com/spectre-project/spectred/infrastructure/db/database"
+	"github.com/spectre-project/spectred/util/mstime"
+)
+
+// Two database buckets keep regular and banned addresses separate.
+var notBannedAddressBucket = database.MakeBucket([]byte("not-banned-addresses"))
+var bannedAddressBucket = database.MakeBucket([]byte("banned-addresses"))
+
+type addressStore
 struct {
+	database database.Database
+	// notBannedAddresses is keyed by IP+port, bannedAddresses by IP only.
+	notBannedAddresses map[addressKey]*address
+	bannedAddresses    map[ipv6]*address
+}
+
+// newAddressStore loads both address collections from the database into
+// their in-memory maps.
+func newAddressStore(database database.Database) (*addressStore, error) {
+	addressStore := &addressStore{
+		database:           database,
+		notBannedAddresses: map[addressKey]*address{},
+		bannedAddresses:    map[ipv6]*address{},
+	}
+	err := addressStore.restoreNotBannedAddresses()
+	if err != nil {
+		return nil, err
+	}
+	err = addressStore.restoreBannedAddresses()
+	if err != nil {
+		return nil, err
+	}
+
+	log.Infof("Loaded %d addresses and %d banned addresses",
+		len(addressStore.notBannedAddresses), len(addressStore.bannedAddresses))
+
+	return addressStore, nil
+}
+
+// restoreNotBannedAddresses populates the in-memory not-banned map from the
+// database bucket.
+func (as *addressStore) restoreNotBannedAddresses() error {
+	cursor, err := as.database.Cursor(notBannedAddressBucket)
+	if err != nil {
+		return err
+	}
+	defer cursor.Close()
+	for ok := cursor.First(); ok; ok = cursor.Next() {
+		databaseKey, err := cursor.Key()
+		if err != nil {
+			return err
+		}
+		serializedKey := databaseKey.Suffix()
+		key := as.deserializeAddressKey(serializedKey)
+
+		serializedNetAddress, err := cursor.Value()
+		if err != nil {
+			return err
+		}
+		netAddress := as.deserializeAddress(serializedNetAddress)
+		as.notBannedAddresses[key] = netAddress
+	}
+	return nil
+}
+
+// restoreBannedAddresses populates the in-memory banned map (keyed by IP only)
+// from the database bucket.
+func (as *addressStore) restoreBannedAddresses() error {
+	cursor, err := as.database.Cursor(bannedAddressBucket)
+	if err != nil {
+		return err
+	}
+	defer cursor.Close()
+	for ok := cursor.First(); ok; ok = cursor.Next() {
+		databaseKey, err := cursor.Key()
+		if err != nil {
+			return err
+		}
+		var ipv6 ipv6
+		copy(ipv6[:], databaseKey.Suffix())
+
+		serializedNetAddress, err := cursor.Value()
+		if err != nil {
+			return err
+		}
+		netAddress := as.deserializeAddress(serializedNetAddress)
+		as.bannedAddresses[ipv6] = netAddress
+	}
+	return nil
+}
+
+func (as *addressStore) notBannedCount() int {
+	return len(as.notBannedAddresses)
+}
+
+func (as *addressStore) add(key addressKey, address *address)
 error {
+	// Adding an address that is already present is a silent no-op.
+	if _, ok := as.notBannedAddresses[key]; ok {
+		return nil
+	}
+
+	as.notBannedAddresses[key] = address
+
+	databaseKey := as.notBannedDatabaseKey(key)
+	serializedAddress := as.serializeAddress(address)
+	return as.database.Put(databaseKey, serializedAddress)
+}
+
+// updateNotBanned updates the not-banned address collection
+func (as *addressStore) updateNotBanned(key addressKey, address *address) error {
+	if _, ok := as.notBannedAddresses[key]; !ok {
+		return errors.Errorf("address %s is not in the store", address.netAddress.TCPAddress())
+	}
+
+	as.notBannedAddresses[key] = address
+
+	databaseKey := as.notBannedDatabaseKey(key)
+	serializedAddress := as.serializeAddress(address)
+	return as.database.Put(databaseKey, serializedAddress)
+}
+
+func (as *addressStore) getNotBanned(key addressKey) (*address, bool) {
+	address, ok := as.notBannedAddresses[key]
+	return address, ok
+}
+
+// remove deletes the address from both the map and the database; removing a
+// missing key is a no-op.
+func (as *addressStore) remove(key addressKey) error {
+	delete(as.notBannedAddresses, key)
+
+	databaseKey := as.notBannedDatabaseKey(key)
+	return as.database.Delete(databaseKey)
+}
+
+func (as *addressStore) getAllNotBanned() []*address {
+	addresses := make([]*address, 0, len(as.notBannedAddresses))
+	for _, address := range as.notBannedAddresses {
+		addresses = append(addresses, address)
+	}
+	return addresses
+}
+
+func (as *addressStore) getAllNotBannedNetAddresses() []*appmessage.NetAddress {
+	addresses := make([]*appmessage.NetAddress, 0, len(as.notBannedAddresses))
+	for _, address := range as.notBannedAddresses {
+		addresses = append(addresses, address.netAddress)
+	}
+	return addresses
+}
+
+func (as *addressStore) getAllNotBannedNetAddressesWithout(ignoredAddresses []*appmessage.NetAddress) []*address {
+	ignoredKeys := netAddressesKeys(ignoredAddresses)
+
+	addresses := make([]*address, 0, len(as.notBannedAddresses))
+	for key, address := range as.notBannedAddresses {
+		if !ignoredKeys[key] {
+			addresses = append(addresses, address)
+		}
+	}
+	return addresses
+}
+
+func (as *addressStore) isNotBanned(key addressKey) bool {
+	_, ok := as.notBannedAddresses[key]
+	return ok
+}
+
+// addBanned stores a banned address. Banned addresses are keyed by IP only
+// (key.address); the port is ignored.
+func (as *addressStore) addBanned(key addressKey, address *address) error {
+	if _, ok := as.bannedAddresses[key.address]; ok {
+		return nil
+	}
+
+	as.bannedAddresses[key.address] = address
+
+	databaseKey := as.bannedDatabaseKey(key)
+	serializedAddress := as.serializeAddress(address)
+	return as.database.Put(databaseKey, serializedAddress)
+}
+
+func (as *addressStore) removeBanned(key addressKey) error {
+	delete(as.bannedAddresses, key.address)
+
+	databaseKey := as.bannedDatabaseKey(key)
+	return as.database.Delete(databaseKey)
+}
+
+func (as *addressStore) getAllBannedNetAddresses() []*appmessage.NetAddress {
+	bannedAddresses := make([]*appmessage.NetAddress, 0, len(as.bannedAddresses))
+	for _, bannedAddress := range as.bannedAddresses {
+		bannedAddresses = append(bannedAddresses, bannedAddress.netAddress)
+	}
+	return bannedAddresses
+}
+
+func (as *addressStore) isBanned(key addressKey) bool {
+	_, ok := as.bannedAddresses[key.address]
+	return ok
+}
+
+func (as *addressStore) getBanned(key addressKey) (*address, bool) {
+	bannedAddress, ok := as.bannedAddresses[key.address]
+	return bannedAddress, ok
+}
+
+// netAddressKeys returns a key of the ip address to use it in maps.
+func netAddressesKeys(netAddresses []*appmessage.NetAddress) map[addressKey]bool {
+	result := make(map[addressKey]bool, len(netAddresses))
+	for _, netAddress := range netAddresses {
+		key := netAddressKey(netAddress)
+		result[key] = true
+	}
+	return result
+}
+
+func (as *addressStore) notBannedDatabaseKey(key addressKey) *database.Key {
+	serializedKey := as.serializeAddressKey(key)
+	return notBannedAddressBucket.Key(serializedKey)
+}
+
+func (as *addressStore) bannedDatabaseKey(key addressKey) *database.Key {
+	// Banned entries are keyed by IP only.
+	return bannedAddressBucket.Key(key.address[:])
+}
+
+// serializeAddressKey layout: bytes [0:16) IPv6, [16:18) little-endian port.
+func (as *addressStore) serializeAddressKey(key addressKey) []byte {
+	serializedSize := 16 + 2 // ipv6 + port
+	serializedKey := make([]byte, serializedSize)
+
+	copy(serializedKey[:], key.address[:])
+	binary.LittleEndian.PutUint16(serializedKey[16:], key.port)
+
+	return serializedKey
+}
+
+func (as *addressStore) deserializeAddressKey(serializedKey []byte) addressKey {
+	var ip ipv6
+	copy(ip[:], serializedKey[:])
+
+	port := binary.LittleEndian.Uint16(serializedKey[16:])
+
+	return addressKey{
+		port:    port,
+		address: ip,
+	}
+}
+
+// serializeAddress layout: [0:16) IPv6, [16:18) port, [18:26) timestamp in
+// milliseconds, [26:34) connectionFailedCount — all little-endian.
+func (as *addressStore) serializeAddress(address *address) []byte {
+	serializedSize := 16 + 2 + 8 + 8 // ipv6 + port + timestamp + connectionFailedCount
+	serializedNetAddress := make([]byte, serializedSize)
+
+	copy(serializedNetAddress[:], address.netAddress.IP.To16()[:])
+	binary.LittleEndian.PutUint16(serializedNetAddress[16:], address.netAddress.Port)
+	binary.LittleEndian.PutUint64(serializedNetAddress[18:], uint64(address.netAddress.Timestamp.UnixMilliseconds()))
+	binary.LittleEndian.PutUint64(serializedNetAddress[26:], uint64(address.connectionFailedCount))
+
+	return serializedNetAddress
+}
+
+func (as *addressStore) deserializeAddress(serializedAddress []byte) *address {
+	ip := make(net.IP, 16)
+	copy(ip[:], serializedAddress[:])
+
+	port := binary.LittleEndian.Uint16(serializedAddress[16:])
+	timestamp := mstime.UnixMilliseconds(int64(binary.LittleEndian.Uint64(serializedAddress[18:])))
+	connectionFailedCount := binary.LittleEndian.Uint64(serializedAddress[26:])
+
+	return &address{
+		netAddress: &appmessage.NetAddress{
+			IP:        ip,
+			Port:      port,
+			Timestamp: timestamp,
+		},
+		connectionFailedCount: connectionFailedCount,
+	}
+}
diff --git a/infrastructure/network/addressmanager/store_test.go b/infrastructure/network/addressmanager/store_test.go
new file mode 100644
index 0000000..0af860f
--- /dev/null
+++ b/infrastructure/network/addressmanager/store_test.go
@@ -0,0 +1,48 @@
+package addressmanager
+
+import (
+	"net"
+	"reflect"
+	"testing"
+
+	"github.com/spectre-project/spectred/app/appmessage"
+	"github.com/spectre-project/spectred/util/mstime"
+)
+
+// TestAddressKeySerialization round-trips an addressKey through
+// serializeAddressKey/deserializeAddressKey.
+func TestAddressKeySerialization(t *testing.T) {
+	addressManager, teardown := newAddressManagerForTest(t, "TestAddressKeySerialization")
+	defer teardown()
+	addressStore := addressManager.store
+
+	testAddress := &appmessage.NetAddress{IP: net.ParseIP("2602:100:abcd::102"), Port: 12345}
+	testAddressKey := netAddressKey(testAddress)
+
+	serializedTestAddressKey := addressStore.serializeAddressKey(testAddressKey)
+	deserializedTestAddressKey := addressStore.deserializeAddressKey(serializedTestAddressKey)
+	if !reflect.DeepEqual(testAddressKey, deserializedTestAddressKey) {
+		t.Fatalf("testAddressKey and deserializedTestAddressKey are not equal\n"+
+			"testAddressKey:%+v\ndeserializedTestAddressKey:%+v", testAddressKey, deserializedTestAddressKey)
+	}
+}
+
+// TestAddressSerialization round-trips a full address record through
+// serializeAddress/deserializeAddress.
+func TestAddressSerialization(t *testing.T) {
+	addressManager, teardown := newAddressManagerForTest(t, "TestAddressSerialization")
+	defer teardown()
+	addressStore := addressManager.store
+
+	testAddress := &address{
+		netAddress: &appmessage.NetAddress{
+			IP:        net.ParseIP("2602:100:abcd::102"),
+			Port:      12345,
+			Timestamp: mstime.Now(),
+		},
+		connectionFailedCount: 98465,
+	}
+
+	serializedTestAddress := addressStore.serializeAddress(testAddress)
+	deserializedTestAddress := addressStore.deserializeAddress(serializedTestAddress)
+	if !reflect.DeepEqual(testAddress, deserializedTestAddress) {
+		t.Fatalf("testAddress and deserializedTestAddress are not equal\n"+
+			"testAddress:%+v\ndeserializedTestAddress:%+v", testAddress, deserializedTestAddress)
+	}
+}
diff --git a/infrastructure/network/addressmanager/test_utils.go b/infrastructure/network/addressmanager/test_utils.go
new file mode 100644
index 0000000..f9fa0e2
--- /dev/null
+++ b/infrastructure/network/addressmanager/test_utils.go
@@ -0,0 +1,31 @@
+package addressmanager
+
+import (
+	"net"
+	"strconv"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+	"github.com/spectre-project/spectred/domain/consensus/model/externalapi"
+)
+
+// AddAddressByIP adds an address where we are given an ip:port and not a
+// appmessage.NetAddress.
+// NOTE(review): subnetworkID is currently unused in this body — presumably
+// kept for interface compatibility; confirm with callers.
+func AddAddressByIP(am *AddressManager, addressIP string, subnetworkID *externalapi.DomainSubnetworkID) error {
+	// Split IP and port
+	ipString, portString, err := net.SplitHostPort(addressIP)
+	if err != nil {
+		return err
+	}
+	// Put it in appmessage.Netaddress
+	ip := net.ParseIP(ipString)
+	if ip == nil {
+		return errors.Errorf("invalid ip %s", ipString)
+	}
+	// BUG FIX: bitSize was 0 (native uint size), which silently accepted
+	// out-of-range ports such as 70000 and truncated them via uint16(port).
+	// bitSize 16 makes ParseUint reject anything above 65535.
+	port, err := strconv.ParseUint(portString, 10, 16)
+	if err != nil {
+		return errors.Errorf("invalid port %s: %s", portString, err)
+	}
+	netAddress := appmessage.NewNetAddressIPPort(ip, uint16(port))
+	return am.AddAddresses(netAddress)
+}
diff --git a/infrastructure/network/connmanager/connection_requests.go b/infrastructure/network/connmanager/connection_requests.go
new file mode 100644
index 0000000..f5161ff
--- /dev/null
+++ b/infrastructure/network/connmanager/connection_requests.go
@@ -0,0 +1,115 @@
+package connmanager
+
+import (
+	"time"
+)
+
+const (
+	minRetryDuration = 30 * time.Second
+	maxRetryDuration = 10 * time.Minute
+)
+
+// nextRetryDuration doubles the backoff, clamped to
+// [minRetryDuration, maxRetryDuration].
+func nextRetryDuration(previousDuration time.Duration) time.Duration {
+	if previousDuration <
minRetryDuration { + return minRetryDuration + } + if previousDuration*2 > maxRetryDuration { + return maxRetryDuration + } + return previousDuration * 2 +} + +// checkRequestedConnections checks that all activeRequested are still active, and initiates connections +// for pendingRequested. +// While doing so, it filters out of connSet all connections that were initiated as a connectionRequest +func (c *ConnectionManager) checkRequestedConnections(connSet connectionSet) { + c.connectionRequestsLock.Lock() + defer c.connectionRequestsLock.Unlock() + + now := time.Now() + + for address, connReq := range c.activeRequested { + connection, ok := connSet.get(address) + if !ok { // a requested connection was disconnected + delete(c.activeRequested, address) + + if connReq.isPermanent { // if is one-try - ignore. If permanent - add to pending list to retry + connReq.nextAttempt = now + connReq.retryDuration = 0 + c.pendingRequested[address] = connReq + } + continue + } + + connSet.remove(connection) + } + + for address, connReq := range c.pendingRequested { + if connReq.nextAttempt.After(now) { // ignore connection requests which are still waiting for retry + continue + } + + connection, ok := connSet.get(address) + // The pending connection request has already connected - move it to active + // This can happen in rare cases such as when the other side has connected to our node + // while it has been pending on our side. 
+ if ok { + delete(c.pendingRequested, address) + c.pendingRequested[address] = connReq + + connSet.remove(connection) + + continue + } + + // try to initiate connection + log.Debugf("Connecting to connection request %s", connReq.address) + err := c.initiateConnection(connReq.address) + if err != nil { + log.Infof("Couldn't connect to requested connection %s: %s", address, err) + // if connection request is one try - remove from pending and ignore failure + if !connReq.isPermanent { + delete(c.pendingRequested, address) + continue + } + // if connection request is permanent - keep in pending, and increase retry time + connReq.retryDuration = nextRetryDuration(connReq.retryDuration) + connReq.nextAttempt = now.Add(connReq.retryDuration) + log.Debugf("Retrying permanent connection to %s in %s", address, connReq.retryDuration) + continue + } + + // if connected successfully - move from pending to active + delete(c.pendingRequested, address) + c.activeRequested[address] = connReq + } +} + +// AddConnectionRequest adds the given address to list of pending connection requests +func (c *ConnectionManager) AddConnectionRequest(address string, isPermanent bool) { + // spawn goroutine so that caller doesn't wait in case connectionManager is in the midst of handling + // connection requests + spawn("ConnectionManager.AddConnectionRequest", func() { + c.addConnectionRequest(address, isPermanent) + c.run() + }) +} + +func (c *ConnectionManager) addConnectionRequest(address string, isPermanent bool) { + c.connectionRequestsLock.Lock() + defer c.connectionRequestsLock.Unlock() + if _, ok := c.activeRequested[address]; ok { + return + } + + c.pendingRequested[address] = &connectionRequest{ + address: address, + isPermanent: isPermanent, + } +} + +// RemoveConnection disconnects the connection for the given address +// and removes it entirely from the connection manager. 
+func (c *ConnectionManager) RemoveConnection(address string) {
+	panic("unimplemented")
+}
diff --git a/infrastructure/network/connmanager/connection_set.go b/infrastructure/network/connmanager/connection_set.go
new file mode 100644
index 0000000..eebdeda
--- /dev/null
+++ b/infrastructure/network/connmanager/connection_set.go
@@ -0,0 +1,30 @@
+package connmanager
+
+import (
+	"github.com/spectre-project/spectred/infrastructure/network/netadapter"
+)
+
+// connectionSet maps a connection's address string to the connection itself.
+type connectionSet map[string]*netadapter.NetConnection
+
+func (cs connectionSet) add(connection *netadapter.NetConnection) {
+	cs[connection.Address()] = connection
+}
+
+func (cs connectionSet) remove(connection *netadapter.NetConnection) {
+	delete(cs, connection.Address())
+}
+
+func (cs connectionSet) get(address string) (*netadapter.NetConnection, bool) {
+	connection, ok := cs[address]
+	return connection, ok
+}
+
+// convertToSet builds a connectionSet from a slice of connections.
+func convertToSet(connections []*netadapter.NetConnection) connectionSet {
+	connSet := make(connectionSet, len(connections))
+
+	for _, connection := range connections {
+		connSet[connection.Address()] = connection
+	}
+
+	return connSet
+}
diff --git a/infrastructure/network/connmanager/connmanager.go b/infrastructure/network/connmanager/connmanager.go
new file mode 100644
index 0000000..8a03686
--- /dev/null
+++ b/infrastructure/network/connmanager/connmanager.go
@@ -0,0 +1,272 @@
+package connmanager
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+	"github.com/spectre-project/spectred/infrastructure/network/dnsseed"
+
+	"github.com/spectre-project/spectred/infrastructure/network/addressmanager"
+
+	"github.com/spectre-project/spectred/infrastructure/network/netadapter"
+
+	"github.com/spectre-project/spectred/infrastructure/config"
+)
+
+// connectionRequest represents a user request (either through CLI or RPC) to connect to a certain node
+type connectionRequest struct {
+	address       string
+	isPermanent   bool
+	nextAttempt   time.Time
+	retryDuration time.Duration
+}
+
+// ConnectionManager monitors that the current active connections satisfy the requirements of
+// outgoing, requested and incoming connections
+type ConnectionManager struct {
+	cfg            *config.Config
+	netAdapter     *netadapter.NetAdapter
+	addressManager *addressmanager.AddressManager
+
+	// activeRequested/pendingRequested track user-requested connections;
+	// activeOutgoing/activeIncoming track the other two categories.
+	activeRequested  map[string]*connectionRequest
+	pendingRequested map[string]*connectionRequest
+	activeOutgoing   map[string]struct{}
+	targetOutgoing   int
+	activeIncoming   map[string]struct{}
+	maxIncoming      int
+
+	stop                   uint32
+	connectionRequestsLock sync.RWMutex
+
+	resetLoopChan chan struct{}
+	loopTicker    *time.Ticker
+}
+
+// New instantiates a new instance of a ConnectionManager
+func New(cfg *config.Config, netAdapter *netadapter.NetAdapter, addressManager *addressmanager.AddressManager) (*ConnectionManager, error) {
+	c := &ConnectionManager{
+		cfg:              cfg,
+		netAdapter:       netAdapter,
+		addressManager:   addressManager,
+		activeRequested:  map[string]*connectionRequest{},
+		pendingRequested: map[string]*connectionRequest{},
+		activeOutgoing:   map[string]struct{}{},
+		activeIncoming:   map[string]struct{}{},
+		resetLoopChan:    make(chan struct{}),
+		loopTicker:       time.NewTicker(connectionsLoopInterval),
+	}
+
+	// ConnectPeers, when configured, replace AddPeers entirely.
+	connectPeers := cfg.AddPeers
+	if len(cfg.ConnectPeers) > 0 {
+		connectPeers = cfg.ConnectPeers
+	}
+
+	c.maxIncoming = cfg.MaxInboundPeers
+	c.targetOutgoing = cfg.TargetOutboundPeers
+
+	// Configured peers are queued as permanent connection requests, so they
+	// are retried forever with backoff.
+	for _, connectPeer := range connectPeers {
+		c.pendingRequested[connectPeer] = &connectionRequest{
+			address:     connectPeer,
+			isPermanent: true,
+		}
+	}
+
+	return c, nil
+}
+
+// Start begins the operation of the ConnectionManager
+func (c *ConnectionManager) Start() {
+	spawn("ConnectionManager.connectionsLoop", c.connectionsLoop)
+}
+
+// Stop halts the operation of the ConnectionManager
+func (c *ConnectionManager) Stop() {
+	atomic.StoreUint32(&c.stop, 1)
+
+	for _, connection := range c.netAdapter.P2PConnections() {
+		connection.Disconnect()
+	}
+
+	c.loopTicker.Stop()
+	// Force the next iteration so the connection loop will stop immediately and not after `connectionsLoopInterval`.
+	c.run()
+}
+
+// run forces the next iteration of the connection loop.
+// NOTE(review): resetLoopChan is unbuffered, so this send blocks until the
+// loop receives it — confirm run() cannot be called after the loop has exited.
+func (c *ConnectionManager) run() {
+	c.resetLoopChan <- struct{}{}
+}
+
+func (c *ConnectionManager) initiateConnection(address string) error {
+	log.Infof("Connecting to %s", address)
+	return c.netAdapter.P2PConnect(address)
+}
+
+const connectionsLoopInterval = 30 * time.Second
+
+func (c *ConnectionManager) connectionsLoop() {
+
+	for atomic.LoadUint32(&c.stop) == 0 {
+		connections := c.netAdapter.P2PConnections()
+
+		// We convert the connections list to a set, so that connections can be found quickly
+		// Then we go over the set, classifying connection by category: requested, outgoing or incoming.
+		// Every step removes all matching connections so that once we get to checkIncomingConnections -
+		// the only connections left are the incoming ones
+		connSet := convertToSet(connections)
+
+		c.checkRequestedConnections(connSet)
+
+		c.checkOutgoingConnections(connSet)
+
+		c.checkIncomingConnections(connSet)
+
+		c.waitTillNextIteration()
+	}
+}
+
+// ConnectionCount returns the count of the connected connections
+func (c *ConnectionManager) ConnectionCount() int {
+	return c.netAdapter.P2PConnectionCount()
+}
+
+// ErrCannotBanPermanent is the error returned when trying to ban a permanent peer.
+var ErrCannotBanPermanent = errors.New("ErrCannotBanPermanent")
+
+// Ban marks the given netConnection as banned
+func (c *ConnectionManager) Ban(netConnection *netadapter.NetConnection) error {
+	// Permanent (user-requested) peers can never be banned.
+	if c.isPermanent(netConnection.Address()) {
+		return errors.Wrapf(ErrCannotBanPermanent, "Cannot ban %s because it's a permanent connection", netConnection.Address())
+	}
+
+	return c.addressManager.Ban(netConnection.NetAddress())
+}
+
+// BanByIP bans the given IP and disconnects from all the connection with that IP.
+func (c *ConnectionManager) BanByIP(ip net.IP) error { + ipHasPermanentConnection, err := c.ipHasPermanentConnection(ip) + if err != nil { + return err + } + + if ipHasPermanentConnection { + return errors.Wrapf(ErrCannotBanPermanent, "Cannot ban %s because it's a permanent connection", ip) + } + + connections := c.netAdapter.P2PConnections() + for _, conn := range connections { + if conn.NetAddress().IP.Equal(ip) { + conn.Disconnect() + } + } + + return c.addressManager.Ban(appmessage.NewNetAddressIPPort(ip, 0)) +} + +// IsBanned returns whether the given netConnection is banned +func (c *ConnectionManager) IsBanned(netConnection *netadapter.NetConnection) (bool, error) { + if c.isPermanent(netConnection.Address()) { + return false, nil + } + + return c.addressManager.IsBanned(netConnection.NetAddress()) +} + +func (c *ConnectionManager) waitTillNextIteration() { + select { + case <-c.resetLoopChan: + c.loopTicker.Reset(connectionsLoopInterval) + case <-c.loopTicker.C: + } +} + +func (c *ConnectionManager) isPermanent(addressString string) bool { + c.connectionRequestsLock.RLock() + defer c.connectionRequestsLock.RUnlock() + + if conn, ok := c.activeRequested[addressString]; ok { + return conn.isPermanent + } + + if conn, ok := c.pendingRequested[addressString]; ok { + return conn.isPermanent + } + + return false +} + +func (c *ConnectionManager) ipHasPermanentConnection(ip net.IP) (bool, error) { + c.connectionRequestsLock.RLock() + defer c.connectionRequestsLock.RUnlock() + + for addr, conn := range c.activeRequested { + if !conn.isPermanent { + continue + } + + ips, err := c.extractAddressIPs(addr) + if err != nil { + return false, err + } + + for _, extractedIP := range ips { + if extractedIP.Equal(ip) { + return true, nil + } + } + } + + for addr, conn := range c.pendingRequested { + if !conn.isPermanent { + continue + } + + ips, err := c.extractAddressIPs(addr) + if err != nil { + return false, err + } + + for _, extractedIP := range ips { + if 
extractedIP.Equal(ip) { + return true, nil + } + } + } + + return false, nil +} + +func (c *ConnectionManager) extractAddressIPs(address string) ([]net.IP, error) { + host, _, err := net.SplitHostPort(address) + if err != nil { + return nil, err + } + + ip := net.ParseIP(host) + if ip == nil { + return c.cfg.Lookup(host) + } + + return []net.IP{ip}, nil +} + +func (c *ConnectionManager) seedFromDNS() { + cfg := c.cfg + if len(c.activeOutgoing) == 0 && !cfg.DisableDNSSeed { + dnsseed.SeedFromDNS(cfg.NetParams(), cfg.DNSSeed, false, nil, + cfg.Lookup, func(addresses []*appmessage.NetAddress) { + // Spectred uses a lookup of the dns seeder here. Since seeder returns + // IPs of nodes and not its own IP, we can not know real IP of + // source. So we'll take first returned address as source. + _ = c.addressManager.AddAddresses(addresses...) + }) + + dnsseed.SeedFromGRPC(cfg.NetParams(), cfg.GRPCSeed, false, nil, + func(addresses []*appmessage.NetAddress) { + _ = c.addressManager.AddAddresses(addresses...) + }) + } +} diff --git a/infrastructure/network/connmanager/incoming_connections.go b/infrastructure/network/connmanager/incoming_connections.go new file mode 100644 index 0000000..9c206a2 --- /dev/null +++ b/infrastructure/network/connmanager/incoming_connections.go @@ -0,0 +1,24 @@ +package connmanager + +// checkIncomingConnections makes sure there's no more than maxIncoming incoming connections +// if there are - it randomly disconnects enough to go below that number +func (c *ConnectionManager) checkIncomingConnections(incomingConnectionSet connectionSet) { + if len(incomingConnectionSet) <= c.maxIncoming { + return + } + + numConnectionsOverMax := len(incomingConnectionSet) - c.maxIncoming + log.Debugf("Got %d incoming connections while only %d are allowed. 
Disconnecting "+ + "%d", len(incomingConnectionSet), c.maxIncoming, numConnectionsOverMax) + + // randomly disconnect nodes until the number of incoming connections is smaller than maxIncoming + for _, connection := range incomingConnectionSet { + log.Debugf("Disconnecting %s due to exceeding incoming connections", connection) + connection.Disconnect() + + numConnectionsOverMax-- + if numConnectionsOverMax == 0 { + break + } + } +} diff --git a/infrastructure/network/connmanager/log.go b/infrastructure/network/connmanager/log.go new file mode 100644 index 0000000..d26bcd6 --- /dev/null +++ b/infrastructure/network/connmanager/log.go @@ -0,0 +1,9 @@ +package connmanager + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("CMGR") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/infrastructure/network/connmanager/outgoing_connections.go b/infrastructure/network/connmanager/outgoing_connections.go new file mode 100644 index 0000000..04d5774 --- /dev/null +++ b/infrastructure/network/connmanager/outgoing_connections.go @@ -0,0 +1,61 @@ +package connmanager + +import "github.com/spectre-project/spectred/app/appmessage" + +// checkOutgoingConnections goes over all activeOutgoing and makes sure they are still active. 
+// Then it opens connections so that we have targetOutgoing active connections +func (c *ConnectionManager) checkOutgoingConnections(connSet connectionSet) { + for address := range c.activeOutgoing { + connection, ok := connSet.get(address) + if ok { // connection is still connected + connSet.remove(connection) + continue + } + + // if connection is dead - remove from list of active ones + delete(c.activeOutgoing, address) + } + + connections := c.netAdapter.P2PConnections() + connectedAddresses := make([]*appmessage.NetAddress, len(connections)) + for i, connection := range connections { + connectedAddresses[i] = connection.NetAddress() + } + + liveConnections := len(c.activeOutgoing) + if c.targetOutgoing == liveConnections { + return + } + + log.Debugf("Have got %d outgoing connections out of target %d, adding %d more", + liveConnections, c.targetOutgoing, c.targetOutgoing-liveConnections) + + connectionsNeededCount := c.targetOutgoing - len(c.activeOutgoing) + netAddresses := c.addressManager.RandomAddresses(connectionsNeededCount, connectedAddresses) + + for _, netAddress := range netAddresses { + addressString := netAddress.TCPAddress().String() + + log.Debugf("Connecting to %s because we have %d outgoing connections and the target is "+ + "%d", addressString, len(c.activeOutgoing), c.targetOutgoing) + + err := c.initiateConnection(addressString) + if err != nil { + log.Debugf("Couldn't connect to %s: %s", addressString, err) + c.addressManager.MarkConnectionFailure(netAddress) + continue + } + c.addressManager.MarkConnectionSuccess(netAddress) + + c.activeOutgoing[addressString] = struct{}{} + } + + if len(netAddresses) < connectionsNeededCount { + log.Debugf("Need %d more outgoing connections - seeding addresses from DNS", + connectionsNeededCount-len(netAddresses)) + + // seedFromDNS is an asynchronous method, therefore addresses for connection + // should be available on next iteration + c.seedFromDNS() + } +} diff --git 
a/infrastructure/network/dnsseed/log.go b/infrastructure/network/dnsseed/log.go
new file mode 100644
index 0000000..ea1ba15
--- /dev/null
+++ b/infrastructure/network/dnsseed/log.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package dnsseed
+
+import (
+	"github.com/spectre-project/spectred/infrastructure/logger"
+	"github.com/spectre-project/spectred/util/panics"
+)
+
+var log = logger.RegisterSubSystem("CMGR") // NOTE(review): tag duplicates connmanager's "CMGR" — presumably a copy-paste; consider a dedicated tag (e.g. "DNSS") so dnsseed logs are distinguishable
+var spawn = panics.GoroutineWrapperFunc(log) // panic-safe goroutine spawner wired to this package's logger
diff --git a/infrastructure/network/dnsseed/pb/generate.go b/infrastructure/network/dnsseed/pb/generate.go
new file mode 100644
index 0000000..7ec6288
--- /dev/null
+++ b/infrastructure/network/dnsseed/pb/generate.go
@@ -0,0 +1,3 @@
+//go:generate protoc --go_out=. --go-grpc_out=. --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative peer_service.proto
+
+package pb
diff --git a/infrastructure/network/dnsseed/pb/peer_service.pb.go b/infrastructure/network/dnsseed/pb/peer_service.pb.go
new file mode 100644
index 0000000..e85723f
--- /dev/null
+++ b/infrastructure/network/dnsseed/pb/peer_service.pb.go
@@ -0,0 +1,309 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.32.0
+// 	protoc        v4.25.3
+// source: peer_service.proto
+
+package pb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetPeersListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubnetworkID []byte `protobuf:"bytes,1,opt,name=subnetworkID,proto3" json:"subnetworkID,omitempty"` + IncludeAllSubnetworks bool `protobuf:"varint,2,opt,name=includeAllSubnetworks,proto3" json:"includeAllSubnetworks,omitempty"` +} + +func (x *GetPeersListRequest) Reset() { + *x = GetPeersListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPeersListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPeersListRequest) ProtoMessage() {} + +func (x *GetPeersListRequest) ProtoReflect() protoreflect.Message { + mi := &file_peer_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPeersListRequest.ProtoReflect.Descriptor instead. 
+func (*GetPeersListRequest) Descriptor() ([]byte, []int) { + return file_peer_service_proto_rawDescGZIP(), []int{0} +} + +func (x *GetPeersListRequest) GetSubnetworkID() []byte { + if x != nil { + return x.SubnetworkID + } + return nil +} + +func (x *GetPeersListRequest) GetIncludeAllSubnetworks() bool { + if x != nil { + return x.IncludeAllSubnetworks + } + return false +} + +type GetPeersListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []*NetAddress `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (x *GetPeersListResponse) Reset() { + *x = GetPeersListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPeersListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPeersListResponse) ProtoMessage() {} + +func (x *GetPeersListResponse) ProtoReflect() protoreflect.Message { + mi := &file_peer_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPeersListResponse.ProtoReflect.Descriptor instead. 
+func (*GetPeersListResponse) Descriptor() ([]byte, []int) { + return file_peer_service_proto_rawDescGZIP(), []int{1} +} + +func (x *GetPeersListResponse) GetAddresses() []*NetAddress { + if x != nil { + return x.Addresses + } + return nil +} + +type NetAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + IP []byte `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` + Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` +} + +func (x *NetAddress) Reset() { + *x = NetAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_peer_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetAddress) ProtoMessage() {} + +func (x *NetAddress) ProtoReflect() protoreflect.Message { + mi := &file_peer_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetAddress.ProtoReflect.Descriptor instead. 
+func (*NetAddress) Descriptor() ([]byte, []int) { + return file_peer_service_proto_rawDescGZIP(), []int{2} +} + +func (x *NetAddress) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *NetAddress) GetIP() []byte { + if x != nil { + return x.IP + } + return nil +} + +func (x *NetAddress) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +var File_peer_service_proto protoreflect.FileDescriptor + +var file_peer_service_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, + 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x12, + 0x34, 0x0a, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x62, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x22, 0x41, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, + 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x4e, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x09, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x4e, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 
0x70, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x02, 0x49, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x32, 0x4c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x65, + 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, + 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_peer_service_proto_rawDescOnce sync.Once + file_peer_service_proto_rawDescData = file_peer_service_proto_rawDesc +) + +func file_peer_service_proto_rawDescGZIP() []byte { + file_peer_service_proto_rawDescOnce.Do(func() { + file_peer_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_peer_service_proto_rawDescData) + }) + return file_peer_service_proto_rawDescData +} + +var file_peer_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_peer_service_proto_goTypes = []interface{}{ + (*GetPeersListRequest)(nil), // 0: GetPeersListRequest + (*GetPeersListResponse)(nil), // 1: GetPeersListResponse + (*NetAddress)(nil), // 2: NetAddress +} +var file_peer_service_proto_depIdxs = []int32{ + 2, // 0: GetPeersListResponse.addresses:type_name -> NetAddress + 0, // 1: PeerService.GetPeersList:input_type -> GetPeersListRequest + 1, // 2: PeerService.GetPeersList:output_type -> GetPeersListResponse + 2, // [2:3] is the sub-list for method 
output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_peer_service_proto_init() } +func file_peer_service_proto_init() { + if File_peer_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_peer_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPeersListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_peer_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPeersListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_peer_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_peer_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_peer_service_proto_goTypes, + DependencyIndexes: file_peer_service_proto_depIdxs, + MessageInfos: file_peer_service_proto_msgTypes, + }.Build() + File_peer_service_proto = out.File + file_peer_service_proto_rawDesc = nil + file_peer_service_proto_goTypes = nil + file_peer_service_proto_depIdxs = nil +} diff --git a/infrastructure/network/dnsseed/pb/peer_service.proto b/infrastructure/network/dnsseed/pb/peer_service.proto new file mode 100644 index 0000000..87f3f65 --- /dev/null +++ b/infrastructure/network/dnsseed/pb/peer_service.proto @@ -0,0 
+1,22 @@ +syntax = "proto3"; + +option go_package = "github.com/spectre-project/spectred/pb"; + +service PeerService { + rpc GetPeersList(GetPeersListRequest) returns (GetPeersListResponse) {} +} + +message GetPeersListRequest { + bytes subnetworkID = 1; + bool includeAllSubnetworks = 2; +} + +message GetPeersListResponse { + repeated NetAddress addresses = 1; +} + +message NetAddress { + int64 timestamp = 1; + bytes IP = 2; + uint32 port = 3; +} diff --git a/infrastructure/network/dnsseed/pb/peer_service_grpc.pb.go b/infrastructure/network/dnsseed/pb/peer_service_grpc.pb.go new file mode 100644 index 0000000..0f46fa6 --- /dev/null +++ b/infrastructure/network/dnsseed/pb/peer_service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.3 +// source: peer_service.proto + +package pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + PeerService_GetPeersList_FullMethodName = "/PeerService/GetPeersList" +) + +// PeerServiceClient is the client API for PeerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type PeerServiceClient interface { + GetPeersList(ctx context.Context, in *GetPeersListRequest, opts ...grpc.CallOption) (*GetPeersListResponse, error) +} + +type peerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewPeerServiceClient(cc grpc.ClientConnInterface) PeerServiceClient { + return &peerServiceClient{cc} +} + +func (c *peerServiceClient) GetPeersList(ctx context.Context, in *GetPeersListRequest, opts ...grpc.CallOption) (*GetPeersListResponse, error) { + out := new(GetPeersListResponse) + err := c.cc.Invoke(ctx, PeerService_GetPeersList_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PeerServiceServer is the server API for PeerService service. +// All implementations must embed UnimplementedPeerServiceServer +// for forward compatibility +type PeerServiceServer interface { + GetPeersList(context.Context, *GetPeersListRequest) (*GetPeersListResponse, error) + mustEmbedUnimplementedPeerServiceServer() +} + +// UnimplementedPeerServiceServer must be embedded to have forward compatible implementations. +type UnimplementedPeerServiceServer struct { +} + +func (UnimplementedPeerServiceServer) GetPeersList(context.Context, *GetPeersListRequest) (*GetPeersListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPeersList not implemented") +} +func (UnimplementedPeerServiceServer) mustEmbedUnimplementedPeerServiceServer() {} + +// UnsafePeerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to PeerServiceServer will +// result in compilation errors. 
+type UnsafePeerServiceServer interface { + mustEmbedUnimplementedPeerServiceServer() +} + +func RegisterPeerServiceServer(s grpc.ServiceRegistrar, srv PeerServiceServer) { + s.RegisterService(&PeerService_ServiceDesc, srv) +} + +func _PeerService_GetPeersList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPeersListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeerServiceServer).GetPeersList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: PeerService_GetPeersList_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeerServiceServer).GetPeersList(ctx, req.(*GetPeersListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// PeerService_ServiceDesc is the grpc.ServiceDesc for PeerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var PeerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "PeerService", + HandlerType: (*PeerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPeersList", + Handler: _PeerService_GetPeersList_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "peer_service.proto", +} diff --git a/infrastructure/network/dnsseed/seed.go b/infrastructure/network/dnsseed/seed.go new file mode 100644 index 0000000..2affd32 --- /dev/null +++ b/infrastructure/network/dnsseed/seed.go @@ -0,0 +1,170 @@ +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package dnsseed + +import ( + "context" + "fmt" + "math/rand" + "net" + "strconv" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/app/appmessage" + pb2 "github.com/spectre-project/spectred/infrastructure/network/dnsseed/pb" + "google.golang.org/grpc" + + "github.com/spectre-project/spectred/util/mstime" + + "github.com/spectre-project/spectred/domain/dagconfig" +) + +const ( + // These constants are used by the DNS seed code to pick a random last + // seen time. + secondsIn3Days int32 = 24 * 60 * 60 * 3 + secondsIn4Days int32 = 24 * 60 * 60 * 4 + + // SubnetworkIDPrefixChar is the prefix of subnetworkID, when building a DNS seed request + SubnetworkIDPrefixChar byte = 'n' +) + +// OnSeed is the signature of the callback function which is invoked when DNS +// seeding is successful. +type OnSeed func(addrs []*appmessage.NetAddress) + +// LookupFunc is the signature of the DNS lookup function. +type LookupFunc func(string) ([]net.IP, error) + +// SeedFromDNS uses DNS seeding to populate the address manager with peers. 
+func SeedFromDNS(dagParams *dagconfig.Params, customSeed string, includeAllSubnetworks bool, + subnetworkID *externalapi.DomainSubnetworkID, lookupFn LookupFunc, seedFn OnSeed) { + + var dnsSeeds []string + if customSeed != "" { + dnsSeeds = []string{customSeed} + } else { + dnsSeeds = dagParams.DNSSeeds + } + + for _, dnsseed := range dnsSeeds { + host := dnsseed + + if !includeAllSubnetworks { + if subnetworkID != nil { + host = fmt.Sprintf("%c%s.%s", SubnetworkIDPrefixChar, subnetworkID, host) + } else { + host = fmt.Sprintf("%c.%s", SubnetworkIDPrefixChar, host) + } + } + + spawn("SeedFromDNS", func() { + randSource := rand.New(rand.NewSource(time.Now().UnixNano())) + + seedPeers, err := lookupFn(host) + if err != nil { + log.Infof("DNS discovery failed on seed %s: %s", host, err) + return + } + numPeers := len(seedPeers) + + log.Infof("%d addresses found from DNS seed %s", numPeers, host) + + if numPeers == 0 { + return + } + addresses := make([]*appmessage.NetAddress, len(seedPeers)) + // if this errors then we have *real* problems + intPort, _ := strconv.Atoi(dagParams.DefaultPort) + for i, peer := range seedPeers { + addresses[i] = appmessage.NewNetAddressTimestamp( + // seed with addresses from a time randomly selected + // between 3 and 7 days ago. 
+					mstime.Now().Add(-1*time.Second*time.Duration(secondsIn3Days+
+						randSource.Int31n(secondsIn4Days))),
+					peer, uint16(intPort))
+			}
+
+			seedFn(addresses)
+		})
+	}
+}
+
+// SeedFromGRPC send gRPC request to get list of peers for a given host
+func SeedFromGRPC(dagParams *dagconfig.Params, customSeed string, includeAllSubnetworks bool,
+	subnetworkID *externalapi.DomainSubnetworkID, seedFn OnSeed) {
+
+	var grpcSeeds []string
+	if customSeed != "" {
+		grpcSeeds = []string{customSeed}
+	} else {
+		grpcSeeds = dagParams.GRPCSeeds
+	}
+
+	for _, host := range grpcSeeds {
+		host := host // pin a per-iteration copy: the goroutine below must not share the loop variable (pre-Go 1.22 semantics)
+		spawn("SeedFromGRPC", func() {
+			randSource := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+			conn, err := grpc.Dial(host, grpc.WithInsecure())
+			if err != nil {
+				log.Warnf("Failed to connect to gRPC server: %s", host)
+				return // previously fell through and used the connection despite the error
+			}
+			defer conn.Close() // release the client connection when this goroutine finishes
+			client := pb2.NewPeerServiceClient(conn)
+
+			var subnetID []byte
+			if subnetworkID != nil {
+				subnetID = subnetworkID[:]
+			} else {
+				subnetID = nil
+			}
+			req := &pb2.GetPeersListRequest{
+				SubnetworkID:          subnetID,
+				IncludeAllSubnetworks: includeAllSubnetworks,
+			}
+			res, err := client.GetPeersList(context.Background(), req)
+			if err != nil {
+				log.Infof("gRPC request to get peers failed (host=%s): %s", host, err)
+				return
+			}
+
+			seedPeers := fromProtobufAddresses(res.Addresses)
+
+			numPeers := len(seedPeers)
+			log.Infof("%d addresses found from gRPC seed %s", numPeers, host)
+
+			if numPeers == 0 {
+				return
+			}
+			addresses := make([]*appmessage.NetAddress, len(seedPeers))
+			// if this errors then we have *real* problems
+			intPort, _ := strconv.Atoi(dagParams.DefaultPort)
+			for i, peer := range seedPeers {
+				addresses[i] = appmessage.NewNetAddressTimestamp(
+					// seed with addresses from a time randomly selected
+					// between 3 and 7 days ago.
+ mstime.Now().Add(-1*time.Second*time.Duration(secondsIn3Days+ + randSource.Int31n(secondsIn4Days))), + peer, uint16(intPort)) + } + + seedFn(addresses) + }) + } +} + +func fromProtobufAddresses(proto []*pb2.NetAddress) []net.IP { + var addresses []net.IP + + for _, pbAddr := range proto { + addresses = append(addresses, net.IP(pbAddr.IP)) + } + + return addresses +} diff --git a/infrastructure/network/netadapter/id/id.go b/infrastructure/network/netadapter/id/id.go new file mode 100644 index 0000000..8e273dd --- /dev/null +++ b/infrastructure/network/netadapter/id/id.go @@ -0,0 +1,69 @@ +package id + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "io" +) + +// IDLength of array used to store the ID. +const IDLength = 16 + +// ID identifies a network connection +type ID struct { + bytes [IDLength]byte +} + +// GenerateID generates a new ID +func GenerateID() (*ID, error) { + id := new(ID) + err := id.Deserialize(rand.Reader) + if err != nil { + return nil, err + } + return id, nil +} + +// IsEqual returns whether id equals to other. +func (id *ID) IsEqual(other *ID) bool { + return *id == *other +} + +func (id *ID) String() string { + return hex.EncodeToString(id.bytes[:]) +} + +// Deserialize decodes a block from r into the receiver. +func (id *ID) Deserialize(r io.Reader) error { + _, err := io.ReadFull(r, id.bytes[:]) + return err +} + +// Serialize serializes the receiver into the given writer. +func (id *ID) Serialize(w io.Writer) error { + _, err := w.Write(id.bytes[:]) + return err +} + +// SerializeToBytes serializes the receiver and returns a byte slice of the serialized id. +func (id *ID) SerializeToBytes() ([]byte, error) { + w := &bytes.Buffer{} + err := id.Serialize(w) + if err != nil { + return nil, err + } + + return w.Bytes(), nil +} + +// FromBytes returns an ID deserialized from the given byte slice. 
+func FromBytes(serializedID []byte) (*ID, error) { + r := bytes.NewReader(serializedID) + newID := new(ID) + err := newID.Deserialize(r) + if err != nil { + return nil, err + } + return newID, nil +} diff --git a/infrastructure/network/netadapter/log.go b/infrastructure/network/netadapter/log.go new file mode 100644 index 0000000..0245e63 --- /dev/null +++ b/infrastructure/network/netadapter/log.go @@ -0,0 +1,7 @@ +package netadapter + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("NTAR") diff --git a/infrastructure/network/netadapter/netadapter.go b/infrastructure/network/netadapter/netadapter.go new file mode 100644 index 0000000..2edd6a0 --- /dev/null +++ b/infrastructure/network/netadapter/netadapter.go @@ -0,0 +1,192 @@ +package netadapter + +import ( + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver" +) + +// RouterInitializer is a function that initializes a new +// router to be used with a new connection +type RouterInitializer func(*routerpkg.Router, *NetConnection) + +// NetAdapter is an abstraction layer over networking. +// This type expects a RouteInitializer function. This +// function weaves together the various "routes" (messages +// and message handlers) without exposing anything related +// to networking internals. 
+type NetAdapter struct { + cfg *config.Config + id *id.ID + p2pServer server.P2PServer + p2pRouterInitializer RouterInitializer + rpcServer server.Server + rpcRouterInitializer RouterInitializer + stop uint32 + + p2pConnections map[*NetConnection]struct{} + p2pConnectionsLock sync.RWMutex +} + +// NewNetAdapter creates and starts a new NetAdapter on the +// given listeningPort +func NewNetAdapter(cfg *config.Config) (*NetAdapter, error) { + netAdapterID, err := id.GenerateID() + if err != nil { + return nil, err + } + p2pServer, err := grpcserver.NewP2PServer(cfg.Listeners) + if err != nil { + return nil, err + } + rpcServer, err := grpcserver.NewRPCServer(cfg.RPCListeners, cfg.RPCMaxClients) + if err != nil { + return nil, err + } + adapter := NetAdapter{ + cfg: cfg, + id: netAdapterID, + p2pServer: p2pServer, + rpcServer: rpcServer, + + p2pConnections: make(map[*NetConnection]struct{}), + } + + adapter.p2pServer.SetOnConnectedHandler(adapter.onP2PConnectedHandler) + adapter.rpcServer.SetOnConnectedHandler(adapter.onRPCConnectedHandler) + + return &adapter, nil +} + +// Start begins the operation of the NetAdapter +func (na *NetAdapter) Start() error { + if na.p2pRouterInitializer == nil { + return errors.New("p2pRouterInitializer was not set") + } + if na.rpcRouterInitializer == nil { + return errors.New("rpcRouterInitializer was not set") + } + + err := na.p2pServer.Start() + if err != nil { + return err + } + err = na.rpcServer.Start() + if err != nil { + return err + } + + return nil +} + +// Stop safely closes the NetAdapter +func (na *NetAdapter) Stop() error { + if atomic.AddUint32(&na.stop, 1) != 1 { + return errors.New("net adapter stopped more than once") + } + err := na.p2pServer.Stop() + if err != nil { + return err + } + return na.rpcServer.Stop() +} + +// P2PConnect tells the NetAdapter's underlying p2p server to initiate a connection +// to the given address +func (na *NetAdapter) P2PConnect(address string) error { + _, err := 
na.p2pServer.Connect(address) + return err +} + +// P2PConnections returns a list of p2p connections currently connected and active +func (na *NetAdapter) P2PConnections() []*NetConnection { + na.p2pConnectionsLock.RLock() + defer na.p2pConnectionsLock.RUnlock() + + netConnections := make([]*NetConnection, 0, len(na.p2pConnections)) + + for netConnection := range na.p2pConnections { + netConnections = append(netConnections, netConnection) + } + + return netConnections +} + +// P2PConnectionCount returns the count of the connected p2p connections +func (na *NetAdapter) P2PConnectionCount() int { + na.p2pConnectionsLock.RLock() + defer na.p2pConnectionsLock.RUnlock() + + return len(na.p2pConnections) +} + +func (na *NetAdapter) onP2PConnectedHandler(connection server.Connection) error { + netConnection := newNetConnection(connection, na.p2pRouterInitializer, "on P2P connected") + + na.p2pConnectionsLock.Lock() + defer na.p2pConnectionsLock.Unlock() + + netConnection.setOnDisconnectedHandler(func() { + na.p2pConnectionsLock.Lock() + defer na.p2pConnectionsLock.Unlock() + + delete(na.p2pConnections, netConnection) + }) + + na.p2pConnections[netConnection] = struct{}{} + + netConnection.start() + + return nil +} + +func (na *NetAdapter) onRPCConnectedHandler(connection server.Connection) error { + netConnection := newNetConnection(connection, na.rpcRouterInitializer, "on RPC connected") + netConnection.setOnDisconnectedHandler(func() {}) + netConnection.start() + + return nil +} + +// SetP2PRouterInitializer sets the p2pRouterInitializer function +// for the net adapter +func (na *NetAdapter) SetP2PRouterInitializer(routerInitializer RouterInitializer) { + na.p2pRouterInitializer = routerInitializer +} + +// SetRPCRouterInitializer sets the rpcRouterInitializer function +// for the net adapter +func (na *NetAdapter) SetRPCRouterInitializer(routerInitializer RouterInitializer) { + na.rpcRouterInitializer = routerInitializer +} + +// ID returns this netAdapter's ID in the 
network +func (na *NetAdapter) ID() *id.ID { + return na.id +} + +// P2PBroadcast sends the given `message` to every peer corresponding +// to each NetConnection in the given netConnections +func (na *NetAdapter) P2PBroadcast(netConnections []*NetConnection, message appmessage.Message) error { + na.p2pConnectionsLock.RLock() + defer na.p2pConnectionsLock.RUnlock() + + for _, netConnection := range netConnections { + err := netConnection.router.OutgoingRoute().Enqueue(message) + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + log.Debugf("Cannot enqueue message to %s: router is closed", netConnection) + continue + } + return err + } + } + return nil +} diff --git a/infrastructure/network/netadapter/netadapter_test.go b/infrastructure/network/netadapter/netadapter_test.go new file mode 100644 index 0000000..4b007ed --- /dev/null +++ b/infrastructure/network/netadapter/netadapter_test.go @@ -0,0 +1,166 @@ +package netadapter + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/spectre-project/spectred/app/appmessage" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// routerInitializerForTest returns new RouterInitializer which simply sets +// new incoming route for router and stores this route in map for further usage in tests +func routerInitializerForTest(t *testing.T, routes *sync.Map, + routeName string, wg *sync.WaitGroup) func(*router.Router, *NetConnection) { + return func(router *router.Router, connection *NetConnection) { + route, err := router.AddIncomingRoute(routeName, []appmessage.MessageCommand{appmessage.CmdPing}) + if err != nil { + t.Fatalf("TestNetAdapter: AddIncomingRoute failed: %+v", err) + } + routes.Store(routeName, route) + wg.Done() + } +} + +func TestNetAdapter(t *testing.T) { + const ( + timeout = time.Second * 5 + nonce = uint64(1) + + host = "127.0.0.1" + portA = 3000 + portB = 3001 + portC = 3002 + ) + + 
addressA := fmt.Sprintf("%s:%d", host, portA) + addressB := fmt.Sprintf("%s:%d", host, portB) + addressC := fmt.Sprintf("%s:%d", host, portC) + + cfgA, cfgB, cfgC := config.DefaultConfig(), config.DefaultConfig(), config.DefaultConfig() + cfgA.Listeners = []string{addressA} + cfgB.Listeners = []string{addressB} + cfgC.Listeners = []string{addressC} + + routes := &sync.Map{} + wg := &sync.WaitGroup{} + wg.Add(2) + + adapterA, err := NewNetAdapter(cfgA) + if err != nil { + t.Fatalf("TestNetAdapter: NetAdapter instantiation failed: %+v", err) + } + + adapterA.SetP2PRouterInitializer(func(router *router.Router, connection *NetConnection) {}) + adapterA.SetRPCRouterInitializer(func(router *router.Router, connection *NetConnection) {}) + err = adapterA.Start() + if err != nil { + t.Fatalf("TestNetAdapter: Start() failed: %+v", err) + } + + adapterB, err := NewNetAdapter(cfgB) + if err != nil { + t.Fatalf("TestNetAdapter: NetAdapter instantiation failed: %+v", err) + } + + initializer := routerInitializerForTest(t, routes, "B", wg) + adapterB.SetP2PRouterInitializer(initializer) + adapterB.SetRPCRouterInitializer(func(router *router.Router, connection *NetConnection) {}) + err = adapterB.Start() + if err != nil { + t.Fatalf("TestNetAdapter: Start() failed: %+v", err) + } + + adapterC, err := NewNetAdapter(cfgC) + if err != nil { + t.Fatalf("TestNetAdapter: NetAdapter instantiation failed: %+v", err) + } + + initializer = routerInitializerForTest(t, routes, "C", wg) + adapterC.SetP2PRouterInitializer(initializer) + adapterC.SetRPCRouterInitializer(func(router *router.Router, connection *NetConnection) {}) + err = adapterC.Start() + if err != nil { + t.Fatalf("TestNetAdapter: Start() failed: %+v", err) + } + + err = adapterA.P2PConnect(addressB) + if err != nil { + t.Fatalf("TestNetAdapter: connection to %s failed: %+v", addressB, err) + } + + err = adapterA.P2PConnect(addressC) + if err != nil { + t.Fatalf("TestNetAdapter: connection to %s failed: %+v", addressC, err) + } 
+ + // Ensure adapter has two connections + if count := adapterA.P2PConnectionCount(); count != 2 { + t.Fatalf("TestNetAdapter: expected 2 connections, got - %d", count) + } + + // Ensure all connected peers have received broadcasted message + connections := adapterA.P2PConnections() + err = adapterA.P2PBroadcast(connections, appmessage.NewMsgPing(1)) + if err != nil { + t.Fatalf("TestNetAdapter: broadcast failed: %+v", err) + } + + // wait for routes to be added to map, then they can be used to receive broadcasted message + wg.Wait() + + r, ok := routes.Load("B") + if !ok { + t.Fatal("TestNetAdapter: route loading failed") + } + + msg, err := r.(*router.Route).DequeueWithTimeout(timeout) + if err != nil { + t.Fatalf("TestNetAdapter: dequeuing message failed: %+v", err) + } + + if command := msg.Command(); command != appmessage.CmdPing { + t.Fatalf("TestNetAdapter: expected '%s' message to be received but got '%s'", + appmessage.ProtocolMessageCommandToString[appmessage.CmdPing], + appmessage.ProtocolMessageCommandToString[command]) + } + + if number := msg.MessageNumber(); number != nonce { + t.Fatalf("TestNetAdapter: expected '%d' message number but got %d", nonce, number) + } + + r, ok = routes.Load("C") + if !ok { + t.Fatal("TestNetAdapter: route loading failed") + } + + msg, err = r.(*router.Route).DequeueWithTimeout(timeout) + if err != nil { + t.Fatalf("TestNetAdapter: dequeuing message failed: %+v", err) + } + + if command := msg.Command(); command != appmessage.CmdPing { + t.Fatalf("TestNetAdapter: expected '%s' message to be received but got '%s'", + appmessage.ProtocolMessageCommandToString[appmessage.CmdPing], + appmessage.ProtocolMessageCommandToString[command]) + } + + if number := msg.MessageNumber(); number != nonce { + t.Fatalf("TestNetAdapter: expected '%d' message number but got %d", nonce, number) + } + + err = adapterA.Stop() + if err != nil { + t.Fatalf("TestNetAdapter: stopping adapter failed: %+v", err) + } + + // Ensure adapter can't be 
stopped multiple times + err = adapterA.Stop() + if err == nil { + t.Fatalf("TestNetAdapter: error expected at attempt to stop adapter second time, but got nothing") + } +} diff --git a/infrastructure/network/netadapter/netconnection.go b/infrastructure/network/netadapter/netconnection.go new file mode 100644 index 0000000..057f4d9 --- /dev/null +++ b/infrastructure/network/netadapter/netconnection.go @@ -0,0 +1,98 @@ +package netadapter + +import ( + "fmt" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server" +) + +// NetConnection is a wrapper to a server connection for use by services external to NetAdapter +type NetConnection struct { + connection server.Connection + id *id.ID + router *routerpkg.Router + onDisconnectedHandler server.OnDisconnectedHandler + isRouterClosed uint32 +} + +func newNetConnection(connection server.Connection, routerInitializer RouterInitializer, name string) *NetConnection { + router := routerpkg.NewRouter(name) + + netConnection := &NetConnection{ + connection: connection, + router: router, + } + + netConnection.connection.SetOnDisconnectedHandler(func() { + log.Infof("Disconnected from %s", netConnection) + // If the disconnection came because of a network error and not because of the application layer, we + // need to close the router as well. 
+ if atomic.AddUint32(&netConnection.isRouterClosed, 1) == 1 { + netConnection.router.Close() + } + netConnection.onDisconnectedHandler() + }) + + routerInitializer(router, netConnection) + + return netConnection +} + +func (c *NetConnection) start() { + if c.onDisconnectedHandler == nil { + panic(errors.New("onDisconnectedHandler is nil")) + } + + c.connection.Start(c.router) +} + +func (c *NetConnection) String() string { + return fmt.Sprintf("<%s: %s>", c.id, c.connection) +} + +// ID returns the ID associated with this connection +func (c *NetConnection) ID() *id.ID { + return c.id +} + +// SetID sets the ID associated with this connection +func (c *NetConnection) SetID(peerID *id.ID) { + c.id = peerID +} + +// Address returns the address associated with this connection +func (c *NetConnection) Address() string { + return c.connection.Address().String() +} + +// IsOutbound returns whether the connection is outbound +func (c *NetConnection) IsOutbound() bool { + return c.connection.IsOutbound() +} + +// NetAddress returns the NetAddress associated with this connection +func (c *NetConnection) NetAddress() *appmessage.NetAddress { + return appmessage.NewNetAddress(c.connection.Address()) +} + +func (c *NetConnection) setOnDisconnectedHandler(onDisconnectedHandler server.OnDisconnectedHandler) { + c.onDisconnectedHandler = onDisconnectedHandler +} + +// Disconnect disconnects the given connection +func (c *NetConnection) Disconnect() { + if atomic.AddUint32(&c.isRouterClosed, 1) == 1 { + c.router.Close() + } +} + +// SetOnInvalidMessageHandler sets the invalid message handler for this connection +func (c *NetConnection) SetOnInvalidMessageHandler(onInvalidMessageHandler server.OnInvalidMessageHandler) { + c.connection.SetOnInvalidMessageHandler(onInvalidMessageHandler) +} diff --git a/infrastructure/network/netadapter/router/log.go b/infrastructure/network/netadapter/router/log.go new file mode 100644 index 0000000..174c8bd --- /dev/null +++ 
b/infrastructure/network/netadapter/router/log.go @@ -0,0 +1,7 @@ +package router + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("ROUT") diff --git a/infrastructure/network/netadapter/router/route.go b/infrastructure/network/netadapter/router/route.go new file mode 100644 index 0000000..b1d6184 --- /dev/null +++ b/infrastructure/network/netadapter/router/route.go @@ -0,0 +1,116 @@ +package router + +import ( + "sync" + "time" + + "github.com/spectre-project/spectred/app/protocol/protocolerrors" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +const ( + // DefaultMaxMessages is the default capacity for a route with a capacity defined + DefaultMaxMessages = 200 +) + +var ( + // ErrTimeout signifies that one of the router functions had a timeout. + ErrTimeout = protocolerrors.New(false, "timeout expired") + + // ErrRouteClosed indicates that a route was closed while reading/writing. + ErrRouteClosed = errors.New("route is closed") + + // ErrRouteCapacityReached indicates that route's capacity has been reached + ErrRouteCapacityReached = protocolerrors.New(false, "route capacity has been reached") +) + +// Route represents an incoming or outgoing Router route +type Route struct { + name string + channel chan appmessage.Message + // closed and closeLock are used to protect us from writing to a closed channel + // reads use the channel's built-in mechanism to check if the channel is closed + closed bool + closeLock sync.Mutex + capacity int +} + +// NewRoute create a new Route +func NewRoute(name string) *Route { + return newRouteWithCapacity(name, DefaultMaxMessages) +} + +func newRouteWithCapacity(name string, capacity int) *Route { + return &Route{ + name: name, + channel: make(chan appmessage.Message, capacity), + closed: false, + capacity: capacity, + } +} + +// Enqueue enqueues a message to the Route +func (r *Route) Enqueue(message appmessage.Message) error 
{ + r.closeLock.Lock() + defer r.closeLock.Unlock() + + if r.closed { + return errors.WithStack(ErrRouteClosed) + } + if len(r.channel) == r.capacity { + return errors.Wrapf(ErrRouteCapacityReached, "route '%s' reached capacity of %d", r.name, r.capacity) + } + r.channel <- message + return nil +} + +// MaybeEnqueue enqueues a message to the route, but doesn't throw an error +// if it's closed or its capacity has been reached. +func (r *Route) MaybeEnqueue(message appmessage.Message) error { + err := r.Enqueue(message) + if errors.Is(err, ErrRouteClosed) { + log.Infof("Couldn't send message to closed route '%s'", r.name) + return nil + } + + if errors.Is(err, ErrRouteCapacityReached) { + log.Infof("Capacity (%d) of route '%s' has been reached. Couldn't send message", r.capacity, r.name) + return nil + } + + return err +} + +// Dequeue dequeues a message from the Route +func (r *Route) Dequeue() (appmessage.Message, error) { + message, isOpen := <-r.channel + if !isOpen { + return nil, errors.Wrapf(ErrRouteClosed, "route '%s' is closed", r.name) + } + return message, nil +} + +// DequeueWithTimeout attempts to dequeue a message from the Route +// and returns an error if the given timeout expires first. 
+func (r *Route) DequeueWithTimeout(timeout time.Duration) (appmessage.Message, error) { + select { + case <-time.After(timeout): + return nil, errors.Wrapf(ErrTimeout, "route '%s' got timeout after %s", r.name, timeout) + case message, isOpen := <-r.channel: + if !isOpen { + return nil, errors.WithStack(ErrRouteClosed) + } + return message, nil + } +} + +// Close closes this route +func (r *Route) Close() { + r.closeLock.Lock() + defer r.closeLock.Unlock() + + r.closed = true + close(r.channel) +} diff --git a/infrastructure/network/netadapter/router/router.go b/infrastructure/network/netadapter/router/router.go new file mode 100644 index 0000000..1f094a3 --- /dev/null +++ b/infrastructure/network/netadapter/router/router.go @@ -0,0 +1,138 @@ +package router + +import ( + "fmt" + "sync" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +const outgoingRouteMaxMessages = appmessage.MaxInvPerMsg + DefaultMaxMessages + +// OnRouteCapacityReachedHandler is a function that is to +// be called when one of the routes reaches capacity. 
+type OnRouteCapacityReachedHandler func() + +// Router routes messages by type to their respective +// input channels +type Router struct { + incomingRoutes map[appmessage.MessageCommand]*Route + incomingRoutesLock sync.RWMutex + + outgoingRoute *Route +} + +// NewRouter creates a new empty router +func NewRouter(name string) *Router { + router := Router{ + incomingRoutes: make(map[appmessage.MessageCommand]*Route), + outgoingRoute: newRouteWithCapacity(fmt.Sprintf("%s - outgoing", name), outgoingRouteMaxMessages), + } + return &router +} + +// AddIncomingRoute registers the messages of types `messageTypes` to +// be routed to the given `route` +func (r *Router) AddIncomingRoute(name string, messageTypes []appmessage.MessageCommand) (*Route, error) { + route := NewRoute(fmt.Sprintf("%s - incoming", name)) + err := r.initializeIncomingRoute(route, messageTypes) + if err != nil { + return nil, err + } + return route, nil +} + +// AddIncomingRouteWithCapacity registers the messages of types `messageTypes` to +// be routed to the given `route` with a capacity of `capacity` +func (r *Router) AddIncomingRouteWithCapacity(name string, capacity int, messageTypes []appmessage.MessageCommand) (*Route, error) { + route := newRouteWithCapacity(fmt.Sprintf("%s - incoming", name), capacity) + err := r.initializeIncomingRoute(route, messageTypes) + if err != nil { + return nil, err + } + return route, nil +} + +func (r *Router) initializeIncomingRoute(route *Route, messageTypes []appmessage.MessageCommand) error { + for _, messageType := range messageTypes { + if r.doesIncomingRouteExist(messageType) { + return errors.Errorf("a route for '%s' already exists", messageType) + } + r.setIncomingRoute(messageType, route) + } + return nil +} + +// RemoveRoute unregisters the messages of types `messageTypes` from +// the router +func (r *Router) RemoveRoute(messageTypes []appmessage.MessageCommand) error { + for _, messageType := range messageTypes { + if 
!r.doesIncomingRouteExist(messageType) { + return errors.Errorf("a route for '%s' does not exist", messageType) + } + r.deleteIncomingRoute(messageType) + } + return nil +} + +// EnqueueIncomingMessage enqueues the given message to the +// appropriate route +func (r *Router) EnqueueIncomingMessage(message appmessage.Message) error { + route, ok := r.incomingRoute(message.Command()) + if !ok { + return errors.Errorf("a route for '%s' does not exist", message.Command()) + } + return route.Enqueue(message) +} + +// OutgoingRoute returns the outgoing route +func (r *Router) OutgoingRoute() *Route { + return r.outgoingRoute +} + +// Close shuts down the router by closing all registered +// incoming routes and the outgoing route +func (r *Router) Close() { + r.incomingRoutesLock.Lock() + defer r.incomingRoutesLock.Unlock() + + incomingRoutes := make(map[*Route]struct{}) + for _, route := range r.incomingRoutes { + incomingRoutes[route] = struct{}{} + } + for route := range incomingRoutes { + route.Close() + } + r.outgoingRoute.Close() +} + +func (r *Router) incomingRoute(messageType appmessage.MessageCommand) (*Route, bool) { + r.incomingRoutesLock.RLock() + defer r.incomingRoutesLock.RUnlock() + + route, ok := r.incomingRoutes[messageType] + return route, ok +} + +func (r *Router) doesIncomingRouteExist(messageType appmessage.MessageCommand) bool { + r.incomingRoutesLock.RLock() + defer r.incomingRoutesLock.RUnlock() + + _, ok := r.incomingRoutes[messageType] + return ok +} + +func (r *Router) setIncomingRoute(messageType appmessage.MessageCommand, route *Route) { + r.incomingRoutesLock.Lock() + defer r.incomingRoutesLock.Unlock() + + r.incomingRoutes[messageType] = route +} + +func (r *Router) deleteIncomingRoute(messageType appmessage.MessageCommand) { + r.incomingRoutesLock.Lock() + defer r.incomingRoutesLock.Unlock() + + delete(r.incomingRoutes, messageType) +} diff --git a/infrastructure/network/netadapter/server/grpcserver/connection_loops.go 
b/infrastructure/network/netadapter/server/grpcserver/connection_loops.go new file mode 100644 index 0000000..dd08cc1 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/connection_loops.go @@ -0,0 +1,127 @@ +package grpcserver + +import ( + "io" + "os" + "strconv" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/logger" + + "github.com/pkg/errors" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" +) + +func (c *gRPCConnection) connectionLoops() error { + errChan := make(chan error, 1) // buffered channel because one of the loops might try write after disconnect + + spawn("gRPCConnection.receiveLoop", func() { errChan <- c.receiveLoop() }) + spawn("gRPCConnection.sendLoop", func() { errChan <- c.sendLoop() }) + + err := <-errChan + + c.Disconnect() + + return err +} + +var blockDelayOnce sync.Once +var blockDelay = 0 + +func (c *gRPCConnection) sendLoop() error { + outgoingRoute := c.router.OutgoingRoute() + for c.IsConnected() { + message, err := outgoingRoute.Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + return nil + } + return err + } + + blockDelayOnce.Do(func() { + experimentalDelayEnv := os.Getenv("SPECTRE_EXPERIMENTAL_DELAY") + if experimentalDelayEnv != "" { + blockDelay, err = strconv.Atoi(experimentalDelayEnv) + if err != nil { + panic(err) + } + } + }) + + if blockDelay != 0 && message.Command() == appmessage.CmdBlock { + time.Sleep(time.Duration(blockDelay) * time.Second) + } + + log.Debugf("outgoing '%s' message to %s", message.Command(), c) + log.Tracef("outgoing '%s' message to %s: %s", message.Command(), c, logger.NewLogClosure(func() string { + return spew.Sdump(message) + })) + + messageProto, err := protowire.FromAppMessage(message) + if 
err != nil { + return err + } + + err = c.send(messageProto) + if err != nil { + return err + } + } + return nil +} + +func (c *gRPCConnection) receiveLoop() error { + messageNumber := uint64(0) + for c.IsConnected() { + protoMessage, err := c.receive() + if err != nil { + if err == io.EOF { + err = nil + } + return err + } + message, err := protoMessage.ToAppMessage() + if err != nil { + if c.onInvalidMessageHandler != nil { + c.onInvalidMessageHandler(err) + } + return err + } + + messageNumber++ + message.SetMessageNumber(messageNumber) + message.SetReceivedAt(time.Now()) + + log.Debugf("incoming '%s' message from %s (message number %d)", message.Command(), c, + message.MessageNumber()) + + log.Tracef("incoming '%s' message from %s (message number %d): %s", message.Command(), + c, message.MessageNumber(), logger.NewLogClosure(func() string { + return spew.Sdump(message) + })) + + err = c.router.EnqueueIncomingMessage(message) + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + return nil + } + + // ErrRouteCapacityReached isn't an invalid message error, so + // we return it in order to log it later on. 
+ if errors.Is(err, routerpkg.ErrRouteCapacityReached) { + return err + } + if c.onInvalidMessageHandler != nil { + c.onInvalidMessageHandler(err) + } + return err + } + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/grpc_connection.go b/infrastructure/network/netadapter/server/grpcserver/grpc_connection.go new file mode 100644 index 0000000..c98d430 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/grpc_connection.go @@ -0,0 +1,159 @@ +package grpcserver + +import ( + "net" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server" + "google.golang.org/grpc" +) + +type gRPCConnection struct { + server *gRPCServer + address *net.TCPAddr + stream grpcStream + router *router.Router + lowLevelClientConnection *grpc.ClientConn + + // streamLock protects concurrent access to stream. + // Note that it's an RWMutex. Despite what the name + // implies, we use it to RLock() send() and receive() because + // they can work perfectly fine in parallel, and Lock() + // closeSend() because it must run alone. 
+ streamLock sync.RWMutex + + stopChan chan struct{} + onDisconnectedHandler server.OnDisconnectedHandler + onInvalidMessageHandler server.OnInvalidMessageHandler + + isConnected uint32 +} + +type grpcStream interface { + Send(*protowire.SpectredMessage) error + Recv() (*protowire.SpectredMessage, error) +} + +func newConnection(server *gRPCServer, address *net.TCPAddr, stream grpcStream, + lowLevelClientConnection *grpc.ClientConn) *gRPCConnection { + connection := &gRPCConnection{ + server: server, + address: address, + stream: stream, + stopChan: make(chan struct{}), + isConnected: 1, + lowLevelClientConnection: lowLevelClientConnection, + } + + return connection +} + +func (c *gRPCConnection) Start(router *router.Router) { + if c.onDisconnectedHandler == nil { + panic(errors.New("onDisconnectedHandler is nil")) + } + + c.router = router + + spawn("gRPCConnection.Start-connectionLoops", func() { + err := c.connectionLoops() + if err != nil { + status, isStatus := status.FromError(err) + if isStatus { + switch status.Code() { + case codes.Canceled: + log.Debugf("connectionLoop canceled connection for %s: %s", c.address, err) + default: + log.Errorf("status error from connectionLoops for %s: %s", c.address, err) + } + } else { + log.Errorf("unknown error from connectionLoops for %s: %s", c.address, err) + } + } + }) +} + +func (c *gRPCConnection) String() string { + return c.Address().String() +} + +func (c *gRPCConnection) IsConnected() bool { + return atomic.LoadUint32(&c.isConnected) != 0 +} + +func (c *gRPCConnection) SetOnDisconnectedHandler(onDisconnectedHandler server.OnDisconnectedHandler) { + c.onDisconnectedHandler = onDisconnectedHandler +} + +func (c *gRPCConnection) SetOnInvalidMessageHandler(onInvalidMessageHandler server.OnInvalidMessageHandler) { + c.onInvalidMessageHandler = onInvalidMessageHandler +} + +func (c *gRPCConnection) IsOutbound() bool { + return c.lowLevelClientConnection != nil +} + +// Disconnect disconnects the connection +// 
Calling this function a second time doesn't do anything +// +// This is part of the Connection interface +func (c *gRPCConnection) Disconnect() { + if !c.IsConnected() { + return + } + atomic.StoreUint32(&c.isConnected, 0) + + close(c.stopChan) + + if c.IsOutbound() { + c.closeSend() + log.Debugf("Disconnected from %s", c) + } + + log.Debugf("Disconnecting from %s", c) + if c.onDisconnectedHandler != nil { + c.onDisconnectedHandler() + } +} + +func (c *gRPCConnection) Address() *net.TCPAddr { + return c.address +} + +func (c *gRPCConnection) receive() (*protowire.SpectredMessage, error) { + // We use RLock here and in send() because they can work + // in parallel. closeSend(), however, must not have either + // receive() nor send() running while it's running. + c.streamLock.RLock() + defer c.streamLock.RUnlock() + + return c.stream.Recv() +} + +func (c *gRPCConnection) send(message *protowire.SpectredMessage) error { + // We use RLock here and in receive() because they can work + // in parallel. closeSend(), however, must not have either + // receive() nor send() running while it's running. 
+ c.streamLock.RLock() + defer c.streamLock.RUnlock() + + return c.stream.Send(message) +} + +func (c *gRPCConnection) closeSend() { + c.streamLock.Lock() + defer c.streamLock.Unlock() + + clientStream := c.stream.(grpc.ClientStream) + + // ignore error because we don't really know what's the status of the connection + _ = clientStream.CloseSend() + _ = c.lowLevelClientConnection.Close() +} diff --git a/infrastructure/network/netadapter/server/grpcserver/grpc_server.go b/infrastructure/network/netadapter/server/grpcserver/grpc_server.go new file mode 100644 index 0000000..f28d8e6 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/grpc_server.go @@ -0,0 +1,144 @@ +package grpcserver + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server" + "github.com/spectre-project/spectred/util/panics" + "google.golang.org/grpc" + "google.golang.org/grpc/peer" +) + +type gRPCServer struct { + onConnectedHandler server.OnConnectedHandler + listeningAddresses []string + server *grpc.Server + name string + + maxInboundConnections int + inboundConnectionCount int + inboundConnectionCountLock *sync.Mutex +} + +// newGRPCServer creates a gRPC server +func newGRPCServer(listeningAddresses []string, maxMessageSize int, maxInboundConnections int, name string) *gRPCServer { + log.Debugf("Created new %s GRPC server with maxMessageSize %d and maxInboundConnections %d", name, maxMessageSize, maxInboundConnections) + return &gRPCServer{ + server: grpc.NewServer(grpc.MaxRecvMsgSize(maxMessageSize), grpc.MaxSendMsgSize(maxMessageSize)), + listeningAddresses: listeningAddresses, + name: name, + maxInboundConnections: maxInboundConnections, + inboundConnectionCount: 0, + inboundConnectionCountLock: &sync.Mutex{}, + } +} + +func (s *gRPCServer) Start() error { + if s.onConnectedHandler == nil { + return errors.New("onConnectedHandler is nil") + } + + for _, 
listenAddress := range s.listeningAddresses { + err := s.listenOn(listenAddress) + if err != nil { + return err + } + } + + return nil +} + +func (s *gRPCServer) listenOn(listenAddr string) error { + listener, err := net.Listen("tcp", listenAddr) + if err != nil { + return errors.Wrapf(err, "%s error listening on %s", s.name, listenAddr) + } + + spawn(fmt.Sprintf("%s.gRPCServer.listenOn-Serve", s.name), func() { + err := s.server.Serve(listener) + if err != nil { + panics.Exit(log, fmt.Sprintf("error serving %s on %s: %+v", s.name, listenAddr, err)) + } + }) + + log.Infof("%s Server listening on %s", s.name, listener.Addr()) + return nil +} + +func (s *gRPCServer) Stop() error { + const stopTimeout = 2 * time.Second + + stopChan := make(chan interface{}) + spawn("gRPCServer.Stop", func() { + s.server.GracefulStop() + close(stopChan) + }) + + select { + case <-stopChan: + case <-time.After(stopTimeout): + log.Warnf("Could not gracefully stop %s: timed out after %s", s.name, stopTimeout) + s.server.Stop() + } + return nil +} + +// SetOnConnectedHandler sets the peer connected handler +// function for the server +func (s *gRPCServer) SetOnConnectedHandler(onConnectedHandler server.OnConnectedHandler) { + s.onConnectedHandler = onConnectedHandler +} + +func (s *gRPCServer) handleInboundConnection(ctx context.Context, stream grpcStream) error { + connectionCount, err := s.incrementInboundConnectionCountAndLimitIfRequired() + if err != nil { + return err + } + defer s.decrementInboundConnectionCount() + + peerInfo, ok := peer.FromContext(ctx) + if !ok { + return errors.Errorf("Error getting stream peer info from context") + } + tcpAddress, ok := peerInfo.Addr.(*net.TCPAddr) + if !ok { + return errors.Errorf("non-tcp connections are not supported") + } + + connection := newConnection(s, tcpAddress, stream, nil) + + err = s.onConnectedHandler(connection) + if err != nil { + return err + } + + log.Infof("%s Incoming connection from %s #%d", s.name, peerInfo.Addr, 
connectionCount) + + <-connection.stopChan + return nil +} + +func (s *gRPCServer) incrementInboundConnectionCountAndLimitIfRequired() (int, error) { + s.inboundConnectionCountLock.Lock() + defer s.inboundConnectionCountLock.Unlock() + + if s.maxInboundConnections > 0 && s.inboundConnectionCount == s.maxInboundConnections { + log.Warnf("Limit of %d %s inbound connections has been exceeded", s.maxInboundConnections, s.name) + return s.inboundConnectionCount, errors.Errorf("limit of %d %s inbound connections has been exceeded", s.maxInboundConnections, s.name) + } + + s.inboundConnectionCount++ + return s.inboundConnectionCount, nil +} + +func (s *gRPCServer) decrementInboundConnectionCount() { + s.inboundConnectionCountLock.Lock() + defer s.inboundConnectionCountLock.Unlock() + + s.inboundConnectionCount-- +} diff --git a/infrastructure/network/netadapter/server/grpcserver/log.go b/infrastructure/network/netadapter/server/grpcserver/log.go new file mode 100644 index 0000000..193deb7 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/log.go @@ -0,0 +1,13 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package grpcserver + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("TXMP") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/infrastructure/network/netadapter/server/grpcserver/p2pserver.go b/infrastructure/network/netadapter/server/grpcserver/p2pserver.go new file mode 100644 index 0000000..e7d358c --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/p2pserver.go @@ -0,0 +1,84 @@ +package grpcserver + +import ( + "context" + "net" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" + "github.com/spectre-project/spectred/util/panics" + "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/peer" +) + +type p2pServer struct { + protowire.UnimplementedP2PServer + gRPCServer +} + +const p2pMaxMessageSize = 1024 * 1024 * 1024 // 1GB + +// p2pMaxInboundConnections is the max amount of inbound connections for the P2P server. +// Note that inbound connections are not limited by the gRPC server. (A value of 0 means +// unlimited inbound connections.) The P2P limiting logic is more applicative, and as such +// is handled in the ConnectionManager instead. 
+const p2pMaxInboundConnections = 0 + +// NewP2PServer creates a new P2PServer +func NewP2PServer(listeningAddresses []string) (server.P2PServer, error) { + gRPCServer := newGRPCServer(listeningAddresses, p2pMaxMessageSize, p2pMaxInboundConnections, "P2P") + p2pServer := &p2pServer{gRPCServer: *gRPCServer} + protowire.RegisterP2PServer(gRPCServer.server, p2pServer) + return p2pServer, nil +} + +func (p *p2pServer) MessageStream(stream protowire.P2P_MessageStreamServer) error { + defer panics.HandlePanic(log, "p2pServer.MessageStream", nil) + + return p.handleInboundConnection(stream.Context(), stream) +} + +// Connect connects to the given address +// This is part of the P2PServer interface +func (p *p2pServer) Connect(address string) (server.Connection, error) { + log.Debugf("%s Dialing to %s", p.name, address) + + const dialTimeout = 1 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), dialTimeout) + defer cancel() + + gRPCClientConnection, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + return nil, errors.Wrapf(err, "%s error connecting to %s", p.name, address) + } + + client := protowire.NewP2PClient(gRPCClientConnection) + stream, err := client.MessageStream(context.Background(), grpc.UseCompressor(gzip.Name), + grpc.MaxCallRecvMsgSize(p2pMaxMessageSize), grpc.MaxCallSendMsgSize(p2pMaxMessageSize)) + if err != nil { + return nil, errors.Wrapf(err, "%s error getting client stream for %s", p.name, address) + } + + peerInfo, ok := peer.FromContext(stream.Context()) + if !ok { + return nil, errors.Errorf("%s error getting stream peer info from context for %s", p.name, address) + } + tcpAddress, ok := peerInfo.Addr.(*net.TCPAddr) + if !ok { + return nil, errors.Errorf("non-tcp addresses are not supported") + } + + connection := newConnection(&p.gRPCServer, tcpAddress, stream, gRPCClientConnection) + + err = p.onConnectedHandler(connection) + if err != nil { + return nil, err + } + + 
log.Infof("%s Connected to %s", p.name, address) + + return connection, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/README.md b/infrastructure/network/netadapter/server/grpcserver/protowire/README.md new file mode 100644 index 0000000..7962617 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/README.md @@ -0,0 +1,14 @@ +# protowire + +1. Download and place in your PATH: + https://github.com/protocolbuffers/protobuf/releases/download/v3.12.3/protoc-3.12.3-linux-x86_64.zip +2. `go install github.com/golang/protobuf/protoc-gen-go` +3. `go install google.golang.org/grpc/cmd/protoc-gen-go-grpc` +4. In the protowire directory: `go generate .` + +# Documentation + +To generate `rpc.md`: + +1. `go install -u github.com/spectre-project/protoc-gen-doc/cmd/protoc-gen-doc` +2. In the protowire directory: `protoc --doc_out=. --doc_opt=markdown,rpc.md rpc.proto` diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/common.go b/infrastructure/network/netadapter/server/grpcserver/protowire/common.go new file mode 100644 index 0000000..c5cdb0a --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/common.go @@ -0,0 +1,162 @@ +package protowire + +import ( + "math" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/util/mstime" +) + +var errorNil = errors.New("a required field is nil") + +func (x *Hash) toDomain() (*externalapi.DomainHash, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "Hash is nil") + } + return externalapi.NewDomainHashFromByteSlice(x.Bytes) +} + +func protoHashesToDomain(protoHashes []*Hash) ([]*externalapi.DomainHash, error) { + 
domainHashes := make([]*externalapi.DomainHash, len(protoHashes)) + for i, protoHash := range protoHashes { + var err error + domainHashes[i], err = protoHash.toDomain() + if err != nil { + return nil, err + } + } + return domainHashes, nil +} + +func domainHashToProto(hash *externalapi.DomainHash) *Hash { + return &Hash{ + Bytes: hash.ByteSlice(), + } +} + +func domainHashesToProto(hashes []*externalapi.DomainHash) []*Hash { + protoHashes := make([]*Hash, len(hashes)) + for i, hash := range hashes { + protoHashes[i] = domainHashToProto(hash) + } + return protoHashes +} + +func (x *TransactionId) toDomain() (*externalapi.DomainTransactionID, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "TransactionId is nil") + } + return transactionid.FromBytes(x.Bytes) +} + +func protoTransactionIDsToDomain(protoIDs []*TransactionId) ([]*externalapi.DomainTransactionID, error) { + txIDs := make([]*externalapi.DomainTransactionID, len(protoIDs)) + for i, protoID := range protoIDs { + var err error + txIDs[i], err = protoID.toDomain() + if err != nil { + return nil, err + } + } + return txIDs, nil +} + +func domainTransactionIDToProto(id *externalapi.DomainTransactionID) *TransactionId { + return &TransactionId{ + Bytes: id.ByteSlice(), + } +} + +func wireTransactionIDsToProto(ids []*externalapi.DomainTransactionID) []*TransactionId { + protoIDs := make([]*TransactionId, len(ids)) + for i, hash := range ids { + protoIDs[i] = domainTransactionIDToProto(hash) + } + return protoIDs +} + +func (x *SubnetworkId) toDomain() (*externalapi.DomainSubnetworkID, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "SubnetworkId is nil") + } + return subnetworks.FromBytes(x.Bytes) +} + +func domainSubnetworkIDToProto(id *externalapi.DomainSubnetworkID) *SubnetworkId { + if id == nil { + return nil + } + return &SubnetworkId{ + Bytes: id[:], + } +} + +func (x *NetAddress) toAppMessage() (*appmessage.NetAddress, error) { + if x == nil { + return nil, errors.Wrap(errorNil, 
"NetAddress is nil") + } + if x.Port > math.MaxUint16 { + return nil, errors.Errorf("port number is larger than %d", math.MaxUint16) + } + return &appmessage.NetAddress{ + Timestamp: mstime.UnixMilliseconds(x.Timestamp), + IP: x.Ip, + Port: uint16(x.Port), + }, nil +} + +func appMessageNetAddressToProto(address *appmessage.NetAddress) *NetAddress { + return &NetAddress{ + Timestamp: address.Timestamp.UnixMilliseconds(), + Ip: address.IP, + Port: uint32(address.Port), + } +} + +func (x *Outpoint) toAppMessage() (*appmessage.Outpoint, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "Outpoint is nil") + } + transactionID, err := x.TransactionId.toDomain() + if err != nil { + return nil, err + } + return &appmessage.Outpoint{ + TxID: *transactionID, + Index: x.Index, + }, nil +} + +func (x *UtxoEntry) toAppMessage() (*appmessage.UTXOEntry, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "UtxoEntry is nil") + } + scriptPublicKey, err := x.ScriptPublicKey.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.UTXOEntry{ + Amount: x.Amount, + ScriptPublicKey: scriptPublicKey, + BlockDAAScore: x.BlockDaaScore, + IsCoinbase: x.IsCoinbase, + }, nil +} + +func (x *ScriptPublicKey) toAppMessage() (*externalapi.ScriptPublicKey, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "ScriptPublicKey is nil") + } + if x.Version > math.MaxUint16 { + return nil, errors.Errorf("ScriptPublicKey version is bigger then uint16.") + } + return &externalapi.ScriptPublicKey{ + Script: x.Script, + Version: uint16(x.Version), + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/generate.go b/infrastructure/network/netadapter/server/grpcserver/protowire/generate.go new file mode 100644 index 0000000..03b5ba7 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/generate.go @@ -0,0 +1,3 @@ +//go:generate protoc --go_out=. --go-grpc_out=. 
--go_opt=paths=source_relative --go-grpc_opt=paths=source_relative p2p.proto rpc.proto messages.proto + +package protowire diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/messages.pb.go b/infrastructure/network/netadapter/server/grpcserver/protowire/messages.pb.go new file mode 100644 index 0000000..ee20f0c --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/messages.pb.go @@ -0,0 +1,3257 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: messages.proto + +package protowire + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SpectredMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // + // *SpectredMessage_Addresses + // *SpectredMessage_Block + // *SpectredMessage_Transaction + // *SpectredMessage_BlockLocator + // *SpectredMessage_RequestAddresses + // *SpectredMessage_RequestRelayBlocks + // *SpectredMessage_RequestTransactions + // *SpectredMessage_IbdBlock + // *SpectredMessage_InvRelayBlock + // *SpectredMessage_InvTransactions + // *SpectredMessage_Ping + // *SpectredMessage_Pong + // *SpectredMessage_Verack + // *SpectredMessage_Version + // *SpectredMessage_TransactionNotFound + // *SpectredMessage_Reject + // *SpectredMessage_PruningPointUtxoSetChunk + // *SpectredMessage_RequestIBDBlocks + // *SpectredMessage_UnexpectedPruningPoint + // *SpectredMessage_IbdBlockLocator + // *SpectredMessage_IbdBlockLocatorHighestHash + // 
*SpectredMessage_RequestNextPruningPointUtxoSetChunk + // *SpectredMessage_DonePruningPointUtxoSetChunks + // *SpectredMessage_IbdBlockLocatorHighestHashNotFound + // *SpectredMessage_BlockWithTrustedData + // *SpectredMessage_DoneBlocksWithTrustedData + // *SpectredMessage_RequestPruningPointAndItsAnticone + // *SpectredMessage_BlockHeaders + // *SpectredMessage_RequestNextHeaders + // *SpectredMessage_DoneHeaders + // *SpectredMessage_RequestPruningPointUTXOSet + // *SpectredMessage_RequestHeaders + // *SpectredMessage_RequestBlockLocator + // *SpectredMessage_PruningPoints + // *SpectredMessage_RequestPruningPointProof + // *SpectredMessage_PruningPointProof + // *SpectredMessage_Ready + // *SpectredMessage_BlockWithTrustedDataV4 + // *SpectredMessage_TrustedData + // *SpectredMessage_RequestIBDChainBlockLocator + // *SpectredMessage_IbdChainBlockLocator + // *SpectredMessage_RequestAnticone + // *SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks + // *SpectredMessage_GetCurrentNetworkRequest + // *SpectredMessage_GetCurrentNetworkResponse + // *SpectredMessage_SubmitBlockRequest + // *SpectredMessage_SubmitBlockResponse + // *SpectredMessage_GetBlockTemplateRequest + // *SpectredMessage_GetBlockTemplateResponse + // *SpectredMessage_NotifyBlockAddedRequest + // *SpectredMessage_NotifyBlockAddedResponse + // *SpectredMessage_BlockAddedNotification + // *SpectredMessage_GetPeerAddressesRequest + // *SpectredMessage_GetPeerAddressesResponse + // *SpectredMessage_GetSelectedTipHashRequest + // *SpectredMessage_GetSelectedTipHashResponse + // *SpectredMessage_GetMempoolEntryRequest + // *SpectredMessage_GetMempoolEntryResponse + // *SpectredMessage_GetConnectedPeerInfoRequest + // *SpectredMessage_GetConnectedPeerInfoResponse + // *SpectredMessage_AddPeerRequest + // *SpectredMessage_AddPeerResponse + // *SpectredMessage_SubmitTransactionRequest + // *SpectredMessage_SubmitTransactionResponse + // 
*SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest + // *SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse + // *SpectredMessage_VirtualSelectedParentChainChangedNotification + // *SpectredMessage_GetBlockRequest + // *SpectredMessage_GetBlockResponse + // *SpectredMessage_GetSubnetworkRequest + // *SpectredMessage_GetSubnetworkResponse + // *SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest + // *SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse + // *SpectredMessage_GetBlocksRequest + // *SpectredMessage_GetBlocksResponse + // *SpectredMessage_GetBlockCountRequest + // *SpectredMessage_GetBlockCountResponse + // *SpectredMessage_GetBlockDagInfoRequest + // *SpectredMessage_GetBlockDagInfoResponse + // *SpectredMessage_ResolveFinalityConflictRequest + // *SpectredMessage_ResolveFinalityConflictResponse + // *SpectredMessage_NotifyFinalityConflictsRequest + // *SpectredMessage_NotifyFinalityConflictsResponse + // *SpectredMessage_FinalityConflictNotification + // *SpectredMessage_FinalityConflictResolvedNotification + // *SpectredMessage_GetMempoolEntriesRequest + // *SpectredMessage_GetMempoolEntriesResponse + // *SpectredMessage_ShutDownRequest + // *SpectredMessage_ShutDownResponse + // *SpectredMessage_GetHeadersRequest + // *SpectredMessage_GetHeadersResponse + // *SpectredMessage_NotifyUtxosChangedRequest + // *SpectredMessage_NotifyUtxosChangedResponse + // *SpectredMessage_UtxosChangedNotification + // *SpectredMessage_GetUtxosByAddressesRequest + // *SpectredMessage_GetUtxosByAddressesResponse + // *SpectredMessage_GetVirtualSelectedParentBlueScoreRequest + // *SpectredMessage_GetVirtualSelectedParentBlueScoreResponse + // *SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest + // *SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse + // *SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification + // *SpectredMessage_BanRequest + // *SpectredMessage_BanResponse + // 
*SpectredMessage_UnbanRequest + // *SpectredMessage_UnbanResponse + // *SpectredMessage_GetInfoRequest + // *SpectredMessage_GetInfoResponse + // *SpectredMessage_StopNotifyingUtxosChangedRequest + // *SpectredMessage_StopNotifyingUtxosChangedResponse + // *SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest + // *SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse + // *SpectredMessage_PruningPointUTXOSetOverrideNotification + // *SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest + // *SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse + // *SpectredMessage_EstimateNetworkHashesPerSecondRequest + // *SpectredMessage_EstimateNetworkHashesPerSecondResponse + // *SpectredMessage_NotifyVirtualDaaScoreChangedRequest + // *SpectredMessage_NotifyVirtualDaaScoreChangedResponse + // *SpectredMessage_VirtualDaaScoreChangedNotification + // *SpectredMessage_GetBalanceByAddressRequest + // *SpectredMessage_GetBalanceByAddressResponse + // *SpectredMessage_GetBalancesByAddressesRequest + // *SpectredMessage_GetBalancesByAddressesResponse + // *SpectredMessage_NotifyNewBlockTemplateRequest + // *SpectredMessage_NotifyNewBlockTemplateResponse + // *SpectredMessage_NewBlockTemplateNotification + // *SpectredMessage_GetMempoolEntriesByAddressesRequest + // *SpectredMessage_GetMempoolEntriesByAddressesResponse + // *SpectredMessage_GetCoinSupplyRequest + // *SpectredMessage_GetCoinSupplyResponse + Payload isSpectredMessage_Payload `protobuf_oneof:"payload"` +} + +func (x *SpectredMessage) Reset() { + *x = SpectredMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messages_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpectredMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpectredMessage) ProtoMessage() {} + +func (x *SpectredMessage) ProtoReflect() protoreflect.Message { + mi := &file_messages_proto_msgTypes[0] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpectredMessage.ProtoReflect.Descriptor instead. +func (*SpectredMessage) Descriptor() ([]byte, []int) { + return file_messages_proto_rawDescGZIP(), []int{0} +} + +func (m *SpectredMessage) GetPayload() isSpectredMessage_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *SpectredMessage) GetAddresses() *AddressesMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Addresses); ok { + return x.Addresses + } + return nil +} + +func (x *SpectredMessage) GetBlock() *BlockMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Block); ok { + return x.Block + } + return nil +} + +func (x *SpectredMessage) GetTransaction() *TransactionMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Transaction); ok { + return x.Transaction + } + return nil +} + +func (x *SpectredMessage) GetBlockLocator() *BlockLocatorMessage { + if x, ok := x.GetPayload().(*SpectredMessage_BlockLocator); ok { + return x.BlockLocator + } + return nil +} + +func (x *SpectredMessage) GetRequestAddresses() *RequestAddressesMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestAddresses); ok { + return x.RequestAddresses + } + return nil +} + +func (x *SpectredMessage) GetRequestRelayBlocks() *RequestRelayBlocksMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestRelayBlocks); ok { + return x.RequestRelayBlocks + } + return nil +} + +func (x *SpectredMessage) GetRequestTransactions() *RequestTransactionsMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestTransactions); ok { + return x.RequestTransactions + } + return nil +} + +func (x *SpectredMessage) GetIbdBlock() *BlockMessage { + if x, ok := x.GetPayload().(*SpectredMessage_IbdBlock); ok { + return x.IbdBlock + } + return nil +} + +func (x *SpectredMessage) 
GetInvRelayBlock() *InvRelayBlockMessage { + if x, ok := x.GetPayload().(*SpectredMessage_InvRelayBlock); ok { + return x.InvRelayBlock + } + return nil +} + +func (x *SpectredMessage) GetInvTransactions() *InvTransactionsMessage { + if x, ok := x.GetPayload().(*SpectredMessage_InvTransactions); ok { + return x.InvTransactions + } + return nil +} + +func (x *SpectredMessage) GetPing() *PingMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Ping); ok { + return x.Ping + } + return nil +} + +func (x *SpectredMessage) GetPong() *PongMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Pong); ok { + return x.Pong + } + return nil +} + +func (x *SpectredMessage) GetVerack() *VerackMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Verack); ok { + return x.Verack + } + return nil +} + +func (x *SpectredMessage) GetVersion() *VersionMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Version); ok { + return x.Version + } + return nil +} + +func (x *SpectredMessage) GetTransactionNotFound() *TransactionNotFoundMessage { + if x, ok := x.GetPayload().(*SpectredMessage_TransactionNotFound); ok { + return x.TransactionNotFound + } + return nil +} + +func (x *SpectredMessage) GetReject() *RejectMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Reject); ok { + return x.Reject + } + return nil +} + +func (x *SpectredMessage) GetPruningPointUtxoSetChunk() *PruningPointUtxoSetChunkMessage { + if x, ok := x.GetPayload().(*SpectredMessage_PruningPointUtxoSetChunk); ok { + return x.PruningPointUtxoSetChunk + } + return nil +} + +func (x *SpectredMessage) GetRequestIBDBlocks() *RequestIBDBlocksMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestIBDBlocks); ok { + return x.RequestIBDBlocks + } + return nil +} + +func (x *SpectredMessage) GetUnexpectedPruningPoint() *UnexpectedPruningPointMessage { + if x, ok := x.GetPayload().(*SpectredMessage_UnexpectedPruningPoint); ok { + return x.UnexpectedPruningPoint + } + return nil +} + +func (x 
*SpectredMessage) GetIbdBlockLocator() *IbdBlockLocatorMessage { + if x, ok := x.GetPayload().(*SpectredMessage_IbdBlockLocator); ok { + return x.IbdBlockLocator + } + return nil +} + +func (x *SpectredMessage) GetIbdBlockLocatorHighestHash() *IbdBlockLocatorHighestHashMessage { + if x, ok := x.GetPayload().(*SpectredMessage_IbdBlockLocatorHighestHash); ok { + return x.IbdBlockLocatorHighestHash + } + return nil +} + +func (x *SpectredMessage) GetRequestNextPruningPointUtxoSetChunk() *RequestNextPruningPointUtxoSetChunkMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestNextPruningPointUtxoSetChunk); ok { + return x.RequestNextPruningPointUtxoSetChunk + } + return nil +} + +func (x *SpectredMessage) GetDonePruningPointUtxoSetChunks() *DonePruningPointUtxoSetChunksMessage { + if x, ok := x.GetPayload().(*SpectredMessage_DonePruningPointUtxoSetChunks); ok { + return x.DonePruningPointUtxoSetChunks + } + return nil +} + +func (x *SpectredMessage) GetIbdBlockLocatorHighestHashNotFound() *IbdBlockLocatorHighestHashNotFoundMessage { + if x, ok := x.GetPayload().(*SpectredMessage_IbdBlockLocatorHighestHashNotFound); ok { + return x.IbdBlockLocatorHighestHashNotFound + } + return nil +} + +func (x *SpectredMessage) GetBlockWithTrustedData() *BlockWithTrustedDataMessage { + if x, ok := x.GetPayload().(*SpectredMessage_BlockWithTrustedData); ok { + return x.BlockWithTrustedData + } + return nil +} + +func (x *SpectredMessage) GetDoneBlocksWithTrustedData() *DoneBlocksWithTrustedDataMessage { + if x, ok := x.GetPayload().(*SpectredMessage_DoneBlocksWithTrustedData); ok { + return x.DoneBlocksWithTrustedData + } + return nil +} + +func (x *SpectredMessage) GetRequestPruningPointAndItsAnticone() *RequestPruningPointAndItsAnticoneMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestPruningPointAndItsAnticone); ok { + return x.RequestPruningPointAndItsAnticone + } + return nil +} + +func (x *SpectredMessage) GetBlockHeaders() *BlockHeadersMessage { + if x, 
ok := x.GetPayload().(*SpectredMessage_BlockHeaders); ok { + return x.BlockHeaders + } + return nil +} + +func (x *SpectredMessage) GetRequestNextHeaders() *RequestNextHeadersMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestNextHeaders); ok { + return x.RequestNextHeaders + } + return nil +} + +func (x *SpectredMessage) GetDoneHeaders() *DoneHeadersMessage { + if x, ok := x.GetPayload().(*SpectredMessage_DoneHeaders); ok { + return x.DoneHeaders + } + return nil +} + +func (x *SpectredMessage) GetRequestPruningPointUTXOSet() *RequestPruningPointUTXOSetMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestPruningPointUTXOSet); ok { + return x.RequestPruningPointUTXOSet + } + return nil +} + +func (x *SpectredMessage) GetRequestHeaders() *RequestHeadersMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestHeaders); ok { + return x.RequestHeaders + } + return nil +} + +func (x *SpectredMessage) GetRequestBlockLocator() *RequestBlockLocatorMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestBlockLocator); ok { + return x.RequestBlockLocator + } + return nil +} + +func (x *SpectredMessage) GetPruningPoints() *PruningPointsMessage { + if x, ok := x.GetPayload().(*SpectredMessage_PruningPoints); ok { + return x.PruningPoints + } + return nil +} + +func (x *SpectredMessage) GetRequestPruningPointProof() *RequestPruningPointProofMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestPruningPointProof); ok { + return x.RequestPruningPointProof + } + return nil +} + +func (x *SpectredMessage) GetPruningPointProof() *PruningPointProofMessage { + if x, ok := x.GetPayload().(*SpectredMessage_PruningPointProof); ok { + return x.PruningPointProof + } + return nil +} + +func (x *SpectredMessage) GetReady() *ReadyMessage { + if x, ok := x.GetPayload().(*SpectredMessage_Ready); ok { + return x.Ready + } + return nil +} + +func (x *SpectredMessage) GetBlockWithTrustedDataV4() *BlockWithTrustedDataV4Message { + if x, ok := 
x.GetPayload().(*SpectredMessage_BlockWithTrustedDataV4); ok { + return x.BlockWithTrustedDataV4 + } + return nil +} + +func (x *SpectredMessage) GetTrustedData() *TrustedDataMessage { + if x, ok := x.GetPayload().(*SpectredMessage_TrustedData); ok { + return x.TrustedData + } + return nil +} + +func (x *SpectredMessage) GetRequestIBDChainBlockLocator() *RequestIBDChainBlockLocatorMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestIBDChainBlockLocator); ok { + return x.RequestIBDChainBlockLocator + } + return nil +} + +func (x *SpectredMessage) GetIbdChainBlockLocator() *IbdChainBlockLocatorMessage { + if x, ok := x.GetPayload().(*SpectredMessage_IbdChainBlockLocator); ok { + return x.IbdChainBlockLocator + } + return nil +} + +func (x *SpectredMessage) GetRequestAnticone() *RequestAnticoneMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestAnticone); ok { + return x.RequestAnticone + } + return nil +} + +func (x *SpectredMessage) GetRequestNextPruningPointAndItsAnticoneBlocks() *RequestNextPruningPointAndItsAnticoneBlocksMessage { + if x, ok := x.GetPayload().(*SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks); ok { + return x.RequestNextPruningPointAndItsAnticoneBlocks + } + return nil +} + +func (x *SpectredMessage) GetGetCurrentNetworkRequest() *GetCurrentNetworkRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetCurrentNetworkRequest); ok { + return x.GetCurrentNetworkRequest + } + return nil +} + +func (x *SpectredMessage) GetGetCurrentNetworkResponse() *GetCurrentNetworkResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetCurrentNetworkResponse); ok { + return x.GetCurrentNetworkResponse + } + return nil +} + +func (x *SpectredMessage) GetSubmitBlockRequest() *SubmitBlockRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_SubmitBlockRequest); ok { + return x.SubmitBlockRequest + } + return nil +} + +func (x *SpectredMessage) GetSubmitBlockResponse() *SubmitBlockResponseMessage { + 
if x, ok := x.GetPayload().(*SpectredMessage_SubmitBlockResponse); ok { + return x.SubmitBlockResponse + } + return nil +} + +func (x *SpectredMessage) GetGetBlockTemplateRequest() *GetBlockTemplateRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockTemplateRequest); ok { + return x.GetBlockTemplateRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBlockTemplateResponse() *GetBlockTemplateResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockTemplateResponse); ok { + return x.GetBlockTemplateResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyBlockAddedRequest() *NotifyBlockAddedRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyBlockAddedRequest); ok { + return x.NotifyBlockAddedRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyBlockAddedResponse() *NotifyBlockAddedResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyBlockAddedResponse); ok { + return x.NotifyBlockAddedResponse + } + return nil +} + +func (x *SpectredMessage) GetBlockAddedNotification() *BlockAddedNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_BlockAddedNotification); ok { + return x.BlockAddedNotification + } + return nil +} + +func (x *SpectredMessage) GetGetPeerAddressesRequest() *GetPeerAddressesRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetPeerAddressesRequest); ok { + return x.GetPeerAddressesRequest + } + return nil +} + +func (x *SpectredMessage) GetGetPeerAddressesResponse() *GetPeerAddressesResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetPeerAddressesResponse); ok { + return x.GetPeerAddressesResponse + } + return nil +} + +func (x *SpectredMessage) GetGetSelectedTipHashRequest() *GetSelectedTipHashRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetSelectedTipHashRequest); ok { + return x.GetSelectedTipHashRequest + } + return nil +} + +func (x *SpectredMessage) GetGetSelectedTipHashResponse() 
*GetSelectedTipHashResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetSelectedTipHashResponse); ok { + return x.GetSelectedTipHashResponse + } + return nil +} + +func (x *SpectredMessage) GetGetMempoolEntryRequest() *GetMempoolEntryRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetMempoolEntryRequest); ok { + return x.GetMempoolEntryRequest + } + return nil +} + +func (x *SpectredMessage) GetGetMempoolEntryResponse() *GetMempoolEntryResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetMempoolEntryResponse); ok { + return x.GetMempoolEntryResponse + } + return nil +} + +func (x *SpectredMessage) GetGetConnectedPeerInfoRequest() *GetConnectedPeerInfoRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetConnectedPeerInfoRequest); ok { + return x.GetConnectedPeerInfoRequest + } + return nil +} + +func (x *SpectredMessage) GetGetConnectedPeerInfoResponse() *GetConnectedPeerInfoResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetConnectedPeerInfoResponse); ok { + return x.GetConnectedPeerInfoResponse + } + return nil +} + +func (x *SpectredMessage) GetAddPeerRequest() *AddPeerRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_AddPeerRequest); ok { + return x.AddPeerRequest + } + return nil +} + +func (x *SpectredMessage) GetAddPeerResponse() *AddPeerResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_AddPeerResponse); ok { + return x.AddPeerResponse + } + return nil +} + +func (x *SpectredMessage) GetSubmitTransactionRequest() *SubmitTransactionRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_SubmitTransactionRequest); ok { + return x.SubmitTransactionRequest + } + return nil +} + +func (x *SpectredMessage) GetSubmitTransactionResponse() *SubmitTransactionResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_SubmitTransactionResponse); ok { + return x.SubmitTransactionResponse + } + return nil +} + +func (x *SpectredMessage) 
GetNotifyVirtualSelectedParentChainChangedRequest() *NotifyVirtualSelectedParentChainChangedRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest); ok { + return x.NotifyVirtualSelectedParentChainChangedRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyVirtualSelectedParentChainChangedResponse() *NotifyVirtualSelectedParentChainChangedResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse); ok { + return x.NotifyVirtualSelectedParentChainChangedResponse + } + return nil +} + +func (x *SpectredMessage) GetVirtualSelectedParentChainChangedNotification() *VirtualSelectedParentChainChangedNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_VirtualSelectedParentChainChangedNotification); ok { + return x.VirtualSelectedParentChainChangedNotification + } + return nil +} + +func (x *SpectredMessage) GetGetBlockRequest() *GetBlockRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockRequest); ok { + return x.GetBlockRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBlockResponse() *GetBlockResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockResponse); ok { + return x.GetBlockResponse + } + return nil +} + +func (x *SpectredMessage) GetGetSubnetworkRequest() *GetSubnetworkRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetSubnetworkRequest); ok { + return x.GetSubnetworkRequest + } + return nil +} + +func (x *SpectredMessage) GetGetSubnetworkResponse() *GetSubnetworkResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetSubnetworkResponse); ok { + return x.GetSubnetworkResponse + } + return nil +} + +func (x *SpectredMessage) GetGetVirtualSelectedParentChainFromBlockRequest() *GetVirtualSelectedParentChainFromBlockRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest); ok { + return 
x.GetVirtualSelectedParentChainFromBlockRequest + } + return nil +} + +func (x *SpectredMessage) GetGetVirtualSelectedParentChainFromBlockResponse() *GetVirtualSelectedParentChainFromBlockResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse); ok { + return x.GetVirtualSelectedParentChainFromBlockResponse + } + return nil +} + +func (x *SpectredMessage) GetGetBlocksRequest() *GetBlocksRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlocksRequest); ok { + return x.GetBlocksRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBlocksResponse() *GetBlocksResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlocksResponse); ok { + return x.GetBlocksResponse + } + return nil +} + +func (x *SpectredMessage) GetGetBlockCountRequest() *GetBlockCountRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockCountRequest); ok { + return x.GetBlockCountRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBlockCountResponse() *GetBlockCountResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockCountResponse); ok { + return x.GetBlockCountResponse + } + return nil +} + +func (x *SpectredMessage) GetGetBlockDagInfoRequest() *GetBlockDagInfoRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockDagInfoRequest); ok { + return x.GetBlockDagInfoRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBlockDagInfoResponse() *GetBlockDagInfoResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBlockDagInfoResponse); ok { + return x.GetBlockDagInfoResponse + } + return nil +} + +func (x *SpectredMessage) GetResolveFinalityConflictRequest() *ResolveFinalityConflictRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_ResolveFinalityConflictRequest); ok { + return x.ResolveFinalityConflictRequest + } + return nil +} + +func (x *SpectredMessage) GetResolveFinalityConflictResponse() 
*ResolveFinalityConflictResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_ResolveFinalityConflictResponse); ok { + return x.ResolveFinalityConflictResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyFinalityConflictsRequest() *NotifyFinalityConflictsRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyFinalityConflictsRequest); ok { + return x.NotifyFinalityConflictsRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyFinalityConflictsResponse() *NotifyFinalityConflictsResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyFinalityConflictsResponse); ok { + return x.NotifyFinalityConflictsResponse + } + return nil +} + +func (x *SpectredMessage) GetFinalityConflictNotification() *FinalityConflictNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_FinalityConflictNotification); ok { + return x.FinalityConflictNotification + } + return nil +} + +func (x *SpectredMessage) GetFinalityConflictResolvedNotification() *FinalityConflictResolvedNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_FinalityConflictResolvedNotification); ok { + return x.FinalityConflictResolvedNotification + } + return nil +} + +func (x *SpectredMessage) GetGetMempoolEntriesRequest() *GetMempoolEntriesRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetMempoolEntriesRequest); ok { + return x.GetMempoolEntriesRequest + } + return nil +} + +func (x *SpectredMessage) GetGetMempoolEntriesResponse() *GetMempoolEntriesResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetMempoolEntriesResponse); ok { + return x.GetMempoolEntriesResponse + } + return nil +} + +func (x *SpectredMessage) GetShutDownRequest() *ShutDownRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_ShutDownRequest); ok { + return x.ShutDownRequest + } + return nil +} + +func (x *SpectredMessage) GetShutDownResponse() *ShutDownResponseMessage { + if x, ok := 
x.GetPayload().(*SpectredMessage_ShutDownResponse); ok { + return x.ShutDownResponse + } + return nil +} + +func (x *SpectredMessage) GetGetHeadersRequest() *GetHeadersRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetHeadersRequest); ok { + return x.GetHeadersRequest + } + return nil +} + +func (x *SpectredMessage) GetGetHeadersResponse() *GetHeadersResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetHeadersResponse); ok { + return x.GetHeadersResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyUtxosChangedRequest() *NotifyUtxosChangedRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyUtxosChangedRequest); ok { + return x.NotifyUtxosChangedRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyUtxosChangedResponse() *NotifyUtxosChangedResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyUtxosChangedResponse); ok { + return x.NotifyUtxosChangedResponse + } + return nil +} + +func (x *SpectredMessage) GetUtxosChangedNotification() *UtxosChangedNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_UtxosChangedNotification); ok { + return x.UtxosChangedNotification + } + return nil +} + +func (x *SpectredMessage) GetGetUtxosByAddressesRequest() *GetUtxosByAddressesRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetUtxosByAddressesRequest); ok { + return x.GetUtxosByAddressesRequest + } + return nil +} + +func (x *SpectredMessage) GetGetUtxosByAddressesResponse() *GetUtxosByAddressesResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetUtxosByAddressesResponse); ok { + return x.GetUtxosByAddressesResponse + } + return nil +} + +func (x *SpectredMessage) GetGetVirtualSelectedParentBlueScoreRequest() *GetVirtualSelectedParentBlueScoreRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetVirtualSelectedParentBlueScoreRequest); ok { + return x.GetVirtualSelectedParentBlueScoreRequest + } + return nil +} + +func (x 
*SpectredMessage) GetGetVirtualSelectedParentBlueScoreResponse() *GetVirtualSelectedParentBlueScoreResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetVirtualSelectedParentBlueScoreResponse); ok { + return x.GetVirtualSelectedParentBlueScoreResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyVirtualSelectedParentBlueScoreChangedRequest() *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest); ok { + return x.NotifyVirtualSelectedParentBlueScoreChangedRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyVirtualSelectedParentBlueScoreChangedResponse() *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse); ok { + return x.NotifyVirtualSelectedParentBlueScoreChangedResponse + } + return nil +} + +func (x *SpectredMessage) GetVirtualSelectedParentBlueScoreChangedNotification() *VirtualSelectedParentBlueScoreChangedNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification); ok { + return x.VirtualSelectedParentBlueScoreChangedNotification + } + return nil +} + +func (x *SpectredMessage) GetBanRequest() *BanRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_BanRequest); ok { + return x.BanRequest + } + return nil +} + +func (x *SpectredMessage) GetBanResponse() *BanResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_BanResponse); ok { + return x.BanResponse + } + return nil +} + +func (x *SpectredMessage) GetUnbanRequest() *UnbanRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_UnbanRequest); ok { + return x.UnbanRequest + } + return nil +} + +func (x *SpectredMessage) GetUnbanResponse() *UnbanResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_UnbanResponse); ok { + return x.UnbanResponse + } + return nil 
+} + +func (x *SpectredMessage) GetGetInfoRequest() *GetInfoRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetInfoRequest); ok { + return x.GetInfoRequest + } + return nil +} + +func (x *SpectredMessage) GetGetInfoResponse() *GetInfoResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetInfoResponse); ok { + return x.GetInfoResponse + } + return nil +} + +func (x *SpectredMessage) GetStopNotifyingUtxosChangedRequest() *StopNotifyingUtxosChangedRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_StopNotifyingUtxosChangedRequest); ok { + return x.StopNotifyingUtxosChangedRequest + } + return nil +} + +func (x *SpectredMessage) GetStopNotifyingUtxosChangedResponse() *StopNotifyingUtxosChangedResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_StopNotifyingUtxosChangedResponse); ok { + return x.StopNotifyingUtxosChangedResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyPruningPointUTXOSetOverrideRequest() *NotifyPruningPointUTXOSetOverrideRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest); ok { + return x.NotifyPruningPointUTXOSetOverrideRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyPruningPointUTXOSetOverrideResponse() *NotifyPruningPointUTXOSetOverrideResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse); ok { + return x.NotifyPruningPointUTXOSetOverrideResponse + } + return nil +} + +func (x *SpectredMessage) GetPruningPointUTXOSetOverrideNotification() *PruningPointUTXOSetOverrideNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_PruningPointUTXOSetOverrideNotification); ok { + return x.PruningPointUTXOSetOverrideNotification + } + return nil +} + +func (x *SpectredMessage) GetStopNotifyingPruningPointUTXOSetOverrideRequest() *StopNotifyingPruningPointUTXOSetOverrideRequestMessage { + if x, ok := 
x.GetPayload().(*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest); ok { + return x.StopNotifyingPruningPointUTXOSetOverrideRequest + } + return nil +} + +func (x *SpectredMessage) GetStopNotifyingPruningPointUTXOSetOverrideResponse() *StopNotifyingPruningPointUTXOSetOverrideResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse); ok { + return x.StopNotifyingPruningPointUTXOSetOverrideResponse + } + return nil +} + +func (x *SpectredMessage) GetEstimateNetworkHashesPerSecondRequest() *EstimateNetworkHashesPerSecondRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_EstimateNetworkHashesPerSecondRequest); ok { + return x.EstimateNetworkHashesPerSecondRequest + } + return nil +} + +func (x *SpectredMessage) GetEstimateNetworkHashesPerSecondResponse() *EstimateNetworkHashesPerSecondResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_EstimateNetworkHashesPerSecondResponse); ok { + return x.EstimateNetworkHashesPerSecondResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyVirtualDaaScoreChangedRequest() *NotifyVirtualDaaScoreChangedRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyVirtualDaaScoreChangedRequest); ok { + return x.NotifyVirtualDaaScoreChangedRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyVirtualDaaScoreChangedResponse() *NotifyVirtualDaaScoreChangedResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyVirtualDaaScoreChangedResponse); ok { + return x.NotifyVirtualDaaScoreChangedResponse + } + return nil +} + +func (x *SpectredMessage) GetVirtualDaaScoreChangedNotification() *VirtualDaaScoreChangedNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_VirtualDaaScoreChangedNotification); ok { + return x.VirtualDaaScoreChangedNotification + } + return nil +} + +func (x *SpectredMessage) GetGetBalanceByAddressRequest() *GetBalanceByAddressRequestMessage { + if x, ok := 
x.GetPayload().(*SpectredMessage_GetBalanceByAddressRequest); ok { + return x.GetBalanceByAddressRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBalanceByAddressResponse() *GetBalanceByAddressResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBalanceByAddressResponse); ok { + return x.GetBalanceByAddressResponse + } + return nil +} + +func (x *SpectredMessage) GetGetBalancesByAddressesRequest() *GetBalancesByAddressesRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBalancesByAddressesRequest); ok { + return x.GetBalancesByAddressesRequest + } + return nil +} + +func (x *SpectredMessage) GetGetBalancesByAddressesResponse() *GetBalancesByAddressesResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetBalancesByAddressesResponse); ok { + return x.GetBalancesByAddressesResponse + } + return nil +} + +func (x *SpectredMessage) GetNotifyNewBlockTemplateRequest() *NotifyNewBlockTemplateRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyNewBlockTemplateRequest); ok { + return x.NotifyNewBlockTemplateRequest + } + return nil +} + +func (x *SpectredMessage) GetNotifyNewBlockTemplateResponse() *NotifyNewBlockTemplateResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NotifyNewBlockTemplateResponse); ok { + return x.NotifyNewBlockTemplateResponse + } + return nil +} + +func (x *SpectredMessage) GetNewBlockTemplateNotification() *NewBlockTemplateNotificationMessage { + if x, ok := x.GetPayload().(*SpectredMessage_NewBlockTemplateNotification); ok { + return x.NewBlockTemplateNotification + } + return nil +} + +func (x *SpectredMessage) GetGetMempoolEntriesByAddressesRequest() *GetMempoolEntriesByAddressesRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetMempoolEntriesByAddressesRequest); ok { + return x.GetMempoolEntriesByAddressesRequest + } + return nil +} + +func (x *SpectredMessage) GetGetMempoolEntriesByAddressesResponse() 
*GetMempoolEntriesByAddressesResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetMempoolEntriesByAddressesResponse); ok { + return x.GetMempoolEntriesByAddressesResponse + } + return nil +} + +func (x *SpectredMessage) GetGetCoinSupplyRequest() *GetCoinSupplyRequestMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetCoinSupplyRequest); ok { + return x.GetCoinSupplyRequest + } + return nil +} + +func (x *SpectredMessage) GetGetCoinSupplyResponse() *GetCoinSupplyResponseMessage { + if x, ok := x.GetPayload().(*SpectredMessage_GetCoinSupplyResponse); ok { + return x.GetCoinSupplyResponse + } + return nil +} + +type isSpectredMessage_Payload interface { + isSpectredMessage_Payload() +} + +type SpectredMessage_Addresses struct { + Addresses *AddressesMessage `protobuf:"bytes,1,opt,name=addresses,proto3,oneof"` +} + +type SpectredMessage_Block struct { + Block *BlockMessage `protobuf:"bytes,2,opt,name=block,proto3,oneof"` +} + +type SpectredMessage_Transaction struct { + Transaction *TransactionMessage `protobuf:"bytes,3,opt,name=transaction,proto3,oneof"` +} + +type SpectredMessage_BlockLocator struct { + BlockLocator *BlockLocatorMessage `protobuf:"bytes,5,opt,name=blockLocator,proto3,oneof"` +} + +type SpectredMessage_RequestAddresses struct { + RequestAddresses *RequestAddressesMessage `protobuf:"bytes,6,opt,name=requestAddresses,proto3,oneof"` +} + +type SpectredMessage_RequestRelayBlocks struct { + RequestRelayBlocks *RequestRelayBlocksMessage `protobuf:"bytes,10,opt,name=requestRelayBlocks,proto3,oneof"` +} + +type SpectredMessage_RequestTransactions struct { + RequestTransactions *RequestTransactionsMessage `protobuf:"bytes,12,opt,name=requestTransactions,proto3,oneof"` +} + +type SpectredMessage_IbdBlock struct { + IbdBlock *BlockMessage `protobuf:"bytes,13,opt,name=ibdBlock,proto3,oneof"` +} + +type SpectredMessage_InvRelayBlock struct { + InvRelayBlock *InvRelayBlockMessage `protobuf:"bytes,14,opt,name=invRelayBlock,proto3,oneof"` +} + 
+type SpectredMessage_InvTransactions struct { + InvTransactions *InvTransactionsMessage `protobuf:"bytes,15,opt,name=invTransactions,proto3,oneof"` +} + +type SpectredMessage_Ping struct { + Ping *PingMessage `protobuf:"bytes,16,opt,name=ping,proto3,oneof"` +} + +type SpectredMessage_Pong struct { + Pong *PongMessage `protobuf:"bytes,17,opt,name=pong,proto3,oneof"` +} + +type SpectredMessage_Verack struct { + Verack *VerackMessage `protobuf:"bytes,19,opt,name=verack,proto3,oneof"` +} + +type SpectredMessage_Version struct { + Version *VersionMessage `protobuf:"bytes,20,opt,name=version,proto3,oneof"` +} + +type SpectredMessage_TransactionNotFound struct { + TransactionNotFound *TransactionNotFoundMessage `protobuf:"bytes,21,opt,name=transactionNotFound,proto3,oneof"` +} + +type SpectredMessage_Reject struct { + Reject *RejectMessage `protobuf:"bytes,22,opt,name=reject,proto3,oneof"` +} + +type SpectredMessage_PruningPointUtxoSetChunk struct { + PruningPointUtxoSetChunk *PruningPointUtxoSetChunkMessage `protobuf:"bytes,25,opt,name=pruningPointUtxoSetChunk,proto3,oneof"` +} + +type SpectredMessage_RequestIBDBlocks struct { + RequestIBDBlocks *RequestIBDBlocksMessage `protobuf:"bytes,26,opt,name=requestIBDBlocks,proto3,oneof"` +} + +type SpectredMessage_UnexpectedPruningPoint struct { + UnexpectedPruningPoint *UnexpectedPruningPointMessage `protobuf:"bytes,27,opt,name=unexpectedPruningPoint,proto3,oneof"` +} + +type SpectredMessage_IbdBlockLocator struct { + IbdBlockLocator *IbdBlockLocatorMessage `protobuf:"bytes,30,opt,name=ibdBlockLocator,proto3,oneof"` +} + +type SpectredMessage_IbdBlockLocatorHighestHash struct { + IbdBlockLocatorHighestHash *IbdBlockLocatorHighestHashMessage `protobuf:"bytes,31,opt,name=ibdBlockLocatorHighestHash,proto3,oneof"` +} + +type SpectredMessage_RequestNextPruningPointUtxoSetChunk struct { + RequestNextPruningPointUtxoSetChunk *RequestNextPruningPointUtxoSetChunkMessage 
`protobuf:"bytes,33,opt,name=requestNextPruningPointUtxoSetChunk,proto3,oneof"` +} + +type SpectredMessage_DonePruningPointUtxoSetChunks struct { + DonePruningPointUtxoSetChunks *DonePruningPointUtxoSetChunksMessage `protobuf:"bytes,34,opt,name=donePruningPointUtxoSetChunks,proto3,oneof"` +} + +type SpectredMessage_IbdBlockLocatorHighestHashNotFound struct { + IbdBlockLocatorHighestHashNotFound *IbdBlockLocatorHighestHashNotFoundMessage `protobuf:"bytes,35,opt,name=ibdBlockLocatorHighestHashNotFound,proto3,oneof"` +} + +type SpectredMessage_BlockWithTrustedData struct { + BlockWithTrustedData *BlockWithTrustedDataMessage `protobuf:"bytes,36,opt,name=blockWithTrustedData,proto3,oneof"` +} + +type SpectredMessage_DoneBlocksWithTrustedData struct { + DoneBlocksWithTrustedData *DoneBlocksWithTrustedDataMessage `protobuf:"bytes,37,opt,name=doneBlocksWithTrustedData,proto3,oneof"` +} + +type SpectredMessage_RequestPruningPointAndItsAnticone struct { + RequestPruningPointAndItsAnticone *RequestPruningPointAndItsAnticoneMessage `protobuf:"bytes,40,opt,name=requestPruningPointAndItsAnticone,proto3,oneof"` +} + +type SpectredMessage_BlockHeaders struct { + BlockHeaders *BlockHeadersMessage `protobuf:"bytes,41,opt,name=blockHeaders,proto3,oneof"` +} + +type SpectredMessage_RequestNextHeaders struct { + RequestNextHeaders *RequestNextHeadersMessage `protobuf:"bytes,42,opt,name=requestNextHeaders,proto3,oneof"` +} + +type SpectredMessage_DoneHeaders struct { + DoneHeaders *DoneHeadersMessage `protobuf:"bytes,43,opt,name=DoneHeaders,proto3,oneof"` +} + +type SpectredMessage_RequestPruningPointUTXOSet struct { + RequestPruningPointUTXOSet *RequestPruningPointUTXOSetMessage `protobuf:"bytes,44,opt,name=requestPruningPointUTXOSet,proto3,oneof"` +} + +type SpectredMessage_RequestHeaders struct { + RequestHeaders *RequestHeadersMessage `protobuf:"bytes,45,opt,name=requestHeaders,proto3,oneof"` +} + +type SpectredMessage_RequestBlockLocator struct { + RequestBlockLocator 
*RequestBlockLocatorMessage `protobuf:"bytes,46,opt,name=requestBlockLocator,proto3,oneof"` +} + +type SpectredMessage_PruningPoints struct { + PruningPoints *PruningPointsMessage `protobuf:"bytes,47,opt,name=pruningPoints,proto3,oneof"` +} + +type SpectredMessage_RequestPruningPointProof struct { + RequestPruningPointProof *RequestPruningPointProofMessage `protobuf:"bytes,48,opt,name=requestPruningPointProof,proto3,oneof"` +} + +type SpectredMessage_PruningPointProof struct { + PruningPointProof *PruningPointProofMessage `protobuf:"bytes,49,opt,name=pruningPointProof,proto3,oneof"` +} + +type SpectredMessage_Ready struct { + Ready *ReadyMessage `protobuf:"bytes,50,opt,name=ready,proto3,oneof"` +} + +type SpectredMessage_BlockWithTrustedDataV4 struct { + BlockWithTrustedDataV4 *BlockWithTrustedDataV4Message `protobuf:"bytes,51,opt,name=blockWithTrustedDataV4,proto3,oneof"` +} + +type SpectredMessage_TrustedData struct { + TrustedData *TrustedDataMessage `protobuf:"bytes,52,opt,name=trustedData,proto3,oneof"` +} + +type SpectredMessage_RequestIBDChainBlockLocator struct { + RequestIBDChainBlockLocator *RequestIBDChainBlockLocatorMessage `protobuf:"bytes,53,opt,name=requestIBDChainBlockLocator,proto3,oneof"` +} + +type SpectredMessage_IbdChainBlockLocator struct { + IbdChainBlockLocator *IbdChainBlockLocatorMessage `protobuf:"bytes,54,opt,name=ibdChainBlockLocator,proto3,oneof"` +} + +type SpectredMessage_RequestAnticone struct { + RequestAnticone *RequestAnticoneMessage `protobuf:"bytes,55,opt,name=requestAnticone,proto3,oneof"` +} + +type SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks struct { + RequestNextPruningPointAndItsAnticoneBlocks *RequestNextPruningPointAndItsAnticoneBlocksMessage `protobuf:"bytes,56,opt,name=requestNextPruningPointAndItsAnticoneBlocks,proto3,oneof"` +} + +type SpectredMessage_GetCurrentNetworkRequest struct { + GetCurrentNetworkRequest *GetCurrentNetworkRequestMessage 
`protobuf:"bytes,1001,opt,name=getCurrentNetworkRequest,proto3,oneof"` +} + +type SpectredMessage_GetCurrentNetworkResponse struct { + GetCurrentNetworkResponse *GetCurrentNetworkResponseMessage `protobuf:"bytes,1002,opt,name=getCurrentNetworkResponse,proto3,oneof"` +} + +type SpectredMessage_SubmitBlockRequest struct { + SubmitBlockRequest *SubmitBlockRequestMessage `protobuf:"bytes,1003,opt,name=submitBlockRequest,proto3,oneof"` +} + +type SpectredMessage_SubmitBlockResponse struct { + SubmitBlockResponse *SubmitBlockResponseMessage `protobuf:"bytes,1004,opt,name=submitBlockResponse,proto3,oneof"` +} + +type SpectredMessage_GetBlockTemplateRequest struct { + GetBlockTemplateRequest *GetBlockTemplateRequestMessage `protobuf:"bytes,1005,opt,name=getBlockTemplateRequest,proto3,oneof"` +} + +type SpectredMessage_GetBlockTemplateResponse struct { + GetBlockTemplateResponse *GetBlockTemplateResponseMessage `protobuf:"bytes,1006,opt,name=getBlockTemplateResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyBlockAddedRequest struct { + NotifyBlockAddedRequest *NotifyBlockAddedRequestMessage `protobuf:"bytes,1007,opt,name=notifyBlockAddedRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyBlockAddedResponse struct { + NotifyBlockAddedResponse *NotifyBlockAddedResponseMessage `protobuf:"bytes,1008,opt,name=notifyBlockAddedResponse,proto3,oneof"` +} + +type SpectredMessage_BlockAddedNotification struct { + BlockAddedNotification *BlockAddedNotificationMessage `protobuf:"bytes,1009,opt,name=blockAddedNotification,proto3,oneof"` +} + +type SpectredMessage_GetPeerAddressesRequest struct { + GetPeerAddressesRequest *GetPeerAddressesRequestMessage `protobuf:"bytes,1010,opt,name=getPeerAddressesRequest,proto3,oneof"` +} + +type SpectredMessage_GetPeerAddressesResponse struct { + GetPeerAddressesResponse *GetPeerAddressesResponseMessage `protobuf:"bytes,1011,opt,name=getPeerAddressesResponse,proto3,oneof"` +} + +type SpectredMessage_GetSelectedTipHashRequest struct { + 
GetSelectedTipHashRequest *GetSelectedTipHashRequestMessage `protobuf:"bytes,1012,opt,name=getSelectedTipHashRequest,proto3,oneof"` +} + +type SpectredMessage_GetSelectedTipHashResponse struct { + GetSelectedTipHashResponse *GetSelectedTipHashResponseMessage `protobuf:"bytes,1013,opt,name=getSelectedTipHashResponse,proto3,oneof"` +} + +type SpectredMessage_GetMempoolEntryRequest struct { + GetMempoolEntryRequest *GetMempoolEntryRequestMessage `protobuf:"bytes,1014,opt,name=getMempoolEntryRequest,proto3,oneof"` +} + +type SpectredMessage_GetMempoolEntryResponse struct { + GetMempoolEntryResponse *GetMempoolEntryResponseMessage `protobuf:"bytes,1015,opt,name=getMempoolEntryResponse,proto3,oneof"` +} + +type SpectredMessage_GetConnectedPeerInfoRequest struct { + GetConnectedPeerInfoRequest *GetConnectedPeerInfoRequestMessage `protobuf:"bytes,1016,opt,name=getConnectedPeerInfoRequest,proto3,oneof"` +} + +type SpectredMessage_GetConnectedPeerInfoResponse struct { + GetConnectedPeerInfoResponse *GetConnectedPeerInfoResponseMessage `protobuf:"bytes,1017,opt,name=getConnectedPeerInfoResponse,proto3,oneof"` +} + +type SpectredMessage_AddPeerRequest struct { + AddPeerRequest *AddPeerRequestMessage `protobuf:"bytes,1018,opt,name=addPeerRequest,proto3,oneof"` +} + +type SpectredMessage_AddPeerResponse struct { + AddPeerResponse *AddPeerResponseMessage `protobuf:"bytes,1019,opt,name=addPeerResponse,proto3,oneof"` +} + +type SpectredMessage_SubmitTransactionRequest struct { + SubmitTransactionRequest *SubmitTransactionRequestMessage `protobuf:"bytes,1020,opt,name=submitTransactionRequest,proto3,oneof"` +} + +type SpectredMessage_SubmitTransactionResponse struct { + SubmitTransactionResponse *SubmitTransactionResponseMessage `protobuf:"bytes,1021,opt,name=submitTransactionResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest struct { + NotifyVirtualSelectedParentChainChangedRequest *NotifyVirtualSelectedParentChainChangedRequestMessage 
`protobuf:"bytes,1022,opt,name=notifyVirtualSelectedParentChainChangedRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse struct { + NotifyVirtualSelectedParentChainChangedResponse *NotifyVirtualSelectedParentChainChangedResponseMessage `protobuf:"bytes,1023,opt,name=notifyVirtualSelectedParentChainChangedResponse,proto3,oneof"` +} + +type SpectredMessage_VirtualSelectedParentChainChangedNotification struct { + VirtualSelectedParentChainChangedNotification *VirtualSelectedParentChainChangedNotificationMessage `protobuf:"bytes,1024,opt,name=virtualSelectedParentChainChangedNotification,proto3,oneof"` +} + +type SpectredMessage_GetBlockRequest struct { + GetBlockRequest *GetBlockRequestMessage `protobuf:"bytes,1025,opt,name=getBlockRequest,proto3,oneof"` +} + +type SpectredMessage_GetBlockResponse struct { + GetBlockResponse *GetBlockResponseMessage `protobuf:"bytes,1026,opt,name=getBlockResponse,proto3,oneof"` +} + +type SpectredMessage_GetSubnetworkRequest struct { + GetSubnetworkRequest *GetSubnetworkRequestMessage `protobuf:"bytes,1027,opt,name=getSubnetworkRequest,proto3,oneof"` +} + +type SpectredMessage_GetSubnetworkResponse struct { + GetSubnetworkResponse *GetSubnetworkResponseMessage `protobuf:"bytes,1028,opt,name=getSubnetworkResponse,proto3,oneof"` +} + +type SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest struct { + GetVirtualSelectedParentChainFromBlockRequest *GetVirtualSelectedParentChainFromBlockRequestMessage `protobuf:"bytes,1029,opt,name=getVirtualSelectedParentChainFromBlockRequest,proto3,oneof"` +} + +type SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse struct { + GetVirtualSelectedParentChainFromBlockResponse *GetVirtualSelectedParentChainFromBlockResponseMessage `protobuf:"bytes,1030,opt,name=getVirtualSelectedParentChainFromBlockResponse,proto3,oneof"` +} + +type SpectredMessage_GetBlocksRequest struct { + GetBlocksRequest *GetBlocksRequestMessage 
`protobuf:"bytes,1031,opt,name=getBlocksRequest,proto3,oneof"` +} + +type SpectredMessage_GetBlocksResponse struct { + GetBlocksResponse *GetBlocksResponseMessage `protobuf:"bytes,1032,opt,name=getBlocksResponse,proto3,oneof"` +} + +type SpectredMessage_GetBlockCountRequest struct { + GetBlockCountRequest *GetBlockCountRequestMessage `protobuf:"bytes,1033,opt,name=getBlockCountRequest,proto3,oneof"` +} + +type SpectredMessage_GetBlockCountResponse struct { + GetBlockCountResponse *GetBlockCountResponseMessage `protobuf:"bytes,1034,opt,name=getBlockCountResponse,proto3,oneof"` +} + +type SpectredMessage_GetBlockDagInfoRequest struct { + GetBlockDagInfoRequest *GetBlockDagInfoRequestMessage `protobuf:"bytes,1035,opt,name=getBlockDagInfoRequest,proto3,oneof"` +} + +type SpectredMessage_GetBlockDagInfoResponse struct { + GetBlockDagInfoResponse *GetBlockDagInfoResponseMessage `protobuf:"bytes,1036,opt,name=getBlockDagInfoResponse,proto3,oneof"` +} + +type SpectredMessage_ResolveFinalityConflictRequest struct { + ResolveFinalityConflictRequest *ResolveFinalityConflictRequestMessage `protobuf:"bytes,1037,opt,name=resolveFinalityConflictRequest,proto3,oneof"` +} + +type SpectredMessage_ResolveFinalityConflictResponse struct { + ResolveFinalityConflictResponse *ResolveFinalityConflictResponseMessage `protobuf:"bytes,1038,opt,name=resolveFinalityConflictResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyFinalityConflictsRequest struct { + NotifyFinalityConflictsRequest *NotifyFinalityConflictsRequestMessage `protobuf:"bytes,1039,opt,name=notifyFinalityConflictsRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyFinalityConflictsResponse struct { + NotifyFinalityConflictsResponse *NotifyFinalityConflictsResponseMessage `protobuf:"bytes,1040,opt,name=notifyFinalityConflictsResponse,proto3,oneof"` +} + +type SpectredMessage_FinalityConflictNotification struct { + FinalityConflictNotification *FinalityConflictNotificationMessage 
`protobuf:"bytes,1041,opt,name=finalityConflictNotification,proto3,oneof"` +} + +type SpectredMessage_FinalityConflictResolvedNotification struct { + FinalityConflictResolvedNotification *FinalityConflictResolvedNotificationMessage `protobuf:"bytes,1042,opt,name=finalityConflictResolvedNotification,proto3,oneof"` +} + +type SpectredMessage_GetMempoolEntriesRequest struct { + GetMempoolEntriesRequest *GetMempoolEntriesRequestMessage `protobuf:"bytes,1043,opt,name=getMempoolEntriesRequest,proto3,oneof"` +} + +type SpectredMessage_GetMempoolEntriesResponse struct { + GetMempoolEntriesResponse *GetMempoolEntriesResponseMessage `protobuf:"bytes,1044,opt,name=getMempoolEntriesResponse,proto3,oneof"` +} + +type SpectredMessage_ShutDownRequest struct { + ShutDownRequest *ShutDownRequestMessage `protobuf:"bytes,1045,opt,name=shutDownRequest,proto3,oneof"` +} + +type SpectredMessage_ShutDownResponse struct { + ShutDownResponse *ShutDownResponseMessage `protobuf:"bytes,1046,opt,name=shutDownResponse,proto3,oneof"` +} + +type SpectredMessage_GetHeadersRequest struct { + GetHeadersRequest *GetHeadersRequestMessage `protobuf:"bytes,1047,opt,name=getHeadersRequest,proto3,oneof"` +} + +type SpectredMessage_GetHeadersResponse struct { + GetHeadersResponse *GetHeadersResponseMessage `protobuf:"bytes,1048,opt,name=getHeadersResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyUtxosChangedRequest struct { + NotifyUtxosChangedRequest *NotifyUtxosChangedRequestMessage `protobuf:"bytes,1049,opt,name=notifyUtxosChangedRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyUtxosChangedResponse struct { + NotifyUtxosChangedResponse *NotifyUtxosChangedResponseMessage `protobuf:"bytes,1050,opt,name=notifyUtxosChangedResponse,proto3,oneof"` +} + +type SpectredMessage_UtxosChangedNotification struct { + UtxosChangedNotification *UtxosChangedNotificationMessage `protobuf:"bytes,1051,opt,name=utxosChangedNotification,proto3,oneof"` +} + +type SpectredMessage_GetUtxosByAddressesRequest struct 
{ + GetUtxosByAddressesRequest *GetUtxosByAddressesRequestMessage `protobuf:"bytes,1052,opt,name=getUtxosByAddressesRequest,proto3,oneof"` +} + +type SpectredMessage_GetUtxosByAddressesResponse struct { + GetUtxosByAddressesResponse *GetUtxosByAddressesResponseMessage `protobuf:"bytes,1053,opt,name=getUtxosByAddressesResponse,proto3,oneof"` +} + +type SpectredMessage_GetVirtualSelectedParentBlueScoreRequest struct { + GetVirtualSelectedParentBlueScoreRequest *GetVirtualSelectedParentBlueScoreRequestMessage `protobuf:"bytes,1054,opt,name=getVirtualSelectedParentBlueScoreRequest,proto3,oneof"` +} + +type SpectredMessage_GetVirtualSelectedParentBlueScoreResponse struct { + GetVirtualSelectedParentBlueScoreResponse *GetVirtualSelectedParentBlueScoreResponseMessage `protobuf:"bytes,1055,opt,name=getVirtualSelectedParentBlueScoreResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest struct { + NotifyVirtualSelectedParentBlueScoreChangedRequest *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage `protobuf:"bytes,1056,opt,name=notifyVirtualSelectedParentBlueScoreChangedRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse struct { + NotifyVirtualSelectedParentBlueScoreChangedResponse *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage `protobuf:"bytes,1057,opt,name=notifyVirtualSelectedParentBlueScoreChangedResponse,proto3,oneof"` +} + +type SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification struct { + VirtualSelectedParentBlueScoreChangedNotification *VirtualSelectedParentBlueScoreChangedNotificationMessage `protobuf:"bytes,1058,opt,name=virtualSelectedParentBlueScoreChangedNotification,proto3,oneof"` +} + +type SpectredMessage_BanRequest struct { + BanRequest *BanRequestMessage `protobuf:"bytes,1059,opt,name=banRequest,proto3,oneof"` +} + +type SpectredMessage_BanResponse struct { + BanResponse *BanResponseMessage 
`protobuf:"bytes,1060,opt,name=banResponse,proto3,oneof"` +} + +type SpectredMessage_UnbanRequest struct { + UnbanRequest *UnbanRequestMessage `protobuf:"bytes,1061,opt,name=unbanRequest,proto3,oneof"` +} + +type SpectredMessage_UnbanResponse struct { + UnbanResponse *UnbanResponseMessage `protobuf:"bytes,1062,opt,name=unbanResponse,proto3,oneof"` +} + +type SpectredMessage_GetInfoRequest struct { + GetInfoRequest *GetInfoRequestMessage `protobuf:"bytes,1063,opt,name=getInfoRequest,proto3,oneof"` +} + +type SpectredMessage_GetInfoResponse struct { + GetInfoResponse *GetInfoResponseMessage `protobuf:"bytes,1064,opt,name=getInfoResponse,proto3,oneof"` +} + +type SpectredMessage_StopNotifyingUtxosChangedRequest struct { + StopNotifyingUtxosChangedRequest *StopNotifyingUtxosChangedRequestMessage `protobuf:"bytes,1065,opt,name=stopNotifyingUtxosChangedRequest,proto3,oneof"` +} + +type SpectredMessage_StopNotifyingUtxosChangedResponse struct { + StopNotifyingUtxosChangedResponse *StopNotifyingUtxosChangedResponseMessage `protobuf:"bytes,1066,opt,name=stopNotifyingUtxosChangedResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest struct { + NotifyPruningPointUTXOSetOverrideRequest *NotifyPruningPointUTXOSetOverrideRequestMessage `protobuf:"bytes,1067,opt,name=notifyPruningPointUTXOSetOverrideRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse struct { + NotifyPruningPointUTXOSetOverrideResponse *NotifyPruningPointUTXOSetOverrideResponseMessage `protobuf:"bytes,1068,opt,name=notifyPruningPointUTXOSetOverrideResponse,proto3,oneof"` +} + +type SpectredMessage_PruningPointUTXOSetOverrideNotification struct { + PruningPointUTXOSetOverrideNotification *PruningPointUTXOSetOverrideNotificationMessage `protobuf:"bytes,1069,opt,name=pruningPointUTXOSetOverrideNotification,proto3,oneof"` +} + +type SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest struct { + 
StopNotifyingPruningPointUTXOSetOverrideRequest *StopNotifyingPruningPointUTXOSetOverrideRequestMessage `protobuf:"bytes,1070,opt,name=stopNotifyingPruningPointUTXOSetOverrideRequest,proto3,oneof"` +} + +type SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse struct { + StopNotifyingPruningPointUTXOSetOverrideResponse *StopNotifyingPruningPointUTXOSetOverrideResponseMessage `protobuf:"bytes,1071,opt,name=stopNotifyingPruningPointUTXOSetOverrideResponse,proto3,oneof"` +} + +type SpectredMessage_EstimateNetworkHashesPerSecondRequest struct { + EstimateNetworkHashesPerSecondRequest *EstimateNetworkHashesPerSecondRequestMessage `protobuf:"bytes,1072,opt,name=estimateNetworkHashesPerSecondRequest,proto3,oneof"` +} + +type SpectredMessage_EstimateNetworkHashesPerSecondResponse struct { + EstimateNetworkHashesPerSecondResponse *EstimateNetworkHashesPerSecondResponseMessage `protobuf:"bytes,1073,opt,name=estimateNetworkHashesPerSecondResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyVirtualDaaScoreChangedRequest struct { + NotifyVirtualDaaScoreChangedRequest *NotifyVirtualDaaScoreChangedRequestMessage `protobuf:"bytes,1074,opt,name=notifyVirtualDaaScoreChangedRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyVirtualDaaScoreChangedResponse struct { + NotifyVirtualDaaScoreChangedResponse *NotifyVirtualDaaScoreChangedResponseMessage `protobuf:"bytes,1075,opt,name=notifyVirtualDaaScoreChangedResponse,proto3,oneof"` +} + +type SpectredMessage_VirtualDaaScoreChangedNotification struct { + VirtualDaaScoreChangedNotification *VirtualDaaScoreChangedNotificationMessage `protobuf:"bytes,1076,opt,name=virtualDaaScoreChangedNotification,proto3,oneof"` +} + +type SpectredMessage_GetBalanceByAddressRequest struct { + GetBalanceByAddressRequest *GetBalanceByAddressRequestMessage `protobuf:"bytes,1077,opt,name=getBalanceByAddressRequest,proto3,oneof"` +} + +type SpectredMessage_GetBalanceByAddressResponse struct { + GetBalanceByAddressResponse 
*GetBalanceByAddressResponseMessage `protobuf:"bytes,1078,opt,name=getBalanceByAddressResponse,proto3,oneof"` +} + +type SpectredMessage_GetBalancesByAddressesRequest struct { + GetBalancesByAddressesRequest *GetBalancesByAddressesRequestMessage `protobuf:"bytes,1079,opt,name=getBalancesByAddressesRequest,proto3,oneof"` +} + +type SpectredMessage_GetBalancesByAddressesResponse struct { + GetBalancesByAddressesResponse *GetBalancesByAddressesResponseMessage `protobuf:"bytes,1080,opt,name=getBalancesByAddressesResponse,proto3,oneof"` +} + +type SpectredMessage_NotifyNewBlockTemplateRequest struct { + NotifyNewBlockTemplateRequest *NotifyNewBlockTemplateRequestMessage `protobuf:"bytes,1081,opt,name=notifyNewBlockTemplateRequest,proto3,oneof"` +} + +type SpectredMessage_NotifyNewBlockTemplateResponse struct { + NotifyNewBlockTemplateResponse *NotifyNewBlockTemplateResponseMessage `protobuf:"bytes,1082,opt,name=notifyNewBlockTemplateResponse,proto3,oneof"` +} + +type SpectredMessage_NewBlockTemplateNotification struct { + NewBlockTemplateNotification *NewBlockTemplateNotificationMessage `protobuf:"bytes,1083,opt,name=newBlockTemplateNotification,proto3,oneof"` +} + +type SpectredMessage_GetMempoolEntriesByAddressesRequest struct { + GetMempoolEntriesByAddressesRequest *GetMempoolEntriesByAddressesRequestMessage `protobuf:"bytes,1084,opt,name=getMempoolEntriesByAddressesRequest,proto3,oneof"` +} + +type SpectredMessage_GetMempoolEntriesByAddressesResponse struct { + GetMempoolEntriesByAddressesResponse *GetMempoolEntriesByAddressesResponseMessage `protobuf:"bytes,1085,opt,name=getMempoolEntriesByAddressesResponse,proto3,oneof"` +} + +type SpectredMessage_GetCoinSupplyRequest struct { + GetCoinSupplyRequest *GetCoinSupplyRequestMessage `protobuf:"bytes,1086,opt,name=getCoinSupplyRequest,proto3,oneof"` +} + +type SpectredMessage_GetCoinSupplyResponse struct { + GetCoinSupplyResponse *GetCoinSupplyResponseMessage 
`protobuf:"bytes,1087,opt,name=getCoinSupplyResponse,proto3,oneof"` +} + +func (*SpectredMessage_Addresses) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Block) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Transaction) isSpectredMessage_Payload() {} + +func (*SpectredMessage_BlockLocator) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestAddresses) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestRelayBlocks) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestTransactions) isSpectredMessage_Payload() {} + +func (*SpectredMessage_IbdBlock) isSpectredMessage_Payload() {} + +func (*SpectredMessage_InvRelayBlock) isSpectredMessage_Payload() {} + +func (*SpectredMessage_InvTransactions) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Ping) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Pong) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Verack) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Version) isSpectredMessage_Payload() {} + +func (*SpectredMessage_TransactionNotFound) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Reject) isSpectredMessage_Payload() {} + +func (*SpectredMessage_PruningPointUtxoSetChunk) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestIBDBlocks) isSpectredMessage_Payload() {} + +func (*SpectredMessage_UnexpectedPruningPoint) isSpectredMessage_Payload() {} + +func (*SpectredMessage_IbdBlockLocator) isSpectredMessage_Payload() {} + +func (*SpectredMessage_IbdBlockLocatorHighestHash) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestNextPruningPointUtxoSetChunk) isSpectredMessage_Payload() {} + +func (*SpectredMessage_DonePruningPointUtxoSetChunks) isSpectredMessage_Payload() {} + +func (*SpectredMessage_IbdBlockLocatorHighestHashNotFound) isSpectredMessage_Payload() {} + +func (*SpectredMessage_BlockWithTrustedData) isSpectredMessage_Payload() {} + +func (*SpectredMessage_DoneBlocksWithTrustedData) 
isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestPruningPointAndItsAnticone) isSpectredMessage_Payload() {} + +func (*SpectredMessage_BlockHeaders) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestNextHeaders) isSpectredMessage_Payload() {} + +func (*SpectredMessage_DoneHeaders) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestPruningPointUTXOSet) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestHeaders) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestBlockLocator) isSpectredMessage_Payload() {} + +func (*SpectredMessage_PruningPoints) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestPruningPointProof) isSpectredMessage_Payload() {} + +func (*SpectredMessage_PruningPointProof) isSpectredMessage_Payload() {} + +func (*SpectredMessage_Ready) isSpectredMessage_Payload() {} + +func (*SpectredMessage_BlockWithTrustedDataV4) isSpectredMessage_Payload() {} + +func (*SpectredMessage_TrustedData) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestIBDChainBlockLocator) isSpectredMessage_Payload() {} + +func (*SpectredMessage_IbdChainBlockLocator) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestAnticone) isSpectredMessage_Payload() {} + +func (*SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetCurrentNetworkRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetCurrentNetworkResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_SubmitBlockRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_SubmitBlockResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockTemplateRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockTemplateResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyBlockAddedRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyBlockAddedResponse) 
isSpectredMessage_Payload() {} + +func (*SpectredMessage_BlockAddedNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetPeerAddressesRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetPeerAddressesResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetSelectedTipHashRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetSelectedTipHashResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetMempoolEntryRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetMempoolEntryResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetConnectedPeerInfoRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetConnectedPeerInfoResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_AddPeerRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_AddPeerResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_SubmitTransactionRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_SubmitTransactionResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_VirtualSelectedParentChainChangedNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetSubnetworkRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetSubnetworkResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlocksRequest) isSpectredMessage_Payload() 
{} + +func (*SpectredMessage_GetBlocksResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockCountRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockCountResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockDagInfoRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBlockDagInfoResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_ResolveFinalityConflictRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_ResolveFinalityConflictResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyFinalityConflictsRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyFinalityConflictsResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_FinalityConflictNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_FinalityConflictResolvedNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetMempoolEntriesRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetMempoolEntriesResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_ShutDownRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_ShutDownResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetHeadersRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetHeadersResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyUtxosChangedRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyUtxosChangedResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_UtxosChangedNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetUtxosByAddressesRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetUtxosByAddressesResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetVirtualSelectedParentBlueScoreRequest) isSpectredMessage_Payload() {} + +func 
(*SpectredMessage_GetVirtualSelectedParentBlueScoreResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest) isSpectredMessage_Payload() { +} + +func (*SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse) isSpectredMessage_Payload() { +} + +func (*SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification) isSpectredMessage_Payload() { +} + +func (*SpectredMessage_BanRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_BanResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_UnbanRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_UnbanResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetInfoRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetInfoResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_StopNotifyingUtxosChangedRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_StopNotifyingUtxosChangedResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_PruningPointUTXOSetOverrideNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse) isSpectredMessage_Payload() { +} + +func (*SpectredMessage_EstimateNetworkHashesPerSecondRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_EstimateNetworkHashesPerSecondResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyVirtualDaaScoreChangedRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyVirtualDaaScoreChangedResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_VirtualDaaScoreChangedNotification) 
isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBalanceByAddressRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBalanceByAddressResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBalancesByAddressesRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetBalancesByAddressesResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyNewBlockTemplateRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NotifyNewBlockTemplateResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_NewBlockTemplateNotification) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetMempoolEntriesByAddressesRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetMempoolEntriesByAddressesResponse) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetCoinSupplyRequest) isSpectredMessage_Payload() {} + +func (*SpectredMessage_GetCoinSupplyResponse) isSpectredMessage_Payload() {} + +var File_messages_proto protoreflect.FileDescriptor + +var file_messages_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x1a, 0x09, 0x70, 0x32, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x09, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xc1, 0x6d, 0x0a, 0x0f, 0x53, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x12, 0x41, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0c, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x50, 0x0a, 0x10, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x56, + 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, + 0x6c, 0x61, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x52, 0x65, 0x6c, 0x61, 0x79, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x59, 0x0a, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x13, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, + 0x69, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x47, 0x0a, 0x0d, 0x69, 0x6e, 0x76, 0x52, + 0x65, 0x6c, 0x61, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x52, + 0x65, 0x6c, 0x61, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x0d, 0x69, 0x6e, 0x76, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x12, 0x4d, 0x0a, 0x0f, 0x69, 0x6e, 0x76, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x49, 0x6e, 0x76, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x69, 0x6e, 0x76, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x2c, 0x0a, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x2c, + 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x50, 0x6f, 0x6e, 0x67, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x06, + 0x76, 0x65, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x56, 0x65, 0x72, 0x61, 0x63, 0x6b, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x06, 0x76, 0x65, 0x72, 0x61, 0x63, 0x6b, + 0x12, 0x35, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x13, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x46, + 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x13, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, + 0x6e, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x06, + 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 
0x12, 0x68, 0x0a, 0x18, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, + 0x6e, 0x6b, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x18, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, + 0x12, 0x50, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, + 0x44, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x12, 0x62, 0x0a, 0x16, 0x75, 0x6e, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x55, + 0x6e, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x16, + 0x75, 0x6e, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x4d, 0x0a, 0x0f, 0x69, 0x62, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 
0x49, 0x62, 0x64, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x6e, 0x0a, 0x1a, 0x69, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x49, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1a, 0x69, 0x62, 0x64, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x89, 0x01, 0x0a, 0x23, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, + 0x75, 0x6e, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x23, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x12, 0x77, 0x0a, 0x1d, 0x64, 0x6f, 0x6e, 0x65, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x73, 
0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x44, 0x6f, 0x6e, 0x65, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1d, 0x64, 0x6f, 0x6e, + 0x65, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, + 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x86, 0x01, 0x0a, 0x22, 0x69, + 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x69, + 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, + 0x64, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x49, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x6f, + 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x22, 0x69, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x12, 0x5c, 0x0a, 0x14, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, + 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x18, 0x24, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x6b, 0x0a, 0x19, 0x64, 0x6f, 0x6e, 
0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x18, 0x25, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x44, 0x6f, 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x57, 0x69, 0x74, 0x68, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x19, 0x64, 0x6f, 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x83, + 0x01, 0x0a, 0x21, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 0x73, 0x41, 0x6e, 0x74, 0x69, + 0x63, 0x6f, 0x6e, 0x65, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, + 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 0x73, + 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x21, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 0x73, 0x41, 0x6e, 0x74, 0x69, + 0x63, 0x6f, 0x6e, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x56, 0x0a, 0x12, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 
0x72, 0x73, + 0x18, 0x2a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x12, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x44, 0x6f, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x44, 0x6f, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x44, 0x6f, 0x6e, 0x65, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x6e, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, + 0x53, 0x65, 0x74, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x75, + 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, + 0x58, 0x4f, 0x53, 0x65, 0x74, 0x12, 0x4a, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x12, 0x59, 0x0a, 
0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x47, 0x0a, 0x0d, + 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x2f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x30, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, + 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, + 0x53, 0x0a, 0x11, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x31, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x69, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x11, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x32, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x61, 0x64, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x12, 0x62, 0x0a, 0x16, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, + 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x56, 0x34, 0x18, + 0x33, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x56, 0x34, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x16, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, 0x73, + 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x56, 0x34, 0x12, 0x41, 0x0a, 0x0b, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x18, 0x34, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x71, 0x0a, 0x1b, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x35, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 
0x48, 0x00, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x5c, 0x0a, 0x14, 0x69, 0x62, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x49, 0x62, 0x64, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x14, 0x69, 0x62, 0x64, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x4d, 0x0a, + 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, + 0x18, 0x37, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, + 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x12, 0xa1, 0x01, 0x0a, + 0x2b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 0x73, 0x41, 0x6e, + 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x38, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 0x73, 0x41, 0x6e, 0x74, 0x69, + 0x63, 0x6f, 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x2b, 0x72, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, + 0x74, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x12, 0x69, 0x0a, 0x18, 0x67, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xe9, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6c, 0x0a, 0x19, 0x67, + 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x19, + 0x67, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x73, 0x75, 0x62, + 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0xeb, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 
0x48, 0x00, 0x52, 0x12, + 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x13, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xec, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x75, 0x62, + 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x62, 0x6d, 0x69, + 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, + 0x0a, 0x17, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xed, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x17, 0x67, + 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x69, 0x0a, 0x18, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0xee, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 
0x66, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x41, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xef, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x17, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x69, 0x0a, 0x18, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xf0, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x16, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, + 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xf1, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x16, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x66, 0x0a, 0x17, 0x67, 0x65, 0x74, + 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0xf2, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, + 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x69, 0x0a, 0x18, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xf3, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xf4, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x19, 0x67, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, + 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6f, 0x0a, 0x1a, 0x67, 0x65, + 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, 
0x73, 0x68, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xf5, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x1a, 0x67, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, + 0x61, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x16, 0x67, + 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xf6, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, + 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, + 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x66, 0x0a, 0x17, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xf7, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x17, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1b, 0x67, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x18, 0xf8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x1b, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x75, 0x0a, 0x1c, + 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xf9, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1c, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xfa, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0e, 0x61, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4e, 0x0a, 0x0f, 0x61, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0xfb, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 
0x50, 0x65, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x61, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x69, 0x0a, 0x18, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xfc, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x18, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6c, 0x0a, 0x19, 0x73, + 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xfd, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x6d, + 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x19, + 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xab, 0x01, 0x0a, 0x2e, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xfe, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x2e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0xae, 0x01, 0x0a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xff, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x2d, 0x76, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x80, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x2d, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x81, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x0f, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x82, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x10, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x14, 0x67, 0x65, 0x74, 0x53, 0x75, 0x62, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x83, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x14, 0x67, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x84, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x15, 0x67, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa8, 0x01, 0x0a, 0x2d, 0x67, 0x65, 0x74, 0x56, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x85, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x2d, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 
0x12, 0xab, 0x01, 0x0a, 0x2e, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x86, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, + 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x2e, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, + 0x72, 0x6f, 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x51, 0x0a, 0x10, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x87, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x10, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x11, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x88, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11, 
0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x14, 0x67, 0x65, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x89, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x8a, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x15, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x16, 0x67, 0x65, + 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x8b, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x44, 0x61, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x44, 0x61, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x66, 0x0a, 0x17, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x67, 
0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x8c, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, + 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x17, + 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1e, 0x72, 0x65, 0x73, 0x6f, 0x6c, + 0x76, 0x65, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x8d, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x1e, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x46, 0x69, 0x6e, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x7e, 0x0a, 0x1f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x46, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x8e, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x76, 0x65, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x1f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x46, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x74, 0x79, 
0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x46, 0x69, + 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x8f, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, + 0x00, 0x52, 0x1e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x7e, 0x0a, 0x1f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x46, 0x69, 0x6e, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x90, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x46, 0x69, + 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x1f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x75, 0x0a, 0x1c, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x91, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 
0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1c, 0x66, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x24, 0x66, 0x69, 0x6e, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x24, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, + 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x69, 0x0a, 0x18, 0x67, 0x65, 0x74, 0x4d, + 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x93, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, + 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x4d, 0x65, + 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x6c, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, + 
0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x94, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x19, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, + 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x95, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0f, 0x73, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x51, 0x0a, 0x10, 0x73, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x96, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x10, 0x73, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x11, 0x67, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x97, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 
0x00, 0x52, 0x11, 0x67, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x12, 0x67, 0x65, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x98, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, + 0x12, 0x67, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x19, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x74, 0x78, + 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x19, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x74, + 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x6f, 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x74, 0x78, 0x6f, 0x73, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x9a, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x74, + 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x69, 0x0a, 0x18, 0x75, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x9b, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x18, 0x75, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x6f, 0x0a, + 0x1a, 0x67, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x9c, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x1a, 0x67, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x72, + 0x0a, 0x1b, 0x67, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x9d, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x67, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, + 0x79, 0x41, 
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x99, 0x01, 0x0a, 0x28, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, + 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x9e, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, + 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x28, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, + 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x9c, + 0x01, 0x0a, 0x29, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, + 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x9f, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x29, 0x67, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, + 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xb7, 0x01, + 0x0a, 0x32, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, + 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0xa0, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, + 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x32, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0xba, 0x01, 0x0a, 0x33, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0xa1, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, + 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 
0x00, 0x52, + 0x33, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, + 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x31, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, + 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xa2, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x43, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x56, 0x69, + 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x31, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, + 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0a, 0x62, + 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xa3, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x0a, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0b, + 0x62, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xa4, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, + 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x0b, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x45, 0x0a, 0x0c, 0x75, 0x6e, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0xa5, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x55, 0x6e, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x75, 0x6e, 0x62, 0x61, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x0d, 0x75, 0x6e, 0x62, 0x61, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xa6, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x55, 0x6e, 0x62, 0x61, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x0d, 0x75, 0x6e, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0xa7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x0e, + 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, + 0x0a, 0x0f, 0x67, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0xa8, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x48, 0x00, 0x52, 0x0f, 0x67, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, + 0x01, 0x0a, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, + 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0xa9, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x79, 0x69, 0x6e, 0x67, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x55, + 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x84, 0x01, 0x0a, 0x21, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x79, 0x69, 0x6e, 0x67, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xaa, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x70, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x21, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x69, 0x6e, 0x67, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x99, 0x01, 0x0a, 0x28, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x79, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, + 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xab, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, + 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x28, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x79, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, + 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x9c, 0x01, 0x0a, 0x29, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, + 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0xac, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x72, 0x75, + 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, + 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x29, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x79, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, + 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x27, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xad, 0x08, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x39, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x27, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, + 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xae, 0x01, + 0x0a, 0x2f, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x50, + 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, + 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0xae, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, + 0x6e, 0x67, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, + 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x2f, 0x73, + 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x75, 0x6e, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0xb1, + 0x01, 0x0a, 0x30, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, + 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, + 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 
0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0xaf, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x79, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, + 0x52, 0x30, 0x73, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x50, + 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, + 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x90, 0x01, 0x0a, 0x25, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xb0, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, + 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x25, + 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, + 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x93, 0x01, 0x0a, 0x26, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, + 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, + 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0xb1, 
0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x26, 0x65, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x23, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, + 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0xb2, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x23, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x8d, 0x01, 0x0a, 0x24, 0x6e, 0x6f, 0x74, + 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, + 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0xb3, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 
0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x24, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x22, 0x76, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xb4, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, + 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x22, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x6f, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0xb5, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1a, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x72, 0x0a, 0x1b, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 
0x6e, 0x63, + 0x65, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0xb6, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x67, 0x65, 0x74, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x1d, 0x67, 0x65, 0x74, 0x42, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0xb7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x1d, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, + 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x7b, 0x0a, 0x1e, 0x67, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0xb8, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1e, + 0x67, 0x65, 0x74, 0x42, 
0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, + 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x4e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0xb9, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x4e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x79, 0x4e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x7b, 0x0a, 0x1e, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x4e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0xba, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x79, 0x4e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x4e, 0x65, 0x77, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x1c, 0x6e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xbb, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 
0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x1c, + 0x6e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8a, 0x01, 0x0a, + 0x23, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0xbc, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, + 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x23, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x8d, 0x01, 0x0a, 0x24, 0x67, 0x65, + 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, + 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0xbd, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x24, 0x67, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, + 
0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x14, 0x67, 0x65, 0x74, + 0x43, 0x6f, 0x69, 0x6e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0xbe, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x69, 0x6e, 0x53, 0x75, 0x70, 0x70, + 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x69, 0x6e, 0x53, 0x75, 0x70, 0x70, 0x6c, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x60, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x43, + 0x6f, 0x69, 0x6e, 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0xbf, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x69, 0x6e, 0x53, 0x75, 0x70, 0x70, + 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x48, 0x00, 0x52, 0x15, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x69, 0x6e, 0x53, 0x75, 0x70, 0x70, + 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x54, 0x0a, 0x03, 0x50, 0x32, 0x50, 0x12, 0x4d, 0x0a, 0x0d, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1a, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x70, 0x65, 0x63, 0x74, 0x72, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0x54, 0x0a, 0x03, 0x52, + 0x50, 0x43, 0x12, 0x4d, 0x0a, 0x0d, 
0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x53, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, + 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x70, 0x65, 0x63, + 0x74, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_messages_proto_rawDescOnce sync.Once + file_messages_proto_rawDescData = file_messages_proto_rawDesc +) + +func file_messages_proto_rawDescGZIP() []byte { + file_messages_proto_rawDescOnce.Do(func() { + file_messages_proto_rawDescData = protoimpl.X.CompressGZIP(file_messages_proto_rawDescData) + }) + return file_messages_proto_rawDescData +} + +var file_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_messages_proto_goTypes = []interface{}{ + (*SpectredMessage)(nil), // 0: protowire.SpectredMessage + (*AddressesMessage)(nil), // 1: protowire.AddressesMessage + (*BlockMessage)(nil), // 2: protowire.BlockMessage + (*TransactionMessage)(nil), // 3: protowire.TransactionMessage + (*BlockLocatorMessage)(nil), // 4: protowire.BlockLocatorMessage + (*RequestAddressesMessage)(nil), // 5: protowire.RequestAddressesMessage + (*RequestRelayBlocksMessage)(nil), // 6: protowire.RequestRelayBlocksMessage + (*RequestTransactionsMessage)(nil), // 7: protowire.RequestTransactionsMessage + (*InvRelayBlockMessage)(nil), // 8: protowire.InvRelayBlockMessage + (*InvTransactionsMessage)(nil), // 9: protowire.InvTransactionsMessage + (*PingMessage)(nil), // 
10: protowire.PingMessage + (*PongMessage)(nil), // 11: protowire.PongMessage + (*VerackMessage)(nil), // 12: protowire.VerackMessage + (*VersionMessage)(nil), // 13: protowire.VersionMessage + (*TransactionNotFoundMessage)(nil), // 14: protowire.TransactionNotFoundMessage + (*RejectMessage)(nil), // 15: protowire.RejectMessage + (*PruningPointUtxoSetChunkMessage)(nil), // 16: protowire.PruningPointUtxoSetChunkMessage + (*RequestIBDBlocksMessage)(nil), // 17: protowire.RequestIBDBlocksMessage + (*UnexpectedPruningPointMessage)(nil), // 18: protowire.UnexpectedPruningPointMessage + (*IbdBlockLocatorMessage)(nil), // 19: protowire.IbdBlockLocatorMessage + (*IbdBlockLocatorHighestHashMessage)(nil), // 20: protowire.IbdBlockLocatorHighestHashMessage + (*RequestNextPruningPointUtxoSetChunkMessage)(nil), // 21: protowire.RequestNextPruningPointUtxoSetChunkMessage + (*DonePruningPointUtxoSetChunksMessage)(nil), // 22: protowire.DonePruningPointUtxoSetChunksMessage + (*IbdBlockLocatorHighestHashNotFoundMessage)(nil), // 23: protowire.IbdBlockLocatorHighestHashNotFoundMessage + (*BlockWithTrustedDataMessage)(nil), // 24: protowire.BlockWithTrustedDataMessage + (*DoneBlocksWithTrustedDataMessage)(nil), // 25: protowire.DoneBlocksWithTrustedDataMessage + (*RequestPruningPointAndItsAnticoneMessage)(nil), // 26: protowire.RequestPruningPointAndItsAnticoneMessage + (*BlockHeadersMessage)(nil), // 27: protowire.BlockHeadersMessage + (*RequestNextHeadersMessage)(nil), // 28: protowire.RequestNextHeadersMessage + (*DoneHeadersMessage)(nil), // 29: protowire.DoneHeadersMessage + (*RequestPruningPointUTXOSetMessage)(nil), // 30: protowire.RequestPruningPointUTXOSetMessage + (*RequestHeadersMessage)(nil), // 31: protowire.RequestHeadersMessage + (*RequestBlockLocatorMessage)(nil), // 32: protowire.RequestBlockLocatorMessage + (*PruningPointsMessage)(nil), // 33: protowire.PruningPointsMessage + (*RequestPruningPointProofMessage)(nil), // 34: protowire.RequestPruningPointProofMessage + 
(*PruningPointProofMessage)(nil), // 35: protowire.PruningPointProofMessage + (*ReadyMessage)(nil), // 36: protowire.ReadyMessage + (*BlockWithTrustedDataV4Message)(nil), // 37: protowire.BlockWithTrustedDataV4Message + (*TrustedDataMessage)(nil), // 38: protowire.TrustedDataMessage + (*RequestIBDChainBlockLocatorMessage)(nil), // 39: protowire.RequestIBDChainBlockLocatorMessage + (*IbdChainBlockLocatorMessage)(nil), // 40: protowire.IbdChainBlockLocatorMessage + (*RequestAnticoneMessage)(nil), // 41: protowire.RequestAnticoneMessage + (*RequestNextPruningPointAndItsAnticoneBlocksMessage)(nil), // 42: protowire.RequestNextPruningPointAndItsAnticoneBlocksMessage + (*GetCurrentNetworkRequestMessage)(nil), // 43: protowire.GetCurrentNetworkRequestMessage + (*GetCurrentNetworkResponseMessage)(nil), // 44: protowire.GetCurrentNetworkResponseMessage + (*SubmitBlockRequestMessage)(nil), // 45: protowire.SubmitBlockRequestMessage + (*SubmitBlockResponseMessage)(nil), // 46: protowire.SubmitBlockResponseMessage + (*GetBlockTemplateRequestMessage)(nil), // 47: protowire.GetBlockTemplateRequestMessage + (*GetBlockTemplateResponseMessage)(nil), // 48: protowire.GetBlockTemplateResponseMessage + (*NotifyBlockAddedRequestMessage)(nil), // 49: protowire.NotifyBlockAddedRequestMessage + (*NotifyBlockAddedResponseMessage)(nil), // 50: protowire.NotifyBlockAddedResponseMessage + (*BlockAddedNotificationMessage)(nil), // 51: protowire.BlockAddedNotificationMessage + (*GetPeerAddressesRequestMessage)(nil), // 52: protowire.GetPeerAddressesRequestMessage + (*GetPeerAddressesResponseMessage)(nil), // 53: protowire.GetPeerAddressesResponseMessage + (*GetSelectedTipHashRequestMessage)(nil), // 54: protowire.GetSelectedTipHashRequestMessage + (*GetSelectedTipHashResponseMessage)(nil), // 55: protowire.GetSelectedTipHashResponseMessage + (*GetMempoolEntryRequestMessage)(nil), // 56: protowire.GetMempoolEntryRequestMessage + (*GetMempoolEntryResponseMessage)(nil), // 57: 
protowire.GetMempoolEntryResponseMessage + (*GetConnectedPeerInfoRequestMessage)(nil), // 58: protowire.GetConnectedPeerInfoRequestMessage + (*GetConnectedPeerInfoResponseMessage)(nil), // 59: protowire.GetConnectedPeerInfoResponseMessage + (*AddPeerRequestMessage)(nil), // 60: protowire.AddPeerRequestMessage + (*AddPeerResponseMessage)(nil), // 61: protowire.AddPeerResponseMessage + (*SubmitTransactionRequestMessage)(nil), // 62: protowire.SubmitTransactionRequestMessage + (*SubmitTransactionResponseMessage)(nil), // 63: protowire.SubmitTransactionResponseMessage + (*NotifyVirtualSelectedParentChainChangedRequestMessage)(nil), // 64: protowire.NotifyVirtualSelectedParentChainChangedRequestMessage + (*NotifyVirtualSelectedParentChainChangedResponseMessage)(nil), // 65: protowire.NotifyVirtualSelectedParentChainChangedResponseMessage + (*VirtualSelectedParentChainChangedNotificationMessage)(nil), // 66: protowire.VirtualSelectedParentChainChangedNotificationMessage + (*GetBlockRequestMessage)(nil), // 67: protowire.GetBlockRequestMessage + (*GetBlockResponseMessage)(nil), // 68: protowire.GetBlockResponseMessage + (*GetSubnetworkRequestMessage)(nil), // 69: protowire.GetSubnetworkRequestMessage + (*GetSubnetworkResponseMessage)(nil), // 70: protowire.GetSubnetworkResponseMessage + (*GetVirtualSelectedParentChainFromBlockRequestMessage)(nil), // 71: protowire.GetVirtualSelectedParentChainFromBlockRequestMessage + (*GetVirtualSelectedParentChainFromBlockResponseMessage)(nil), // 72: protowire.GetVirtualSelectedParentChainFromBlockResponseMessage + (*GetBlocksRequestMessage)(nil), // 73: protowire.GetBlocksRequestMessage + (*GetBlocksResponseMessage)(nil), // 74: protowire.GetBlocksResponseMessage + (*GetBlockCountRequestMessage)(nil), // 75: protowire.GetBlockCountRequestMessage + (*GetBlockCountResponseMessage)(nil), // 76: protowire.GetBlockCountResponseMessage + (*GetBlockDagInfoRequestMessage)(nil), // 77: protowire.GetBlockDagInfoRequestMessage + 
(*GetBlockDagInfoResponseMessage)(nil), // 78: protowire.GetBlockDagInfoResponseMessage + (*ResolveFinalityConflictRequestMessage)(nil), // 79: protowire.ResolveFinalityConflictRequestMessage + (*ResolveFinalityConflictResponseMessage)(nil), // 80: protowire.ResolveFinalityConflictResponseMessage + (*NotifyFinalityConflictsRequestMessage)(nil), // 81: protowire.NotifyFinalityConflictsRequestMessage + (*NotifyFinalityConflictsResponseMessage)(nil), // 82: protowire.NotifyFinalityConflictsResponseMessage + (*FinalityConflictNotificationMessage)(nil), // 83: protowire.FinalityConflictNotificationMessage + (*FinalityConflictResolvedNotificationMessage)(nil), // 84: protowire.FinalityConflictResolvedNotificationMessage + (*GetMempoolEntriesRequestMessage)(nil), // 85: protowire.GetMempoolEntriesRequestMessage + (*GetMempoolEntriesResponseMessage)(nil), // 86: protowire.GetMempoolEntriesResponseMessage + (*ShutDownRequestMessage)(nil), // 87: protowire.ShutDownRequestMessage + (*ShutDownResponseMessage)(nil), // 88: protowire.ShutDownResponseMessage + (*GetHeadersRequestMessage)(nil), // 89: protowire.GetHeadersRequestMessage + (*GetHeadersResponseMessage)(nil), // 90: protowire.GetHeadersResponseMessage + (*NotifyUtxosChangedRequestMessage)(nil), // 91: protowire.NotifyUtxosChangedRequestMessage + (*NotifyUtxosChangedResponseMessage)(nil), // 92: protowire.NotifyUtxosChangedResponseMessage + (*UtxosChangedNotificationMessage)(nil), // 93: protowire.UtxosChangedNotificationMessage + (*GetUtxosByAddressesRequestMessage)(nil), // 94: protowire.GetUtxosByAddressesRequestMessage + (*GetUtxosByAddressesResponseMessage)(nil), // 95: protowire.GetUtxosByAddressesResponseMessage + (*GetVirtualSelectedParentBlueScoreRequestMessage)(nil), // 96: protowire.GetVirtualSelectedParentBlueScoreRequestMessage + (*GetVirtualSelectedParentBlueScoreResponseMessage)(nil), // 97: protowire.GetVirtualSelectedParentBlueScoreResponseMessage + 
(*NotifyVirtualSelectedParentBlueScoreChangedRequestMessage)(nil), // 98: protowire.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage + (*NotifyVirtualSelectedParentBlueScoreChangedResponseMessage)(nil), // 99: protowire.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage + (*VirtualSelectedParentBlueScoreChangedNotificationMessage)(nil), // 100: protowire.VirtualSelectedParentBlueScoreChangedNotificationMessage + (*BanRequestMessage)(nil), // 101: protowire.BanRequestMessage + (*BanResponseMessage)(nil), // 102: protowire.BanResponseMessage + (*UnbanRequestMessage)(nil), // 103: protowire.UnbanRequestMessage + (*UnbanResponseMessage)(nil), // 104: protowire.UnbanResponseMessage + (*GetInfoRequestMessage)(nil), // 105: protowire.GetInfoRequestMessage + (*GetInfoResponseMessage)(nil), // 106: protowire.GetInfoResponseMessage + (*StopNotifyingUtxosChangedRequestMessage)(nil), // 107: protowire.StopNotifyingUtxosChangedRequestMessage + (*StopNotifyingUtxosChangedResponseMessage)(nil), // 108: protowire.StopNotifyingUtxosChangedResponseMessage + (*NotifyPruningPointUTXOSetOverrideRequestMessage)(nil), // 109: protowire.NotifyPruningPointUTXOSetOverrideRequestMessage + (*NotifyPruningPointUTXOSetOverrideResponseMessage)(nil), // 110: protowire.NotifyPruningPointUTXOSetOverrideResponseMessage + (*PruningPointUTXOSetOverrideNotificationMessage)(nil), // 111: protowire.PruningPointUTXOSetOverrideNotificationMessage + (*StopNotifyingPruningPointUTXOSetOverrideRequestMessage)(nil), // 112: protowire.StopNotifyingPruningPointUTXOSetOverrideRequestMessage + (*StopNotifyingPruningPointUTXOSetOverrideResponseMessage)(nil), // 113: protowire.StopNotifyingPruningPointUTXOSetOverrideResponseMessage + (*EstimateNetworkHashesPerSecondRequestMessage)(nil), // 114: protowire.EstimateNetworkHashesPerSecondRequestMessage + (*EstimateNetworkHashesPerSecondResponseMessage)(nil), // 115: protowire.EstimateNetworkHashesPerSecondResponseMessage + 
(*NotifyVirtualDaaScoreChangedRequestMessage)(nil), // 116: protowire.NotifyVirtualDaaScoreChangedRequestMessage + (*NotifyVirtualDaaScoreChangedResponseMessage)(nil), // 117: protowire.NotifyVirtualDaaScoreChangedResponseMessage + (*VirtualDaaScoreChangedNotificationMessage)(nil), // 118: protowire.VirtualDaaScoreChangedNotificationMessage + (*GetBalanceByAddressRequestMessage)(nil), // 119: protowire.GetBalanceByAddressRequestMessage + (*GetBalanceByAddressResponseMessage)(nil), // 120: protowire.GetBalanceByAddressResponseMessage + (*GetBalancesByAddressesRequestMessage)(nil), // 121: protowire.GetBalancesByAddressesRequestMessage + (*GetBalancesByAddressesResponseMessage)(nil), // 122: protowire.GetBalancesByAddressesResponseMessage + (*NotifyNewBlockTemplateRequestMessage)(nil), // 123: protowire.NotifyNewBlockTemplateRequestMessage + (*NotifyNewBlockTemplateResponseMessage)(nil), // 124: protowire.NotifyNewBlockTemplateResponseMessage + (*NewBlockTemplateNotificationMessage)(nil), // 125: protowire.NewBlockTemplateNotificationMessage + (*GetMempoolEntriesByAddressesRequestMessage)(nil), // 126: protowire.GetMempoolEntriesByAddressesRequestMessage + (*GetMempoolEntriesByAddressesResponseMessage)(nil), // 127: protowire.GetMempoolEntriesByAddressesResponseMessage + (*GetCoinSupplyRequestMessage)(nil), // 128: protowire.GetCoinSupplyRequestMessage + (*GetCoinSupplyResponseMessage)(nil), // 129: protowire.GetCoinSupplyResponseMessage +} +var file_messages_proto_depIdxs = []int32{ + 1, // 0: protowire.SpectredMessage.addresses:type_name -> protowire.AddressesMessage + 2, // 1: protowire.SpectredMessage.block:type_name -> protowire.BlockMessage + 3, // 2: protowire.SpectredMessage.transaction:type_name -> protowire.TransactionMessage + 4, // 3: protowire.SpectredMessage.blockLocator:type_name -> protowire.BlockLocatorMessage + 5, // 4: protowire.SpectredMessage.requestAddresses:type_name -> protowire.RequestAddressesMessage + 6, // 5: 
protowire.SpectredMessage.requestRelayBlocks:type_name -> protowire.RequestRelayBlocksMessage + 7, // 6: protowire.SpectredMessage.requestTransactions:type_name -> protowire.RequestTransactionsMessage + 2, // 7: protowire.SpectredMessage.ibdBlock:type_name -> protowire.BlockMessage + 8, // 8: protowire.SpectredMessage.invRelayBlock:type_name -> protowire.InvRelayBlockMessage + 9, // 9: protowire.SpectredMessage.invTransactions:type_name -> protowire.InvTransactionsMessage + 10, // 10: protowire.SpectredMessage.ping:type_name -> protowire.PingMessage + 11, // 11: protowire.SpectredMessage.pong:type_name -> protowire.PongMessage + 12, // 12: protowire.SpectredMessage.verack:type_name -> protowire.VerackMessage + 13, // 13: protowire.SpectredMessage.version:type_name -> protowire.VersionMessage + 14, // 14: protowire.SpectredMessage.transactionNotFound:type_name -> protowire.TransactionNotFoundMessage + 15, // 15: protowire.SpectredMessage.reject:type_name -> protowire.RejectMessage + 16, // 16: protowire.SpectredMessage.pruningPointUtxoSetChunk:type_name -> protowire.PruningPointUtxoSetChunkMessage + 17, // 17: protowire.SpectredMessage.requestIBDBlocks:type_name -> protowire.RequestIBDBlocksMessage + 18, // 18: protowire.SpectredMessage.unexpectedPruningPoint:type_name -> protowire.UnexpectedPruningPointMessage + 19, // 19: protowire.SpectredMessage.ibdBlockLocator:type_name -> protowire.IbdBlockLocatorMessage + 20, // 20: protowire.SpectredMessage.ibdBlockLocatorHighestHash:type_name -> protowire.IbdBlockLocatorHighestHashMessage + 21, // 21: protowire.SpectredMessage.requestNextPruningPointUtxoSetChunk:type_name -> protowire.RequestNextPruningPointUtxoSetChunkMessage + 22, // 22: protowire.SpectredMessage.donePruningPointUtxoSetChunks:type_name -> protowire.DonePruningPointUtxoSetChunksMessage + 23, // 23: protowire.SpectredMessage.ibdBlockLocatorHighestHashNotFound:type_name -> protowire.IbdBlockLocatorHighestHashNotFoundMessage + 24, // 24: 
protowire.SpectredMessage.blockWithTrustedData:type_name -> protowire.BlockWithTrustedDataMessage + 25, // 25: protowire.SpectredMessage.doneBlocksWithTrustedData:type_name -> protowire.DoneBlocksWithTrustedDataMessage + 26, // 26: protowire.SpectredMessage.requestPruningPointAndItsAnticone:type_name -> protowire.RequestPruningPointAndItsAnticoneMessage + 27, // 27: protowire.SpectredMessage.blockHeaders:type_name -> protowire.BlockHeadersMessage + 28, // 28: protowire.SpectredMessage.requestNextHeaders:type_name -> protowire.RequestNextHeadersMessage + 29, // 29: protowire.SpectredMessage.DoneHeaders:type_name -> protowire.DoneHeadersMessage + 30, // 30: protowire.SpectredMessage.requestPruningPointUTXOSet:type_name -> protowire.RequestPruningPointUTXOSetMessage + 31, // 31: protowire.SpectredMessage.requestHeaders:type_name -> protowire.RequestHeadersMessage + 32, // 32: protowire.SpectredMessage.requestBlockLocator:type_name -> protowire.RequestBlockLocatorMessage + 33, // 33: protowire.SpectredMessage.pruningPoints:type_name -> protowire.PruningPointsMessage + 34, // 34: protowire.SpectredMessage.requestPruningPointProof:type_name -> protowire.RequestPruningPointProofMessage + 35, // 35: protowire.SpectredMessage.pruningPointProof:type_name -> protowire.PruningPointProofMessage + 36, // 36: protowire.SpectredMessage.ready:type_name -> protowire.ReadyMessage + 37, // 37: protowire.SpectredMessage.blockWithTrustedDataV4:type_name -> protowire.BlockWithTrustedDataV4Message + 38, // 38: protowire.SpectredMessage.trustedData:type_name -> protowire.TrustedDataMessage + 39, // 39: protowire.SpectredMessage.requestIBDChainBlockLocator:type_name -> protowire.RequestIBDChainBlockLocatorMessage + 40, // 40: protowire.SpectredMessage.ibdChainBlockLocator:type_name -> protowire.IbdChainBlockLocatorMessage + 41, // 41: protowire.SpectredMessage.requestAnticone:type_name -> protowire.RequestAnticoneMessage + 42, // 42: 
protowire.SpectredMessage.requestNextPruningPointAndItsAnticoneBlocks:type_name -> protowire.RequestNextPruningPointAndItsAnticoneBlocksMessage + 43, // 43: protowire.SpectredMessage.getCurrentNetworkRequest:type_name -> protowire.GetCurrentNetworkRequestMessage + 44, // 44: protowire.SpectredMessage.getCurrentNetworkResponse:type_name -> protowire.GetCurrentNetworkResponseMessage + 45, // 45: protowire.SpectredMessage.submitBlockRequest:type_name -> protowire.SubmitBlockRequestMessage + 46, // 46: protowire.SpectredMessage.submitBlockResponse:type_name -> protowire.SubmitBlockResponseMessage + 47, // 47: protowire.SpectredMessage.getBlockTemplateRequest:type_name -> protowire.GetBlockTemplateRequestMessage + 48, // 48: protowire.SpectredMessage.getBlockTemplateResponse:type_name -> protowire.GetBlockTemplateResponseMessage + 49, // 49: protowire.SpectredMessage.notifyBlockAddedRequest:type_name -> protowire.NotifyBlockAddedRequestMessage + 50, // 50: protowire.SpectredMessage.notifyBlockAddedResponse:type_name -> protowire.NotifyBlockAddedResponseMessage + 51, // 51: protowire.SpectredMessage.blockAddedNotification:type_name -> protowire.BlockAddedNotificationMessage + 52, // 52: protowire.SpectredMessage.getPeerAddressesRequest:type_name -> protowire.GetPeerAddressesRequestMessage + 53, // 53: protowire.SpectredMessage.getPeerAddressesResponse:type_name -> protowire.GetPeerAddressesResponseMessage + 54, // 54: protowire.SpectredMessage.getSelectedTipHashRequest:type_name -> protowire.GetSelectedTipHashRequestMessage + 55, // 55: protowire.SpectredMessage.getSelectedTipHashResponse:type_name -> protowire.GetSelectedTipHashResponseMessage + 56, // 56: protowire.SpectredMessage.getMempoolEntryRequest:type_name -> protowire.GetMempoolEntryRequestMessage + 57, // 57: protowire.SpectredMessage.getMempoolEntryResponse:type_name -> protowire.GetMempoolEntryResponseMessage + 58, // 58: protowire.SpectredMessage.getConnectedPeerInfoRequest:type_name -> 
protowire.GetConnectedPeerInfoRequestMessage + 59, // 59: protowire.SpectredMessage.getConnectedPeerInfoResponse:type_name -> protowire.GetConnectedPeerInfoResponseMessage + 60, // 60: protowire.SpectredMessage.addPeerRequest:type_name -> protowire.AddPeerRequestMessage + 61, // 61: protowire.SpectredMessage.addPeerResponse:type_name -> protowire.AddPeerResponseMessage + 62, // 62: protowire.SpectredMessage.submitTransactionRequest:type_name -> protowire.SubmitTransactionRequestMessage + 63, // 63: protowire.SpectredMessage.submitTransactionResponse:type_name -> protowire.SubmitTransactionResponseMessage + 64, // 64: protowire.SpectredMessage.notifyVirtualSelectedParentChainChangedRequest:type_name -> protowire.NotifyVirtualSelectedParentChainChangedRequestMessage + 65, // 65: protowire.SpectredMessage.notifyVirtualSelectedParentChainChangedResponse:type_name -> protowire.NotifyVirtualSelectedParentChainChangedResponseMessage + 66, // 66: protowire.SpectredMessage.virtualSelectedParentChainChangedNotification:type_name -> protowire.VirtualSelectedParentChainChangedNotificationMessage + 67, // 67: protowire.SpectredMessage.getBlockRequest:type_name -> protowire.GetBlockRequestMessage + 68, // 68: protowire.SpectredMessage.getBlockResponse:type_name -> protowire.GetBlockResponseMessage + 69, // 69: protowire.SpectredMessage.getSubnetworkRequest:type_name -> protowire.GetSubnetworkRequestMessage + 70, // 70: protowire.SpectredMessage.getSubnetworkResponse:type_name -> protowire.GetSubnetworkResponseMessage + 71, // 71: protowire.SpectredMessage.getVirtualSelectedParentChainFromBlockRequest:type_name -> protowire.GetVirtualSelectedParentChainFromBlockRequestMessage + 72, // 72: protowire.SpectredMessage.getVirtualSelectedParentChainFromBlockResponse:type_name -> protowire.GetVirtualSelectedParentChainFromBlockResponseMessage + 73, // 73: protowire.SpectredMessage.getBlocksRequest:type_name -> protowire.GetBlocksRequestMessage + 74, // 74: 
protowire.SpectredMessage.getBlocksResponse:type_name -> protowire.GetBlocksResponseMessage + 75, // 75: protowire.SpectredMessage.getBlockCountRequest:type_name -> protowire.GetBlockCountRequestMessage + 76, // 76: protowire.SpectredMessage.getBlockCountResponse:type_name -> protowire.GetBlockCountResponseMessage + 77, // 77: protowire.SpectredMessage.getBlockDagInfoRequest:type_name -> protowire.GetBlockDagInfoRequestMessage + 78, // 78: protowire.SpectredMessage.getBlockDagInfoResponse:type_name -> protowire.GetBlockDagInfoResponseMessage + 79, // 79: protowire.SpectredMessage.resolveFinalityConflictRequest:type_name -> protowire.ResolveFinalityConflictRequestMessage + 80, // 80: protowire.SpectredMessage.resolveFinalityConflictResponse:type_name -> protowire.ResolveFinalityConflictResponseMessage + 81, // 81: protowire.SpectredMessage.notifyFinalityConflictsRequest:type_name -> protowire.NotifyFinalityConflictsRequestMessage + 82, // 82: protowire.SpectredMessage.notifyFinalityConflictsResponse:type_name -> protowire.NotifyFinalityConflictsResponseMessage + 83, // 83: protowire.SpectredMessage.finalityConflictNotification:type_name -> protowire.FinalityConflictNotificationMessage + 84, // 84: protowire.SpectredMessage.finalityConflictResolvedNotification:type_name -> protowire.FinalityConflictResolvedNotificationMessage + 85, // 85: protowire.SpectredMessage.getMempoolEntriesRequest:type_name -> protowire.GetMempoolEntriesRequestMessage + 86, // 86: protowire.SpectredMessage.getMempoolEntriesResponse:type_name -> protowire.GetMempoolEntriesResponseMessage + 87, // 87: protowire.SpectredMessage.shutDownRequest:type_name -> protowire.ShutDownRequestMessage + 88, // 88: protowire.SpectredMessage.shutDownResponse:type_name -> protowire.ShutDownResponseMessage + 89, // 89: protowire.SpectredMessage.getHeadersRequest:type_name -> protowire.GetHeadersRequestMessage + 90, // 90: protowire.SpectredMessage.getHeadersResponse:type_name -> 
protowire.GetHeadersResponseMessage + 91, // 91: protowire.SpectredMessage.notifyUtxosChangedRequest:type_name -> protowire.NotifyUtxosChangedRequestMessage + 92, // 92: protowire.SpectredMessage.notifyUtxosChangedResponse:type_name -> protowire.NotifyUtxosChangedResponseMessage + 93, // 93: protowire.SpectredMessage.utxosChangedNotification:type_name -> protowire.UtxosChangedNotificationMessage + 94, // 94: protowire.SpectredMessage.getUtxosByAddressesRequest:type_name -> protowire.GetUtxosByAddressesRequestMessage + 95, // 95: protowire.SpectredMessage.getUtxosByAddressesResponse:type_name -> protowire.GetUtxosByAddressesResponseMessage + 96, // 96: protowire.SpectredMessage.getVirtualSelectedParentBlueScoreRequest:type_name -> protowire.GetVirtualSelectedParentBlueScoreRequestMessage + 97, // 97: protowire.SpectredMessage.getVirtualSelectedParentBlueScoreResponse:type_name -> protowire.GetVirtualSelectedParentBlueScoreResponseMessage + 98, // 98: protowire.SpectredMessage.notifyVirtualSelectedParentBlueScoreChangedRequest:type_name -> protowire.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage + 99, // 99: protowire.SpectredMessage.notifyVirtualSelectedParentBlueScoreChangedResponse:type_name -> protowire.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage + 100, // 100: protowire.SpectredMessage.virtualSelectedParentBlueScoreChangedNotification:type_name -> protowire.VirtualSelectedParentBlueScoreChangedNotificationMessage + 101, // 101: protowire.SpectredMessage.banRequest:type_name -> protowire.BanRequestMessage + 102, // 102: protowire.SpectredMessage.banResponse:type_name -> protowire.BanResponseMessage + 103, // 103: protowire.SpectredMessage.unbanRequest:type_name -> protowire.UnbanRequestMessage + 104, // 104: protowire.SpectredMessage.unbanResponse:type_name -> protowire.UnbanResponseMessage + 105, // 105: protowire.SpectredMessage.getInfoRequest:type_name -> protowire.GetInfoRequestMessage + 106, // 106: 
protowire.SpectredMessage.getInfoResponse:type_name -> protowire.GetInfoResponseMessage + 107, // 107: protowire.SpectredMessage.stopNotifyingUtxosChangedRequest:type_name -> protowire.StopNotifyingUtxosChangedRequestMessage + 108, // 108: protowire.SpectredMessage.stopNotifyingUtxosChangedResponse:type_name -> protowire.StopNotifyingUtxosChangedResponseMessage + 109, // 109: protowire.SpectredMessage.notifyPruningPointUTXOSetOverrideRequest:type_name -> protowire.NotifyPruningPointUTXOSetOverrideRequestMessage + 110, // 110: protowire.SpectredMessage.notifyPruningPointUTXOSetOverrideResponse:type_name -> protowire.NotifyPruningPointUTXOSetOverrideResponseMessage + 111, // 111: protowire.SpectredMessage.pruningPointUTXOSetOverrideNotification:type_name -> protowire.PruningPointUTXOSetOverrideNotificationMessage + 112, // 112: protowire.SpectredMessage.stopNotifyingPruningPointUTXOSetOverrideRequest:type_name -> protowire.StopNotifyingPruningPointUTXOSetOverrideRequestMessage + 113, // 113: protowire.SpectredMessage.stopNotifyingPruningPointUTXOSetOverrideResponse:type_name -> protowire.StopNotifyingPruningPointUTXOSetOverrideResponseMessage + 114, // 114: protowire.SpectredMessage.estimateNetworkHashesPerSecondRequest:type_name -> protowire.EstimateNetworkHashesPerSecondRequestMessage + 115, // 115: protowire.SpectredMessage.estimateNetworkHashesPerSecondResponse:type_name -> protowire.EstimateNetworkHashesPerSecondResponseMessage + 116, // 116: protowire.SpectredMessage.notifyVirtualDaaScoreChangedRequest:type_name -> protowire.NotifyVirtualDaaScoreChangedRequestMessage + 117, // 117: protowire.SpectredMessage.notifyVirtualDaaScoreChangedResponse:type_name -> protowire.NotifyVirtualDaaScoreChangedResponseMessage + 118, // 118: protowire.SpectredMessage.virtualDaaScoreChangedNotification:type_name -> protowire.VirtualDaaScoreChangedNotificationMessage + 119, // 119: protowire.SpectredMessage.getBalanceByAddressRequest:type_name -> 
protowire.GetBalanceByAddressRequestMessage + 120, // 120: protowire.SpectredMessage.getBalanceByAddressResponse:type_name -> protowire.GetBalanceByAddressResponseMessage + 121, // 121: protowire.SpectredMessage.getBalancesByAddressesRequest:type_name -> protowire.GetBalancesByAddressesRequestMessage + 122, // 122: protowire.SpectredMessage.getBalancesByAddressesResponse:type_name -> protowire.GetBalancesByAddressesResponseMessage + 123, // 123: protowire.SpectredMessage.notifyNewBlockTemplateRequest:type_name -> protowire.NotifyNewBlockTemplateRequestMessage + 124, // 124: protowire.SpectredMessage.notifyNewBlockTemplateResponse:type_name -> protowire.NotifyNewBlockTemplateResponseMessage + 125, // 125: protowire.SpectredMessage.newBlockTemplateNotification:type_name -> protowire.NewBlockTemplateNotificationMessage + 126, // 126: protowire.SpectredMessage.getMempoolEntriesByAddressesRequest:type_name -> protowire.GetMempoolEntriesByAddressesRequestMessage + 127, // 127: protowire.SpectredMessage.getMempoolEntriesByAddressesResponse:type_name -> protowire.GetMempoolEntriesByAddressesResponseMessage + 128, // 128: protowire.SpectredMessage.getCoinSupplyRequest:type_name -> protowire.GetCoinSupplyRequestMessage + 129, // 129: protowire.SpectredMessage.getCoinSupplyResponse:type_name -> protowire.GetCoinSupplyResponseMessage + 0, // 130: protowire.P2P.MessageStream:input_type -> protowire.SpectredMessage + 0, // 131: protowire.RPC.MessageStream:input_type -> protowire.SpectredMessage + 0, // 132: protowire.P2P.MessageStream:output_type -> protowire.SpectredMessage + 0, // 133: protowire.RPC.MessageStream:output_type -> protowire.SpectredMessage + 132, // [132:134] is the sub-list for method output_type + 130, // [130:132] is the sub-list for method input_type + 130, // [130:130] is the sub-list for extension type_name + 130, // [130:130] is the sub-list for extension extendee + 0, // [0:130] is the sub-list for field type_name +} + +func init() { 
file_messages_proto_init() } +func file_messages_proto_init() { + if File_messages_proto != nil { + return + } + file_p2p_proto_init() + file_rpc_proto_init() + if !protoimpl.UnsafeEnabled { + file_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpectredMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_messages_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*SpectredMessage_Addresses)(nil), + (*SpectredMessage_Block)(nil), + (*SpectredMessage_Transaction)(nil), + (*SpectredMessage_BlockLocator)(nil), + (*SpectredMessage_RequestAddresses)(nil), + (*SpectredMessage_RequestRelayBlocks)(nil), + (*SpectredMessage_RequestTransactions)(nil), + (*SpectredMessage_IbdBlock)(nil), + (*SpectredMessage_InvRelayBlock)(nil), + (*SpectredMessage_InvTransactions)(nil), + (*SpectredMessage_Ping)(nil), + (*SpectredMessage_Pong)(nil), + (*SpectredMessage_Verack)(nil), + (*SpectredMessage_Version)(nil), + (*SpectredMessage_TransactionNotFound)(nil), + (*SpectredMessage_Reject)(nil), + (*SpectredMessage_PruningPointUtxoSetChunk)(nil), + (*SpectredMessage_RequestIBDBlocks)(nil), + (*SpectredMessage_UnexpectedPruningPoint)(nil), + (*SpectredMessage_IbdBlockLocator)(nil), + (*SpectredMessage_IbdBlockLocatorHighestHash)(nil), + (*SpectredMessage_RequestNextPruningPointUtxoSetChunk)(nil), + (*SpectredMessage_DonePruningPointUtxoSetChunks)(nil), + (*SpectredMessage_IbdBlockLocatorHighestHashNotFound)(nil), + (*SpectredMessage_BlockWithTrustedData)(nil), + (*SpectredMessage_DoneBlocksWithTrustedData)(nil), + (*SpectredMessage_RequestPruningPointAndItsAnticone)(nil), + (*SpectredMessage_BlockHeaders)(nil), + (*SpectredMessage_RequestNextHeaders)(nil), + (*SpectredMessage_DoneHeaders)(nil), + (*SpectredMessage_RequestPruningPointUTXOSet)(nil), + (*SpectredMessage_RequestHeaders)(nil), + (*SpectredMessage_RequestBlockLocator)(nil), + 
(*SpectredMessage_PruningPoints)(nil), + (*SpectredMessage_RequestPruningPointProof)(nil), + (*SpectredMessage_PruningPointProof)(nil), + (*SpectredMessage_Ready)(nil), + (*SpectredMessage_BlockWithTrustedDataV4)(nil), + (*SpectredMessage_TrustedData)(nil), + (*SpectredMessage_RequestIBDChainBlockLocator)(nil), + (*SpectredMessage_IbdChainBlockLocator)(nil), + (*SpectredMessage_RequestAnticone)(nil), + (*SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks)(nil), + (*SpectredMessage_GetCurrentNetworkRequest)(nil), + (*SpectredMessage_GetCurrentNetworkResponse)(nil), + (*SpectredMessage_SubmitBlockRequest)(nil), + (*SpectredMessage_SubmitBlockResponse)(nil), + (*SpectredMessage_GetBlockTemplateRequest)(nil), + (*SpectredMessage_GetBlockTemplateResponse)(nil), + (*SpectredMessage_NotifyBlockAddedRequest)(nil), + (*SpectredMessage_NotifyBlockAddedResponse)(nil), + (*SpectredMessage_BlockAddedNotification)(nil), + (*SpectredMessage_GetPeerAddressesRequest)(nil), + (*SpectredMessage_GetPeerAddressesResponse)(nil), + (*SpectredMessage_GetSelectedTipHashRequest)(nil), + (*SpectredMessage_GetSelectedTipHashResponse)(nil), + (*SpectredMessage_GetMempoolEntryRequest)(nil), + (*SpectredMessage_GetMempoolEntryResponse)(nil), + (*SpectredMessage_GetConnectedPeerInfoRequest)(nil), + (*SpectredMessage_GetConnectedPeerInfoResponse)(nil), + (*SpectredMessage_AddPeerRequest)(nil), + (*SpectredMessage_AddPeerResponse)(nil), + (*SpectredMessage_SubmitTransactionRequest)(nil), + (*SpectredMessage_SubmitTransactionResponse)(nil), + (*SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest)(nil), + (*SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse)(nil), + (*SpectredMessage_VirtualSelectedParentChainChangedNotification)(nil), + (*SpectredMessage_GetBlockRequest)(nil), + (*SpectredMessage_GetBlockResponse)(nil), + (*SpectredMessage_GetSubnetworkRequest)(nil), + (*SpectredMessage_GetSubnetworkResponse)(nil), + 
(*SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest)(nil), + (*SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse)(nil), + (*SpectredMessage_GetBlocksRequest)(nil), + (*SpectredMessage_GetBlocksResponse)(nil), + (*SpectredMessage_GetBlockCountRequest)(nil), + (*SpectredMessage_GetBlockCountResponse)(nil), + (*SpectredMessage_GetBlockDagInfoRequest)(nil), + (*SpectredMessage_GetBlockDagInfoResponse)(nil), + (*SpectredMessage_ResolveFinalityConflictRequest)(nil), + (*SpectredMessage_ResolveFinalityConflictResponse)(nil), + (*SpectredMessage_NotifyFinalityConflictsRequest)(nil), + (*SpectredMessage_NotifyFinalityConflictsResponse)(nil), + (*SpectredMessage_FinalityConflictNotification)(nil), + (*SpectredMessage_FinalityConflictResolvedNotification)(nil), + (*SpectredMessage_GetMempoolEntriesRequest)(nil), + (*SpectredMessage_GetMempoolEntriesResponse)(nil), + (*SpectredMessage_ShutDownRequest)(nil), + (*SpectredMessage_ShutDownResponse)(nil), + (*SpectredMessage_GetHeadersRequest)(nil), + (*SpectredMessage_GetHeadersResponse)(nil), + (*SpectredMessage_NotifyUtxosChangedRequest)(nil), + (*SpectredMessage_NotifyUtxosChangedResponse)(nil), + (*SpectredMessage_UtxosChangedNotification)(nil), + (*SpectredMessage_GetUtxosByAddressesRequest)(nil), + (*SpectredMessage_GetUtxosByAddressesResponse)(nil), + (*SpectredMessage_GetVirtualSelectedParentBlueScoreRequest)(nil), + (*SpectredMessage_GetVirtualSelectedParentBlueScoreResponse)(nil), + (*SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest)(nil), + (*SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse)(nil), + (*SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification)(nil), + (*SpectredMessage_BanRequest)(nil), + (*SpectredMessage_BanResponse)(nil), + (*SpectredMessage_UnbanRequest)(nil), + (*SpectredMessage_UnbanResponse)(nil), + (*SpectredMessage_GetInfoRequest)(nil), + (*SpectredMessage_GetInfoResponse)(nil), + 
(*SpectredMessage_StopNotifyingUtxosChangedRequest)(nil), + (*SpectredMessage_StopNotifyingUtxosChangedResponse)(nil), + (*SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest)(nil), + (*SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse)(nil), + (*SpectredMessage_PruningPointUTXOSetOverrideNotification)(nil), + (*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest)(nil), + (*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse)(nil), + (*SpectredMessage_EstimateNetworkHashesPerSecondRequest)(nil), + (*SpectredMessage_EstimateNetworkHashesPerSecondResponse)(nil), + (*SpectredMessage_NotifyVirtualDaaScoreChangedRequest)(nil), + (*SpectredMessage_NotifyVirtualDaaScoreChangedResponse)(nil), + (*SpectredMessage_VirtualDaaScoreChangedNotification)(nil), + (*SpectredMessage_GetBalanceByAddressRequest)(nil), + (*SpectredMessage_GetBalanceByAddressResponse)(nil), + (*SpectredMessage_GetBalancesByAddressesRequest)(nil), + (*SpectredMessage_GetBalancesByAddressesResponse)(nil), + (*SpectredMessage_NotifyNewBlockTemplateRequest)(nil), + (*SpectredMessage_NotifyNewBlockTemplateResponse)(nil), + (*SpectredMessage_NewBlockTemplateNotification)(nil), + (*SpectredMessage_GetMempoolEntriesByAddressesRequest)(nil), + (*SpectredMessage_GetMempoolEntriesByAddressesResponse)(nil), + (*SpectredMessage_GetCoinSupplyRequest)(nil), + (*SpectredMessage_GetCoinSupplyResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_messages_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_messages_proto_goTypes, + DependencyIndexes: file_messages_proto_depIdxs, + MessageInfos: file_messages_proto_msgTypes, + }.Build() + File_messages_proto = out.File + file_messages_proto_rawDesc = nil + file_messages_proto_goTypes = nil + file_messages_proto_depIdxs = nil +} diff --git 
a/infrastructure/network/netadapter/server/grpcserver/protowire/messages.proto b/infrastructure/network/netadapter/server/grpcserver/protowire/messages.proto new file mode 100644 index 0000000..1d33c20 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/messages.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; +package protowire; + +option go_package = "github.com/spectre-project/spectred/protowire"; + +import "p2p.proto"; +import "rpc.proto"; + +message SpectredMessage { + oneof payload { + AddressesMessage addresses = 1; + BlockMessage block = 2; + TransactionMessage transaction = 3; + BlockLocatorMessage blockLocator = 5; + RequestAddressesMessage requestAddresses = 6; + RequestRelayBlocksMessage requestRelayBlocks = 10; + RequestTransactionsMessage requestTransactions = 12; + BlockMessage ibdBlock = 13; + InvRelayBlockMessage invRelayBlock = 14; + InvTransactionsMessage invTransactions = 15; + PingMessage ping = 16; + PongMessage pong = 17; + VerackMessage verack = 19; + VersionMessage version = 20; + TransactionNotFoundMessage transactionNotFound = 21; + RejectMessage reject = 22; + PruningPointUtxoSetChunkMessage pruningPointUtxoSetChunk = 25; + RequestIBDBlocksMessage requestIBDBlocks = 26; + UnexpectedPruningPointMessage unexpectedPruningPoint = 27; + IbdBlockLocatorMessage ibdBlockLocator = 30; + IbdBlockLocatorHighestHashMessage ibdBlockLocatorHighestHash = 31; + RequestNextPruningPointUtxoSetChunkMessage requestNextPruningPointUtxoSetChunk = 33; + DonePruningPointUtxoSetChunksMessage donePruningPointUtxoSetChunks = 34; + IbdBlockLocatorHighestHashNotFoundMessage ibdBlockLocatorHighestHashNotFound = 35; + BlockWithTrustedDataMessage blockWithTrustedData = 36; + DoneBlocksWithTrustedDataMessage doneBlocksWithTrustedData = 37; + RequestPruningPointAndItsAnticoneMessage requestPruningPointAndItsAnticone = 40; + BlockHeadersMessage blockHeaders = 41; + RequestNextHeadersMessage requestNextHeaders = 42; + DoneHeadersMessage DoneHeaders 
= 43; + RequestPruningPointUTXOSetMessage requestPruningPointUTXOSet = 44; + RequestHeadersMessage requestHeaders = 45; + RequestBlockLocatorMessage requestBlockLocator = 46; + PruningPointsMessage pruningPoints = 47; + RequestPruningPointProofMessage requestPruningPointProof = 48; + PruningPointProofMessage pruningPointProof = 49; + ReadyMessage ready = 50; + BlockWithTrustedDataV4Message blockWithTrustedDataV4 = 51; + TrustedDataMessage trustedData = 52; + RequestIBDChainBlockLocatorMessage requestIBDChainBlockLocator = 53; + IbdChainBlockLocatorMessage ibdChainBlockLocator = 54; + RequestAnticoneMessage requestAnticone = 55; + RequestNextPruningPointAndItsAnticoneBlocksMessage requestNextPruningPointAndItsAnticoneBlocks = 56; + + GetCurrentNetworkRequestMessage getCurrentNetworkRequest = 1001; + GetCurrentNetworkResponseMessage getCurrentNetworkResponse = 1002; + SubmitBlockRequestMessage submitBlockRequest = 1003; + SubmitBlockResponseMessage submitBlockResponse = 1004; + GetBlockTemplateRequestMessage getBlockTemplateRequest = 1005; + GetBlockTemplateResponseMessage getBlockTemplateResponse = 1006; + NotifyBlockAddedRequestMessage notifyBlockAddedRequest = 1007; + NotifyBlockAddedResponseMessage notifyBlockAddedResponse = 1008; + BlockAddedNotificationMessage blockAddedNotification = 1009; + GetPeerAddressesRequestMessage getPeerAddressesRequest = 1010; + GetPeerAddressesResponseMessage getPeerAddressesResponse = 1011; + GetSelectedTipHashRequestMessage getSelectedTipHashRequest = 1012; + GetSelectedTipHashResponseMessage getSelectedTipHashResponse = 1013; + GetMempoolEntryRequestMessage getMempoolEntryRequest = 1014; + GetMempoolEntryResponseMessage getMempoolEntryResponse = 1015; + GetConnectedPeerInfoRequestMessage getConnectedPeerInfoRequest = 1016; + GetConnectedPeerInfoResponseMessage getConnectedPeerInfoResponse = 1017; + AddPeerRequestMessage addPeerRequest = 1018; + AddPeerResponseMessage addPeerResponse = 1019; + SubmitTransactionRequestMessage 
submitTransactionRequest = 1020; + SubmitTransactionResponseMessage submitTransactionResponse = 1021; + NotifyVirtualSelectedParentChainChangedRequestMessage notifyVirtualSelectedParentChainChangedRequest = 1022; + NotifyVirtualSelectedParentChainChangedResponseMessage notifyVirtualSelectedParentChainChangedResponse = 1023; + VirtualSelectedParentChainChangedNotificationMessage virtualSelectedParentChainChangedNotification = 1024; + GetBlockRequestMessage getBlockRequest = 1025; + GetBlockResponseMessage getBlockResponse = 1026; + GetSubnetworkRequestMessage getSubnetworkRequest = 1027; + GetSubnetworkResponseMessage getSubnetworkResponse = 1028; + GetVirtualSelectedParentChainFromBlockRequestMessage getVirtualSelectedParentChainFromBlockRequest = 1029; + GetVirtualSelectedParentChainFromBlockResponseMessage getVirtualSelectedParentChainFromBlockResponse = 1030; + GetBlocksRequestMessage getBlocksRequest = 1031; + GetBlocksResponseMessage getBlocksResponse = 1032; + GetBlockCountRequestMessage getBlockCountRequest = 1033; + GetBlockCountResponseMessage getBlockCountResponse = 1034; + GetBlockDagInfoRequestMessage getBlockDagInfoRequest = 1035; + GetBlockDagInfoResponseMessage getBlockDagInfoResponse = 1036; + ResolveFinalityConflictRequestMessage resolveFinalityConflictRequest = 1037; + ResolveFinalityConflictResponseMessage resolveFinalityConflictResponse = 1038; + NotifyFinalityConflictsRequestMessage notifyFinalityConflictsRequest = 1039; + NotifyFinalityConflictsResponseMessage notifyFinalityConflictsResponse = 1040; + FinalityConflictNotificationMessage finalityConflictNotification = 1041; + FinalityConflictResolvedNotificationMessage finalityConflictResolvedNotification = 1042; + GetMempoolEntriesRequestMessage getMempoolEntriesRequest = 1043; + GetMempoolEntriesResponseMessage getMempoolEntriesResponse = 1044; + ShutDownRequestMessage shutDownRequest = 1045; + ShutDownResponseMessage shutDownResponse = 1046; + GetHeadersRequestMessage getHeadersRequest = 
1047; + GetHeadersResponseMessage getHeadersResponse = 1048; + NotifyUtxosChangedRequestMessage notifyUtxosChangedRequest = 1049; + NotifyUtxosChangedResponseMessage notifyUtxosChangedResponse = 1050; + UtxosChangedNotificationMessage utxosChangedNotification = 1051; + GetUtxosByAddressesRequestMessage getUtxosByAddressesRequest = 1052; + GetUtxosByAddressesResponseMessage getUtxosByAddressesResponse = 1053; + GetVirtualSelectedParentBlueScoreRequestMessage getVirtualSelectedParentBlueScoreRequest = 1054; + GetVirtualSelectedParentBlueScoreResponseMessage getVirtualSelectedParentBlueScoreResponse = 1055; + NotifyVirtualSelectedParentBlueScoreChangedRequestMessage notifyVirtualSelectedParentBlueScoreChangedRequest = 1056; + NotifyVirtualSelectedParentBlueScoreChangedResponseMessage notifyVirtualSelectedParentBlueScoreChangedResponse = 1057; + VirtualSelectedParentBlueScoreChangedNotificationMessage virtualSelectedParentBlueScoreChangedNotification = 1058; + BanRequestMessage banRequest = 1059; + BanResponseMessage banResponse = 1060; + UnbanRequestMessage unbanRequest = 1061; + UnbanResponseMessage unbanResponse = 1062; + GetInfoRequestMessage getInfoRequest = 1063; + GetInfoResponseMessage getInfoResponse = 1064; + StopNotifyingUtxosChangedRequestMessage stopNotifyingUtxosChangedRequest = 1065; + StopNotifyingUtxosChangedResponseMessage stopNotifyingUtxosChangedResponse = 1066; + NotifyPruningPointUTXOSetOverrideRequestMessage notifyPruningPointUTXOSetOverrideRequest = 1067; + NotifyPruningPointUTXOSetOverrideResponseMessage notifyPruningPointUTXOSetOverrideResponse = 1068; + PruningPointUTXOSetOverrideNotificationMessage pruningPointUTXOSetOverrideNotification = 1069; + StopNotifyingPruningPointUTXOSetOverrideRequestMessage stopNotifyingPruningPointUTXOSetOverrideRequest = 1070; + StopNotifyingPruningPointUTXOSetOverrideResponseMessage stopNotifyingPruningPointUTXOSetOverrideResponse = 1071; + EstimateNetworkHashesPerSecondRequestMessage 
estimateNetworkHashesPerSecondRequest = 1072; + EstimateNetworkHashesPerSecondResponseMessage estimateNetworkHashesPerSecondResponse = 1073; + NotifyVirtualDaaScoreChangedRequestMessage notifyVirtualDaaScoreChangedRequest = 1074; + NotifyVirtualDaaScoreChangedResponseMessage notifyVirtualDaaScoreChangedResponse = 1075; + VirtualDaaScoreChangedNotificationMessage virtualDaaScoreChangedNotification = 1076; + GetBalanceByAddressRequestMessage getBalanceByAddressRequest = 1077; + GetBalanceByAddressResponseMessage getBalanceByAddressResponse = 1078; + GetBalancesByAddressesRequestMessage getBalancesByAddressesRequest = 1079; + GetBalancesByAddressesResponseMessage getBalancesByAddressesResponse = 1080; + NotifyNewBlockTemplateRequestMessage notifyNewBlockTemplateRequest = 1081; + NotifyNewBlockTemplateResponseMessage notifyNewBlockTemplateResponse = 1082; + NewBlockTemplateNotificationMessage newBlockTemplateNotification = 1083; + GetMempoolEntriesByAddressesRequestMessage getMempoolEntriesByAddressesRequest = 1084; + GetMempoolEntriesByAddressesResponseMessage getMempoolEntriesByAddressesResponse = 1085; + GetCoinSupplyRequestMessage getCoinSupplyRequest = 1086; + GetCoinSupplyResponseMessage getCoinSupplyResponse= 1087; + } +} + +service P2P { + rpc MessageStream (stream SpectredMessage) returns (stream SpectredMessage) {} +} + +service RPC { + rpc MessageStream (stream SpectredMessage) returns (stream SpectredMessage) {} +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/messages_grpc.pb.go b/infrastructure/network/netadapter/server/grpcserver/protowire/messages_grpc.pb.go new file mode 100644 index 0000000..3416972 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/messages_grpc.pb.go @@ -0,0 +1,263 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.3 +// source: messages.proto + +package protowire + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + P2P_MessageStream_FullMethodName = "/protowire.P2P/MessageStream" +) + +// P2PClient is the client API for P2P service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type P2PClient interface { + MessageStream(ctx context.Context, opts ...grpc.CallOption) (P2P_MessageStreamClient, error) +} + +type p2PClient struct { + cc grpc.ClientConnInterface +} + +func NewP2PClient(cc grpc.ClientConnInterface) P2PClient { + return &p2PClient{cc} +} + +func (c *p2PClient) MessageStream(ctx context.Context, opts ...grpc.CallOption) (P2P_MessageStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &P2P_ServiceDesc.Streams[0], P2P_MessageStream_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &p2PMessageStreamClient{stream} + return x, nil +} + +type P2P_MessageStreamClient interface { + Send(*SpectredMessage) error + Recv() (*SpectredMessage, error) + grpc.ClientStream +} + +type p2PMessageStreamClient struct { + grpc.ClientStream +} + +func (x *p2PMessageStreamClient) Send(m *SpectredMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *p2PMessageStreamClient) Recv() (*SpectredMessage, error) { + m := new(SpectredMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// P2PServer is the server API for P2P service. 
+// All implementations must embed UnimplementedP2PServer +// for forward compatibility +type P2PServer interface { + MessageStream(P2P_MessageStreamServer) error + mustEmbedUnimplementedP2PServer() +} + +// UnimplementedP2PServer must be embedded to have forward compatible implementations. +type UnimplementedP2PServer struct { +} + +func (UnimplementedP2PServer) MessageStream(P2P_MessageStreamServer) error { + return status.Errorf(codes.Unimplemented, "method MessageStream not implemented") +} +func (UnimplementedP2PServer) mustEmbedUnimplementedP2PServer() {} + +// UnsafeP2PServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to P2PServer will +// result in compilation errors. +type UnsafeP2PServer interface { + mustEmbedUnimplementedP2PServer() +} + +func RegisterP2PServer(s grpc.ServiceRegistrar, srv P2PServer) { + s.RegisterService(&P2P_ServiceDesc, srv) +} + +func _P2P_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(P2PServer).MessageStream(&p2PMessageStreamServer{stream}) +} + +type P2P_MessageStreamServer interface { + Send(*SpectredMessage) error + Recv() (*SpectredMessage, error) + grpc.ServerStream +} + +type p2PMessageStreamServer struct { + grpc.ServerStream +} + +func (x *p2PMessageStreamServer) Send(m *SpectredMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *p2PMessageStreamServer) Recv() (*SpectredMessage, error) { + m := new(SpectredMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// P2P_ServiceDesc is the grpc.ServiceDesc for P2P service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var P2P_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "protowire.P2P", + HandlerType: (*P2PServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "MessageStream", + Handler: _P2P_MessageStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "messages.proto", +} + +const ( + RPC_MessageStream_FullMethodName = "/protowire.RPC/MessageStream" +) + +// RPCClient is the client API for RPC service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RPCClient interface { + MessageStream(ctx context.Context, opts ...grpc.CallOption) (RPC_MessageStreamClient, error) +} + +type rPCClient struct { + cc grpc.ClientConnInterface +} + +func NewRPCClient(cc grpc.ClientConnInterface) RPCClient { + return &rPCClient{cc} +} + +func (c *rPCClient) MessageStream(ctx context.Context, opts ...grpc.CallOption) (RPC_MessageStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &RPC_ServiceDesc.Streams[0], RPC_MessageStream_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &rPCMessageStreamClient{stream} + return x, nil +} + +type RPC_MessageStreamClient interface { + Send(*SpectredMessage) error + Recv() (*SpectredMessage, error) + grpc.ClientStream +} + +type rPCMessageStreamClient struct { + grpc.ClientStream +} + +func (x *rPCMessageStreamClient) Send(m *SpectredMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *rPCMessageStreamClient) Recv() (*SpectredMessage, error) { + m := new(SpectredMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// RPCServer is the server API for RPC service. 
+// All implementations must embed UnimplementedRPCServer +// for forward compatibility +type RPCServer interface { + MessageStream(RPC_MessageStreamServer) error + mustEmbedUnimplementedRPCServer() +} + +// UnimplementedRPCServer must be embedded to have forward compatible implementations. +type UnimplementedRPCServer struct { +} + +func (UnimplementedRPCServer) MessageStream(RPC_MessageStreamServer) error { + return status.Errorf(codes.Unimplemented, "method MessageStream not implemented") +} +func (UnimplementedRPCServer) mustEmbedUnimplementedRPCServer() {} + +// UnsafeRPCServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RPCServer will +// result in compilation errors. +type UnsafeRPCServer interface { + mustEmbedUnimplementedRPCServer() +} + +func RegisterRPCServer(s grpc.ServiceRegistrar, srv RPCServer) { + s.RegisterService(&RPC_ServiceDesc, srv) +} + +func _RPC_MessageStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RPCServer).MessageStream(&rPCMessageStreamServer{stream}) +} + +type RPC_MessageStreamServer interface { + Send(*SpectredMessage) error + Recv() (*SpectredMessage, error) + grpc.ServerStream +} + +type rPCMessageStreamServer struct { + grpc.ServerStream +} + +func (x *rPCMessageStreamServer) Send(m *SpectredMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *rPCMessageStreamServer) Recv() (*SpectredMessage, error) { + m := new(SpectredMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// RPC_ServiceDesc is the grpc.ServiceDesc for RPC service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RPC_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "protowire.RPC", + HandlerType: (*RPCServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "MessageStream", + Handler: _RPC_MessageStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "messages.proto", +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p.pb.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p.pb.go new file mode 100644 index 0000000..be95b36 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p.pb.go @@ -0,0 +1,4496 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: p2p.proto + +package protowire + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RequestAddressesMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeAllSubnetworks bool `protobuf:"varint,1,opt,name=includeAllSubnetworks,proto3" json:"includeAllSubnetworks,omitempty"` + SubnetworkId *SubnetworkId `protobuf:"bytes,2,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` +} + +func (x *RequestAddressesMessage) Reset() { + *x = RequestAddressesMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestAddressesMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestAddressesMessage) ProtoMessage() {} + +func (x *RequestAddressesMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestAddressesMessage.ProtoReflect.Descriptor instead. 
+func (*RequestAddressesMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{0} +} + +func (x *RequestAddressesMessage) GetIncludeAllSubnetworks() bool { + if x != nil { + return x.IncludeAllSubnetworks + } + return false +} + +func (x *RequestAddressesMessage) GetSubnetworkId() *SubnetworkId { + if x != nil { + return x.SubnetworkId + } + return nil +} + +type AddressesMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AddressList []*NetAddress `protobuf:"bytes,1,rep,name=addressList,proto3" json:"addressList,omitempty"` +} + +func (x *AddressesMessage) Reset() { + *x = AddressesMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddressesMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddressesMessage) ProtoMessage() {} + +func (x *AddressesMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddressesMessage.ProtoReflect.Descriptor instead. 
+func (*AddressesMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{1} +} + +func (x *AddressesMessage) GetAddressList() []*NetAddress { + if x != nil { + return x.AddressList + } + return nil +} + +type NetAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Ip []byte `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"` +} + +func (x *NetAddress) Reset() { + *x = NetAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NetAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetAddress) ProtoMessage() {} + +func (x *NetAddress) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetAddress.ProtoReflect.Descriptor instead. 
+func (*NetAddress) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{2} +} + +func (x *NetAddress) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *NetAddress) GetIp() []byte { + if x != nil { + return x.Ip + } + return nil +} + +func (x *NetAddress) GetPort() uint32 { + if x != nil { + return x.Port + } + return 0 +} + +type SubnetworkId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *SubnetworkId) Reset() { + *x = SubnetworkId{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubnetworkId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubnetworkId) ProtoMessage() {} + +func (x *SubnetworkId) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubnetworkId.ProtoReflect.Descriptor instead. 
+func (*SubnetworkId) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{3} +} + +func (x *SubnetworkId) GetBytes() []byte { + if x != nil { + return x.Bytes + } + return nil +} + +type TransactionMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Inputs []*TransactionInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*TransactionOutput `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + LockTime uint64 `protobuf:"varint,4,opt,name=lockTime,proto3" json:"lockTime,omitempty"` + SubnetworkId *SubnetworkId `protobuf:"bytes,5,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` + Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"` + Payload []byte `protobuf:"bytes,8,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *TransactionMessage) Reset() { + *x = TransactionMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionMessage) ProtoMessage() {} + +func (x *TransactionMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionMessage.ProtoReflect.Descriptor instead. 
+func (*TransactionMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{4} +} + +func (x *TransactionMessage) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *TransactionMessage) GetInputs() []*TransactionInput { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *TransactionMessage) GetOutputs() []*TransactionOutput { + if x != nil { + return x.Outputs + } + return nil +} + +func (x *TransactionMessage) GetLockTime() uint64 { + if x != nil { + return x.LockTime + } + return 0 +} + +func (x *TransactionMessage) GetSubnetworkId() *SubnetworkId { + if x != nil { + return x.SubnetworkId + } + return nil +} + +func (x *TransactionMessage) GetGas() uint64 { + if x != nil { + return x.Gas + } + return 0 +} + +func (x *TransactionMessage) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type TransactionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousOutpoint *Outpoint `protobuf:"bytes,1,opt,name=previousOutpoint,proto3" json:"previousOutpoint,omitempty"` + SignatureScript []byte `protobuf:"bytes,2,opt,name=signatureScript,proto3" json:"signatureScript,omitempty"` + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` + SigOpCount uint32 `protobuf:"varint,4,opt,name=sigOpCount,proto3" json:"sigOpCount,omitempty"` +} + +func (x *TransactionInput) Reset() { + *x = TransactionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionInput) ProtoMessage() {} + +func (x *TransactionInput) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionInput.ProtoReflect.Descriptor instead. +func (*TransactionInput) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{5} +} + +func (x *TransactionInput) GetPreviousOutpoint() *Outpoint { + if x != nil { + return x.PreviousOutpoint + } + return nil +} + +func (x *TransactionInput) GetSignatureScript() []byte { + if x != nil { + return x.SignatureScript + } + return nil +} + +func (x *TransactionInput) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *TransactionInput) GetSigOpCount() uint32 { + if x != nil { + return x.SigOpCount + } + return 0 +} + +type Outpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionId *TransactionId `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` +} + +func (x *Outpoint) Reset() { + *x = Outpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Outpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Outpoint) ProtoMessage() {} + +func (x *Outpoint) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Outpoint.ProtoReflect.Descriptor instead. 
+func (*Outpoint) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{6} +} + +func (x *Outpoint) GetTransactionId() *TransactionId { + if x != nil { + return x.TransactionId + } + return nil +} + +func (x *Outpoint) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +type TransactionId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *TransactionId) Reset() { + *x = TransactionId{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionId) ProtoMessage() {} + +func (x *TransactionId) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionId.ProtoReflect.Descriptor instead. 
+func (*TransactionId) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{7} +} + +func (x *TransactionId) GetBytes() []byte { + if x != nil { + return x.Bytes + } + return nil +} + +type ScriptPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Script []byte `protobuf:"bytes,1,opt,name=script,proto3" json:"script,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ScriptPublicKey) Reset() { + *x = ScriptPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScriptPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScriptPublicKey) ProtoMessage() {} + +func (x *ScriptPublicKey) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScriptPublicKey.ProtoReflect.Descriptor instead. 
+func (*ScriptPublicKey) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{8} +} + +func (x *ScriptPublicKey) GetScript() []byte { + if x != nil { + return x.Script + } + return nil +} + +func (x *ScriptPublicKey) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +type TransactionOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + ScriptPublicKey *ScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` +} + +func (x *TransactionOutput) Reset() { + *x = TransactionOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionOutput) ProtoMessage() {} + +func (x *TransactionOutput) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionOutput.ProtoReflect.Descriptor instead. 
+func (*TransactionOutput) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{9} +} + +func (x *TransactionOutput) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *TransactionOutput) GetScriptPublicKey() *ScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +type BlockMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *BlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Transactions []*TransactionMessage `protobuf:"bytes,2,rep,name=transactions,proto3" json:"transactions,omitempty"` +} + +func (x *BlockMessage) Reset() { + *x = BlockMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockMessage) ProtoMessage() {} + +func (x *BlockMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockMessage.ProtoReflect.Descriptor instead. 
+func (*BlockMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{10} +} + +func (x *BlockMessage) GetHeader() *BlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *BlockMessage) GetTransactions() []*TransactionMessage { + if x != nil { + return x.Transactions + } + return nil +} + +type BlockHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Parents []*BlockLevelParents `protobuf:"bytes,12,rep,name=parents,proto3" json:"parents,omitempty"` + HashMerkleRoot *Hash `protobuf:"bytes,3,opt,name=hashMerkleRoot,proto3" json:"hashMerkleRoot,omitempty"` + AcceptedIdMerkleRoot *Hash `protobuf:"bytes,4,opt,name=acceptedIdMerkleRoot,proto3" json:"acceptedIdMerkleRoot,omitempty"` + UtxoCommitment *Hash `protobuf:"bytes,5,opt,name=utxoCommitment,proto3" json:"utxoCommitment,omitempty"` + Timestamp int64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Bits uint32 `protobuf:"varint,7,opt,name=bits,proto3" json:"bits,omitempty"` + Nonce uint64 `protobuf:"varint,8,opt,name=nonce,proto3" json:"nonce,omitempty"` + DaaScore uint64 `protobuf:"varint,9,opt,name=daaScore,proto3" json:"daaScore,omitempty"` + BlueWork []byte `protobuf:"bytes,10,opt,name=blueWork,proto3" json:"blueWork,omitempty"` + PruningPoint *Hash `protobuf:"bytes,14,opt,name=pruningPoint,proto3" json:"pruningPoint,omitempty"` + BlueScore uint64 `protobuf:"varint,13,opt,name=blueScore,proto3" json:"blueScore,omitempty"` +} + +func (x *BlockHeader) Reset() { + *x = BlockHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockHeader) ProtoMessage() {} + +func (x 
*BlockHeader) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockHeader.ProtoReflect.Descriptor instead. +func (*BlockHeader) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{11} +} + +func (x *BlockHeader) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *BlockHeader) GetParents() []*BlockLevelParents { + if x != nil { + return x.Parents + } + return nil +} + +func (x *BlockHeader) GetHashMerkleRoot() *Hash { + if x != nil { + return x.HashMerkleRoot + } + return nil +} + +func (x *BlockHeader) GetAcceptedIdMerkleRoot() *Hash { + if x != nil { + return x.AcceptedIdMerkleRoot + } + return nil +} + +func (x *BlockHeader) GetUtxoCommitment() *Hash { + if x != nil { + return x.UtxoCommitment + } + return nil +} + +func (x *BlockHeader) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *BlockHeader) GetBits() uint32 { + if x != nil { + return x.Bits + } + return 0 +} + +func (x *BlockHeader) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +func (x *BlockHeader) GetDaaScore() uint64 { + if x != nil { + return x.DaaScore + } + return 0 +} + +func (x *BlockHeader) GetBlueWork() []byte { + if x != nil { + return x.BlueWork + } + return nil +} + +func (x *BlockHeader) GetPruningPoint() *Hash { + if x != nil { + return x.PruningPoint + } + return nil +} + +func (x *BlockHeader) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +type BlockLevelParents struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ParentHashes []*Hash `protobuf:"bytes,1,rep,name=parentHashes,proto3" json:"parentHashes,omitempty"` 
+} + +func (x *BlockLevelParents) Reset() { + *x = BlockLevelParents{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockLevelParents) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockLevelParents) ProtoMessage() {} + +func (x *BlockLevelParents) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockLevelParents.ProtoReflect.Descriptor instead. +func (*BlockLevelParents) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{12} +} + +func (x *BlockLevelParents) GetParentHashes() []*Hash { + if x != nil { + return x.ParentHashes + } + return nil +} + +type Hash struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` +} + +func (x *Hash) Reset() { + *x = Hash{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Hash) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Hash) ProtoMessage() {} + +func (x *Hash) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Hash.ProtoReflect.Descriptor instead. 
+func (*Hash) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{13} +} + +func (x *Hash) GetBytes() []byte { + if x != nil { + return x.Bytes + } + return nil +} + +type RequestBlockLocatorMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HighHash *Hash `protobuf:"bytes,1,opt,name=highHash,proto3" json:"highHash,omitempty"` + Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *RequestBlockLocatorMessage) Reset() { + *x = RequestBlockLocatorMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBlockLocatorMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBlockLocatorMessage) ProtoMessage() {} + +func (x *RequestBlockLocatorMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestBlockLocatorMessage.ProtoReflect.Descriptor instead. 
+func (*RequestBlockLocatorMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{14} +} + +func (x *RequestBlockLocatorMessage) GetHighHash() *Hash { + if x != nil { + return x.HighHash + } + return nil +} + +func (x *RequestBlockLocatorMessage) GetLimit() uint32 { + if x != nil { + return x.Limit + } + return 0 +} + +type BlockLocatorMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hashes []*Hash `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` +} + +func (x *BlockLocatorMessage) Reset() { + *x = BlockLocatorMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockLocatorMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockLocatorMessage) ProtoMessage() {} + +func (x *BlockLocatorMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockLocatorMessage.ProtoReflect.Descriptor instead. 
+func (*BlockLocatorMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{15} +} + +func (x *BlockLocatorMessage) GetHashes() []*Hash { + if x != nil { + return x.Hashes + } + return nil +} + +type RequestHeadersMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LowHash *Hash `protobuf:"bytes,1,opt,name=lowHash,proto3" json:"lowHash,omitempty"` + HighHash *Hash `protobuf:"bytes,2,opt,name=highHash,proto3" json:"highHash,omitempty"` +} + +func (x *RequestHeadersMessage) Reset() { + *x = RequestHeadersMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestHeadersMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestHeadersMessage) ProtoMessage() {} + +func (x *RequestHeadersMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestHeadersMessage.ProtoReflect.Descriptor instead. 
+func (*RequestHeadersMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{16} +} + +func (x *RequestHeadersMessage) GetLowHash() *Hash { + if x != nil { + return x.LowHash + } + return nil +} + +func (x *RequestHeadersMessage) GetHighHash() *Hash { + if x != nil { + return x.HighHash + } + return nil +} + +type RequestNextHeadersMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RequestNextHeadersMessage) Reset() { + *x = RequestNextHeadersMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestNextHeadersMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestNextHeadersMessage) ProtoMessage() {} + +func (x *RequestNextHeadersMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestNextHeadersMessage.ProtoReflect.Descriptor instead. 
+func (*RequestNextHeadersMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{17} +} + +type DoneHeadersMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DoneHeadersMessage) Reset() { + *x = DoneHeadersMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoneHeadersMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoneHeadersMessage) ProtoMessage() {} + +func (x *DoneHeadersMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoneHeadersMessage.ProtoReflect.Descriptor instead. 
+func (*DoneHeadersMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{18} +} + +type RequestRelayBlocksMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hashes []*Hash `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` +} + +func (x *RequestRelayBlocksMessage) Reset() { + *x = RequestRelayBlocksMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestRelayBlocksMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestRelayBlocksMessage) ProtoMessage() {} + +func (x *RequestRelayBlocksMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestRelayBlocksMessage.ProtoReflect.Descriptor instead. 
+func (*RequestRelayBlocksMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{19} +} + +func (x *RequestRelayBlocksMessage) GetHashes() []*Hash { + if x != nil { + return x.Hashes + } + return nil +} + +type RequestTransactionsMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []*TransactionId `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *RequestTransactionsMessage) Reset() { + *x = RequestTransactionsMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestTransactionsMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestTransactionsMessage) ProtoMessage() {} + +func (x *RequestTransactionsMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestTransactionsMessage.ProtoReflect.Descriptor instead. 
+func (*RequestTransactionsMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{20} +} + +func (x *RequestTransactionsMessage) GetIds() []*TransactionId { + if x != nil { + return x.Ids + } + return nil +} + +type TransactionNotFoundMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id *TransactionId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *TransactionNotFoundMessage) Reset() { + *x = TransactionNotFoundMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransactionNotFoundMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransactionNotFoundMessage) ProtoMessage() {} + +func (x *TransactionNotFoundMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransactionNotFoundMessage.ProtoReflect.Descriptor instead. 
+func (*TransactionNotFoundMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{21} +} + +func (x *TransactionNotFoundMessage) GetId() *TransactionId { + if x != nil { + return x.Id + } + return nil +} + +type InvRelayBlockMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash *Hash `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (x *InvRelayBlockMessage) Reset() { + *x = InvRelayBlockMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvRelayBlockMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvRelayBlockMessage) ProtoMessage() {} + +func (x *InvRelayBlockMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvRelayBlockMessage.ProtoReflect.Descriptor instead. 
+func (*InvRelayBlockMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{22} +} + +func (x *InvRelayBlockMessage) GetHash() *Hash { + if x != nil { + return x.Hash + } + return nil +} + +type InvTransactionsMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []*TransactionId `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *InvTransactionsMessage) Reset() { + *x = InvTransactionsMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvTransactionsMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvTransactionsMessage) ProtoMessage() {} + +func (x *InvTransactionsMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvTransactionsMessage.ProtoReflect.Descriptor instead. 
+func (*InvTransactionsMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{23} +} + +func (x *InvTransactionsMessage) GetIds() []*TransactionId { + if x != nil { + return x.Ids + } + return nil +} + +type PingMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *PingMessage) Reset() { + *x = PingMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PingMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingMessage) ProtoMessage() {} + +func (x *PingMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingMessage.ProtoReflect.Descriptor instead. 
+func (*PingMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{24} +} + +func (x *PingMessage) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +type PongMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (x *PongMessage) Reset() { + *x = PongMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PongMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PongMessage) ProtoMessage() {} + +func (x *PongMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PongMessage.ProtoReflect.Descriptor instead. 
+func (*PongMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{25} +} + +func (x *PongMessage) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +type VerackMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VerackMessage) Reset() { + *x = VerackMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VerackMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerackMessage) ProtoMessage() {} + +func (x *VerackMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerackMessage.ProtoReflect.Descriptor instead. 
+func (*VerackMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{26} +} + +type VersionMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProtocolVersion uint32 `protobuf:"varint,1,opt,name=protocolVersion,proto3" json:"protocolVersion,omitempty"` + Services uint64 `protobuf:"varint,2,opt,name=services,proto3" json:"services,omitempty"` + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Address *NetAddress `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` + Id []byte `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` + UserAgent string `protobuf:"bytes,6,opt,name=userAgent,proto3" json:"userAgent,omitempty"` + DisableRelayTx bool `protobuf:"varint,8,opt,name=disableRelayTx,proto3" json:"disableRelayTx,omitempty"` + SubnetworkId *SubnetworkId `protobuf:"bytes,9,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` + Network string `protobuf:"bytes,10,opt,name=network,proto3" json:"network,omitempty"` +} + +func (x *VersionMessage) Reset() { + *x = VersionMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionMessage) ProtoMessage() {} + +func (x *VersionMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionMessage.ProtoReflect.Descriptor instead. 
+func (*VersionMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{27} +} + +func (x *VersionMessage) GetProtocolVersion() uint32 { + if x != nil { + return x.ProtocolVersion + } + return 0 +} + +func (x *VersionMessage) GetServices() uint64 { + if x != nil { + return x.Services + } + return 0 +} + +func (x *VersionMessage) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *VersionMessage) GetAddress() *NetAddress { + if x != nil { + return x.Address + } + return nil +} + +func (x *VersionMessage) GetId() []byte { + if x != nil { + return x.Id + } + return nil +} + +func (x *VersionMessage) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *VersionMessage) GetDisableRelayTx() bool { + if x != nil { + return x.DisableRelayTx + } + return false +} + +func (x *VersionMessage) GetSubnetworkId() *SubnetworkId { + if x != nil { + return x.SubnetworkId + } + return nil +} + +func (x *VersionMessage) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +type RejectMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *RejectMessage) Reset() { + *x = RejectMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RejectMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RejectMessage) ProtoMessage() {} + +func (x *RejectMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
RejectMessage.ProtoReflect.Descriptor instead. +func (*RejectMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{28} +} + +func (x *RejectMessage) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type RequestPruningPointUTXOSetMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PruningPointHash *Hash `protobuf:"bytes,1,opt,name=pruningPointHash,proto3" json:"pruningPointHash,omitempty"` +} + +func (x *RequestPruningPointUTXOSetMessage) Reset() { + *x = RequestPruningPointUTXOSetMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestPruningPointUTXOSetMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestPruningPointUTXOSetMessage) ProtoMessage() {} + +func (x *RequestPruningPointUTXOSetMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestPruningPointUTXOSetMessage.ProtoReflect.Descriptor instead. 
+func (*RequestPruningPointUTXOSetMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{29} +} + +func (x *RequestPruningPointUTXOSetMessage) GetPruningPointHash() *Hash { + if x != nil { + return x.PruningPointHash + } + return nil +} + +type PruningPointUtxoSetChunkMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OutpointAndUtxoEntryPairs []*OutpointAndUtxoEntryPair `protobuf:"bytes,1,rep,name=outpointAndUtxoEntryPairs,proto3" json:"outpointAndUtxoEntryPairs,omitempty"` +} + +func (x *PruningPointUtxoSetChunkMessage) Reset() { + *x = PruningPointUtxoSetChunkMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PruningPointUtxoSetChunkMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PruningPointUtxoSetChunkMessage) ProtoMessage() {} + +func (x *PruningPointUtxoSetChunkMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PruningPointUtxoSetChunkMessage.ProtoReflect.Descriptor instead. 
+func (*PruningPointUtxoSetChunkMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{30} +} + +func (x *PruningPointUtxoSetChunkMessage) GetOutpointAndUtxoEntryPairs() []*OutpointAndUtxoEntryPair { + if x != nil { + return x.OutpointAndUtxoEntryPairs + } + return nil +} + +type OutpointAndUtxoEntryPair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Outpoint *Outpoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` + UtxoEntry *UtxoEntry `protobuf:"bytes,2,opt,name=utxoEntry,proto3" json:"utxoEntry,omitempty"` +} + +func (x *OutpointAndUtxoEntryPair) Reset() { + *x = OutpointAndUtxoEntryPair{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OutpointAndUtxoEntryPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutpointAndUtxoEntryPair) ProtoMessage() {} + +func (x *OutpointAndUtxoEntryPair) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutpointAndUtxoEntryPair.ProtoReflect.Descriptor instead. 
+func (*OutpointAndUtxoEntryPair) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{31} +} + +func (x *OutpointAndUtxoEntryPair) GetOutpoint() *Outpoint { + if x != nil { + return x.Outpoint + } + return nil +} + +func (x *OutpointAndUtxoEntryPair) GetUtxoEntry() *UtxoEntry { + if x != nil { + return x.UtxoEntry + } + return nil +} + +type UtxoEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + ScriptPublicKey *ScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` + BlockDaaScore uint64 `protobuf:"varint,3,opt,name=blockDaaScore,proto3" json:"blockDaaScore,omitempty"` + IsCoinbase bool `protobuf:"varint,4,opt,name=isCoinbase,proto3" json:"isCoinbase,omitempty"` +} + +func (x *UtxoEntry) Reset() { + *x = UtxoEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UtxoEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UtxoEntry) ProtoMessage() {} + +func (x *UtxoEntry) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UtxoEntry.ProtoReflect.Descriptor instead. 
+func (*UtxoEntry) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{32} +} + +func (x *UtxoEntry) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *UtxoEntry) GetScriptPublicKey() *ScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +func (x *UtxoEntry) GetBlockDaaScore() uint64 { + if x != nil { + return x.BlockDaaScore + } + return 0 +} + +func (x *UtxoEntry) GetIsCoinbase() bool { + if x != nil { + return x.IsCoinbase + } + return false +} + +type RequestNextPruningPointUtxoSetChunkMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RequestNextPruningPointUtxoSetChunkMessage) Reset() { + *x = RequestNextPruningPointUtxoSetChunkMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestNextPruningPointUtxoSetChunkMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestNextPruningPointUtxoSetChunkMessage) ProtoMessage() {} + +func (x *RequestNextPruningPointUtxoSetChunkMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestNextPruningPointUtxoSetChunkMessage.ProtoReflect.Descriptor instead. 
+func (*RequestNextPruningPointUtxoSetChunkMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{33} +} + +type DonePruningPointUtxoSetChunksMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DonePruningPointUtxoSetChunksMessage) Reset() { + *x = DonePruningPointUtxoSetChunksMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DonePruningPointUtxoSetChunksMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DonePruningPointUtxoSetChunksMessage) ProtoMessage() {} + +func (x *DonePruningPointUtxoSetChunksMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DonePruningPointUtxoSetChunksMessage.ProtoReflect.Descriptor instead. 
+func (*DonePruningPointUtxoSetChunksMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{34} +} + +type RequestIBDBlocksMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hashes []*Hash `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"` +} + +func (x *RequestIBDBlocksMessage) Reset() { + *x = RequestIBDBlocksMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestIBDBlocksMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestIBDBlocksMessage) ProtoMessage() {} + +func (x *RequestIBDBlocksMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestIBDBlocksMessage.ProtoReflect.Descriptor instead. 
+func (*RequestIBDBlocksMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{35} +} + +func (x *RequestIBDBlocksMessage) GetHashes() []*Hash { + if x != nil { + return x.Hashes + } + return nil +} + +type UnexpectedPruningPointMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UnexpectedPruningPointMessage) Reset() { + *x = UnexpectedPruningPointMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnexpectedPruningPointMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnexpectedPruningPointMessage) ProtoMessage() {} + +func (x *UnexpectedPruningPointMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnexpectedPruningPointMessage.ProtoReflect.Descriptor instead. 
+func (*UnexpectedPruningPointMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{36} +} + +type IbdBlockLocatorMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TargetHash *Hash `protobuf:"bytes,1,opt,name=targetHash,proto3" json:"targetHash,omitempty"` + BlockLocatorHashes []*Hash `protobuf:"bytes,2,rep,name=blockLocatorHashes,proto3" json:"blockLocatorHashes,omitempty"` +} + +func (x *IbdBlockLocatorMessage) Reset() { + *x = IbdBlockLocatorMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IbdBlockLocatorMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IbdBlockLocatorMessage) ProtoMessage() {} + +func (x *IbdBlockLocatorMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IbdBlockLocatorMessage.ProtoReflect.Descriptor instead. 
+func (*IbdBlockLocatorMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{37} +} + +func (x *IbdBlockLocatorMessage) GetTargetHash() *Hash { + if x != nil { + return x.TargetHash + } + return nil +} + +func (x *IbdBlockLocatorMessage) GetBlockLocatorHashes() []*Hash { + if x != nil { + return x.BlockLocatorHashes + } + return nil +} + +type RequestIBDChainBlockLocatorMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LowHash *Hash `protobuf:"bytes,1,opt,name=lowHash,proto3" json:"lowHash,omitempty"` + HighHash *Hash `protobuf:"bytes,2,opt,name=highHash,proto3" json:"highHash,omitempty"` +} + +func (x *RequestIBDChainBlockLocatorMessage) Reset() { + *x = RequestIBDChainBlockLocatorMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestIBDChainBlockLocatorMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestIBDChainBlockLocatorMessage) ProtoMessage() {} + +func (x *RequestIBDChainBlockLocatorMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestIBDChainBlockLocatorMessage.ProtoReflect.Descriptor instead. 
+func (*RequestIBDChainBlockLocatorMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{38} +} + +func (x *RequestIBDChainBlockLocatorMessage) GetLowHash() *Hash { + if x != nil { + return x.LowHash + } + return nil +} + +func (x *RequestIBDChainBlockLocatorMessage) GetHighHash() *Hash { + if x != nil { + return x.HighHash + } + return nil +} + +type IbdChainBlockLocatorMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockLocatorHashes []*Hash `protobuf:"bytes,1,rep,name=blockLocatorHashes,proto3" json:"blockLocatorHashes,omitempty"` +} + +func (x *IbdChainBlockLocatorMessage) Reset() { + *x = IbdChainBlockLocatorMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IbdChainBlockLocatorMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IbdChainBlockLocatorMessage) ProtoMessage() {} + +func (x *IbdChainBlockLocatorMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IbdChainBlockLocatorMessage.ProtoReflect.Descriptor instead. 
+func (*IbdChainBlockLocatorMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{39} +} + +func (x *IbdChainBlockLocatorMessage) GetBlockLocatorHashes() []*Hash { + if x != nil { + return x.BlockLocatorHashes + } + return nil +} + +type RequestAnticoneMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockHash *Hash `protobuf:"bytes,1,opt,name=blockHash,proto3" json:"blockHash,omitempty"` + ContextHash *Hash `protobuf:"bytes,2,opt,name=contextHash,proto3" json:"contextHash,omitempty"` +} + +func (x *RequestAnticoneMessage) Reset() { + *x = RequestAnticoneMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestAnticoneMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestAnticoneMessage) ProtoMessage() {} + +func (x *RequestAnticoneMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestAnticoneMessage.ProtoReflect.Descriptor instead. 
+func (*RequestAnticoneMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{40} +} + +func (x *RequestAnticoneMessage) GetBlockHash() *Hash { + if x != nil { + return x.BlockHash + } + return nil +} + +func (x *RequestAnticoneMessage) GetContextHash() *Hash { + if x != nil { + return x.ContextHash + } + return nil +} + +type IbdBlockLocatorHighestHashMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HighestHash *Hash `protobuf:"bytes,1,opt,name=highestHash,proto3" json:"highestHash,omitempty"` +} + +func (x *IbdBlockLocatorHighestHashMessage) Reset() { + *x = IbdBlockLocatorHighestHashMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IbdBlockLocatorHighestHashMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IbdBlockLocatorHighestHashMessage) ProtoMessage() {} + +func (x *IbdBlockLocatorHighestHashMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IbdBlockLocatorHighestHashMessage.ProtoReflect.Descriptor instead. 
+func (*IbdBlockLocatorHighestHashMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{41} +} + +func (x *IbdBlockLocatorHighestHashMessage) GetHighestHash() *Hash { + if x != nil { + return x.HighestHash + } + return nil +} + +type IbdBlockLocatorHighestHashNotFoundMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *IbdBlockLocatorHighestHashNotFoundMessage) Reset() { + *x = IbdBlockLocatorHighestHashNotFoundMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IbdBlockLocatorHighestHashNotFoundMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IbdBlockLocatorHighestHashNotFoundMessage) ProtoMessage() {} + +func (x *IbdBlockLocatorHighestHashNotFoundMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IbdBlockLocatorHighestHashNotFoundMessage.ProtoReflect.Descriptor instead. 
+func (*IbdBlockLocatorHighestHashNotFoundMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{42} +} + +type BlockHeadersMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockHeaders []*BlockHeader `protobuf:"bytes,1,rep,name=blockHeaders,proto3" json:"blockHeaders,omitempty"` +} + +func (x *BlockHeadersMessage) Reset() { + *x = BlockHeadersMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockHeadersMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockHeadersMessage) ProtoMessage() {} + +func (x *BlockHeadersMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockHeadersMessage.ProtoReflect.Descriptor instead. 
+func (*BlockHeadersMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{43} +} + +func (x *BlockHeadersMessage) GetBlockHeaders() []*BlockHeader { + if x != nil { + return x.BlockHeaders + } + return nil +} + +type RequestPruningPointAndItsAnticoneMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RequestPruningPointAndItsAnticoneMessage) Reset() { + *x = RequestPruningPointAndItsAnticoneMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestPruningPointAndItsAnticoneMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestPruningPointAndItsAnticoneMessage) ProtoMessage() {} + +func (x *RequestPruningPointAndItsAnticoneMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestPruningPointAndItsAnticoneMessage.ProtoReflect.Descriptor instead. 
+func (*RequestPruningPointAndItsAnticoneMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{44} +} + +type RequestNextPruningPointAndItsAnticoneBlocksMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RequestNextPruningPointAndItsAnticoneBlocksMessage) Reset() { + *x = RequestNextPruningPointAndItsAnticoneBlocksMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestNextPruningPointAndItsAnticoneBlocksMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestNextPruningPointAndItsAnticoneBlocksMessage) ProtoMessage() {} + +func (x *RequestNextPruningPointAndItsAnticoneBlocksMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestNextPruningPointAndItsAnticoneBlocksMessage.ProtoReflect.Descriptor instead. 
+func (*RequestNextPruningPointAndItsAnticoneBlocksMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{45} +} + +type BlockWithTrustedDataMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *BlockMessage `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + DaaScore uint64 `protobuf:"varint,2,opt,name=daaScore,proto3" json:"daaScore,omitempty"` + DaaWindow []*DaaBlock `protobuf:"bytes,3,rep,name=daaWindow,proto3" json:"daaWindow,omitempty"` + GhostdagData []*BlockGhostdagDataHashPair `protobuf:"bytes,4,rep,name=ghostdagData,proto3" json:"ghostdagData,omitempty"` +} + +func (x *BlockWithTrustedDataMessage) Reset() { + *x = BlockWithTrustedDataMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockWithTrustedDataMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockWithTrustedDataMessage) ProtoMessage() {} + +func (x *BlockWithTrustedDataMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockWithTrustedDataMessage.ProtoReflect.Descriptor instead. 
+func (*BlockWithTrustedDataMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{46} +} + +func (x *BlockWithTrustedDataMessage) GetBlock() *BlockMessage { + if x != nil { + return x.Block + } + return nil +} + +func (x *BlockWithTrustedDataMessage) GetDaaScore() uint64 { + if x != nil { + return x.DaaScore + } + return 0 +} + +func (x *BlockWithTrustedDataMessage) GetDaaWindow() []*DaaBlock { + if x != nil { + return x.DaaWindow + } + return nil +} + +func (x *BlockWithTrustedDataMessage) GetGhostdagData() []*BlockGhostdagDataHashPair { + if x != nil { + return x.GhostdagData + } + return nil +} + +type DaaBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *BlockMessage `protobuf:"bytes,3,opt,name=block,proto3" json:"block,omitempty"` + GhostdagData *GhostdagData `protobuf:"bytes,2,opt,name=ghostdagData,proto3" json:"ghostdagData,omitempty"` +} + +func (x *DaaBlock) Reset() { + *x = DaaBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DaaBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DaaBlock) ProtoMessage() {} + +func (x *DaaBlock) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DaaBlock.ProtoReflect.Descriptor instead. 
+func (*DaaBlock) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{47} +} + +func (x *DaaBlock) GetBlock() *BlockMessage { + if x != nil { + return x.Block + } + return nil +} + +func (x *DaaBlock) GetGhostdagData() *GhostdagData { + if x != nil { + return x.GhostdagData + } + return nil +} + +type DaaBlockV4 struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *BlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + GhostdagData *GhostdagData `protobuf:"bytes,2,opt,name=ghostdagData,proto3" json:"ghostdagData,omitempty"` +} + +func (x *DaaBlockV4) Reset() { + *x = DaaBlockV4{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DaaBlockV4) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DaaBlockV4) ProtoMessage() {} + +func (x *DaaBlockV4) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DaaBlockV4.ProtoReflect.Descriptor instead. 
+func (*DaaBlockV4) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{48} +} + +func (x *DaaBlockV4) GetHeader() *BlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *DaaBlockV4) GetGhostdagData() *GhostdagData { + if x != nil { + return x.GhostdagData + } + return nil +} + +type BlockGhostdagDataHashPair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash *Hash `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + GhostdagData *GhostdagData `protobuf:"bytes,2,opt,name=ghostdagData,proto3" json:"ghostdagData,omitempty"` +} + +func (x *BlockGhostdagDataHashPair) Reset() { + *x = BlockGhostdagDataHashPair{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockGhostdagDataHashPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockGhostdagDataHashPair) ProtoMessage() {} + +func (x *BlockGhostdagDataHashPair) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockGhostdagDataHashPair.ProtoReflect.Descriptor instead. 
+func (*BlockGhostdagDataHashPair) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{49} +} + +func (x *BlockGhostdagDataHashPair) GetHash() *Hash { + if x != nil { + return x.Hash + } + return nil +} + +func (x *BlockGhostdagDataHashPair) GetGhostdagData() *GhostdagData { + if x != nil { + return x.GhostdagData + } + return nil +} + +type GhostdagData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlueScore uint64 `protobuf:"varint,1,opt,name=blueScore,proto3" json:"blueScore,omitempty"` + BlueWork []byte `protobuf:"bytes,2,opt,name=blueWork,proto3" json:"blueWork,omitempty"` + SelectedParent *Hash `protobuf:"bytes,3,opt,name=selectedParent,proto3" json:"selectedParent,omitempty"` + MergeSetBlues []*Hash `protobuf:"bytes,4,rep,name=mergeSetBlues,proto3" json:"mergeSetBlues,omitempty"` + MergeSetReds []*Hash `protobuf:"bytes,5,rep,name=mergeSetReds,proto3" json:"mergeSetReds,omitempty"` + BluesAnticoneSizes []*BluesAnticoneSizes `protobuf:"bytes,6,rep,name=bluesAnticoneSizes,proto3" json:"bluesAnticoneSizes,omitempty"` +} + +func (x *GhostdagData) Reset() { + *x = GhostdagData{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GhostdagData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GhostdagData) ProtoMessage() {} + +func (x *GhostdagData) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GhostdagData.ProtoReflect.Descriptor instead. 
+func (*GhostdagData) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{50} +} + +func (x *GhostdagData) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +func (x *GhostdagData) GetBlueWork() []byte { + if x != nil { + return x.BlueWork + } + return nil +} + +func (x *GhostdagData) GetSelectedParent() *Hash { + if x != nil { + return x.SelectedParent + } + return nil +} + +func (x *GhostdagData) GetMergeSetBlues() []*Hash { + if x != nil { + return x.MergeSetBlues + } + return nil +} + +func (x *GhostdagData) GetMergeSetReds() []*Hash { + if x != nil { + return x.MergeSetReds + } + return nil +} + +func (x *GhostdagData) GetBluesAnticoneSizes() []*BluesAnticoneSizes { + if x != nil { + return x.BluesAnticoneSizes + } + return nil +} + +type BluesAnticoneSizes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlueHash *Hash `protobuf:"bytes,1,opt,name=blueHash,proto3" json:"blueHash,omitempty"` + AnticoneSize uint32 `protobuf:"varint,2,opt,name=anticoneSize,proto3" json:"anticoneSize,omitempty"` +} + +func (x *BluesAnticoneSizes) Reset() { + *x = BluesAnticoneSizes{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BluesAnticoneSizes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BluesAnticoneSizes) ProtoMessage() {} + +func (x *BluesAnticoneSizes) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BluesAnticoneSizes.ProtoReflect.Descriptor instead. 
+func (*BluesAnticoneSizes) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{51} +} + +func (x *BluesAnticoneSizes) GetBlueHash() *Hash { + if x != nil { + return x.BlueHash + } + return nil +} + +func (x *BluesAnticoneSizes) GetAnticoneSize() uint32 { + if x != nil { + return x.AnticoneSize + } + return 0 +} + +type DoneBlocksWithTrustedDataMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DoneBlocksWithTrustedDataMessage) Reset() { + *x = DoneBlocksWithTrustedDataMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DoneBlocksWithTrustedDataMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoneBlocksWithTrustedDataMessage) ProtoMessage() {} + +func (x *DoneBlocksWithTrustedDataMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoneBlocksWithTrustedDataMessage.ProtoReflect.Descriptor instead. 
+func (*DoneBlocksWithTrustedDataMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{52} +} + +type PruningPointsMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Headers []*BlockHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *PruningPointsMessage) Reset() { + *x = PruningPointsMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PruningPointsMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PruningPointsMessage) ProtoMessage() {} + +func (x *PruningPointsMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PruningPointsMessage.ProtoReflect.Descriptor instead. 
+func (*PruningPointsMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{53} +} + +func (x *PruningPointsMessage) GetHeaders() []*BlockHeader { + if x != nil { + return x.Headers + } + return nil +} + +type RequestPruningPointProofMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RequestPruningPointProofMessage) Reset() { + *x = RequestPruningPointProofMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestPruningPointProofMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestPruningPointProofMessage) ProtoMessage() {} + +func (x *RequestPruningPointProofMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestPruningPointProofMessage.ProtoReflect.Descriptor instead. 
+func (*RequestPruningPointProofMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{54} +} + +type PruningPointProofMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Headers []*PruningPointProofHeaderArray `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *PruningPointProofMessage) Reset() { + *x = PruningPointProofMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PruningPointProofMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PruningPointProofMessage) ProtoMessage() {} + +func (x *PruningPointProofMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PruningPointProofMessage.ProtoReflect.Descriptor instead. 
+func (*PruningPointProofMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{55} +} + +func (x *PruningPointProofMessage) GetHeaders() []*PruningPointProofHeaderArray { + if x != nil { + return x.Headers + } + return nil +} + +type PruningPointProofHeaderArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Headers []*BlockHeader `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` +} + +func (x *PruningPointProofHeaderArray) Reset() { + *x = PruningPointProofHeaderArray{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PruningPointProofHeaderArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PruningPointProofHeaderArray) ProtoMessage() {} + +func (x *PruningPointProofHeaderArray) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PruningPointProofHeaderArray.ProtoReflect.Descriptor instead. 
+func (*PruningPointProofHeaderArray) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{56} +} + +func (x *PruningPointProofHeaderArray) GetHeaders() []*BlockHeader { + if x != nil { + return x.Headers + } + return nil +} + +type ReadyMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReadyMessage) Reset() { + *x = ReadyMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadyMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadyMessage) ProtoMessage() {} + +func (x *ReadyMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadyMessage.ProtoReflect.Descriptor instead. 
+func (*ReadyMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{57} +} + +type BlockWithTrustedDataV4Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *BlockMessage `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + DaaWindowIndices []uint64 `protobuf:"varint,2,rep,packed,name=daaWindowIndices,proto3" json:"daaWindowIndices,omitempty"` + GhostdagDataIndices []uint64 `protobuf:"varint,3,rep,packed,name=ghostdagDataIndices,proto3" json:"ghostdagDataIndices,omitempty"` +} + +func (x *BlockWithTrustedDataV4Message) Reset() { + *x = BlockWithTrustedDataV4Message{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockWithTrustedDataV4Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockWithTrustedDataV4Message) ProtoMessage() {} + +func (x *BlockWithTrustedDataV4Message) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockWithTrustedDataV4Message.ProtoReflect.Descriptor instead. 
+func (*BlockWithTrustedDataV4Message) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{58} +} + +func (x *BlockWithTrustedDataV4Message) GetBlock() *BlockMessage { + if x != nil { + return x.Block + } + return nil +} + +func (x *BlockWithTrustedDataV4Message) GetDaaWindowIndices() []uint64 { + if x != nil { + return x.DaaWindowIndices + } + return nil +} + +func (x *BlockWithTrustedDataV4Message) GetGhostdagDataIndices() []uint64 { + if x != nil { + return x.GhostdagDataIndices + } + return nil +} + +type TrustedDataMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DaaWindow []*DaaBlockV4 `protobuf:"bytes,1,rep,name=daaWindow,proto3" json:"daaWindow,omitempty"` + GhostdagData []*BlockGhostdagDataHashPair `protobuf:"bytes,2,rep,name=ghostdagData,proto3" json:"ghostdagData,omitempty"` +} + +func (x *TrustedDataMessage) Reset() { + *x = TrustedDataMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TrustedDataMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TrustedDataMessage) ProtoMessage() {} + +func (x *TrustedDataMessage) ProtoReflect() protoreflect.Message { + mi := &file_p2p_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TrustedDataMessage.ProtoReflect.Descriptor instead. 
+func (*TrustedDataMessage) Descriptor() ([]byte, []int) { + return file_p2p_proto_rawDescGZIP(), []int{59} +} + +func (x *TrustedDataMessage) GetDaaWindow() []*DaaBlockV4 { + if x != nil { + return x.DaaWindow + } + return nil +} + +func (x *TrustedDataMessage) GetGhostdagData() []*BlockGhostdagDataHashPair { + if x != nil { + return x.GhostdagData + } + return nil +} + +var File_p2p_proto protoreflect.FileDescriptor + +var file_p2p_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x70, 0x32, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x34, 0x0a, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x6c, 0x6c, + 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x6c, 0x6c, 0x53, 0x75, 0x62, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x3b, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x22, 0x4b, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x65, 0x74, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4c, 0x69, + 0x73, 0x74, 0x22, 0x4e, 0x0a, 0x0a, 0x4e, 0x65, 0x74, 0x41, 0x64, 
0x64, 0x72, 0x65, 0x73, 0x73, + 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x22, 0x24, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xa0, 0x02, 0x0a, 0x12, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x06, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x49, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 
0x69, 0x72, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x64, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, + 0x10, 0x0a, 0x03, 0x67, 0x61, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x10, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x3f, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x4f, 0x70, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x69, 0x67, + 0x4f, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x60, 0x0a, 0x08, 0x4f, 0x75, 0x74, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x0d, 0x74, 
0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x25, 0x0a, 0x0d, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x22, 0x43, 0x0a, 0x0f, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x44, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x81, 0x01, 0x0a, 0x0c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x72, 
0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0c, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe9, 0x03, 0x0a, 0x0b, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0e, + 0x68, 0x61, 0x73, 0x68, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x4d, 0x65, 0x72, 0x6b, 0x6c, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x43, 0x0a, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x49, 0x64, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x48, 0x61, 0x73, 0x68, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, + 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x37, 0x0a, 0x0e, 0x75, 0x74, + 0x78, 0x6f, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, + 0x61, 0x73, 0x68, 
0x52, 0x0e, 0x75, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x69, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x62, 0x69, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, + 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, + 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, + 0x6f, 0x72, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, + 0x6f, 0x72, 0x6b, 0x12, 0x33, 0x0a, 0x0c, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x70, 0x72, 0x75, 0x6e, + 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x75, 0x65, + 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x6c, 0x75, + 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x48, 0x0a, 0x11, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x0c, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x22, 0x1c, 0x0a, 0x04, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 
0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x5f, + 0x0a, 0x1a, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2b, 0x0a, 0x08, + 0x68, 0x69, 0x67, 0x68, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x08, 0x68, 0x69, 0x67, 0x68, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, + 0x3e, 0x0a, 0x13, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, + 0x6f, 0x0a, 0x15, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x6c, 0x6f, 0x77, 0x48, + 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x07, 0x6c, 0x6f, 0x77, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x2b, 0x0a, 0x08, 0x68, 0x69, 0x67, 0x68, 0x48, 0x61, 0x73, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x08, 0x68, 0x69, 0x67, 0x68, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x14, 0x0a, + 0x12, 0x44, 0x6f, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 
0x73, + 0x61, 0x67, 0x65, 0x22, 0x44, 0x0a, 0x19, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, + 0x6c, 0x61, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x27, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x48, 0x0a, 0x1a, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x03, + 0x69, 0x64, 0x73, 0x22, 0x46, 0x0a, 0x1a, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x28, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3b, 0x0a, 0x14, 0x49, + 0x6e, 0x76, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x44, 0x0a, 0x16, 0x49, 0x6e, 0x76, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x23, + 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, + 0x6e, 0x63, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x50, 0x6f, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x0f, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x61, + 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xd2, 0x02, 0x0a, 0x0e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x2f, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4e, 0x65, + 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 
0x67, 0x65, 0x6e, 0x74, 0x12, + 0x26, 0x0a, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, + 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x6c, 0x61, 0x79, 0x54, 0x78, 0x12, 0x3b, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x22, 0x27, + 0x0a, 0x0d, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x60, 0x0a, 0x21, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, + 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x10, + 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x10, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x84, 0x01, 0x0a, 0x1f, 0x50, 0x72, + 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, + 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x61, 0x0a, + 0x19, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x55, 0x74, 0x78, 0x6f, + 0x45, 
0x6e, 0x74, 0x72, 0x79, 0x50, 0x61, 0x69, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4f, 0x75, 0x74, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x50, 0x61, 0x69, 0x72, 0x52, 0x19, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x41, + 0x6e, 0x64, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x61, 0x69, 0x72, 0x73, + 0x22, 0x7f, 0x0a, 0x18, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x55, + 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x50, 0x61, 0x69, 0x72, 0x12, 0x2f, 0x0a, 0x08, + 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, + 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x55, 0x74, 0x78, + 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x22, 0xaf, 0x01, 0x0a, 0x09, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, + 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 
0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, + 0x6f, 0x72, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, + 0x61, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x2a, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, + 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, + 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0x26, 0x0a, 0x24, 0x44, 0x6f, 0x6e, 0x65, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x53, 0x65, 0x74, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x42, 0x0a, 0x17, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x1f, 0x0a, + 0x1d, 0x55, 0x6e, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x72, 0x75, 0x6e, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x8a, + 0x01, 0x0a, 0x16, 0x49, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0a, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3f, 0x0a, 
0x12, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x7c, 0x0a, 0x22, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x42, 0x44, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x29, 0x0a, 0x07, 0x6c, 0x6f, 0x77, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, + 0x61, 0x73, 0x68, 0x52, 0x07, 0x6c, 0x6f, 0x77, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2b, 0x0a, 0x08, + 0x68, 0x69, 0x67, 0x68, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x08, 0x68, 0x69, 0x67, 0x68, 0x48, 0x61, 0x73, 0x68, 0x22, 0x5e, 0x0a, 0x1b, 0x49, 0x62, 0x64, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, + 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x12, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x12, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x6f, 0x72, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x7a, 0x0a, 0x16, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, + 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x31, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x48, 0x61, 0x73, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x56, 0x0a, 0x21, 0x49, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x68, 0x69, + 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, + 0x52, 0x0b, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x2b, 0x0a, + 0x29, 0x49, 0x62, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, + 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x4e, 0x6f, 0x74, 0x46, 0x6f, + 0x75, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x51, 0x0a, 0x13, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, + 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x2a, 0x0a, + 0x28, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 
0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, + 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x34, 0x0a, 0x32, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x64, 0x49, 0x74, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, + 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0xe5, 0x01, 0x0a, 0x1b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1a, + 0x0a, 0x08, 0x64, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x64, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x64, 0x61, + 0x61, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x61, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x09, 0x64, 0x61, 0x61, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x48, 0x0a, + 0x0c, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x48, 0x61, 0x73, 0x68, 0x50, 0x61, 0x69, 0x72, 0x52, 0x0c, 0x67, 0x68, 0x6f, 0x73, 0x74, + 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x76, 0x0a, 0x08, 0x44, 0x61, 0x61, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 
0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x3b, 0x0a, 0x0c, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0c, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, + 0x79, 0x0a, 0x0a, 0x44, 0x61, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x34, 0x12, 0x2e, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, + 0x0c, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x67, 0x68, + 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x7d, 0x0a, 0x19, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x48, + 0x61, 0x73, 0x68, 0x50, 0x61, 0x69, 0x72, 0x12, 0x23, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x3b, 0x0a, 0x0c, + 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, + 0x68, 0x6f, 0x73, 0x74, 0x64, 
0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0c, 0x67, 0x68, 0x6f, + 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0xbc, 0x02, 0x0a, 0x0c, 0x47, 0x68, + 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x6c, + 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, + 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x62, 0x6c, 0x75, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x37, 0x0a, 0x0e, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0e, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x35, 0x0a, + 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0d, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, + 0x6c, 0x75, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x0c, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x6d, 0x65, 0x72, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x12, 0x4d, 0x0a, 0x12, 0x62, 0x6c, 0x75, + 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x73, 0x52, 0x12, 0x62, 0x6c, 0x75, 0x65, 0x73, 
0x41, 0x6e, 0x74, 0x69, 0x63, + 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x22, 0x65, 0x0a, 0x12, 0x42, 0x6c, 0x75, 0x65, + 0x73, 0x41, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, 0x2b, + 0x0a, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x48, 0x61, 0x73, + 0x68, 0x52, 0x08, 0x62, 0x6c, 0x75, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x0c, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0c, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, + 0x22, 0x0a, 0x20, 0x44, 0x6f, 0x6e, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x57, 0x69, 0x74, + 0x68, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x48, 0x0a, 0x14, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, + 0x69, 0x6e, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x21, 0x0a, + 0x1f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x5d, 0x0a, 0x18, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x41, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 
0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, + 0x50, 0x0a, 0x1c, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x50, + 0x72, 0x6f, 0x6f, 0x66, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, + 0x30, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x22, 0x0e, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0xac, 0x01, 0x0a, 0x1d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x57, 0x69, 0x74, 0x68, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x56, 0x34, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x10, 0x64, 0x61, 0x61, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x49, + 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x10, 0x64, 0x61, + 0x61, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x12, 0x30, + 0x0a, 0x13, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x49, 0x6e, + 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x13, 0x67, 0x68, 0x6f, + 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, + 0x22, 0x93, 0x01, 0x0a, 0x12, 0x54, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 
0x12, 0x33, 0x0a, 0x09, 0x64, 0x61, 0x61, 0x57, 0x69, + 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x61, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, + 0x34, 0x52, 0x09, 0x64, 0x61, 0x61, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x48, 0x0a, 0x0c, + 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x47, 0x68, 0x6f, 0x73, 0x74, 0x64, 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, + 0x48, 0x61, 0x73, 0x68, 0x50, 0x61, 0x69, 0x72, 0x52, 0x0c, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x64, + 0x61, 0x67, 0x44, 0x61, 0x74, 0x61, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_p2p_proto_rawDescOnce sync.Once + file_p2p_proto_rawDescData = file_p2p_proto_rawDesc +) + +func file_p2p_proto_rawDescGZIP() []byte { + file_p2p_proto_rawDescOnce.Do(func() { + file_p2p_proto_rawDescData = protoimpl.X.CompressGZIP(file_p2p_proto_rawDescData) + }) + return file_p2p_proto_rawDescData +} + +var file_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_p2p_proto_goTypes = []interface{}{ + (*RequestAddressesMessage)(nil), // 0: protowire.RequestAddressesMessage + (*AddressesMessage)(nil), // 1: protowire.AddressesMessage + (*NetAddress)(nil), // 2: protowire.NetAddress + (*SubnetworkId)(nil), // 3: protowire.SubnetworkId + (*TransactionMessage)(nil), // 4: protowire.TransactionMessage + (*TransactionInput)(nil), // 5: protowire.TransactionInput + (*Outpoint)(nil), // 6: protowire.Outpoint + (*TransactionId)(nil), // 
7: protowire.TransactionId + (*ScriptPublicKey)(nil), // 8: protowire.ScriptPublicKey + (*TransactionOutput)(nil), // 9: protowire.TransactionOutput + (*BlockMessage)(nil), // 10: protowire.BlockMessage + (*BlockHeader)(nil), // 11: protowire.BlockHeader + (*BlockLevelParents)(nil), // 12: protowire.BlockLevelParents + (*Hash)(nil), // 13: protowire.Hash + (*RequestBlockLocatorMessage)(nil), // 14: protowire.RequestBlockLocatorMessage + (*BlockLocatorMessage)(nil), // 15: protowire.BlockLocatorMessage + (*RequestHeadersMessage)(nil), // 16: protowire.RequestHeadersMessage + (*RequestNextHeadersMessage)(nil), // 17: protowire.RequestNextHeadersMessage + (*DoneHeadersMessage)(nil), // 18: protowire.DoneHeadersMessage + (*RequestRelayBlocksMessage)(nil), // 19: protowire.RequestRelayBlocksMessage + (*RequestTransactionsMessage)(nil), // 20: protowire.RequestTransactionsMessage + (*TransactionNotFoundMessage)(nil), // 21: protowire.TransactionNotFoundMessage + (*InvRelayBlockMessage)(nil), // 22: protowire.InvRelayBlockMessage + (*InvTransactionsMessage)(nil), // 23: protowire.InvTransactionsMessage + (*PingMessage)(nil), // 24: protowire.PingMessage + (*PongMessage)(nil), // 25: protowire.PongMessage + (*VerackMessage)(nil), // 26: protowire.VerackMessage + (*VersionMessage)(nil), // 27: protowire.VersionMessage + (*RejectMessage)(nil), // 28: protowire.RejectMessage + (*RequestPruningPointUTXOSetMessage)(nil), // 29: protowire.RequestPruningPointUTXOSetMessage + (*PruningPointUtxoSetChunkMessage)(nil), // 30: protowire.PruningPointUtxoSetChunkMessage + (*OutpointAndUtxoEntryPair)(nil), // 31: protowire.OutpointAndUtxoEntryPair + (*UtxoEntry)(nil), // 32: protowire.UtxoEntry + (*RequestNextPruningPointUtxoSetChunkMessage)(nil), // 33: protowire.RequestNextPruningPointUtxoSetChunkMessage + (*DonePruningPointUtxoSetChunksMessage)(nil), // 34: protowire.DonePruningPointUtxoSetChunksMessage + (*RequestIBDBlocksMessage)(nil), // 35: protowire.RequestIBDBlocksMessage + 
(*UnexpectedPruningPointMessage)(nil), // 36: protowire.UnexpectedPruningPointMessage + (*IbdBlockLocatorMessage)(nil), // 37: protowire.IbdBlockLocatorMessage + (*RequestIBDChainBlockLocatorMessage)(nil), // 38: protowire.RequestIBDChainBlockLocatorMessage + (*IbdChainBlockLocatorMessage)(nil), // 39: protowire.IbdChainBlockLocatorMessage + (*RequestAnticoneMessage)(nil), // 40: protowire.RequestAnticoneMessage + (*IbdBlockLocatorHighestHashMessage)(nil), // 41: protowire.IbdBlockLocatorHighestHashMessage + (*IbdBlockLocatorHighestHashNotFoundMessage)(nil), // 42: protowire.IbdBlockLocatorHighestHashNotFoundMessage + (*BlockHeadersMessage)(nil), // 43: protowire.BlockHeadersMessage + (*RequestPruningPointAndItsAnticoneMessage)(nil), // 44: protowire.RequestPruningPointAndItsAnticoneMessage + (*RequestNextPruningPointAndItsAnticoneBlocksMessage)(nil), // 45: protowire.RequestNextPruningPointAndItsAnticoneBlocksMessage + (*BlockWithTrustedDataMessage)(nil), // 46: protowire.BlockWithTrustedDataMessage + (*DaaBlock)(nil), // 47: protowire.DaaBlock + (*DaaBlockV4)(nil), // 48: protowire.DaaBlockV4 + (*BlockGhostdagDataHashPair)(nil), // 49: protowire.BlockGhostdagDataHashPair + (*GhostdagData)(nil), // 50: protowire.GhostdagData + (*BluesAnticoneSizes)(nil), // 51: protowire.BluesAnticoneSizes + (*DoneBlocksWithTrustedDataMessage)(nil), // 52: protowire.DoneBlocksWithTrustedDataMessage + (*PruningPointsMessage)(nil), // 53: protowire.PruningPointsMessage + (*RequestPruningPointProofMessage)(nil), // 54: protowire.RequestPruningPointProofMessage + (*PruningPointProofMessage)(nil), // 55: protowire.PruningPointProofMessage + (*PruningPointProofHeaderArray)(nil), // 56: protowire.PruningPointProofHeaderArray + (*ReadyMessage)(nil), // 57: protowire.ReadyMessage + (*BlockWithTrustedDataV4Message)(nil), // 58: protowire.BlockWithTrustedDataV4Message + (*TrustedDataMessage)(nil), // 59: protowire.TrustedDataMessage +} +var file_p2p_proto_depIdxs = []int32{ + 3, // 0: 
protowire.RequestAddressesMessage.subnetworkId:type_name -> protowire.SubnetworkId + 2, // 1: protowire.AddressesMessage.addressList:type_name -> protowire.NetAddress + 5, // 2: protowire.TransactionMessage.inputs:type_name -> protowire.TransactionInput + 9, // 3: protowire.TransactionMessage.outputs:type_name -> protowire.TransactionOutput + 3, // 4: protowire.TransactionMessage.subnetworkId:type_name -> protowire.SubnetworkId + 6, // 5: protowire.TransactionInput.previousOutpoint:type_name -> protowire.Outpoint + 7, // 6: protowire.Outpoint.transactionId:type_name -> protowire.TransactionId + 8, // 7: protowire.TransactionOutput.scriptPublicKey:type_name -> protowire.ScriptPublicKey + 11, // 8: protowire.BlockMessage.header:type_name -> protowire.BlockHeader + 4, // 9: protowire.BlockMessage.transactions:type_name -> protowire.TransactionMessage + 12, // 10: protowire.BlockHeader.parents:type_name -> protowire.BlockLevelParents + 13, // 11: protowire.BlockHeader.hashMerkleRoot:type_name -> protowire.Hash + 13, // 12: protowire.BlockHeader.acceptedIdMerkleRoot:type_name -> protowire.Hash + 13, // 13: protowire.BlockHeader.utxoCommitment:type_name -> protowire.Hash + 13, // 14: protowire.BlockHeader.pruningPoint:type_name -> protowire.Hash + 13, // 15: protowire.BlockLevelParents.parentHashes:type_name -> protowire.Hash + 13, // 16: protowire.RequestBlockLocatorMessage.highHash:type_name -> protowire.Hash + 13, // 17: protowire.BlockLocatorMessage.hashes:type_name -> protowire.Hash + 13, // 18: protowire.RequestHeadersMessage.lowHash:type_name -> protowire.Hash + 13, // 19: protowire.RequestHeadersMessage.highHash:type_name -> protowire.Hash + 13, // 20: protowire.RequestRelayBlocksMessage.hashes:type_name -> protowire.Hash + 7, // 21: protowire.RequestTransactionsMessage.ids:type_name -> protowire.TransactionId + 7, // 22: protowire.TransactionNotFoundMessage.id:type_name -> protowire.TransactionId + 13, // 23: protowire.InvRelayBlockMessage.hash:type_name -> 
protowire.Hash + 7, // 24: protowire.InvTransactionsMessage.ids:type_name -> protowire.TransactionId + 2, // 25: protowire.VersionMessage.address:type_name -> protowire.NetAddress + 3, // 26: protowire.VersionMessage.subnetworkId:type_name -> protowire.SubnetworkId + 13, // 27: protowire.RequestPruningPointUTXOSetMessage.pruningPointHash:type_name -> protowire.Hash + 31, // 28: protowire.PruningPointUtxoSetChunkMessage.outpointAndUtxoEntryPairs:type_name -> protowire.OutpointAndUtxoEntryPair + 6, // 29: protowire.OutpointAndUtxoEntryPair.outpoint:type_name -> protowire.Outpoint + 32, // 30: protowire.OutpointAndUtxoEntryPair.utxoEntry:type_name -> protowire.UtxoEntry + 8, // 31: protowire.UtxoEntry.scriptPublicKey:type_name -> protowire.ScriptPublicKey + 13, // 32: protowire.RequestIBDBlocksMessage.hashes:type_name -> protowire.Hash + 13, // 33: protowire.IbdBlockLocatorMessage.targetHash:type_name -> protowire.Hash + 13, // 34: protowire.IbdBlockLocatorMessage.blockLocatorHashes:type_name -> protowire.Hash + 13, // 35: protowire.RequestIBDChainBlockLocatorMessage.lowHash:type_name -> protowire.Hash + 13, // 36: protowire.RequestIBDChainBlockLocatorMessage.highHash:type_name -> protowire.Hash + 13, // 37: protowire.IbdChainBlockLocatorMessage.blockLocatorHashes:type_name -> protowire.Hash + 13, // 38: protowire.RequestAnticoneMessage.blockHash:type_name -> protowire.Hash + 13, // 39: protowire.RequestAnticoneMessage.contextHash:type_name -> protowire.Hash + 13, // 40: protowire.IbdBlockLocatorHighestHashMessage.highestHash:type_name -> protowire.Hash + 11, // 41: protowire.BlockHeadersMessage.blockHeaders:type_name -> protowire.BlockHeader + 10, // 42: protowire.BlockWithTrustedDataMessage.block:type_name -> protowire.BlockMessage + 47, // 43: protowire.BlockWithTrustedDataMessage.daaWindow:type_name -> protowire.DaaBlock + 49, // 44: protowire.BlockWithTrustedDataMessage.ghostdagData:type_name -> protowire.BlockGhostdagDataHashPair + 10, // 45: 
protowire.DaaBlock.block:type_name -> protowire.BlockMessage + 50, // 46: protowire.DaaBlock.ghostdagData:type_name -> protowire.GhostdagData + 11, // 47: protowire.DaaBlockV4.header:type_name -> protowire.BlockHeader + 50, // 48: protowire.DaaBlockV4.ghostdagData:type_name -> protowire.GhostdagData + 13, // 49: protowire.BlockGhostdagDataHashPair.hash:type_name -> protowire.Hash + 50, // 50: protowire.BlockGhostdagDataHashPair.ghostdagData:type_name -> protowire.GhostdagData + 13, // 51: protowire.GhostdagData.selectedParent:type_name -> protowire.Hash + 13, // 52: protowire.GhostdagData.mergeSetBlues:type_name -> protowire.Hash + 13, // 53: protowire.GhostdagData.mergeSetReds:type_name -> protowire.Hash + 51, // 54: protowire.GhostdagData.bluesAnticoneSizes:type_name -> protowire.BluesAnticoneSizes + 13, // 55: protowire.BluesAnticoneSizes.blueHash:type_name -> protowire.Hash + 11, // 56: protowire.PruningPointsMessage.headers:type_name -> protowire.BlockHeader + 56, // 57: protowire.PruningPointProofMessage.headers:type_name -> protowire.PruningPointProofHeaderArray + 11, // 58: protowire.PruningPointProofHeaderArray.headers:type_name -> protowire.BlockHeader + 10, // 59: protowire.BlockWithTrustedDataV4Message.block:type_name -> protowire.BlockMessage + 48, // 60: protowire.TrustedDataMessage.daaWindow:type_name -> protowire.DaaBlockV4 + 49, // 61: protowire.TrustedDataMessage.ghostdagData:type_name -> protowire.BlockGhostdagDataHashPair + 62, // [62:62] is the sub-list for method output_type + 62, // [62:62] is the sub-list for method input_type + 62, // [62:62] is the sub-list for extension type_name + 62, // [62:62] is the sub-list for extension extendee + 0, // [0:62] is the sub-list for field type_name +} + +func init() { file_p2p_proto_init() } +func file_p2p_proto_init() { + if File_p2p_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_p2p_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*RequestAddressesMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddressesMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubnetworkId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Outpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ScriptPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockLevelParents); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Hash); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBlockLocatorMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockLocatorMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*RequestHeadersMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestNextHeadersMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoneHeadersMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestRelayBlocksMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestTransactionsMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransactionNotFoundMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvRelayBlockMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvTransactionsMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_p2p_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PongMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VerackMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RejectMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestPruningPointUTXOSetMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PruningPointUtxoSetChunkMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OutpointAndUtxoEntryPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_p2p_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UtxoEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestNextPruningPointUtxoSetChunkMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DonePruningPointUtxoSetChunksMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestIBDBlocksMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnexpectedPruningPointMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IbdBlockLocatorMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestIBDChainBlockLocatorMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IbdChainBlockLocatorMessage); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestAnticoneMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IbdBlockLocatorHighestHashMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IbdBlockLocatorHighestHashNotFoundMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockHeadersMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestPruningPointAndItsAnticoneMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestNextPruningPointAndItsAnticoneBlocksMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockWithTrustedDataMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + file_p2p_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DaaBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DaaBlockV4); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockGhostdagDataHashPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GhostdagData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BluesAnticoneSizes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DoneBlocksWithTrustedDataMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PruningPointsMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestPruningPointProofMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PruningPointProofMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PruningPointProofHeaderArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadyMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockWithTrustedDataV4Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TrustedDataMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_p2p_proto_rawDesc, + NumEnums: 0, + NumMessages: 60, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_p2p_proto_goTypes, + DependencyIndexes: file_p2p_proto_depIdxs, + MessageInfos: file_p2p_proto_msgTypes, + }.Build() + File_p2p_proto = out.File + file_p2p_proto_rawDesc = nil + file_p2p_proto_goTypes = nil + file_p2p_proto_depIdxs = nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p.proto 
b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p.proto new file mode 100644 index 0000000..8004bbb --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p.proto @@ -0,0 +1,290 @@ +syntax = "proto3"; +package protowire; + +option go_package = "github.com/spectre-project/spectred/protowire"; + +message RequestAddressesMessage{ + bool includeAllSubnetworks = 1; + SubnetworkId subnetworkId = 2; +} + +message AddressesMessage{ + repeated NetAddress addressList = 1; +} + +message NetAddress{ + int64 timestamp = 1; + bytes ip = 3; + uint32 port = 4; +} + +message SubnetworkId{ + bytes bytes = 1; +} + +message TransactionMessage{ + uint32 version = 1; + repeated TransactionInput inputs = 2; + repeated TransactionOutput outputs = 3; + uint64 lockTime = 4; + SubnetworkId subnetworkId = 5; + uint64 gas = 6; + bytes payload = 8; +} + +message TransactionInput{ + Outpoint previousOutpoint = 1; + bytes signatureScript = 2; + uint64 sequence = 3; + uint32 sigOpCount = 4; +} + +message Outpoint{ + TransactionId transactionId = 1; + uint32 index = 2; +} + +message TransactionId{ + bytes bytes = 1; +} +message ScriptPublicKey { + bytes script = 1; + uint32 version = 2; +} + +message TransactionOutput{ + uint64 value = 1; + ScriptPublicKey scriptPublicKey = 2; +} + +message BlockMessage{ + BlockHeader header = 1; + repeated TransactionMessage transactions = 2; +} + +message BlockHeader{ + uint32 version = 1; + repeated BlockLevelParents parents = 12; + Hash hashMerkleRoot = 3; + Hash acceptedIdMerkleRoot = 4; + Hash utxoCommitment = 5; + int64 timestamp = 6; + uint32 bits = 7; + uint64 nonce = 8; + uint64 daaScore = 9; + bytes blueWork = 10; + Hash pruningPoint = 14; + uint64 blueScore = 13; +} + +message BlockLevelParents { + repeated Hash parentHashes = 1; +} + +message Hash{ + bytes bytes = 1; +} + +message RequestBlockLocatorMessage{ + Hash highHash = 1; + uint32 limit = 2; +} + +message BlockLocatorMessage{ + repeated Hash hashes 
= 1; +} + +message RequestHeadersMessage{ + Hash lowHash = 1; + Hash highHash = 2; +} + +message RequestNextHeadersMessage{ +} + +message DoneHeadersMessage{ +} + +message RequestRelayBlocksMessage{ + repeated Hash hashes = 1; +} + +message RequestTransactionsMessage { + repeated TransactionId ids = 1; +} + +message TransactionNotFoundMessage{ + TransactionId id = 1; +} + +message InvRelayBlockMessage{ + Hash hash = 1; +} + +message InvTransactionsMessage{ + repeated TransactionId ids = 1; +} + +message PingMessage{ + uint64 nonce = 1; +} + +message PongMessage{ + uint64 nonce = 1; +} + +message VerackMessage{ +} + +message VersionMessage{ + uint32 protocolVersion = 1; + uint64 services = 2; + int64 timestamp = 3; + NetAddress address = 4; + bytes id = 5; + string userAgent = 6; + bool disableRelayTx = 8; + SubnetworkId subnetworkId = 9; + string network = 10; +} + +message RejectMessage{ + string reason = 1; +} + +message RequestPruningPointUTXOSetMessage{ + Hash pruningPointHash = 1; +} + +message PruningPointUtxoSetChunkMessage{ + repeated OutpointAndUtxoEntryPair outpointAndUtxoEntryPairs = 1; +} + +message OutpointAndUtxoEntryPair{ + Outpoint outpoint = 1; + UtxoEntry utxoEntry = 2; +} + +message UtxoEntry { + uint64 amount = 1; + ScriptPublicKey scriptPublicKey = 2; + uint64 blockDaaScore = 3; + bool isCoinbase = 4; +} + +message RequestNextPruningPointUtxoSetChunkMessage { +} + +message DonePruningPointUtxoSetChunksMessage { +} + +message RequestIBDBlocksMessage{ + repeated Hash hashes = 1; +} + +message UnexpectedPruningPointMessage{ +} + +message IbdBlockLocatorMessage { + Hash targetHash = 1; + repeated Hash blockLocatorHashes = 2; +} + +message RequestIBDChainBlockLocatorMessage{ + Hash lowHash = 1; + Hash highHash = 2; +} + +message IbdChainBlockLocatorMessage { + repeated Hash blockLocatorHashes = 1; +} + +message RequestAnticoneMessage{ + Hash blockHash = 1; + Hash contextHash = 2; +} + +message IbdBlockLocatorHighestHashMessage { + Hash highestHash = 
1; +} + +message IbdBlockLocatorHighestHashNotFoundMessage { +} + +message BlockHeadersMessage { + repeated BlockHeader blockHeaders = 1; +} + +message RequestPruningPointAndItsAnticoneMessage { +} + +message RequestNextPruningPointAndItsAnticoneBlocksMessage{ +} + +message BlockWithTrustedDataMessage { + BlockMessage block = 1; + uint64 daaScore = 2; + repeated DaaBlock daaWindow = 3; + repeated BlockGhostdagDataHashPair ghostdagData = 4; +} + +message DaaBlock { + BlockMessage block = 3; + GhostdagData ghostdagData = 2; +} + +message DaaBlockV4 { + BlockHeader header = 1; + GhostdagData ghostdagData = 2; +} + +message BlockGhostdagDataHashPair { + Hash hash = 1; + GhostdagData ghostdagData = 2; +} + +message GhostdagData { + uint64 blueScore = 1; + bytes blueWork = 2; + Hash selectedParent = 3; + repeated Hash mergeSetBlues = 4; + repeated Hash mergeSetReds = 5; + repeated BluesAnticoneSizes bluesAnticoneSizes = 6; +} + +message BluesAnticoneSizes { + Hash blueHash = 1; + uint32 anticoneSize = 2; +} + +message DoneBlocksWithTrustedDataMessage { +} + +message PruningPointsMessage { + repeated BlockHeader headers = 1; +} + +message RequestPruningPointProofMessage { +} + +message PruningPointProofMessage { + repeated PruningPointProofHeaderArray headers = 1; +} + +message PruningPointProofHeaderArray { + repeated BlockHeader headers = 1; +} + +message ReadyMessage { +} + +message BlockWithTrustedDataV4Message { + BlockMessage block = 1; + repeated uint64 daaWindowIndices = 2; + repeated uint64 ghostdagDataIndices = 3; +} + +message TrustedDataMessage { + repeated DaaBlockV4 daaWindow = 1; + repeated BlockGhostdagDataHashPair ghostdagData = 2; +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_addresses.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_addresses.go new file mode 100644 index 0000000..301a02e --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_addresses.go @@ -0,0 
+1,56 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Addresses) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "SpectredMessage_Addresses is nil") + } + addressList, err := x.Addresses.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.MsgAddresses{ + AddressList: addressList, + }, nil +} + +func (x *AddressesMessage) toAppMessage() ([]*appmessage.NetAddress, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "AddressesMessage is nil") + } + + if len(x.AddressList) > appmessage.MaxAddressesPerMsg { + return nil, errors.Errorf("too many addresses for message "+ + "[count %d, max %d]", len(x.AddressList), appmessage.MaxAddressesPerMsg) + } + addressList := make([]*appmessage.NetAddress, len(x.AddressList)) + for i, address := range x.AddressList { + var err error + addressList[i], err = address.toAppMessage() + if err != nil { + return nil, err + } + } + return addressList, nil +} + +func (x *SpectredMessage_Addresses) fromAppMessage(msgAddresses *appmessage.MsgAddresses) error { + if len(msgAddresses.AddressList) > appmessage.MaxAddressesPerMsg { + return errors.Errorf("too many addresses for message "+ + "[count %d, max %d]", len(msgAddresses.AddressList), appmessage.MaxAddressesPerMsg) + } + + addressList := make([]*NetAddress, len(msgAddresses.AddressList)) + for i, address := range msgAddresses.AddressList { + addressList[i] = appMessageNetAddressToProto(address) + } + + x.Addresses = &AddressesMessage{ + AddressList: addressList, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block.go new file mode 100644 index 0000000..2efd4ba --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block.go @@ -0,0 +1,62 @@ +package 
protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Block) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "SpectredMessage_Block is nil") + } + return x.Block.toAppMessage() +} + +func (x *SpectredMessage_Block) fromAppMessage(msgBlock *appmessage.MsgBlock) error { + x.Block = new(BlockMessage) + return x.Block.fromAppMessage(msgBlock) +} + +func (x *BlockMessage) toAppMessage() (*appmessage.MsgBlock, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "BlockMessage is nil") + } + header, err := x.Header.toAppMessage() + if err != nil { + return nil, err + } + + transactions := make([]*appmessage.MsgTx, len(x.Transactions)) + for i, protoTx := range x.Transactions { + msgTx, err := protoTx.toAppMessage() + if err != nil { + return nil, err + } + transactions[i] = msgTx.(*appmessage.MsgTx) + } + + return &appmessage.MsgBlock{ + Header: *header, + Transactions: transactions, + }, nil +} + +func (x *BlockMessage) fromAppMessage(msgBlock *appmessage.MsgBlock) error { + protoHeader := new(BlockHeader) + err := protoHeader.fromAppMessage(&msgBlock.Header) + if err != nil { + return err + } + + protoTransactions := make([]*TransactionMessage, len(msgBlock.Transactions)) + for i, tx := range msgBlock.Transactions { + protoTx := new(TransactionMessage) + protoTx.fromAppMessage(tx) + protoTransactions[i] = protoTx + } + *x = BlockMessage{ + Header: protoHeader, + Transactions: protoTransactions, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_headers.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_headers.go new file mode 100644 index 0000000..a6e226d --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_headers.go @@ -0,0 +1,51 @@ +package protowire + +import ( + "github.com/pkg/errors" + 
"github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_BlockHeaders) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_BlockHeaders is nil") + } + blockHeaders, err := x.BlockHeaders.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.BlockHeadersMessage{ + BlockHeaders: blockHeaders, + }, nil +} + +func (x *BlockHeadersMessage) toAppMessage() ([]*appmessage.MsgBlockHeader, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "BlockHeadersMessage is nil") + } + blockHeaders := make([]*appmessage.MsgBlockHeader, len(x.BlockHeaders)) + for i, blockHeader := range x.BlockHeaders { + var err error + blockHeaders[i], err = blockHeader.toAppMessage() + if err != nil { + return nil, err + } + } + + return blockHeaders, nil +} + +func (x *SpectredMessage_BlockHeaders) fromAppMessage(blockHeadersMessage *appmessage.BlockHeadersMessage) error { + blockHeaders := make([]*BlockHeader, len(blockHeadersMessage.BlockHeaders)) + for i, blockHeader := range blockHeadersMessage.BlockHeaders { + blockHeaders[i] = &BlockHeader{} + err := blockHeaders[i].fromAppMessage(blockHeader) + if err != nil { + return err + } + } + + x.BlockHeaders = &BlockHeadersMessage{ + BlockHeaders: blockHeaders, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_locator.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_locator.go new file mode 100644 index 0000000..77828d3 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_locator.go @@ -0,0 +1,40 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (x *SpectredMessage_BlockLocator) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_BlockLocator is nil") + } + hashes, err := x.BlockLocator.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.MsgBlockLocator{BlockLocatorHashes: hashes}, nil +} + +func (x *BlockLocatorMessage) toAppMessage() ([]*externalapi.DomainHash, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "BlockLocatorMessage is nil") + } + if len(x.Hashes) > appmessage.MaxBlockLocatorsPerMsg { + return nil, errors.Errorf("too many block locator hashes for message "+ + "[count %d, max %d]", len(x.Hashes), appmessage.MaxBlockLocatorsPerMsg) + } + return protoHashesToDomain(x.Hashes) +} + +func (x *SpectredMessage_BlockLocator) fromAppMessage(msgBlockLocator *appmessage.MsgBlockLocator) error { + if len(msgBlockLocator.BlockLocatorHashes) > appmessage.MaxBlockLocatorsPerMsg { + return errors.Errorf("too many block locator hashes for message "+ + "[count %d, max %d]", len(msgBlockLocator.BlockLocatorHashes), appmessage.MaxBlockLocatorsPerMsg) + } + x.BlockLocator = &BlockLocatorMessage{ + Hashes: domainHashesToProto(msgBlockLocator.BlockLocatorHashes), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_with_trusted_data.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_with_trusted_data.go new file mode 100644 index 0000000..d9f6c8c --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_with_trusted_data.go @@ -0,0 +1,195 @@ +package protowire + +import ( + "math/big" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (x *SpectredMessage_BlockWithTrustedData) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_BlockWithTrustedData is nil") + } + + msgBlock, err := x.BlockWithTrustedData.Block.toAppMessage() + if err != 
nil { + return nil, err + } + + daaWindow := make([]*appmessage.TrustedDataDataDAABlock, len(x.BlockWithTrustedData.DaaWindow)) + for i, daaBlock := range x.BlockWithTrustedData.DaaWindow { + daaWindow[i], err = daaBlock.toAppMessage() + if err != nil { + return nil, err + } + } + + ghostdagData := make([]*appmessage.BlockGHOSTDAGDataHashPair, len(x.BlockWithTrustedData.GhostdagData)) + for i, pair := range x.BlockWithTrustedData.GhostdagData { + hash, err := pair.Hash.toDomain() + if err != nil { + return nil, err + } + + data, err := pair.GhostdagData.toAppMessage() + if err != nil { + return nil, err + } + + ghostdagData[i] = &appmessage.BlockGHOSTDAGDataHashPair{ + Hash: hash, + GHOSTDAGData: data, + } + } + + return &appmessage.MsgBlockWithTrustedData{ + Block: msgBlock, + DAAScore: x.BlockWithTrustedData.DaaScore, + DAAWindow: daaWindow, + GHOSTDAGData: ghostdagData, + }, nil +} + +func (x *SpectredMessage_BlockWithTrustedData) fromAppMessage(msgBlockWithTrustedData *appmessage.MsgBlockWithTrustedData) error { + x.BlockWithTrustedData = &BlockWithTrustedDataMessage{ + Block: &BlockMessage{}, + DaaScore: msgBlockWithTrustedData.DAAScore, + DaaWindow: make([]*DaaBlock, len(msgBlockWithTrustedData.DAAWindow)), + GhostdagData: make([]*BlockGhostdagDataHashPair, len(msgBlockWithTrustedData.GHOSTDAGData)), + } + + err := x.BlockWithTrustedData.Block.fromAppMessage(msgBlockWithTrustedData.Block) + if err != nil { + return err + } + + for i, daaBlock := range msgBlockWithTrustedData.DAAWindow { + x.BlockWithTrustedData.DaaWindow[i] = &DaaBlock{} + err := x.BlockWithTrustedData.DaaWindow[i].fromAppMessage(daaBlock) + if err != nil { + return err + } + } + + for i, pair := range msgBlockWithTrustedData.GHOSTDAGData { + x.BlockWithTrustedData.GhostdagData[i] = &BlockGhostdagDataHashPair{ + Hash: domainHashToProto(pair.Hash), + GhostdagData: &GhostdagData{}, + } + + x.BlockWithTrustedData.GhostdagData[i].GhostdagData.fromAppMessage(pair.GHOSTDAGData) + } + + return nil 
+} + +func (x *DaaBlock) toAppMessage() (*appmessage.TrustedDataDataDAABlock, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "DaaBlock is nil") + } + + msgBlock, err := x.Block.toAppMessage() + if err != nil { + return nil, err + } + + ghostdagData, err := x.GhostdagData.toAppMessage() + if err != nil { + return nil, err + } + + return &appmessage.TrustedDataDataDAABlock{ + Block: msgBlock, + GHOSTDAGData: ghostdagData, + }, nil +} + +func (x *DaaBlock) fromAppMessage(daaBlock *appmessage.TrustedDataDataDAABlock) error { + *x = DaaBlock{ + Block: &BlockMessage{}, + GhostdagData: &GhostdagData{}, + } + + err := x.Block.fromAppMessage(daaBlock.Block) + if err != nil { + return err + } + + x.GhostdagData.fromAppMessage(daaBlock.GHOSTDAGData) + + return nil +} + +func (x *GhostdagData) toAppMessage() (*appmessage.BlockGHOSTDAGData, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GhostdagData is nil") + } + + selectedParent, err := x.SelectedParent.toDomain() + if err != nil { + return nil, err + } + + mergeSetBlues, err := protoHashesToDomain(x.MergeSetBlues) + if err != nil { + return nil, err + } + + mergeSetReds, err := protoHashesToDomain(x.MergeSetReds) + if err != nil { + return nil, err + } + + bluesAnticoneSizes := make([]*appmessage.BluesAnticoneSizes, len(x.BluesAnticoneSizes)) + for i, protoBluesAnticoneSizes := range x.BluesAnticoneSizes { + blueHash, err := protoBluesAnticoneSizes.BlueHash.toDomain() + if err != nil { + return nil, err + } + + if protoBluesAnticoneSizes.AnticoneSize > maxKType() { + return nil, errors.Errorf("anticone size %d is greater than max k type %d", protoBluesAnticoneSizes.AnticoneSize, maxKType()) + } + + bluesAnticoneSizes[i] = &appmessage.BluesAnticoneSizes{ + BlueHash: blueHash, + AnticoneSize: externalapi.KType(protoBluesAnticoneSizes.AnticoneSize), + } + } + + blueWork := big.NewInt(0).SetBytes(x.BlueWork) + return &appmessage.BlockGHOSTDAGData{ + BlueScore: x.BlueScore, + BlueWork: blueWork, + 
SelectedParent: selectedParent, + MergeSetBlues: mergeSetBlues, + MergeSetReds: mergeSetReds, + BluesAnticoneSizes: bluesAnticoneSizes, + }, nil +} + +func (x *GhostdagData) fromAppMessage(ghostdagData *appmessage.BlockGHOSTDAGData) { + protoBluesAnticoneSizes := make([]*BluesAnticoneSizes, 0, len(ghostdagData.BluesAnticoneSizes)) + for _, pair := range ghostdagData.BluesAnticoneSizes { + protoBluesAnticoneSizes = append(protoBluesAnticoneSizes, &BluesAnticoneSizes{ + BlueHash: domainHashToProto(pair.BlueHash), + AnticoneSize: uint32(pair.AnticoneSize), + }) + } + *x = GhostdagData{ + BlueScore: ghostdagData.BlueScore, + BlueWork: ghostdagData.BlueWork.Bytes(), + SelectedParent: domainHashToProto(ghostdagData.SelectedParent), + MergeSetBlues: domainHashesToProto(ghostdagData.MergeSetBlues), + MergeSetReds: domainHashesToProto(ghostdagData.MergeSetReds), + BluesAnticoneSizes: protoBluesAnticoneSizes, + } +} + +func maxKType() uint32 { + zero := externalapi.KType(0) + max := zero - 1 + return uint32(max) +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_with_trusted_data_v4.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_with_trusted_data_v4.go new file mode 100644 index 0000000..00ca900 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_block_with_trusted_data_v4.go @@ -0,0 +1,75 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_BlockWithTrustedDataV4) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_BlockWithTrustedDataV4 is nil") + } + + msgBlock, err := x.BlockWithTrustedDataV4.Block.toAppMessage() + if err != nil { + return nil, err + } + + return &appmessage.MsgBlockWithTrustedDataV4{ + Block: msgBlock, + DAAWindowIndices: x.BlockWithTrustedDataV4.DaaWindowIndices, + GHOSTDAGDataIndices: 
x.BlockWithTrustedDataV4.GhostdagDataIndices, + }, nil +} + +func (x *SpectredMessage_BlockWithTrustedDataV4) fromAppMessage(msgBlockWithTrustedData *appmessage.MsgBlockWithTrustedDataV4) error { + x.BlockWithTrustedDataV4 = &BlockWithTrustedDataV4Message{ + Block: &BlockMessage{}, + DaaWindowIndices: msgBlockWithTrustedData.DAAWindowIndices, + GhostdagDataIndices: msgBlockWithTrustedData.GHOSTDAGDataIndices, + } + + err := x.BlockWithTrustedDataV4.Block.fromAppMessage(msgBlockWithTrustedData.Block) + if err != nil { + return err + } + + return nil +} + +func (x *DaaBlockV4) toAppMessage() (*appmessage.TrustedDataDAAHeader, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "DaaBlockV4 is nil") + } + + msgBlockHeader, err := x.Header.toAppMessage() + if err != nil { + return nil, err + } + + ghostdagData, err := x.GhostdagData.toAppMessage() + if err != nil { + return nil, err + } + + return &appmessage.TrustedDataDAAHeader{ + Header: msgBlockHeader, + GHOSTDAGData: ghostdagData, + }, nil +} + +func (x *DaaBlockV4) fromAppMessage(daaBlock *appmessage.TrustedDataDAAHeader) error { + *x = DaaBlockV4{ + Header: &BlockHeader{}, + GhostdagData: &GhostdagData{}, + } + + err := x.Header.fromAppMessage(daaBlock.Header) + if err != nil { + return err + } + + x.GhostdagData.fromAppMessage(daaBlock.GHOSTDAGData) + + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_blocks_with_trusted_data.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_blocks_with_trusted_data.go new file mode 100644 index 0000000..86bef0e --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_blocks_with_trusted_data.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_DoneBlocksWithTrustedData) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_DoneBlocksWithTrustedData is nil") + } + return &appmessage.MsgDoneBlocksWithTrustedData{}, nil +} + +func (x *SpectredMessage_DoneBlocksWithTrustedData) fromAppMessage(_ *appmessage.MsgDoneBlocksWithTrustedData) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_headers.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_headers.go new file mode 100644 index 0000000..edeeccb --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_headers.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_DoneHeaders) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_DoneHeaders is nil") + } + return &appmessage.MsgDoneHeaders{}, nil +} + +func (x *SpectredMessage_DoneHeaders) fromAppMessage(_ *appmessage.MsgDoneHeaders) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_pruning_point_utxo_set_chunks.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_pruning_point_utxo_set_chunks.go new file mode 100644 index 0000000..cc72422 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_done_pruning_point_utxo_set_chunks.go @@ -0,0 +1,18 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_DonePruningPointUtxoSetChunks) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_DonePruningPointUtxoSetChunks is nil") + } + return &appmessage.MsgDonePruningPointUTXOSetChunks{}, nil +} + +func (x *SpectredMessage_DonePruningPointUtxoSetChunks) fromAppMessage(_ *appmessage.MsgDonePruningPointUTXOSetChunks) 
error { + x.DonePruningPointUtxoSetChunks = &DonePruningPointUtxoSetChunksMessage{} + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_header.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_header.go new file mode 100644 index 0000000..adc103b --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_header.go @@ -0,0 +1,117 @@ +package protowire + +import ( + "math" + "math/big" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/util/mstime" +) + +func (x *BlockHeader) toAppMessage() (*appmessage.MsgBlockHeader, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "BlockHeaderMessage is nil") + } + parents, err := protoParentsToDomain(x.Parents) + if err != nil { + return nil, err + } + hashMerkleRoot, err := x.HashMerkleRoot.toDomain() + if err != nil { + return nil, err + } + acceptedIDMerkleRoot, err := x.AcceptedIdMerkleRoot.toDomain() + if err != nil { + return nil, err + } + utxoCommitment, err := x.UtxoCommitment.toDomain() + if err != nil { + return nil, err + } + pruningPoint, err := x.PruningPoint.toDomain() + if err != nil { + return nil, err + } + if x.Version > math.MaxUint16 { + return nil, errors.Errorf("Invalid block header version - bigger then uint16") + } + return &appmessage.MsgBlockHeader{ + Version: uint16(x.Version), + Parents: parents, + HashMerkleRoot: hashMerkleRoot, + AcceptedIDMerkleRoot: acceptedIDMerkleRoot, + UTXOCommitment: utxoCommitment, + Timestamp: mstime.UnixMilliseconds(x.Timestamp), + Bits: x.Bits, + Nonce: x.Nonce, + DAAScore: x.DaaScore, + BlueScore: x.BlueScore, + BlueWork: new(big.Int).SetBytes(x.BlueWork), + PruningPoint: pruningPoint, + }, nil +} + +func (x *BlockHeader) fromAppMessage(msgBlockHeader *appmessage.MsgBlockHeader) error { + *x = BlockHeader{ + Version: 
uint32(msgBlockHeader.Version), + Parents: domainParentsToProto(msgBlockHeader.Parents), + HashMerkleRoot: domainHashToProto(msgBlockHeader.HashMerkleRoot), + AcceptedIdMerkleRoot: domainHashToProto(msgBlockHeader.AcceptedIDMerkleRoot), + UtxoCommitment: domainHashToProto(msgBlockHeader.UTXOCommitment), + Timestamp: msgBlockHeader.Timestamp.UnixMilliseconds(), + Bits: msgBlockHeader.Bits, + Nonce: msgBlockHeader.Nonce, + DaaScore: msgBlockHeader.DAAScore, + BlueScore: msgBlockHeader.BlueScore, + BlueWork: msgBlockHeader.BlueWork.Bytes(), + PruningPoint: domainHashToProto(msgBlockHeader.PruningPoint), + } + return nil +} + +func (x *BlockLevelParents) toDomain() (externalapi.BlockLevelParents, error) { + if x == nil { + return nil, errors.Wrap(errorNil, "BlockLevelParents is nil") + } + domainBlockLevelParents := make(externalapi.BlockLevelParents, len(x.ParentHashes)) + for i, parentHash := range x.ParentHashes { + var err error + domainBlockLevelParents[i], err = externalapi.NewDomainHashFromByteSlice(parentHash.Bytes) + if err != nil { + return nil, err + } + } + return domainBlockLevelParents, nil +} + +func protoParentsToDomain(protoParents []*BlockLevelParents) ([]externalapi.BlockLevelParents, error) { + domainParents := make([]externalapi.BlockLevelParents, len(protoParents)) + for i, protoBlockLevelParents := range protoParents { + var err error + domainParents[i], err = protoBlockLevelParents.toDomain() + if err != nil { + return nil, err + } + } + return domainParents, nil +} + +func domainBlockLevelParentsToProto(parentHashes externalapi.BlockLevelParents) *BlockLevelParents { + protoParentHashes := make([]*Hash, len(parentHashes)) + for i, parentHash := range parentHashes { + protoParentHashes[i] = &Hash{Bytes: parentHash.ByteSlice()} + } + return &BlockLevelParents{ + ParentHashes: protoParentHashes, + } +} + +func domainParentsToProto(parents []externalapi.BlockLevelParents) []*BlockLevelParents { + protoParents := make([]*BlockLevelParents, 
len(parents)) + for i, hash := range parents { + protoParents[i] = domainBlockLevelParentsToProto(hash) + } + return protoParents +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block.go new file mode 100644 index 0000000..184583a --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block.go @@ -0,0 +1,22 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_IbdBlock) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_IbdBlock is nil") + } + msgBlock, err := x.IbdBlock.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.MsgIBDBlock{MsgBlock: msgBlock}, nil +} + +func (x *SpectredMessage_IbdBlock) fromAppMessage(msgIBDBlock *appmessage.MsgIBDBlock) error { + x.IbdBlock = new(BlockMessage) + return x.IbdBlock.fromAppMessage(msgIBDBlock.MsgBlock) +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator.go new file mode 100644 index 0000000..4a75969 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator.go @@ -0,0 +1,39 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_IbdBlockLocator) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_IbdBlockLocator is nil") + } + return x.IbdBlockLocator.toAppMessage() +} + +func (x *IbdBlockLocatorMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "IbdBlockLocatorMessage is nil") + } + targetHash, err := 
x.TargetHash.toDomain() + if err != nil { + return nil, err + } + blockLocatorHash, err := protoHashesToDomain(x.BlockLocatorHashes) + if err != nil { + return nil, err + } + return &appmessage.MsgIBDBlockLocator{ + TargetHash: targetHash, + BlockLocatorHashes: blockLocatorHash, + }, nil +} + +func (x *SpectredMessage_IbdBlockLocator) fromAppMessage(message *appmessage.MsgIBDBlockLocator) error { + x.IbdBlockLocator = &IbdBlockLocatorMessage{ + TargetHash: domainHashToProto(message.TargetHash), + BlockLocatorHashes: domainHashesToProto(message.BlockLocatorHashes), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator_highest_hash.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator_highest_hash.go new file mode 100644 index 0000000..4b91bb0 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator_highest_hash.go @@ -0,0 +1,35 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_IbdBlockLocatorHighestHash) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_IbdBlockLocatorHighestHash is nil") + } + return x.IbdBlockLocatorHighestHash.toAppMessgage() +} + +func (x *IbdBlockLocatorHighestHashMessage) toAppMessgage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "IbdBlockLocatorHighestHashMessage is nil") + } + highestHash, err := x.HighestHash.toDomain() + if err != nil { + return nil, err + } + + return &appmessage.MsgIBDBlockLocatorHighestHash{ + HighestHash: highestHash, + }, nil + +} + +func (x *SpectredMessage_IbdBlockLocatorHighestHash) fromAppMessage(message *appmessage.MsgIBDBlockLocatorHighestHash) error { + x.IbdBlockLocatorHighestHash = &IbdBlockLocatorHighestHashMessage{ + HighestHash: 
domainHashToProto(message.HighestHash), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator_highest_hash_not_found.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator_highest_hash_not_found.go new file mode 100644 index 0000000..47851d5 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_block_locator_highest_hash_not_found.go @@ -0,0 +1,18 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_IbdBlockLocatorHighestHashNotFound) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_IbdBlockLocatorHighestHashNotFound is nil") + } + return &appmessage.MsgIBDBlockLocatorHighestHashNotFound{}, nil +} + +func (x *SpectredMessage_IbdBlockLocatorHighestHashNotFound) fromAppMessage(message *appmessage.MsgIBDBlockLocatorHighestHashNotFound) error { + x.IbdBlockLocatorHighestHashNotFound = &IbdBlockLocatorHighestHashNotFoundMessage{} + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_chain_block_locator.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_chain_block_locator.go new file mode 100644 index 0000000..329aa24 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ibd_chain_block_locator.go @@ -0,0 +1,33 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_IbdChainBlockLocator) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_IbdChainBlockLocator is nil") + } + return x.IbdChainBlockLocator.toAppMessage() +} + +func (x *IbdChainBlockLocatorMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "IbdChainBlockLocatorMessage is nil") + } + blockLocatorHashes, err := protoHashesToDomain(x.BlockLocatorHashes) + if err != nil { + return nil, err + } + return &appmessage.MsgIBDChainBlockLocator{ + BlockLocatorHashes: blockLocatorHashes, + }, nil +} + +func (x *SpectredMessage_IbdChainBlockLocator) fromAppMessage(message *appmessage.MsgIBDChainBlockLocator) error { + x.IbdChainBlockLocator = &IbdChainBlockLocatorMessage{ + BlockLocatorHashes: domainHashesToProto(message.BlockLocatorHashes), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_inv_relay_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_inv_relay_block.go new file mode 100644 index 0000000..65f90f5 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_inv_relay_block.go @@ -0,0 +1,33 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_InvRelayBlock) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_InvRelayBlock is nil") + } + return x.InvRelayBlock.toAppMessage() +} + +func (x *InvRelayBlockMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "InvRelayBlockMessage is nil") + } + hash, err := x.Hash.toDomain() + if err != nil { + return nil, err + } + + return &appmessage.MsgInvRelayBlock{Hash: hash}, nil + +} + +func (x *SpectredMessage_InvRelayBlock) fromAppMessage(msgInvRelayBlock *appmessage.MsgInvRelayBlock) error { + x.InvRelayBlock = &InvRelayBlockMessage{ + Hash: domainHashToProto(msgInvRelayBlock.Hash), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_inv_transactions.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_inv_transactions.go new file mode 100644 index 0000000..1522476 --- 
/dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_inv_transactions.go @@ -0,0 +1,42 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_InvTransactions) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_InvTransactions is nil") + } + return x.InvTransactions.toAppMessage() +} + +func (x *InvTransactionsMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "InvTransactionsMessage is nil") + } + if len(x.Ids) > appmessage.MaxInvPerTxInvMsg { + return nil, errors.Errorf("too many hashes for message "+ + "[count %d, max %d]", len(x.Ids), appmessage.MaxInvPerTxInvMsg) + } + + ids, err := protoTransactionIDsToDomain(x.Ids) + if err != nil { + return nil, err + } + return &appmessage.MsgInvTransaction{TxIDs: ids}, nil + +} + +func (x *SpectredMessage_InvTransactions) fromAppMessage(msgInvTransaction *appmessage.MsgInvTransaction) error { + if len(msgInvTransaction.TxIDs) > appmessage.MaxInvPerTxInvMsg { + return errors.Errorf("too many hashes for message "+ + "[count %d, max %d]", len(msgInvTransaction.TxIDs), appmessage.MaxInvPerTxInvMsg) + } + + x.InvTransactions = &InvTransactionsMessage{ + Ids: wireTransactionIDsToProto(msgInvTransaction.TxIDs), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ping.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ping.go new file mode 100644 index 0000000..5b5bf30 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ping.go @@ -0,0 +1,29 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Ping) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, 
"SpectredMessage_Ping is nil") + } + return x.Ping.toAppMessage() +} + +func (x *PingMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "PingMessage is nil") + } + return &appmessage.MsgPing{ + Nonce: x.Nonce, + }, nil +} + +func (x *SpectredMessage_Ping) fromAppMessage(msgPing *appmessage.MsgPing) error { + x.Ping = &PingMessage{ + Nonce: msgPing.Nonce, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pong.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pong.go new file mode 100644 index 0000000..9b375e6 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pong.go @@ -0,0 +1,29 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Pong) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_Pong is nil") + } + return x.Pong.toAppMessage() +} + +func (x *PongMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "PongMessage is nil") + } + return &appmessage.MsgPong{ + Nonce: x.Nonce, + }, nil +} + +func (x *SpectredMessage_Pong) fromAppMessage(msgPong *appmessage.MsgPong) error { + x.Pong = &PongMessage{ + Nonce: msgPong.Nonce, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_point_proof.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_point_proof.go new file mode 100644 index 0000000..6c2debb --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_point_proof.go @@ -0,0 +1,50 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_PruningPointProof) toAppMessage() (appmessage.Message, error) 
{ + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_PruningPointProof is nil") + } + + if x.PruningPointProof == nil { + return nil, errors.Wrapf(errorNil, "x.PruningPointProof is nil") + } + + blockHeaders := make([][]*appmessage.MsgBlockHeader, len(x.PruningPointProof.Headers)) + for i, blockHeaderArray := range x.PruningPointProof.Headers { + blockHeaders[i] = make([]*appmessage.MsgBlockHeader, len(blockHeaderArray.Headers)) + for j, blockHeader := range blockHeaderArray.Headers { + var err error + blockHeaders[i][j], err = blockHeader.toAppMessage() + if err != nil { + return nil, err + } + } + } + return &appmessage.MsgPruningPointProof{ + Headers: blockHeaders, + }, nil +} + +func (x *SpectredMessage_PruningPointProof) fromAppMessage(msgPruningPointProof *appmessage.MsgPruningPointProof) error { + blockHeaders := make([]*PruningPointProofHeaderArray, len(msgPruningPointProof.Headers)) + for i, blockHeaderArray := range msgPruningPointProof.Headers { + blockHeaders[i] = &PruningPointProofHeaderArray{Headers: make([]*BlockHeader, len(blockHeaderArray))} + for j, blockHeader := range blockHeaderArray { + blockHeaders[i].Headers[j] = &BlockHeader{} + err := blockHeaders[i].Headers[j].fromAppMessage(blockHeader) + if err != nil { + return err + } + } + } + + x.PruningPointProof = &PruningPointProofMessage{ + Headers: blockHeaders, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_point_utxo_set_chunk.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_point_utxo_set_chunk.go new file mode 100644 index 0000000..b3784a2 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_point_utxo_set_chunk.go @@ -0,0 +1,70 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_PruningPointUtxoSetChunk) toAppMessage() (appmessage.Message, error) { + 
if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_PruningPointUtxoSetChunk is nil") + } + outpointAndUTXOEntryPairs := make([]*appmessage.OutpointAndUTXOEntryPair, len(x.PruningPointUtxoSetChunk.OutpointAndUtxoEntryPairs)) + for i, outpointAndUTXOEntryPair := range x.PruningPointUtxoSetChunk.OutpointAndUtxoEntryPairs { + outpointEntryPairAppMessage, err := outpointAndUTXOEntryPair.toAppMessage() + if err != nil { + return nil, err + } + outpointAndUTXOEntryPairs[i] = outpointEntryPairAppMessage + } + return &appmessage.MsgPruningPointUTXOSetChunk{ + OutpointAndUTXOEntryPairs: outpointAndUTXOEntryPairs, + }, nil +} + +func (x *OutpointAndUtxoEntryPair) toAppMessage() (*appmessage.OutpointAndUTXOEntryPair, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "OutpointAndUtxoEntryPair is nil") + } + outpoint, err := x.Outpoint.toAppMessage() + if err != nil { + return nil, err + } + utxoEntry, err := x.UtxoEntry.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.OutpointAndUTXOEntryPair{ + Outpoint: outpoint, + UTXOEntry: utxoEntry, + }, nil +} + +func (x *SpectredMessage_PruningPointUtxoSetChunk) fromAppMessage(message *appmessage.MsgPruningPointUTXOSetChunk) error { + outpointAndUTXOEntryPairs := make([]*OutpointAndUtxoEntryPair, len(message.OutpointAndUTXOEntryPairs)) + for i, outpointAndUTXOEntryPair := range message.OutpointAndUTXOEntryPairs { + transactionID := domainTransactionIDToProto(&outpointAndUTXOEntryPair.Outpoint.TxID) + outpoint := &Outpoint{ + TransactionId: transactionID, + Index: outpointAndUTXOEntryPair.Outpoint.Index, + } + scriptPublicKey := &ScriptPublicKey{ + Script: outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey.Script, + Version: uint32(outpointAndUTXOEntryPair.UTXOEntry.ScriptPublicKey.Version), + } + utxoEntry := &UtxoEntry{ + Amount: outpointAndUTXOEntryPair.UTXOEntry.Amount, + ScriptPublicKey: scriptPublicKey, + BlockDaaScore: outpointAndUTXOEntryPair.UTXOEntry.BlockDAAScore, + 
IsCoinbase: outpointAndUTXOEntryPair.UTXOEntry.IsCoinbase, + } + outpointAndUTXOEntryPairs[i] = &OutpointAndUtxoEntryPair{ + Outpoint: outpoint, + UtxoEntry: utxoEntry, + } + } + x.PruningPointUtxoSetChunk = &PruningPointUtxoSetChunkMessage{ + OutpointAndUtxoEntryPairs: outpointAndUTXOEntryPairs, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_points.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_points.go new file mode 100644 index 0000000..c8a5d87 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_pruning_points.go @@ -0,0 +1,44 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_PruningPoints) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_PruningPoints is nil") + } + + if x.PruningPoints == nil { + return nil, errors.Wrapf(errorNil, "x.PruningPoints is nil") + } + + blockHeaders := make([]*appmessage.MsgBlockHeader, len(x.PruningPoints.Headers)) + for i, blockHeader := range x.PruningPoints.Headers { + var err error + blockHeaders[i], err = blockHeader.toAppMessage() + if err != nil { + return nil, err + } + } + return &appmessage.MsgPruningPoints{ + Headers: blockHeaders, + }, nil +} + +func (x *SpectredMessage_PruningPoints) fromAppMessage(msgPruningPoints *appmessage.MsgPruningPoints) error { + blockHeaders := make([]*BlockHeader, len(msgPruningPoints.Headers)) + for i, blockHeader := range msgPruningPoints.Headers { + blockHeaders[i] = &BlockHeader{} + err := blockHeaders[i].fromAppMessage(blockHeader) + if err != nil { + return err + } + } + + x.PruningPoints = &PruningPointsMessage{ + Headers: blockHeaders, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ready.go 
b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ready.go new file mode 100644 index 0000000..fda28bc --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_ready.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Ready) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_Ready is nil") + } + return &appmessage.MsgReady{}, nil +} + +func (x *SpectredMessage_Ready) fromAppMessage(_ *appmessage.MsgReady) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_reject.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_reject.go new file mode 100644 index 0000000..2fe7e44 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_reject.go @@ -0,0 +1,29 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Reject) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_Reject is nil") + } + return x.Reject.toAppMessage() +} + +func (x *RejectMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RejectMessage is nil") + } + return &appmessage.MsgReject{ + Reason: x.Reason, + }, nil +} + +func (x *SpectredMessage_Reject) fromAppMessage(msgReject *appmessage.MsgReject) error { + x.Reject = &RejectMessage{ + Reason: msgReject.Reason, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_addresses.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_addresses.go new file mode 100644 index 0000000..91ca2ce --- /dev/null +++ 
b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_addresses.go @@ -0,0 +1,38 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestAddresses) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestAddresses is nil") + } + return x.RequestAddresses.toAppMessage() +} + +func (x *RequestAddressesMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestAddressesMessage is nil") + } + subnetworkID, err := x.SubnetworkId.toDomain() + // Full spectre nodes set SubnetworkId==nil + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + return &appmessage.MsgRequestAddresses{ + IncludeAllSubnetworks: x.IncludeAllSubnetworks, + SubnetworkID: subnetworkID, + }, nil + +} + +func (x *SpectredMessage_RequestAddresses) fromAppMessage(msgGetAddresses *appmessage.MsgRequestAddresses) error { + x.RequestAddresses = &RequestAddressesMessage{ + IncludeAllSubnetworks: msgGetAddresses.IncludeAllSubnetworks, + SubnetworkId: domainSubnetworkIDToProto(msgGetAddresses.SubnetworkID), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_anticone.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_anticone.go new file mode 100644 index 0000000..aaabab8 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_anticone.go @@ -0,0 +1,42 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestAnticone) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestAnticone is nil") + } + return x.RequestAnticone.toAppMessage() +} + +func (x *RequestAnticoneMessage) 
toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestAnticoneMessage is nil") + } + blockHash, err := x.BlockHash.toDomain() + if err != nil { + return nil, err + } + + contextHash, err := x.ContextHash.toDomain() + if err != nil { + return nil, err + } + + return &appmessage.MsgRequestAnticone{ + BlockHash: blockHash, + ContextHash: contextHash, + }, nil + +} + +func (x *SpectredMessage_RequestAnticone) fromAppMessage(msgRequestPastDiff *appmessage.MsgRequestAnticone) error { + x.RequestAnticone = &RequestAnticoneMessage{ + BlockHash: domainHashToProto(msgRequestPastDiff.BlockHash), + ContextHash: domainHashToProto(msgRequestPastDiff.ContextHash), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_block_locator.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_block_locator.go new file mode 100644 index 0000000..d7ac33d --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_block_locator.go @@ -0,0 +1,39 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestBlockLocator) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestBlockLocator is nil") + } + return x.RequestBlockLocator.toAppMessage() +} + +func (x *RequestBlockLocatorMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestBlockLocatorMessage is nil") + } + + highHash, err := x.HighHash.toDomain() + if err != nil { + return nil, err + } + + return &appmessage.MsgRequestBlockLocator{ + HighHash: highHash, + Limit: x.Limit, + }, nil + +} + +func (x *SpectredMessage_RequestBlockLocator) fromAppMessage(msgGetBlockLocator *appmessage.MsgRequestBlockLocator) error { + x.RequestBlockLocator = 
&RequestBlockLocatorMessage{ + HighHash: domainHashToProto(msgGetBlockLocator.HighHash), + Limit: msgGetBlockLocator.Limit, + } + + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_headers.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_headers.go new file mode 100644 index 0000000..92a631f --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_headers.go @@ -0,0 +1,54 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestHeaders) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestBlockLocator is nil") + } + lowHash, err := x.RequestHeaders.LowHash.toDomain() + if err != nil { + return nil, err + } + + highHash, err := x.RequestHeaders.HighHash.toDomain() + if err != nil { + return nil, err + } + + return &appmessage.MsgRequestHeaders{ + LowHash: lowHash, + HighHash: highHash, + }, nil +} +func (x *RequestHeadersMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestHeadersMessage is nil") + } + lowHash, err := x.LowHash.toDomain() + if err != nil { + return nil, err + } + + highHash, err := x.HighHash.toDomain() + if err != nil { + return nil, err + } + + return &appmessage.MsgRequestHeaders{ + LowHash: lowHash, + HighHash: highHash, + }, nil + +} + +func (x *SpectredMessage_RequestHeaders) fromAppMessage(msgRequestHeaders *appmessage.MsgRequestHeaders) error { + x.RequestHeaders = &RequestHeadersMessage{ + LowHash: domainHashToProto(msgRequestHeaders.LowHash), + HighHash: domainHashToProto(msgRequestHeaders.HighHash), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_ibd_blocks.go 
b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_ibd_blocks.go new file mode 100644 index 0000000..42999de --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_ibd_blocks.go @@ -0,0 +1,32 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestIBDBlocks) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestIBDBlocks is nil") + } + return x.RequestIBDBlocks.toAppMessage() +} + +func (x *RequestIBDBlocksMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestIBDBlocksMessage is nil") + } + hashes, err := protoHashesToDomain(x.Hashes) + if err != nil { + return nil, err + } + return &appmessage.MsgRequestIBDBlocks{Hashes: hashes}, nil +} + +func (x *SpectredMessage_RequestIBDBlocks) fromAppMessage(msgRequestIBDBlocks *appmessage.MsgRequestIBDBlocks) error { + x.RequestIBDBlocks = &RequestIBDBlocksMessage{ + Hashes: domainHashesToProto(msgRequestIBDBlocks.Hashes), + } + + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_ibd_chain_block_locator.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_ibd_chain_block_locator.go new file mode 100644 index 0000000..ed947aa --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_ibd_chain_block_locator.go @@ -0,0 +1,55 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (x *SpectredMessage_RequestIBDChainBlockLocator) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestIBDChainBlockLocator is nil") + } + return 
x.RequestIBDChainBlockLocator.toAppMessage() +} + +func (x *RequestIBDChainBlockLocatorMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestIBDChainBlockLocatorMessage is nil") + } + var err error + var highHash, lowHash *externalapi.DomainHash + if x.HighHash != nil { + highHash, err = x.HighHash.toDomain() + if err != nil { + return nil, err + } + } + if x.LowHash != nil { + lowHash, err = x.LowHash.toDomain() + if err != nil { + return nil, err + } + } + return &appmessage.MsgRequestIBDChainBlockLocator{ + HighHash: highHash, + LowHash: lowHash, + }, nil + +} + +func (x *SpectredMessage_RequestIBDChainBlockLocator) fromAppMessage(msgGetBlockLocator *appmessage.MsgRequestIBDChainBlockLocator) error { + var highHash, lowHash *Hash + if msgGetBlockLocator.HighHash != nil { + highHash = domainHashToProto(msgGetBlockLocator.HighHash) + } + if msgGetBlockLocator.LowHash != nil { + lowHash = domainHashToProto(msgGetBlockLocator.LowHash) + } + x.RequestIBDChainBlockLocator = &RequestIBDChainBlockLocatorMessage{ + HighHash: highHash, + LowHash: lowHash, + } + + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_headers.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_headers.go new file mode 100644 index 0000000..18a8cf9 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_headers.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestNextHeaders) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestNextHeaders is nil") + } + return &appmessage.MsgRequestNextHeaders{}, nil +} + +func (x *SpectredMessage_RequestNextHeaders) fromAppMessage(_ *appmessage.MsgRequestNextHeaders) error { + return nil 
+} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_pruning_point_and_its_anticone_blocks.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_pruning_point_and_its_anticone_blocks.go new file mode 100644 index 0000000..6673ac8 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_pruning_point_and_its_anticone_blocks.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_DonePruningPointAndItsAnticoneBlocks is nil") + } + return &appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks{}, nil +} + +func (x *SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks) fromAppMessage(_ *appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_pruning_point_utxo_set_chunk.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_pruning_point_utxo_set_chunk.go new file mode 100644 index 0000000..68bdc97 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_next_pruning_point_utxo_set_chunk.go @@ -0,0 +1,18 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestNextPruningPointUtxoSetChunk) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestNextPruningPointUtxoSetChunk is nil") + } + return &appmessage.MsgRequestNextPruningPointUTXOSetChunk{}, nil +} + +func (x *SpectredMessage_RequestNextPruningPointUtxoSetChunk) fromAppMessage(_ 
*appmessage.MsgRequestNextPruningPointUTXOSetChunk) error { + x.RequestNextPruningPointUtxoSetChunk = &RequestNextPruningPointUtxoSetChunkMessage{} + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_and_its_anticone.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_and_its_anticone.go new file mode 100644 index 0000000..516e397 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_and_its_anticone.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestPruningPointAndItsAnticone) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestPruningPointAndItsAnticone is nil") + } + return &appmessage.MsgRequestPruningPointAndItsAnticone{}, nil +} + +func (x *SpectredMessage_RequestPruningPointAndItsAnticone) fromAppMessage(_ *appmessage.MsgRequestPruningPointAndItsAnticone) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_proof.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_proof.go new file mode 100644 index 0000000..4da6ab1 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_proof.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestPruningPointProof) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestPruningPointProof is nil") + } + return &appmessage.MsgRequestPruningPointProof{}, nil +} + +func (x *SpectredMessage_RequestPruningPointProof) fromAppMessage(_ 
*appmessage.MsgRequestPruningPointProof) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_utxo_set_and_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_utxo_set_and_block.go new file mode 100644 index 0000000..d06ac3d --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_pruning_point_utxo_set_and_block.go @@ -0,0 +1,32 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestPruningPointUTXOSet) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestPruningPointUTXOSet is nil") + } + return x.RequestPruningPointUTXOSet.toAppMessage() +} + +func (x *RequestPruningPointUTXOSetMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestPruningPointUTXOSetMessage is nil") + } + pruningPointHash, err := x.PruningPointHash.toDomain() + if err != nil { + return nil, err + } + return &appmessage.MsgRequestPruningPointUTXOSet{PruningPointHash: pruningPointHash}, nil +} + +func (x *SpectredMessage_RequestPruningPointUTXOSet) fromAppMessage( + msgRequestPruningPointUTXOSet *appmessage.MsgRequestPruningPointUTXOSet) error { + + x.RequestPruningPointUTXOSet = &RequestPruningPointUTXOSetMessage{} + x.RequestPruningPointUTXOSet.PruningPointHash = domainHashToProto(msgRequestPruningPointUTXOSet.PruningPointHash) + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_relay_blocks.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_relay_blocks.go new file mode 100644 index 0000000..e4e8b3e --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_relay_blocks.go @@ -0,0 +1,41 @@ +package 
protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestRelayBlocks) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_RequestRelayBlocks is nil") + } + return x.RequestRelayBlocks.toAppMessage() +} + +func (x *RequestRelayBlocksMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestRelayBlocksMessage is nil") + } + if len(x.Hashes) > appmessage.MaxRequestRelayBlocksHashes { + return nil, errors.Errorf("too many hashes for message "+ + "[count %d, max %d]", len(x.Hashes), appmessage.MaxRequestRelayBlocksHashes) + } + hashes, err := protoHashesToDomain(x.Hashes) + if err != nil { + return nil, err + } + return &appmessage.MsgRequestRelayBlocks{Hashes: hashes}, nil + +} + +func (x *SpectredMessage_RequestRelayBlocks) fromAppMessage(msgGetRelayBlocks *appmessage.MsgRequestRelayBlocks) error { + if len(msgGetRelayBlocks.Hashes) > appmessage.MaxRequestRelayBlocksHashes { + return errors.Errorf("too many hashes for message "+ + "[count %d, max %d]", len(msgGetRelayBlocks.Hashes), appmessage.MaxRequestRelayBlocksHashes) + } + + x.RequestRelayBlocks = &RequestRelayBlocksMessage{ + Hashes: domainHashesToProto(msgGetRelayBlocks.Hashes), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_transactions.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_transactions.go new file mode 100644 index 0000000..5c57089 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_request_transactions.go @@ -0,0 +1,40 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_RequestTransactions) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_RequestTransactions is nil") + } + return x.RequestTransactions.toAppMessage() +} + +func (x *RequestTransactionsMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RequestTransactionsMessage is nil") + } + if len(x.Ids) > appmessage.MaxInvPerRequestTransactionsMsg { + return nil, errors.Errorf("too many hashes for message "+ + "[count %d, max %d]", len(x.Ids), appmessage.MaxInvPerRequestTransactionsMsg) + } + ids, err := protoTransactionIDsToDomain(x.Ids) + if err != nil { + return nil, err + } + return &appmessage.MsgRequestTransactions{IDs: ids}, nil +} + +func (x *SpectredMessage_RequestTransactions) fromAppMessage(msgGetTransactions *appmessage.MsgRequestTransactions) error { + if len(msgGetTransactions.IDs) > appmessage.MaxInvPerRequestTransactionsMsg { + return errors.Errorf("too many hashes for message "+ + "[count %d, max %d]", len(x.RequestTransactions.Ids), appmessage.MaxInvPerRequestTransactionsMsg) + } + + x.RequestTransactions = &RequestTransactionsMessage{ + Ids: wireTransactionIDsToProto(msgGetTransactions.IDs), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_transaction.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_transaction.go new file mode 100644 index 0000000..1b3404a --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_transaction.go @@ -0,0 +1,126 @@ +package protowire + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Transaction) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_Transaction is nil") + } + return x.Transaction.toAppMessage() +} + +func (x *SpectredMessage_Transaction) fromAppMessage(msgTx *appmessage.MsgTx) error { + x.Transaction = new(TransactionMessage) + 
x.Transaction.fromAppMessage(msgTx) + return nil +} + +func (x *TransactionMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "TransactionMessage is nil") + } + inputs := make([]*appmessage.TxIn, len(x.Inputs)) + for i, protoInput := range x.Inputs { + input, err := protoInput.toAppMessage() + if err != nil { + return nil, err + } + inputs[i] = input + } + + outputs := make([]*appmessage.TxOut, len(x.Outputs)) + for i, protoOutput := range x.Outputs { + output, err := protoOutput.toAppMessage() + if err != nil { + return nil, err + } + outputs[i] = output + } + + subnetworkID, err := x.SubnetworkId.toDomain() + if err != nil { + return nil, err + } + + if x.Version > math.MaxUint16 { + return nil, errors.Errorf("Invalid transaction version - bigger then uint16") + } + return &appmessage.MsgTx{ + Version: uint16(x.Version), + TxIn: inputs, + TxOut: outputs, + LockTime: x.LockTime, + SubnetworkID: *subnetworkID, + Gas: x.Gas, + Payload: x.Payload, + }, nil +} + +func (x *TransactionInput) toAppMessage() (*appmessage.TxIn, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "TransactionInput is nil") + } + if x.SigOpCount > math.MaxUint8 { + return nil, errors.New("TransactionInput SigOpCount > math.MaxUint8") + } + outpoint, err := x.PreviousOutpoint.toAppMessage() + if err != nil { + return nil, err + } + return appmessage.NewTxIn(outpoint, x.SignatureScript, x.Sequence, byte(x.SigOpCount)), nil +} + +func (x *TransactionOutput) toAppMessage() (*appmessage.TxOut, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "TransactionOutput is nil") + } + scriptPublicKey, err := x.ScriptPublicKey.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.TxOut{ + Value: x.Value, + ScriptPubKey: scriptPublicKey, + }, nil +} + +func (x *TransactionMessage) fromAppMessage(msgTx *appmessage.MsgTx) { + protoInputs := make([]*TransactionInput, len(msgTx.TxIn)) + for i, input := range 
msgTx.TxIn { + protoInputs[i] = &TransactionInput{ + PreviousOutpoint: &Outpoint{ + TransactionId: domainTransactionIDToProto(&input.PreviousOutpoint.TxID), + Index: input.PreviousOutpoint.Index, + }, + SignatureScript: input.SignatureScript, + Sequence: input.Sequence, + SigOpCount: uint32(input.SigOpCount), + } + } + + protoOutputs := make([]*TransactionOutput, len(msgTx.TxOut)) + for i, output := range msgTx.TxOut { + protoOutputs[i] = &TransactionOutput{ + Value: output.Value, + ScriptPublicKey: &ScriptPublicKey{ + Script: output.ScriptPubKey.Script, + Version: uint32(output.ScriptPubKey.Version), + }, + } + } + + *x = TransactionMessage{ + Version: uint32(msgTx.Version), + Inputs: protoInputs, + Outputs: protoOutputs, + LockTime: msgTx.LockTime, + SubnetworkId: domainSubnetworkIDToProto(&msgTx.SubnetworkID), + Gas: msgTx.Gas, + Payload: msgTx.Payload, + } +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_transaction_not_found.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_transaction_not_found.go new file mode 100644 index 0000000..4c9dbd5 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_transaction_not_found.go @@ -0,0 +1,31 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_TransactionNotFound) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_TransactionNotFound is nil") + } + return x.TransactionNotFound.toAppMessage() +} + +func (x *TransactionNotFoundMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "TransactionNotFoundMessage is nil") + } + id, err := x.Id.toDomain() + if err != nil { + return nil, err + } + return appmessage.NewMsgTransactionNotFound(id), nil +} + +func (x *SpectredMessage_TransactionNotFound) fromAppMessage(msgTransactionsNotFound 
*appmessage.MsgTransactionNotFound) error { + x.TransactionNotFound = &TransactionNotFoundMessage{ + Id: domainTransactionIDToProto(msgTransactionsNotFound.ID), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_trusted_data.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_trusted_data.go new file mode 100644 index 0000000..f553f48 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_trusted_data.go @@ -0,0 +1,70 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_TrustedData) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_TrustedDataMessage is nil") + } + + daaWindow := make([]*appmessage.TrustedDataDAAHeader, len(x.TrustedData.DaaWindow)) + for i, daaBlock := range x.TrustedData.DaaWindow { + var err error + daaWindow[i], err = daaBlock.toAppMessage() + if err != nil { + return nil, err + } + } + + ghostdagData := make([]*appmessage.BlockGHOSTDAGDataHashPair, len(x.TrustedData.GhostdagData)) + for i, pair := range x.TrustedData.GhostdagData { + hash, err := pair.Hash.toDomain() + if err != nil { + return nil, err + } + + data, err := pair.GhostdagData.toAppMessage() + if err != nil { + return nil, err + } + + ghostdagData[i] = &appmessage.BlockGHOSTDAGDataHashPair{ + Hash: hash, + GHOSTDAGData: data, + } + } + + return &appmessage.MsgTrustedData{ + DAAWindow: daaWindow, + GHOSTDAGData: ghostdagData, + }, nil +} + +func (x *SpectredMessage_TrustedData) fromAppMessage(msgTrustedData *appmessage.MsgTrustedData) error { + x.TrustedData = &TrustedDataMessage{ + DaaWindow: make([]*DaaBlockV4, len(msgTrustedData.DAAWindow)), + GhostdagData: make([]*BlockGhostdagDataHashPair, len(msgTrustedData.GHOSTDAGData)), + } + + for i, daaBlock := range msgTrustedData.DAAWindow { + x.TrustedData.DaaWindow[i] = 
&DaaBlockV4{} + err := x.TrustedData.DaaWindow[i].fromAppMessage(daaBlock) + if err != nil { + return err + } + } + + for i, pair := range msgTrustedData.GHOSTDAGData { + x.TrustedData.GhostdagData[i] = &BlockGhostdagDataHashPair{ + Hash: domainHashToProto(pair.Hash), + GhostdagData: &GhostdagData{}, + } + + x.TrustedData.GhostdagData[i].GhostdagData.fromAppMessage(pair.GHOSTDAGData) + } + + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_unexpected_pruning_point.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_unexpected_pruning_point.go new file mode 100644 index 0000000..0665273 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_unexpected_pruning_point.go @@ -0,0 +1,11 @@ +package protowire + +import "github.com/spectre-project/spectred/app/appmessage" + +func (x *SpectredMessage_UnexpectedPruningPoint) toAppMessage() (appmessage.Message, error) { + return &appmessage.MsgUnexpectedPruningPoint{}, nil +} + +func (x *SpectredMessage_UnexpectedPruningPoint) fromAppMessage(_ *appmessage.MsgUnexpectedPruningPoint) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_verack.go b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_verack.go new file mode 100644 index 0000000..2c169d0 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_verack.go @@ -0,0 +1,17 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_Verack) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_Verack is nil") + } + return &appmessage.MsgVerAck{}, nil +} + +func (x *SpectredMessage_Verack) fromAppMessage(_ *appmessage.MsgVerAck) error { + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_version.go 
b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_version.go new file mode 100644 index 0000000..fda44a4 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/p2p_version.go @@ -0,0 +1,89 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + "github.com/spectre-project/spectred/util/mstime" +) + +func (x *SpectredMessage_Version) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_Version is nil") + } + return x.Version.toAppMessage() +} + +func (x *VersionMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "VersionMessage is nil") + } + address, err := x.Address.toAppMessage() + // Address is optional for non-listening nodes + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + subnetworkID, err := x.SubnetworkId.toDomain() + // Full spectre nodes set SubnetworkId==nil + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + err = appmessage.ValidateUserAgent(x.UserAgent) + if err != nil { + return nil, err + } + + if x.Id == nil { + return nil, errors.Wrapf(errorNil, "VersionMessage.Id is nil") + } + + appMsgID, err := id.FromBytes(x.Id) + if err != nil { + return nil, err + } + + return &appmessage.MsgVersion{ + ProtocolVersion: x.ProtocolVersion, + Network: x.Network, + Services: appmessage.ServiceFlag(x.Services), + Timestamp: mstime.UnixMilliseconds(x.Timestamp), + Address: address, + ID: appMsgID, + UserAgent: x.UserAgent, + DisableRelayTx: x.DisableRelayTx, + SubnetworkID: subnetworkID, + }, nil +} + +func (x *SpectredMessage_Version) fromAppMessage(msgVersion *appmessage.MsgVersion) error { + err := appmessage.ValidateUserAgent(msgVersion.UserAgent) + if err != nil { + return err + } + + versionID, err := 
msgVersion.ID.SerializeToBytes() + if err != nil { + return err + } + + // Address is optional for non-listening nodes + var address *NetAddress + if msgVersion.Address != nil { + address = appMessageNetAddressToProto(msgVersion.Address) + } + + x.Version = &VersionMessage{ + ProtocolVersion: msgVersion.ProtocolVersion, + Network: msgVersion.Network, + Services: uint64(msgVersion.Services), + Timestamp: msgVersion.Timestamp.UnixMilliseconds(), + Address: address, + Id: versionID, + UserAgent: msgVersion.UserAgent, + DisableRelayTx: msgVersion.DisableRelayTx, + SubnetworkId: domainSubnetworkIDToProto(msgVersion.SubnetworkID), + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.md b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.md new file mode 100644 index 0000000..4c97ba6 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.md @@ -0,0 +1,1253 @@ +# Protocol Documentation + + + +## Table of Contents + +- [rpc.proto](#rpc.proto) + - [RPCError](#protowire.RPCError) + - [RpcBlock](#protowire.RpcBlock) + - [RpcBlockHeader](#protowire.RpcBlockHeader) + - [RpcBlockLevelParents](#protowire.RpcBlockLevelParents) + - [RpcBlockVerboseData](#protowire.RpcBlockVerboseData) + - [RpcTransaction](#protowire.RpcTransaction) + - [RpcTransactionInput](#protowire.RpcTransactionInput) + - [RpcScriptPublicKey](#protowire.RpcScriptPublicKey) + - [RpcTransactionOutput](#protowire.RpcTransactionOutput) + - [RpcOutpoint](#protowire.RpcOutpoint) + - [RpcUtxoEntry](#protowire.RpcUtxoEntry) + - [RpcTransactionVerboseData](#protowire.RpcTransactionVerboseData) + - [RpcTransactionInputVerboseData](#protowire.RpcTransactionInputVerboseData) + - [RpcTransactionOutputVerboseData](#protowire.RpcTransactionOutputVerboseData) + - [GetCurrentNetworkRequestMessage](#protowire.GetCurrentNetworkRequestMessage) + - [GetCurrentNetworkResponseMessage](#protowire.GetCurrentNetworkResponseMessage) + - 
[SubmitBlockRequestMessage](#protowire.SubmitBlockRequestMessage) + - [SubmitBlockResponseMessage](#protowire.SubmitBlockResponseMessage) + - [GetBlockTemplateRequestMessage](#protowire.GetBlockTemplateRequestMessage) + - [GetBlockTemplateResponseMessage](#protowire.GetBlockTemplateResponseMessage) + - [NotifyBlockAddedRequestMessage](#protowire.NotifyBlockAddedRequestMessage) + - [NotifyBlockAddedResponseMessage](#protowire.NotifyBlockAddedResponseMessage) + - [BlockAddedNotificationMessage](#protowire.BlockAddedNotificationMessage) + - [GetPeerAddressesRequestMessage](#protowire.GetPeerAddressesRequestMessage) + - [GetPeerAddressesResponseMessage](#protowire.GetPeerAddressesResponseMessage) + - [GetPeerAddressesKnownAddressMessage](#protowire.GetPeerAddressesKnownAddressMessage) + - [GetSelectedTipHashRequestMessage](#protowire.GetSelectedTipHashRequestMessage) + - [GetSelectedTipHashResponseMessage](#protowire.GetSelectedTipHashResponseMessage) + - [GetMempoolEntryRequestMessage](#protowire.GetMempoolEntryRequestMessage) + - [GetMempoolEntryResponseMessage](#protowire.GetMempoolEntryResponseMessage) + - [GetMempoolEntriesRequestMessage](#protowire.GetMempoolEntriesRequestMessage) + - [GetMempoolEntriesResponseMessage](#protowire.GetMempoolEntriesResponseMessage) + - [MempoolEntry](#protowire.MempoolEntry) + - [GetConnectedPeerInfoRequestMessage](#protowire.GetConnectedPeerInfoRequestMessage) + - [GetConnectedPeerInfoResponseMessage](#protowire.GetConnectedPeerInfoResponseMessage) + - [GetConnectedPeerInfoMessage](#protowire.GetConnectedPeerInfoMessage) + - [AddPeerRequestMessage](#protowire.AddPeerRequestMessage) + - [AddPeerResponseMessage](#protowire.AddPeerResponseMessage) + - [SubmitTransactionRequestMessage](#protowire.SubmitTransactionRequestMessage) + - [SubmitTransactionResponseMessage](#protowire.SubmitTransactionResponseMessage) + - [NotifyVirtualSelectedParentChainChangedRequestMessage](#protowire.NotifyVirtualSelectedParentChainChangedRequestMessage) 
+ - [NotifyVirtualSelectedParentChainChangedResponseMessage](#protowire.NotifyVirtualSelectedParentChainChangedResponseMessage) + - [VirtualSelectedParentChainChangedNotificationMessage](#protowire.VirtualSelectedParentChainChangedNotificationMessage) + - [GetBlockRequestMessage](#protowire.GetBlockRequestMessage) + - [GetBlockResponseMessage](#protowire.GetBlockResponseMessage) + - [GetSubnetworkRequestMessage](#protowire.GetSubnetworkRequestMessage) + - [GetSubnetworkResponseMessage](#protowire.GetSubnetworkResponseMessage) + - [GetVirtualSelectedParentChainFromBlockRequestMessage](#protowire.GetVirtualSelectedParentChainFromBlockRequestMessage) + - [AcceptedTransactionIds](#protowire.AcceptedTransactionIds) + - [GetVirtualSelectedParentChainFromBlockResponseMessage](#protowire.GetVirtualSelectedParentChainFromBlockResponseMessage) + - [GetBlocksRequestMessage](#protowire.GetBlocksRequestMessage) + - [GetBlocksResponseMessage](#protowire.GetBlocksResponseMessage) + - [GetBlockCountRequestMessage](#protowire.GetBlockCountRequestMessage) + - [GetBlockCountResponseMessage](#protowire.GetBlockCountResponseMessage) + - [GetBlockDagInfoRequestMessage](#protowire.GetBlockDagInfoRequestMessage) + - [GetBlockDagInfoResponseMessage](#protowire.GetBlockDagInfoResponseMessage) + - [ResolveFinalityConflictRequestMessage](#protowire.ResolveFinalityConflictRequestMessage) + - [ResolveFinalityConflictResponseMessage](#protowire.ResolveFinalityConflictResponseMessage) + - [NotifyFinalityConflictsRequestMessage](#protowire.NotifyFinalityConflictsRequestMessage) + - [NotifyFinalityConflictsResponseMessage](#protowire.NotifyFinalityConflictsResponseMessage) + - [FinalityConflictNotificationMessage](#protowire.FinalityConflictNotificationMessage) + - [FinalityConflictResolvedNotificationMessage](#protowire.FinalityConflictResolvedNotificationMessage) + - [ShutDownRequestMessage](#protowire.ShutDownRequestMessage) + - [ShutDownResponseMessage](#protowire.ShutDownResponseMessage) + - 
[GetHeadersRequestMessage](#protowire.GetHeadersRequestMessage) + - [GetHeadersResponseMessage](#protowire.GetHeadersResponseMessage) + - [NotifyUtxosChangedRequestMessage](#protowire.NotifyUtxosChangedRequestMessage) + - [NotifyUtxosChangedResponseMessage](#protowire.NotifyUtxosChangedResponseMessage) + - [UtxosChangedNotificationMessage](#protowire.UtxosChangedNotificationMessage) + - [UtxosByAddressesEntry](#protowire.UtxosByAddressesEntry) + - [StopNotifyingUtxosChangedRequestMessage](#protowire.StopNotifyingUtxosChangedRequestMessage) + - [StopNotifyingUtxosChangedResponseMessage](#protowire.StopNotifyingUtxosChangedResponseMessage) + - [GetUtxosByAddressesRequestMessage](#protowire.GetUtxosByAddressesRequestMessage) + - [GetUtxosByAddressesResponseMessage](#protowire.GetUtxosByAddressesResponseMessage) + - [GetBalanceByAddressRequestMessage](#protowire.GetBalanceByAddressRequestMessage) + - [GetBalanceByAddressResponseMessage](#protowire.GetBalanceByAddressResponseMessage) + - [GetBalancesByAddressesRequestMessage](#protowire.GetBalancesByAddressesRequestMessage) + - [BalancesByAddressEntry](#protowire.BalancesByAddressEntry) + - [GetBalancesByAddressesResponseMessage](#protowire.GetBalancesByAddressesResponseMessage) + - [GetVirtualSelectedParentBlueScoreRequestMessage](#protowire.GetVirtualSelectedParentBlueScoreRequestMessage) + - [GetVirtualSelectedParentBlueScoreResponseMessage](#protowire.GetVirtualSelectedParentBlueScoreResponseMessage) + - [NotifyVirtualSelectedParentBlueScoreChangedRequestMessage](#protowire.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) + - [NotifyVirtualSelectedParentBlueScoreChangedResponseMessage](#protowire.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) + - [VirtualSelectedParentBlueScoreChangedNotificationMessage](#protowire.VirtualSelectedParentBlueScoreChangedNotificationMessage) + - [NotifyVirtualDaaScoreChangedRequestMessage](#protowire.NotifyVirtualDaaScoreChangedRequestMessage) + - 
[NotifyVirtualDaaScoreChangedResponseMessage](#protowire.NotifyVirtualDaaScoreChangedResponseMessage) + - [VirtualDaaScoreChangedNotificationMessage](#protowire.VirtualDaaScoreChangedNotificationMessage) + - [NotifyPruningPointUTXOSetOverrideRequestMessage](#protowire.NotifyPruningPointUTXOSetOverrideRequestMessage) + - [NotifyPruningPointUTXOSetOverrideResponseMessage](#protowire.NotifyPruningPointUTXOSetOverrideResponseMessage) + - [PruningPointUTXOSetOverrideNotificationMessage](#protowire.PruningPointUTXOSetOverrideNotificationMessage) + - [StopNotifyingPruningPointUTXOSetOverrideRequestMessage](#protowire.StopNotifyingPruningPointUTXOSetOverrideRequestMessage) + - [StopNotifyingPruningPointUTXOSetOverrideResponseMessage](#protowire.StopNotifyingPruningPointUTXOSetOverrideResponseMessage) + - [BanRequestMessage](#protowire.BanRequestMessage) + - [BanResponseMessage](#protowire.BanResponseMessage) + - [UnbanRequestMessage](#protowire.UnbanRequestMessage) + - [UnbanResponseMessage](#protowire.UnbanResponseMessage) + - [GetInfoRequestMessage](#protowire.GetInfoRequestMessage) + - [GetInfoResponseMessage](#protowire.GetInfoResponseMessage) + - [EstimateNetworkHashesPerSecondRequestMessage](#protowire.EstimateNetworkHashesPerSecondRequestMessage) + - [EstimateNetworkHashesPerSecondResponseMessage](#protowire.EstimateNetworkHashesPerSecondResponseMessage) + - [NotifyNewBlockTemplateRequestMessage](#protowire.NotifyNewBlockTemplateRequestMessage) + - [NotifyNewBlockTemplateResponseMessage](#protowire.NotifyNewBlockTemplateResponseMessage) + - [NewBlockTemplateNotificationMessage](#protowire.NewBlockTemplateNotificationMessage) + - [MempoolEntryByAddress](#protowire.MempoolEntryByAddress) + - [GetMempoolEntriesByAddressesRequestMessage](#protowire.GetMempoolEntriesByAddressesRequestMessage) + - [GetMempoolEntriesByAddressesResponseMessage](#protowire.GetMempoolEntriesByAddressesResponseMessage) + - [GetCoinSupplyRequestMessage](#protowire.GetCoinSupplyRequestMessage) + 
- [GetCoinSupplyResponseMessage](#protowire.GetCoinSupplyResponseMessage) + - [SubmitBlockResponseMessage.RejectReason](#protowire.SubmitBlockResponseMessage.RejectReason) +- [Scalar Value Types](#scalar-value-types) + + +

Top

+ +## rpc.proto + +RPC-related types. Request messages, response messages, and dependent +types. + +Clients are expected to build RequestMessages and wrap them in +SpectredMessage. (see messages.proto) + +Having received a RequestMessage, (wrapped in a SpectredMessage) the +RPC server will respond with a ResponseMessage (likewise wrapped in a +SpectredMessage) respective to the original RequestMessage. + +**IMPORTANT:** This API is a work in progress and is subject to break +between versions. + + + +### RPCError + +RPCError represents a generic non-internal error. + +Receivers of any ResponseMessage are expected to check whether its +error field is not null. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| message | [string](#string) | | | + + + +### RpcBlock + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| header | [RpcBlockHeader](#protowire.RpcBlockHeader) | | | +| transactions | [RpcTransaction](#protowire.RpcTransaction) | repeated | | +| verboseData | [RpcBlockVerboseData](#protowire.RpcBlockVerboseData) | | | + + + +### RpcBlockHeader + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| version | [uint32](#uint32) | | | +| parents | [RpcBlockLevelParents](#protowire.RpcBlockLevelParents) | repeated | | +| hashMerkleRoot | [string](#string) | | | +| acceptedIdMerkleRoot | [string](#string) | | | +| utxoCommitment | [string](#string) | | | +| timestamp | [int64](#int64) | | | +| bits | [uint32](#uint32) | | | +| nonce | [uint64](#uint64) | | | +| daaScore | [uint64](#uint64) | | | +| blueWork | [string](#string) | | | +| pruningPoint | [string](#string) | | | +| blueScore | [uint64](#uint64) | | | + + + +### RpcBlockLevelParents + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| parentHashes | [string](#string) | repeated | | + + + +### RpcBlockVerboseData + +| Field | Type | Label | Description | +| ----- | ---- | ----- | 
----------- | +| hash | [string](#string) | | | +| difficulty | [double](#double) | | | +| selectedParentHash | [string](#string) | | | +| transactionIds | [string](#string) | repeated | | +| isHeaderOnly | [bool](#bool) | | | +| blueScore | [uint64](#uint64) | | | +| childrenHashes | [string](#string) | repeated | | +| mergeSetBluesHashes | [string](#string) | repeated | | +| mergeSetRedsHashes | [string](#string) | repeated | | +| isChainBlock | [bool](#bool) | | | + + + +### RpcTransaction + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| version | [uint32](#uint32) | | | +| inputs | [RpcTransactionInput](#protowire.RpcTransactionInput) | repeated | | +| outputs | [RpcTransactionOutput](#protowire.RpcTransactionOutput) | repeated | | +| lockTime | [uint64](#uint64) | | | +| subnetworkId | [string](#string) | | | +| gas | [uint64](#uint64) | | | +| payload | [string](#string) | | | +| verboseData | [RpcTransactionVerboseData](#protowire.RpcTransactionVerboseData) | | | + + + +### RpcTransactionInput + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| previousOutpoint | [RpcOutpoint](#protowire.RpcOutpoint) | | | +| signatureScript | [string](#string) | | | +| sequence | [uint64](#uint64) | | | +| sigOpCount | [uint32](#uint32) | | | +| verboseData | [RpcTransactionInputVerboseData](#protowire.RpcTransactionInputVerboseData) | | | + + + +### RpcScriptPublicKey + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| version | [uint32](#uint32) | | | +| scriptPublicKey | [string](#string) | | | + + + +### RpcTransactionOutput + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| amount | [uint64](#uint64) | | | +| scriptPublicKey | [RpcScriptPublicKey](#protowire.RpcScriptPublicKey) | | | +| verboseData | [RpcTransactionOutputVerboseData](#protowire.RpcTransactionOutputVerboseData) | | | + + + +### RpcOutpoint + +| Field | Type | Label | 
Description | +| ----- | ---- | ----- | ----------- | +| transactionId | [string](#string) | | | +| index | [uint32](#uint32) | | | + + + +### RpcUtxoEntry + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| amount | [uint64](#uint64) | | | +| scriptPublicKey | [RpcScriptPublicKey](#protowire.RpcScriptPublicKey) | | | +| blockDaaScore | [uint64](#uint64) | | | +| isCoinbase | [bool](#bool) | | | + + + +### RpcTransactionVerboseData + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| transactionId | [string](#string) | | | +| hash | [string](#string) | | | +| mass | [uint64](#uint64) | | | +| blockHash | [string](#string) | | | +| blockTime | [uint64](#uint64) | | | + + + +### RpcTransactionInputVerboseData + + + +### RpcTransactionOutputVerboseData + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| scriptPublicKeyType | [string](#string) | | | +| scriptPublicKeyAddress | [string](#string) | | | + + + +### GetCurrentNetworkRequestMessage + +GetCurrentNetworkRequestMessage requests the network spectred is +currently running against. + +Possible networks are: Mainnet, Testnet, Simnet, Devnet + + + +### GetCurrentNetworkResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| currentNetwork | [string](#string) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### SubmitBlockRequestMessage + +SubmitBlockRequestMessage requests to submit a block into the DAG. +Blocks are generally expected to have been generated using the +getBlockTemplate call. 
+ +See: GetBlockTemplateRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| block | [RpcBlock](#protowire.RpcBlock) | | | +| allowNonDAABlocks | [bool](#bool) | | | + + + +### SubmitBlockResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| rejectReason | [SubmitBlockResponseMessage.RejectReason](#protowire.SubmitBlockResponseMessage.RejectReason) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBlockTemplateRequestMessage + +GetBlockTemplateRequestMessage requests a current block template. +Callers are expected to solve the block template and submit it using +the submitBlock call. + +See: SubmitBlockRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| payAddress | [string](#string) | | Which spectre address should the coinbase block reward transaction pay into | +| extraData | [string](#string) | | | + + + +### GetBlockTemplateResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| block | [RpcBlock](#protowire.RpcBlock) | | | +| isSynced | [bool](#bool) | | Whether spectred thinks that it's synced. Callers are discouraged (but not forbidden) from solving blocks when spectred is not synced. That is because when spectred isn't in sync with the rest of the network there's a high chance the block will never be accepted, thus the solving effort would have been wasted. | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NotifyBlockAddedRequestMessage + +NotifyBlockAddedRequestMessage registers this connection for +blockAdded notifications. 
+ +See: BlockAddedNotificationMessage + + + +### NotifyBlockAddedResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### BlockAddedNotificationMessage + +BlockAddedNotificationMessage is sent whenever a block has been +added (NOT accepted) into the DAG. + +See: NotifyBlockAddedRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| block | [RpcBlock](#protowire.RpcBlock) | | | + + + +### GetPeerAddressesRequestMessage + +GetPeerAddressesRequestMessage requests the list of known spectred +addresses in the current network. (mainnet, testnet, etc.) + + + +### GetPeerAddressesResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| addresses | [GetPeerAddressesKnownAddressMessage](#protowire.GetPeerAddressesKnownAddressMessage) | repeated | | +| bannedAddresses | [GetPeerAddressesKnownAddressMessage](#protowire.GetPeerAddressesKnownAddressMessage) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetPeerAddressesKnownAddressMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| Addr | [string](#string) | | | + + + +### GetSelectedTipHashRequestMessage + +GetSelectedTipHashRequestMessage requests the hash of the current +virtual's selected parent. + + + +### GetSelectedTipHashResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| selectedTipHash | [string](#string) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetMempoolEntryRequestMessage + +GetMempoolEntryRequestMessage requests information about a specific +transaction in the mempool. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| txId | [string](#string) | | The transaction's TransactionID. 
| +| includeOrphanPool | [bool](#bool) | | | +| filterTransactionPool | [bool](#bool) | | | + + + +### GetMempoolEntryResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| entry | [MempoolEntry](#protowire.MempoolEntry) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetMempoolEntriesRequestMessage + +GetMempoolEntriesRequestMessage requests information about all the +transactions currently in the mempool. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| includeOrphanPool | [bool](#bool) | | | +| filterTransactionPool | [bool](#bool) | | | + + + +### GetMempoolEntriesResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| entries | [MempoolEntry](#protowire.MempoolEntry) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### MempoolEntry + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| fee | [uint64](#uint64) | | | +| transaction | [RpcTransaction](#protowire.RpcTransaction) | | | +| isOrphan | [bool](#bool) | | | + + + +### GetConnectedPeerInfoRequestMessage + +GetConnectedPeerInfoRequestMessage requests information about all the +p2p peers currently connected to this spectred. 
+ + + +### GetConnectedPeerInfoResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| infos | [GetConnectedPeerInfoMessage](#protowire.GetConnectedPeerInfoMessage) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetConnectedPeerInfoMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| id | [string](#string) | | | +| address | [string](#string) | | | +| lastPingDuration | [int64](#int64) | | How long did the last ping/pong exchange take | +| isOutbound | [bool](#bool) | | Whether this spectred initiated the connection | +| timeOffset | [int64](#int64) | | | +| userAgent | [string](#string) | | | +| advertisedProtocolVersion | [uint32](#uint32) | | The protocol version that this peer claims to support | +| timeConnected | [int64](#int64) | | The timestamp of when this peer connected to this spectred | +| isIbdPeer | [bool](#bool) | | Whether this peer is the IBD peer (if IBD is running) | + + + +### AddPeerRequestMessage + +AddPeerRequestMessage adds a peer to spectred's outgoing +connection list. This will, in most cases, result in spectred +connecting to said peer. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| address | [string](#string) | | | +| isPermanent | [bool](#bool) | | Whether to keep attempting to connect to this peer after disconnection | + + + +### AddPeerResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### SubmitTransactionRequestMessage + +SubmitTransactionRequestMessage submits a transaction to the mempool. 
+ +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| transaction | [RpcTransaction](#protowire.RpcTransaction) | | | +| allowOrphan | [bool](#bool) | | | + + + +### SubmitTransactionResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| transactionId | [string](#string) | | The transaction ID of the submitted transaction | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NotifyVirtualSelectedParentChainChangedRequestMessage + +NotifyVirtualSelectedParentChainChangedRequestMessage registers this +connection for virtualSelectedParentChainChanged notifications. + +See: VirtualSelectedParentChainChangedNotificationMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| includeAcceptedTransactionIds | [bool](#bool) | | | + + + +### NotifyVirtualSelectedParentChainChangedResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### VirtualSelectedParentChainChangedNotificationMessage + +VirtualSelectedParentChainChangedNotificationMessage is sent whenever +the DAG's selected parent chain had changed. + +See: NotifyVirtualSelectedParentChainChangedRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| removedChainBlockHashes | [string](#string) | repeated | The chain blocks that were removed, in high-to-low order | +| addedChainBlockHashes | [string](#string) | repeated | The chain blocks that were added, in low-to-high order | +| acceptedTransactionIds | [AcceptedTransactionIds](#protowire.AcceptedTransactionIds) | repeated | Will be filled only if `includeAcceptedTransactionIds = true` in the notify request. | + + + +### GetBlockRequestMessage + +GetBlockRequestMessage requests information about a specific block. 
+ +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| hash | [string](#string) | | The hash of the requested block | +| includeTransactions | [bool](#bool) | | Whether to include transaction data in the response | + + + +### GetBlockResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| block | [RpcBlock](#protowire.RpcBlock) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetSubnetworkRequestMessage + +GetSubnetworkRequestMessage requests information about a specific +subnetwork. + +Currently unimplemented + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| subnetworkId | [string](#string) | | | + + + +### GetSubnetworkResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| gasLimit | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetVirtualSelectedParentChainFromBlockRequestMessage + +GetVirtualSelectedParentChainFromBlockRequestMessage requests the +virtual selected parent chain from some startHash to this +spectred's current virtual. 
+ +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| startHash | [string](#string) | | | +| includeAcceptedTransactionIds | [bool](#bool) | | | + + + +### AcceptedTransactionIds + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| acceptingBlockHash | [string](#string) | | | +| acceptedTransactionIds | [string](#string) | repeated | | + + + +### GetVirtualSelectedParentChainFromBlockResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| removedChainBlockHashes | [string](#string) | repeated | The chain blocks that were removed, in high-to-low order | +| addedChainBlockHashes | [string](#string) | repeated | The chain blocks that were added, in low-to-high order | +| acceptedTransactionIds | [AcceptedTransactionIds](#protowire.AcceptedTransactionIds) | repeated | The transactions accepted by each block in addedChainBlockHashes. Will be filled only if `includeAcceptedTransactionIds = true` in the request. | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBlocksRequestMessage + +GetBlocksRequestMessage requests blocks between a certain block +lowHash up to this spectred's current virtual. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| lowHash | [string](#string) | | | +| includeBlocks | [bool](#bool) | | | +| includeTransactions | [bool](#bool) | | | + + + +### GetBlocksResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| blockHashes | [string](#string) | repeated | | +| blocks | [RpcBlock](#protowire.RpcBlock) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBlockCountRequestMessage + +GetBlockCountRequestMessage requests the current number of blocks in +this spectred. Note that this number may decrease as pruning occurs. 
+ + + +### GetBlockCountResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| blockCount | [uint64](#uint64) | | | +| headerCount | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBlockDagInfoRequestMessage + +GetBlockDagInfoRequestMessage requests general information about the +current state of this spectred's DAG. + + + +### GetBlockDagInfoResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| networkName | [string](#string) | | | +| blockCount | [uint64](#uint64) | | | +| headerCount | [uint64](#uint64) | | | +| tipHashes | [string](#string) | repeated | | +| difficulty | [double](#double) | | | +| pastMedianTime | [int64](#int64) | | | +| virtualParentHashes | [string](#string) | repeated | | +| pruningPointHash | [string](#string) | | | +| virtualDaaScore | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### ResolveFinalityConflictRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| finalityBlockHash | [string](#string) | | | + + + +### ResolveFinalityConflictResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NotifyFinalityConflictsRequestMessage + + + +### NotifyFinalityConflictsResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### FinalityConflictNotificationMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| violatingBlockHash | [string](#string) | | | + + + +### FinalityConflictResolvedNotificationMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| finalityBlockHash | [string](#string) | | | + + + +### ShutDownRequestMessage + +ShutDownRequestMessage shuts down 
this spectred. + + + +### ShutDownResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetHeadersRequestMessage + +GetHeadersRequestMessage requests headers between the given startHash +and the current virtual, up to the given limit. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| startHash | [string](#string) | | | +| limit | [uint64](#uint64) | | | +| isAscending | [bool](#bool) | | | + + + +### GetHeadersResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| headers | [string](#string) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NotifyUtxosChangedRequestMessage + +NotifyUtxosChangedRequestMessage registers this connection for +utxoChanged notifications for the given addresses. + +This call is only available when this spectred was started with +`--utxoindex`. + +See: UtxosChangedNotificationMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| addresses | [string](#string) | repeated | Leave empty to get all updates | + + + +### NotifyUtxosChangedResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### UtxosChangedNotificationMessage + +UtxosChangedNotificationMessage is sent whenever the UTXO index had +been updated. 
+ +See: NotifyUtxosChangedRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| added | [UtxosByAddressesEntry](#protowire.UtxosByAddressesEntry) | repeated | | +| removed | [UtxosByAddressesEntry](#protowire.UtxosByAddressesEntry) | repeated | | + + + +### UtxosByAddressesEntry + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| address | [string](#string) | | | +| outpoint | [RpcOutpoint](#protowire.RpcOutpoint) | | | +| utxoEntry | [RpcUtxoEntry](#protowire.RpcUtxoEntry) | | | + + + +### StopNotifyingUtxosChangedRequestMessage + +StopNotifyingUtxosChangedRequestMessage unregisters this connection +for utxoChanged notifications for the given addresses. + +This call is only available when this spectred was started with +`--utxoindex`. + +See: UtxosChangedNotificationMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| addresses | [string](#string) | repeated | | + + + +### StopNotifyingUtxosChangedResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetUtxosByAddressesRequestMessage + +GetUtxosByAddressesRequestMessage requests all current UTXOs for the +given spectred addresses. + +This call is only available when this spectred was started with +`--utxoindex`. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| addresses | [string](#string) | repeated | | + + + +### GetUtxosByAddressesResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| entries | [UtxosByAddressesEntry](#protowire.UtxosByAddressesEntry) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBalanceByAddressRequestMessage + +GetBalanceByAddressRequest returns the total balance in unspent +transactions towards a given address. 
+ +This call is only available when this spectred was started with +`--utxoindex`. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| address | [string](#string) | | | + + + +### GetBalanceByAddressResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| balance | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBalancesByAddressesRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| addresses | [string](#string) | repeated | | + + + +### BalancesByAddressEntry + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| address | [string](#string) | | | +| balance | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetBalancesByAddressesResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| entries | [BalancesByAddressEntry](#protowire.BalancesByAddressEntry) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetVirtualSelectedParentBlueScoreRequestMessage + +GetVirtualSelectedParentBlueScoreRequestMessage requests the blue +score of the current selected parent of the virtual block. + + + +### GetVirtualSelectedParentBlueScoreResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| blueScore | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NotifyVirtualSelectedParentBlueScoreChangedRequestMessage + +NotifyVirtualSelectedParentBlueScoreChangedRequestMessage registers +this connection for virtualSelectedParentBlueScoreChanged +notifications. 
+ +See: VirtualSelectedParentBlueScoreChangedNotificationMessage + + + +### NotifyVirtualSelectedParentBlueScoreChangedResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### VirtualSelectedParentBlueScoreChangedNotificationMessage + +VirtualSelectedParentBlueScoreChangedNotificationMessage is sent +whenever the blue score of the virtual's selected parent changes. + +See NotifyVirtualSelectedParentBlueScoreChangedRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| virtualSelectedParentBlueScore | [uint64](#uint64) | | | + + + +### NotifyVirtualDaaScoreChangedRequestMessage + +NotifyVirtualDaaScoreChangedRequestMessage registers this connection +for virtualDaaScoreChanged notifications. + +See: VirtualDaaScoreChangedNotificationMessage + + + +### NotifyVirtualDaaScoreChangedResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### VirtualDaaScoreChangedNotificationMessage + +VirtualDaaScoreChangedNotificationMessage is sent whenever the DAA +score of the virtual changes. + +See NotifyVirtualDaaScoreChangedRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| virtualDaaScore | [uint64](#uint64) | | | + + + +### NotifyPruningPointUTXOSetOverrideRequestMessage +NotifyPruningPointUTXOSetOverrideRequestMessage registers this +connection for pruning point UTXO set override notifications. + +This call is only available when this spectred was started with +`--utxoindex`. 
+ +See: NotifyPruningPointUTXOSetOverrideResponseMessage + + + +### NotifyPruningPointUTXOSetOverrideResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### PruningPointUTXOSetOverrideNotificationMessage +PruningPointUTXOSetOverrideNotificationMessage is sent whenever the +UTXO index resets due to pruning point change via IBD. + +See NotifyPruningPointUTXOSetOverrideRequestMessage + + + +### StopNotifyingPruningPointUTXOSetOverrideRequestMessage + +StopNotifyingPruningPointUTXOSetOverrideRequestMessage unregisters +this connection for pruning point UTXO set override notifications. + +This call is only available when this spectred was started with +`--utxoindex`. + +See: PruningPointUTXOSetOverrideNotificationMessage + + + +### StopNotifyingPruningPointUTXOSetOverrideResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### BanRequestMessage + +BanRequestMessage bans the given ip. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| ip | [string](#string) | | | + + + +### BanResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### UnbanRequestMessage + +UnbanRequestMessage unbans the given ip. + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| ip | [string](#string) | | | + + + +### UnbanResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetInfoRequestMessage + +GetInfoRequestMessage returns info about the node. 
+ + + +### GetInfoResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| p2pId | [string](#string) | | | +| mempoolSize | [uint64](#uint64) | | | +| serverVersion | [string](#string) | | | +| isUtxoIndexed | [bool](#bool) | | | +| isSynced | [bool](#bool) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### EstimateNetworkHashesPerSecondRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| windowSize | [uint32](#uint32) | | | +| startHash | [string](#string) | | | + + + +### EstimateNetworkHashesPerSecondResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| networkHashesPerSecond | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NotifyNewBlockTemplateRequestMessage + +NotifyNewBlockTemplateRequestMessage registers this connection for +NewBlockTemplate notifications. + +See: NewBlockTemplateNotificationMessage + + + +### NotifyNewBlockTemplateResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| error | [RPCError](#protowire.RPCError) | | | + + + +### NewBlockTemplateNotificationMessage + +NewBlockTemplateNotificationMessage is sent whenever a new updated +block template is available for miners. 
+ +See NotifyNewBlockTemplateRequestMessage + + + +### MempoolEntryByAddress + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| address | [string](#string) | | | +| sending | [MempoolEntry](#protowire.MempoolEntry) | repeated | | +| receiving | [MempoolEntry](#protowire.MempoolEntry) | repeated | | + + + +### GetMempoolEntriesByAddressesRequestMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| addresses | [string](#string) | repeated | | +| includeOrphanPool | [bool](#bool) | | | +| filterTransactionPool | [bool](#bool) | | | + + + +### GetMempoolEntriesByAddressesResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| entries | [MempoolEntryByAddress](#protowire.MempoolEntryByAddress) | repeated | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### GetCoinSupplyRequestMessage + + + +### GetCoinSupplyResponseMessage + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| maxSompi | [uint64](#uint64) | | note: this is a hard coded maxSupply, actual maxSupply is expected to deviate by up to -5%, but cannot be measured exactly. | +| circulatingSompi | [uint64](#uint64) | | | +| error | [RPCError](#protowire.RPCError) | | | + + + +### SubmitBlockResponseMessage.RejectReason + +| Name | Number | Description | +| ---- | ------ | ----------- | +| NONE | 0 | | +| BLOCK_INVALID | 1 | | +| IS_IN_IBD | 2 | | + +## Scalar Value Types + +| .proto Type | Notes | C++ | Java | Python | Go | C# | PHP | Ruby | +| ----------- | ----- | --- | ---- | ------ | -- | -- | --- | ---- | +| double | | double | double | float | float64 | double | float | Float | +| float | | float | float | float | float32 | float | float | Float | +| int32 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint32 instead. 
| int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| int64 | Uses variable-length encoding. Inefficient for encoding negative numbers – if your field is likely to have negative values, use sint64 instead. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| uint32 | Uses variable-length encoding. | uint32 | int | int/long | uint32 | uint | integer | Bignum or Fixnum (as required) | +| uint64 | Uses variable-length encoding. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum or Fixnum (as required) | +| sint32 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int32s. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| sint64 | Uses variable-length encoding. Signed int value. These more efficiently encode negative numbers than regular int64s. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| fixed32 | Always four bytes. More efficient than uint32 if values are often greater than 2^28. | uint32 | int | int | uint32 | uint | integer | Bignum or Fixnum (as required) | +| fixed64 | Always eight bytes. More efficient than uint64 if values are often greater than 2^56. | uint64 | long | int/long | uint64 | ulong | integer/string | Bignum | +| sfixed32 | Always four bytes. | int32 | int | int | int32 | int | integer | Bignum or Fixnum (as required) | +| sfixed64 | Always eight bytes. | int64 | long | int/long | int64 | long | integer/string | Bignum | +| bool | | bool | boolean | boolean | bool | bool | boolean | TrueClass/FalseClass | +| string | A string must always contain UTF-8 encoded or 7-bit ASCII text. | string | String | str/unicode | string | string | string | String (UTF-8) | +| bytes | May contain any arbitrary sequence of bytes. 
| string | ByteString | str | []byte | ByteString | string | String (ASCII-8BIT) | diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.pb.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.pb.go new file mode 100644 index 0000000..46af409 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.pb.go @@ -0,0 +1,8455 @@ +// RPC-related types. Request messages, response messages, and dependant types. +// +// Clients are expected to build RequestMessages and wrap them in SpectredMessage. (see messages.proto) +// +// Having received a RequestMessage, (wrapped in a SpectredMessage) the RPC server will respond with a +// ResponseMessage (likewise wrapped in a SpectredMessage) respective to the original RequestMessage. +// +// **IMPORTANT:** This API is a work in progress and is subject to break between versions. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.32.0 +// protoc v4.25.3 +// source: rpc.proto + +package protowire + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SubmitBlockResponseMessage_RejectReason int32 + +const ( + SubmitBlockResponseMessage_NONE SubmitBlockResponseMessage_RejectReason = 0 + SubmitBlockResponseMessage_BLOCK_INVALID SubmitBlockResponseMessage_RejectReason = 1 + SubmitBlockResponseMessage_IS_IN_IBD SubmitBlockResponseMessage_RejectReason = 2 +) + +// Enum value maps for SubmitBlockResponseMessage_RejectReason. 
+var ( + SubmitBlockResponseMessage_RejectReason_name = map[int32]string{ + 0: "NONE", + 1: "BLOCK_INVALID", + 2: "IS_IN_IBD", + } + SubmitBlockResponseMessage_RejectReason_value = map[string]int32{ + "NONE": 0, + "BLOCK_INVALID": 1, + "IS_IN_IBD": 2, + } +) + +func (x SubmitBlockResponseMessage_RejectReason) Enum() *SubmitBlockResponseMessage_RejectReason { + p := new(SubmitBlockResponseMessage_RejectReason) + *p = x + return p +} + +func (x SubmitBlockResponseMessage_RejectReason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubmitBlockResponseMessage_RejectReason) Descriptor() protoreflect.EnumDescriptor { + return file_rpc_proto_enumTypes[0].Descriptor() +} + +func (SubmitBlockResponseMessage_RejectReason) Type() protoreflect.EnumType { + return &file_rpc_proto_enumTypes[0] +} + +func (x SubmitBlockResponseMessage_RejectReason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubmitBlockResponseMessage_RejectReason.Descriptor instead. +func (SubmitBlockResponseMessage_RejectReason) EnumDescriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{17, 0} +} + +// RPCError represents a generic non-internal error. +// +// Receivers of any ResponseMessage are expected to check whether its error field is not null. 
+type RPCError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *RPCError) Reset() { + *x = RPCError{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RPCError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RPCError) ProtoMessage() {} + +func (x *RPCError) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RPCError.ProtoReflect.Descriptor instead. +func (*RPCError) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{0} +} + +func (x *RPCError) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type RpcBlock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *RpcBlockHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Transactions []*RpcTransaction `protobuf:"bytes,2,rep,name=transactions,proto3" json:"transactions,omitempty"` + VerboseData *RpcBlockVerboseData `protobuf:"bytes,3,opt,name=verboseData,proto3" json:"verboseData,omitempty"` +} + +func (x *RpcBlock) Reset() { + *x = RpcBlock{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcBlock) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcBlock) ProtoMessage() {} + +func (x *RpcBlock) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[1] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcBlock.ProtoReflect.Descriptor instead. +func (*RpcBlock) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{1} +} + +func (x *RpcBlock) GetHeader() *RpcBlockHeader { + if x != nil { + return x.Header + } + return nil +} + +func (x *RpcBlock) GetTransactions() []*RpcTransaction { + if x != nil { + return x.Transactions + } + return nil +} + +func (x *RpcBlock) GetVerboseData() *RpcBlockVerboseData { + if x != nil { + return x.VerboseData + } + return nil +} + +type RpcBlockHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Parents []*RpcBlockLevelParents `protobuf:"bytes,12,rep,name=parents,proto3" json:"parents,omitempty"` + HashMerkleRoot string `protobuf:"bytes,3,opt,name=hashMerkleRoot,proto3" json:"hashMerkleRoot,omitempty"` + AcceptedIdMerkleRoot string `protobuf:"bytes,4,opt,name=acceptedIdMerkleRoot,proto3" json:"acceptedIdMerkleRoot,omitempty"` + UtxoCommitment string `protobuf:"bytes,5,opt,name=utxoCommitment,proto3" json:"utxoCommitment,omitempty"` + Timestamp int64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Bits uint32 `protobuf:"varint,7,opt,name=bits,proto3" json:"bits,omitempty"` + Nonce uint64 `protobuf:"varint,8,opt,name=nonce,proto3" json:"nonce,omitempty"` + DaaScore uint64 `protobuf:"varint,9,opt,name=daaScore,proto3" json:"daaScore,omitempty"` + BlueWork string `protobuf:"bytes,10,opt,name=blueWork,proto3" json:"blueWork,omitempty"` + PruningPoint string `protobuf:"bytes,14,opt,name=pruningPoint,proto3" json:"pruningPoint,omitempty"` + BlueScore uint64 `protobuf:"varint,13,opt,name=blueScore,proto3" 
json:"blueScore,omitempty"` +} + +func (x *RpcBlockHeader) Reset() { + *x = RpcBlockHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcBlockHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcBlockHeader) ProtoMessage() {} + +func (x *RpcBlockHeader) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcBlockHeader.ProtoReflect.Descriptor instead. +func (*RpcBlockHeader) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{2} +} + +func (x *RpcBlockHeader) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *RpcBlockHeader) GetParents() []*RpcBlockLevelParents { + if x != nil { + return x.Parents + } + return nil +} + +func (x *RpcBlockHeader) GetHashMerkleRoot() string { + if x != nil { + return x.HashMerkleRoot + } + return "" +} + +func (x *RpcBlockHeader) GetAcceptedIdMerkleRoot() string { + if x != nil { + return x.AcceptedIdMerkleRoot + } + return "" +} + +func (x *RpcBlockHeader) GetUtxoCommitment() string { + if x != nil { + return x.UtxoCommitment + } + return "" +} + +func (x *RpcBlockHeader) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *RpcBlockHeader) GetBits() uint32 { + if x != nil { + return x.Bits + } + return 0 +} + +func (x *RpcBlockHeader) GetNonce() uint64 { + if x != nil { + return x.Nonce + } + return 0 +} + +func (x *RpcBlockHeader) GetDaaScore() uint64 { + if x != nil { + return x.DaaScore + } + return 0 +} + +func (x *RpcBlockHeader) GetBlueWork() string { + if x != nil { + return x.BlueWork + } + return "" +} + +func (x 
*RpcBlockHeader) GetPruningPoint() string { + if x != nil { + return x.PruningPoint + } + return "" +} + +func (x *RpcBlockHeader) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +type RpcBlockLevelParents struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ParentHashes []string `protobuf:"bytes,1,rep,name=parentHashes,proto3" json:"parentHashes,omitempty"` +} + +func (x *RpcBlockLevelParents) Reset() { + *x = RpcBlockLevelParents{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcBlockLevelParents) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcBlockLevelParents) ProtoMessage() {} + +func (x *RpcBlockLevelParents) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcBlockLevelParents.ProtoReflect.Descriptor instead. 
+func (*RpcBlockLevelParents) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{3} +} + +func (x *RpcBlockLevelParents) GetParentHashes() []string { + if x != nil { + return x.ParentHashes + } + return nil +} + +type RpcBlockVerboseData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Difficulty float64 `protobuf:"fixed64,11,opt,name=difficulty,proto3" json:"difficulty,omitempty"` + SelectedParentHash string `protobuf:"bytes,13,opt,name=selectedParentHash,proto3" json:"selectedParentHash,omitempty"` + TransactionIds []string `protobuf:"bytes,14,rep,name=transactionIds,proto3" json:"transactionIds,omitempty"` + IsHeaderOnly bool `protobuf:"varint,15,opt,name=isHeaderOnly,proto3" json:"isHeaderOnly,omitempty"` + BlueScore uint64 `protobuf:"varint,16,opt,name=blueScore,proto3" json:"blueScore,omitempty"` + ChildrenHashes []string `protobuf:"bytes,17,rep,name=childrenHashes,proto3" json:"childrenHashes,omitempty"` + MergeSetBluesHashes []string `protobuf:"bytes,18,rep,name=mergeSetBluesHashes,proto3" json:"mergeSetBluesHashes,omitempty"` + MergeSetRedsHashes []string `protobuf:"bytes,19,rep,name=mergeSetRedsHashes,proto3" json:"mergeSetRedsHashes,omitempty"` + IsChainBlock bool `protobuf:"varint,20,opt,name=isChainBlock,proto3" json:"isChainBlock,omitempty"` +} + +func (x *RpcBlockVerboseData) Reset() { + *x = RpcBlockVerboseData{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcBlockVerboseData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcBlockVerboseData) ProtoMessage() {} + +func (x *RpcBlockVerboseData) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcBlockVerboseData.ProtoReflect.Descriptor instead. +func (*RpcBlockVerboseData) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{4} +} + +func (x *RpcBlockVerboseData) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + +func (x *RpcBlockVerboseData) GetDifficulty() float64 { + if x != nil { + return x.Difficulty + } + return 0 +} + +func (x *RpcBlockVerboseData) GetSelectedParentHash() string { + if x != nil { + return x.SelectedParentHash + } + return "" +} + +func (x *RpcBlockVerboseData) GetTransactionIds() []string { + if x != nil { + return x.TransactionIds + } + return nil +} + +func (x *RpcBlockVerboseData) GetIsHeaderOnly() bool { + if x != nil { + return x.IsHeaderOnly + } + return false +} + +func (x *RpcBlockVerboseData) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +func (x *RpcBlockVerboseData) GetChildrenHashes() []string { + if x != nil { + return x.ChildrenHashes + } + return nil +} + +func (x *RpcBlockVerboseData) GetMergeSetBluesHashes() []string { + if x != nil { + return x.MergeSetBluesHashes + } + return nil +} + +func (x *RpcBlockVerboseData) GetMergeSetRedsHashes() []string { + if x != nil { + return x.MergeSetRedsHashes + } + return nil +} + +func (x *RpcBlockVerboseData) GetIsChainBlock() bool { + if x != nil { + return x.IsChainBlock + } + return false +} + +type RpcTransaction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Inputs []*RpcTransactionInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*RpcTransactionOutput `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + 
LockTime uint64 `protobuf:"varint,4,opt,name=lockTime,proto3" json:"lockTime,omitempty"` + SubnetworkId string `protobuf:"bytes,5,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` + Gas uint64 `protobuf:"varint,6,opt,name=gas,proto3" json:"gas,omitempty"` + Payload string `protobuf:"bytes,8,opt,name=payload,proto3" json:"payload,omitempty"` + VerboseData *RpcTransactionVerboseData `protobuf:"bytes,9,opt,name=verboseData,proto3" json:"verboseData,omitempty"` +} + +func (x *RpcTransaction) Reset() { + *x = RpcTransaction{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcTransaction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcTransaction) ProtoMessage() {} + +func (x *RpcTransaction) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcTransaction.ProtoReflect.Descriptor instead. 
+func (*RpcTransaction) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{5} +} + +func (x *RpcTransaction) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *RpcTransaction) GetInputs() []*RpcTransactionInput { + if x != nil { + return x.Inputs + } + return nil +} + +func (x *RpcTransaction) GetOutputs() []*RpcTransactionOutput { + if x != nil { + return x.Outputs + } + return nil +} + +func (x *RpcTransaction) GetLockTime() uint64 { + if x != nil { + return x.LockTime + } + return 0 +} + +func (x *RpcTransaction) GetSubnetworkId() string { + if x != nil { + return x.SubnetworkId + } + return "" +} + +func (x *RpcTransaction) GetGas() uint64 { + if x != nil { + return x.Gas + } + return 0 +} + +func (x *RpcTransaction) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +func (x *RpcTransaction) GetVerboseData() *RpcTransactionVerboseData { + if x != nil { + return x.VerboseData + } + return nil +} + +type RpcTransactionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousOutpoint *RpcOutpoint `protobuf:"bytes,1,opt,name=previousOutpoint,proto3" json:"previousOutpoint,omitempty"` + SignatureScript string `protobuf:"bytes,2,opt,name=signatureScript,proto3" json:"signatureScript,omitempty"` + Sequence uint64 `protobuf:"varint,3,opt,name=sequence,proto3" json:"sequence,omitempty"` + SigOpCount uint32 `protobuf:"varint,5,opt,name=sigOpCount,proto3" json:"sigOpCount,omitempty"` + VerboseData *RpcTransactionInputVerboseData `protobuf:"bytes,4,opt,name=verboseData,proto3" json:"verboseData,omitempty"` +} + +func (x *RpcTransactionInput) Reset() { + *x = RpcTransactionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcTransactionInput) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*RpcTransactionInput) ProtoMessage() {} + +func (x *RpcTransactionInput) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcTransactionInput.ProtoReflect.Descriptor instead. +func (*RpcTransactionInput) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{6} +} + +func (x *RpcTransactionInput) GetPreviousOutpoint() *RpcOutpoint { + if x != nil { + return x.PreviousOutpoint + } + return nil +} + +func (x *RpcTransactionInput) GetSignatureScript() string { + if x != nil { + return x.SignatureScript + } + return "" +} + +func (x *RpcTransactionInput) GetSequence() uint64 { + if x != nil { + return x.Sequence + } + return 0 +} + +func (x *RpcTransactionInput) GetSigOpCount() uint32 { + if x != nil { + return x.SigOpCount + } + return 0 +} + +func (x *RpcTransactionInput) GetVerboseData() *RpcTransactionInputVerboseData { + if x != nil { + return x.VerboseData + } + return nil +} + +type RpcScriptPublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + ScriptPublicKey string `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` +} + +func (x *RpcScriptPublicKey) Reset() { + *x = RpcScriptPublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcScriptPublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcScriptPublicKey) ProtoMessage() {} + +func (x *RpcScriptPublicKey) ProtoReflect() protoreflect.Message { + mi := 
&file_rpc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcScriptPublicKey.ProtoReflect.Descriptor instead. +func (*RpcScriptPublicKey) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{7} +} + +func (x *RpcScriptPublicKey) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *RpcScriptPublicKey) GetScriptPublicKey() string { + if x != nil { + return x.ScriptPublicKey + } + return "" +} + +type RpcTransactionOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + ScriptPublicKey *RpcScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` + VerboseData *RpcTransactionOutputVerboseData `protobuf:"bytes,3,opt,name=verboseData,proto3" json:"verboseData,omitempty"` +} + +func (x *RpcTransactionOutput) Reset() { + *x = RpcTransactionOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcTransactionOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcTransactionOutput) ProtoMessage() {} + +func (x *RpcTransactionOutput) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcTransactionOutput.ProtoReflect.Descriptor instead. 
+func (*RpcTransactionOutput) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{8} +} + +func (x *RpcTransactionOutput) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *RpcTransactionOutput) GetScriptPublicKey() *RpcScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +func (x *RpcTransactionOutput) GetVerboseData() *RpcTransactionOutputVerboseData { + if x != nil { + return x.VerboseData + } + return nil +} + +type RpcOutpoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionId string `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` +} + +func (x *RpcOutpoint) Reset() { + *x = RpcOutpoint{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcOutpoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcOutpoint) ProtoMessage() {} + +func (x *RpcOutpoint) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcOutpoint.ProtoReflect.Descriptor instead. 
+func (*RpcOutpoint) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{9} +} + +func (x *RpcOutpoint) GetTransactionId() string { + if x != nil { + return x.TransactionId + } + return "" +} + +func (x *RpcOutpoint) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +type RpcUtxoEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + ScriptPublicKey *RpcScriptPublicKey `protobuf:"bytes,2,opt,name=scriptPublicKey,proto3" json:"scriptPublicKey,omitempty"` + BlockDaaScore uint64 `protobuf:"varint,3,opt,name=blockDaaScore,proto3" json:"blockDaaScore,omitempty"` + IsCoinbase bool `protobuf:"varint,4,opt,name=isCoinbase,proto3" json:"isCoinbase,omitempty"` +} + +func (x *RpcUtxoEntry) Reset() { + *x = RpcUtxoEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcUtxoEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcUtxoEntry) ProtoMessage() {} + +func (x *RpcUtxoEntry) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcUtxoEntry.ProtoReflect.Descriptor instead. 
+func (*RpcUtxoEntry) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{10} +} + +func (x *RpcUtxoEntry) GetAmount() uint64 { + if x != nil { + return x.Amount + } + return 0 +} + +func (x *RpcUtxoEntry) GetScriptPublicKey() *RpcScriptPublicKey { + if x != nil { + return x.ScriptPublicKey + } + return nil +} + +func (x *RpcUtxoEntry) GetBlockDaaScore() uint64 { + if x != nil { + return x.BlockDaaScore + } + return 0 +} + +func (x *RpcUtxoEntry) GetIsCoinbase() bool { + if x != nil { + return x.IsCoinbase + } + return false +} + +type RpcTransactionVerboseData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TransactionId string `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + Mass uint64 `protobuf:"varint,4,opt,name=mass,proto3" json:"mass,omitempty"` + BlockHash string `protobuf:"bytes,12,opt,name=blockHash,proto3" json:"blockHash,omitempty"` + BlockTime uint64 `protobuf:"varint,14,opt,name=blockTime,proto3" json:"blockTime,omitempty"` +} + +func (x *RpcTransactionVerboseData) Reset() { + *x = RpcTransactionVerboseData{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcTransactionVerboseData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcTransactionVerboseData) ProtoMessage() {} + +func (x *RpcTransactionVerboseData) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcTransactionVerboseData.ProtoReflect.Descriptor instead. 
+func (*RpcTransactionVerboseData) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{11} +} + +func (x *RpcTransactionVerboseData) GetTransactionId() string { + if x != nil { + return x.TransactionId + } + return "" +} + +func (x *RpcTransactionVerboseData) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + +func (x *RpcTransactionVerboseData) GetMass() uint64 { + if x != nil { + return x.Mass + } + return 0 +} + +func (x *RpcTransactionVerboseData) GetBlockHash() string { + if x != nil { + return x.BlockHash + } + return "" +} + +func (x *RpcTransactionVerboseData) GetBlockTime() uint64 { + if x != nil { + return x.BlockTime + } + return 0 +} + +type RpcTransactionInputVerboseData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RpcTransactionInputVerboseData) Reset() { + *x = RpcTransactionInputVerboseData{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcTransactionInputVerboseData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcTransactionInputVerboseData) ProtoMessage() {} + +func (x *RpcTransactionInputVerboseData) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcTransactionInputVerboseData.ProtoReflect.Descriptor instead. 
+func (*RpcTransactionInputVerboseData) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{12} +} + +type RpcTransactionOutputVerboseData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScriptPublicKeyType string `protobuf:"bytes,5,opt,name=scriptPublicKeyType,proto3" json:"scriptPublicKeyType,omitempty"` + ScriptPublicKeyAddress string `protobuf:"bytes,6,opt,name=scriptPublicKeyAddress,proto3" json:"scriptPublicKeyAddress,omitempty"` +} + +func (x *RpcTransactionOutputVerboseData) Reset() { + *x = RpcTransactionOutputVerboseData{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RpcTransactionOutputVerboseData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RpcTransactionOutputVerboseData) ProtoMessage() {} + +func (x *RpcTransactionOutputVerboseData) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RpcTransactionOutputVerboseData.ProtoReflect.Descriptor instead. +func (*RpcTransactionOutputVerboseData) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{13} +} + +func (x *RpcTransactionOutputVerboseData) GetScriptPublicKeyType() string { + if x != nil { + return x.ScriptPublicKeyType + } + return "" +} + +func (x *RpcTransactionOutputVerboseData) GetScriptPublicKeyAddress() string { + if x != nil { + return x.ScriptPublicKeyAddress + } + return "" +} + +// GetCurrentNetworkRequestMessage requests the network spectred is currently running against. 
+// +// Possible networks are: Mainnet, Testnet, Simnet, Devnet +type GetCurrentNetworkRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetCurrentNetworkRequestMessage) Reset() { + *x = GetCurrentNetworkRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCurrentNetworkRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCurrentNetworkRequestMessage) ProtoMessage() {} + +func (x *GetCurrentNetworkRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCurrentNetworkRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetCurrentNetworkRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{14} +} + +type GetCurrentNetworkResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CurrentNetwork string `protobuf:"bytes,1,opt,name=currentNetwork,proto3" json:"currentNetwork,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetCurrentNetworkResponseMessage) Reset() { + *x = GetCurrentNetworkResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCurrentNetworkResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCurrentNetworkResponseMessage) ProtoMessage() {} + +func (x *GetCurrentNetworkResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCurrentNetworkResponseMessage.ProtoReflect.Descriptor instead. +func (*GetCurrentNetworkResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{15} +} + +func (x *GetCurrentNetworkResponseMessage) GetCurrentNetwork() string { + if x != nil { + return x.CurrentNetwork + } + return "" +} + +func (x *GetCurrentNetworkResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// SubmitBlockRequestMessage requests to submit a block into the DAG. +// Blocks are generally expected to have been generated using the getBlockTemplate call. 
+// +// See: GetBlockTemplateRequestMessage +type SubmitBlockRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *RpcBlock `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + AllowNonDAABlocks bool `protobuf:"varint,3,opt,name=allowNonDAABlocks,proto3" json:"allowNonDAABlocks,omitempty"` +} + +func (x *SubmitBlockRequestMessage) Reset() { + *x = SubmitBlockRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubmitBlockRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubmitBlockRequestMessage) ProtoMessage() {} + +func (x *SubmitBlockRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubmitBlockRequestMessage.ProtoReflect.Descriptor instead. 
+func (*SubmitBlockRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{16} +} + +func (x *SubmitBlockRequestMessage) GetBlock() *RpcBlock { + if x != nil { + return x.Block + } + return nil +} + +func (x *SubmitBlockRequestMessage) GetAllowNonDAABlocks() bool { + if x != nil { + return x.AllowNonDAABlocks + } + return false +} + +type SubmitBlockResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RejectReason SubmitBlockResponseMessage_RejectReason `protobuf:"varint,1,opt,name=rejectReason,proto3,enum=protowire.SubmitBlockResponseMessage_RejectReason" json:"rejectReason,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *SubmitBlockResponseMessage) Reset() { + *x = SubmitBlockResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubmitBlockResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubmitBlockResponseMessage) ProtoMessage() {} + +func (x *SubmitBlockResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubmitBlockResponseMessage.ProtoReflect.Descriptor instead. 
+func (*SubmitBlockResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{17} +} + +func (x *SubmitBlockResponseMessage) GetRejectReason() SubmitBlockResponseMessage_RejectReason { + if x != nil { + return x.RejectReason + } + return SubmitBlockResponseMessage_NONE +} + +func (x *SubmitBlockResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetBlockTemplateRequestMessage requests a current block template. +// Callers are expected to solve the block template and submit it using the submitBlock call +// +// See: SubmitBlockRequestMessage +type GetBlockTemplateRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Which spectre address should the coinbase block reward transaction pay into + PayAddress string `protobuf:"bytes,1,opt,name=payAddress,proto3" json:"payAddress,omitempty"` + ExtraData string `protobuf:"bytes,2,opt,name=extraData,proto3" json:"extraData,omitempty"` +} + +func (x *GetBlockTemplateRequestMessage) Reset() { + *x = GetBlockTemplateRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockTemplateRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockTemplateRequestMessage) ProtoMessage() {} + +func (x *GetBlockTemplateRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockTemplateRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlockTemplateRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{18} +} + +func (x *GetBlockTemplateRequestMessage) GetPayAddress() string { + if x != nil { + return x.PayAddress + } + return "" +} + +func (x *GetBlockTemplateRequestMessage) GetExtraData() string { + if x != nil { + return x.ExtraData + } + return "" +} + +type GetBlockTemplateResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *RpcBlock `protobuf:"bytes,3,opt,name=block,proto3" json:"block,omitempty"` + // Whether spectred thinks that it's synced. + // Callers are discouraged (but not forbidden) from solving blocks when spectred is not synced. + // That is because when spectred isn't in sync with the rest of the network there's a high + // chance the block will never be accepted, thus the solving effort would have been wasted. + IsSynced bool `protobuf:"varint,2,opt,name=isSynced,proto3" json:"isSynced,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBlockTemplateResponseMessage) Reset() { + *x = GetBlockTemplateResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockTemplateResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockTemplateResponseMessage) ProtoMessage() {} + +func (x *GetBlockTemplateResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockTemplateResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlockTemplateResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{19} +} + +func (x *GetBlockTemplateResponseMessage) GetBlock() *RpcBlock { + if x != nil { + return x.Block + } + return nil +} + +func (x *GetBlockTemplateResponseMessage) GetIsSynced() bool { + if x != nil { + return x.IsSynced + } + return false +} + +func (x *GetBlockTemplateResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// NotifyBlockAddedRequestMessage registers this connection for blockAdded notifications. +// +// See: BlockAddedNotificationMessage +type NotifyBlockAddedRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyBlockAddedRequestMessage) Reset() { + *x = NotifyBlockAddedRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyBlockAddedRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyBlockAddedRequestMessage) ProtoMessage() {} + +func (x *NotifyBlockAddedRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyBlockAddedRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyBlockAddedRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{20} +} + +type NotifyBlockAddedResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyBlockAddedResponseMessage) Reset() { + *x = NotifyBlockAddedResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyBlockAddedResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyBlockAddedResponseMessage) ProtoMessage() {} + +func (x *NotifyBlockAddedResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyBlockAddedResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyBlockAddedResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{21} +} + +func (x *NotifyBlockAddedResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// BlockAddedNotificationMessage is sent whenever a blocks has been added (NOT accepted) +// into the DAG. 
+// +// See: NotifyBlockAddedRequestMessage +type BlockAddedNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *RpcBlock `protobuf:"bytes,3,opt,name=block,proto3" json:"block,omitempty"` +} + +func (x *BlockAddedNotificationMessage) Reset() { + *x = BlockAddedNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockAddedNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockAddedNotificationMessage) ProtoMessage() {} + +func (x *BlockAddedNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlockAddedNotificationMessage.ProtoReflect.Descriptor instead. +func (*BlockAddedNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{22} +} + +func (x *BlockAddedNotificationMessage) GetBlock() *RpcBlock { + if x != nil { + return x.Block + } + return nil +} + +// GetPeerAddressesRequestMessage requests the list of known spectred addresses in the +// current network. (mainnet, testnet, etc.) 
+type GetPeerAddressesRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetPeerAddressesRequestMessage) Reset() { + *x = GetPeerAddressesRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPeerAddressesRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPeerAddressesRequestMessage) ProtoMessage() {} + +func (x *GetPeerAddressesRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPeerAddressesRequestMessage.ProtoReflect.Descriptor instead. +func (*GetPeerAddressesRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{23} +} + +type GetPeerAddressesResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []*GetPeerAddressesKnownAddressMessage `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` + BannedAddresses []*GetPeerAddressesKnownAddressMessage `protobuf:"bytes,2,rep,name=bannedAddresses,proto3" json:"bannedAddresses,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetPeerAddressesResponseMessage) Reset() { + *x = GetPeerAddressesResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPeerAddressesResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*GetPeerAddressesResponseMessage) ProtoMessage() {} + +func (x *GetPeerAddressesResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPeerAddressesResponseMessage.ProtoReflect.Descriptor instead. +func (*GetPeerAddressesResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{24} +} + +func (x *GetPeerAddressesResponseMessage) GetAddresses() []*GetPeerAddressesKnownAddressMessage { + if x != nil { + return x.Addresses + } + return nil +} + +func (x *GetPeerAddressesResponseMessage) GetBannedAddresses() []*GetPeerAddressesKnownAddressMessage { + if x != nil { + return x.BannedAddresses + } + return nil +} + +func (x *GetPeerAddressesResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type GetPeerAddressesKnownAddressMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addr string `protobuf:"bytes,1,opt,name=Addr,proto3" json:"Addr,omitempty"` +} + +func (x *GetPeerAddressesKnownAddressMessage) Reset() { + *x = GetPeerAddressesKnownAddressMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetPeerAddressesKnownAddressMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPeerAddressesKnownAddressMessage) ProtoMessage() {} + +func (x *GetPeerAddressesKnownAddressMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPeerAddressesKnownAddressMessage.ProtoReflect.Descriptor instead. +func (*GetPeerAddressesKnownAddressMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{25} +} + +func (x *GetPeerAddressesKnownAddressMessage) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +// GetSelectedTipHashRequestMessage requests the hash of the current virtual's +// selected parent. +type GetSelectedTipHashRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetSelectedTipHashRequestMessage) Reset() { + *x = GetSelectedTipHashRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSelectedTipHashRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSelectedTipHashRequestMessage) ProtoMessage() {} + +func (x *GetSelectedTipHashRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSelectedTipHashRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetSelectedTipHashRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{26} +} + +type GetSelectedTipHashResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SelectedTipHash string `protobuf:"bytes,1,opt,name=selectedTipHash,proto3" json:"selectedTipHash,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetSelectedTipHashResponseMessage) Reset() { + *x = GetSelectedTipHashResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSelectedTipHashResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSelectedTipHashResponseMessage) ProtoMessage() {} + +func (x *GetSelectedTipHashResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSelectedTipHashResponseMessage.ProtoReflect.Descriptor instead. +func (*GetSelectedTipHashResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{27} +} + +func (x *GetSelectedTipHashResponseMessage) GetSelectedTipHash() string { + if x != nil { + return x.SelectedTipHash + } + return "" +} + +func (x *GetSelectedTipHashResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetMempoolEntryRequestMessage requests information about a specific transaction +// in the mempool. 
+type GetMempoolEntryRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The transaction's TransactionID. + TxId string `protobuf:"bytes,1,opt,name=txId,proto3" json:"txId,omitempty"` + IncludeOrphanPool bool `protobuf:"varint,2,opt,name=includeOrphanPool,proto3" json:"includeOrphanPool,omitempty"` + FilterTransactionPool bool `protobuf:"varint,3,opt,name=filterTransactionPool,proto3" json:"filterTransactionPool,omitempty"` +} + +func (x *GetMempoolEntryRequestMessage) Reset() { + *x = GetMempoolEntryRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMempoolEntryRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMempoolEntryRequestMessage) ProtoMessage() {} + +func (x *GetMempoolEntryRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMempoolEntryRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetMempoolEntryRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{28} +} + +func (x *GetMempoolEntryRequestMessage) GetTxId() string { + if x != nil { + return x.TxId + } + return "" +} + +func (x *GetMempoolEntryRequestMessage) GetIncludeOrphanPool() bool { + if x != nil { + return x.IncludeOrphanPool + } + return false +} + +func (x *GetMempoolEntryRequestMessage) GetFilterTransactionPool() bool { + if x != nil { + return x.FilterTransactionPool + } + return false +} + +type GetMempoolEntryResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *MempoolEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetMempoolEntryResponseMessage) Reset() { + *x = GetMempoolEntryResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMempoolEntryResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMempoolEntryResponseMessage) ProtoMessage() {} + +func (x *GetMempoolEntryResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMempoolEntryResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetMempoolEntryResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{29} +} + +func (x *GetMempoolEntryResponseMessage) GetEntry() *MempoolEntry { + if x != nil { + return x.Entry + } + return nil +} + +func (x *GetMempoolEntryResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetMempoolEntriesRequestMessage requests information about all the transactions +// currently in the mempool. +type GetMempoolEntriesRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeOrphanPool bool `protobuf:"varint,1,opt,name=includeOrphanPool,proto3" json:"includeOrphanPool,omitempty"` + FilterTransactionPool bool `protobuf:"varint,2,opt,name=filterTransactionPool,proto3" json:"filterTransactionPool,omitempty"` +} + +func (x *GetMempoolEntriesRequestMessage) Reset() { + *x = GetMempoolEntriesRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMempoolEntriesRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMempoolEntriesRequestMessage) ProtoMessage() {} + +func (x *GetMempoolEntriesRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMempoolEntriesRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetMempoolEntriesRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{30} +} + +func (x *GetMempoolEntriesRequestMessage) GetIncludeOrphanPool() bool { + if x != nil { + return x.IncludeOrphanPool + } + return false +} + +func (x *GetMempoolEntriesRequestMessage) GetFilterTransactionPool() bool { + if x != nil { + return x.FilterTransactionPool + } + return false +} + +type GetMempoolEntriesResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MempoolEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetMempoolEntriesResponseMessage) Reset() { + *x = GetMempoolEntriesResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMempoolEntriesResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMempoolEntriesResponseMessage) ProtoMessage() {} + +func (x *GetMempoolEntriesResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMempoolEntriesResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetMempoolEntriesResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{31} +} + +func (x *GetMempoolEntriesResponseMessage) GetEntries() []*MempoolEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *GetMempoolEntriesResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type MempoolEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Fee uint64 `protobuf:"varint,1,opt,name=fee,proto3" json:"fee,omitempty"` + Transaction *RpcTransaction `protobuf:"bytes,3,opt,name=transaction,proto3" json:"transaction,omitempty"` + IsOrphan bool `protobuf:"varint,4,opt,name=isOrphan,proto3" json:"isOrphan,omitempty"` +} + +func (x *MempoolEntry) Reset() { + *x = MempoolEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MempoolEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MempoolEntry) ProtoMessage() {} + +func (x *MempoolEntry) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MempoolEntry.ProtoReflect.Descriptor instead. 
+func (*MempoolEntry) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{32} +} + +func (x *MempoolEntry) GetFee() uint64 { + if x != nil { + return x.Fee + } + return 0 +} + +func (x *MempoolEntry) GetTransaction() *RpcTransaction { + if x != nil { + return x.Transaction + } + return nil +} + +func (x *MempoolEntry) GetIsOrphan() bool { + if x != nil { + return x.IsOrphan + } + return false +} + +// GetConnectedPeerInfoRequestMessage requests information about all the p2p peers +// currently connected to this spectred. +type GetConnectedPeerInfoRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetConnectedPeerInfoRequestMessage) Reset() { + *x = GetConnectedPeerInfoRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetConnectedPeerInfoRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectedPeerInfoRequestMessage) ProtoMessage() {} + +func (x *GetConnectedPeerInfoRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConnectedPeerInfoRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetConnectedPeerInfoRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{33} +} + +type GetConnectedPeerInfoResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Infos []*GetConnectedPeerInfoMessage `protobuf:"bytes,1,rep,name=infos,proto3" json:"infos,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetConnectedPeerInfoResponseMessage) Reset() { + *x = GetConnectedPeerInfoResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetConnectedPeerInfoResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectedPeerInfoResponseMessage) ProtoMessage() {} + +func (x *GetConnectedPeerInfoResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConnectedPeerInfoResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetConnectedPeerInfoResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{34} +} + +func (x *GetConnectedPeerInfoResponseMessage) GetInfos() []*GetConnectedPeerInfoMessage { + if x != nil { + return x.Infos + } + return nil +} + +func (x *GetConnectedPeerInfoResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type GetConnectedPeerInfoMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // How long did the last ping/pong exchange take + LastPingDuration int64 `protobuf:"varint,3,opt,name=lastPingDuration,proto3" json:"lastPingDuration,omitempty"` + // Whether this spectred initiated the connection + IsOutbound bool `protobuf:"varint,6,opt,name=isOutbound,proto3" json:"isOutbound,omitempty"` + TimeOffset int64 `protobuf:"varint,7,opt,name=timeOffset,proto3" json:"timeOffset,omitempty"` + UserAgent string `protobuf:"bytes,8,opt,name=userAgent,proto3" json:"userAgent,omitempty"` + // The protocol version that this peer claims to support + AdvertisedProtocolVersion uint32 `protobuf:"varint,9,opt,name=advertisedProtocolVersion,proto3" json:"advertisedProtocolVersion,omitempty"` + // The timestamp of when this peer connected to this spectred + TimeConnected int64 `protobuf:"varint,10,opt,name=timeConnected,proto3" json:"timeConnected,omitempty"` + // Whether this peer is the IBD peer (if IBD is running) + IsIbdPeer bool `protobuf:"varint,11,opt,name=isIbdPeer,proto3" json:"isIbdPeer,omitempty"` +} + +func (x *GetConnectedPeerInfoMessage) Reset() { + *x = GetConnectedPeerInfoMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*GetConnectedPeerInfoMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConnectedPeerInfoMessage) ProtoMessage() {} + +func (x *GetConnectedPeerInfoMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConnectedPeerInfoMessage.ProtoReflect.Descriptor instead. +func (*GetConnectedPeerInfoMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{35} +} + +func (x *GetConnectedPeerInfoMessage) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *GetConnectedPeerInfoMessage) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *GetConnectedPeerInfoMessage) GetLastPingDuration() int64 { + if x != nil { + return x.LastPingDuration + } + return 0 +} + +func (x *GetConnectedPeerInfoMessage) GetIsOutbound() bool { + if x != nil { + return x.IsOutbound + } + return false +} + +func (x *GetConnectedPeerInfoMessage) GetTimeOffset() int64 { + if x != nil { + return x.TimeOffset + } + return 0 +} + +func (x *GetConnectedPeerInfoMessage) GetUserAgent() string { + if x != nil { + return x.UserAgent + } + return "" +} + +func (x *GetConnectedPeerInfoMessage) GetAdvertisedProtocolVersion() uint32 { + if x != nil { + return x.AdvertisedProtocolVersion + } + return 0 +} + +func (x *GetConnectedPeerInfoMessage) GetTimeConnected() int64 { + if x != nil { + return x.TimeConnected + } + return 0 +} + +func (x *GetConnectedPeerInfoMessage) GetIsIbdPeer() bool { + if x != nil { + return x.IsIbdPeer + } + return false +} + +// AddPeerRequestMessage adds a peer to spectred's outgoing connection list. +// This will, in most cases, result in spectred connecting to said peer. 
+type AddPeerRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // Whether to keep attempting to connect to this peer after disconnection + IsPermanent bool `protobuf:"varint,2,opt,name=isPermanent,proto3" json:"isPermanent,omitempty"` +} + +func (x *AddPeerRequestMessage) Reset() { + *x = AddPeerRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddPeerRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddPeerRequestMessage) ProtoMessage() {} + +func (x *AddPeerRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddPeerRequestMessage.ProtoReflect.Descriptor instead. 
+func (*AddPeerRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{36} +} + +func (x *AddPeerRequestMessage) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *AddPeerRequestMessage) GetIsPermanent() bool { + if x != nil { + return x.IsPermanent + } + return false +} + +type AddPeerResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *AddPeerResponseMessage) Reset() { + *x = AddPeerResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddPeerResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddPeerResponseMessage) ProtoMessage() {} + +func (x *AddPeerResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddPeerResponseMessage.ProtoReflect.Descriptor instead. 
+func (*AddPeerResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{37} +} + +func (x *AddPeerResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// SubmitTransactionRequestMessage submits a transaction to the mempool +type SubmitTransactionRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Transaction *RpcTransaction `protobuf:"bytes,1,opt,name=transaction,proto3" json:"transaction,omitempty"` + AllowOrphan bool `protobuf:"varint,2,opt,name=allowOrphan,proto3" json:"allowOrphan,omitempty"` +} + +func (x *SubmitTransactionRequestMessage) Reset() { + *x = SubmitTransactionRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubmitTransactionRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubmitTransactionRequestMessage) ProtoMessage() {} + +func (x *SubmitTransactionRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubmitTransactionRequestMessage.ProtoReflect.Descriptor instead. 
+func (*SubmitTransactionRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{38} +} + +func (x *SubmitTransactionRequestMessage) GetTransaction() *RpcTransaction { + if x != nil { + return x.Transaction + } + return nil +} + +func (x *SubmitTransactionRequestMessage) GetAllowOrphan() bool { + if x != nil { + return x.AllowOrphan + } + return false +} + +type SubmitTransactionResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The transaction ID of the submitted transaction + TransactionId string `protobuf:"bytes,1,opt,name=transactionId,proto3" json:"transactionId,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *SubmitTransactionResponseMessage) Reset() { + *x = SubmitTransactionResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubmitTransactionResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubmitTransactionResponseMessage) ProtoMessage() {} + +func (x *SubmitTransactionResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubmitTransactionResponseMessage.ProtoReflect.Descriptor instead. 
+func (*SubmitTransactionResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{39} +} + +func (x *SubmitTransactionResponseMessage) GetTransactionId() string { + if x != nil { + return x.TransactionId + } + return "" +} + +func (x *SubmitTransactionResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// NotifyVirtualSelectedParentChainChangedRequestMessage registers this connection for virtualSelectedParentChainChanged notifications. +// +// See: VirtualSelectedParentChainChangedNotificationMessage +type NotifyVirtualSelectedParentChainChangedRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeAcceptedTransactionIds bool `protobuf:"varint,1,opt,name=includeAcceptedTransactionIds,proto3" json:"includeAcceptedTransactionIds,omitempty"` +} + +func (x *NotifyVirtualSelectedParentChainChangedRequestMessage) Reset() { + *x = NotifyVirtualSelectedParentChainChangedRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyVirtualSelectedParentChainChangedRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyVirtualSelectedParentChainChangedRequestMessage) ProtoMessage() {} + +func (x *NotifyVirtualSelectedParentChainChangedRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyVirtualSelectedParentChainChangedRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyVirtualSelectedParentChainChangedRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{40} +} + +func (x *NotifyVirtualSelectedParentChainChangedRequestMessage) GetIncludeAcceptedTransactionIds() bool { + if x != nil { + return x.IncludeAcceptedTransactionIds + } + return false +} + +type NotifyVirtualSelectedParentChainChangedResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyVirtualSelectedParentChainChangedResponseMessage) Reset() { + *x = NotifyVirtualSelectedParentChainChangedResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyVirtualSelectedParentChainChangedResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyVirtualSelectedParentChainChangedResponseMessage) ProtoMessage() {} + +func (x *NotifyVirtualSelectedParentChainChangedResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyVirtualSelectedParentChainChangedResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyVirtualSelectedParentChainChangedResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{41} +} + +func (x *NotifyVirtualSelectedParentChainChangedResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// VirtualSelectedParentChainChangedNotificationMessage is sent whenever the DAG's selected parent +// chain had changed. 
+// +// See: NotifyVirtualSelectedParentChainChangedRequestMessage +type VirtualSelectedParentChainChangedNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain blocks that were removed, in high-to-low order + RemovedChainBlockHashes []string `protobuf:"bytes,1,rep,name=removedChainBlockHashes,proto3" json:"removedChainBlockHashes,omitempty"` + // The chain blocks that were added, in low-to-high order + AddedChainBlockHashes []string `protobuf:"bytes,3,rep,name=addedChainBlockHashes,proto3" json:"addedChainBlockHashes,omitempty"` + // Will be filled only if `includeAcceptedTransactionIds = true` in the notify request. + AcceptedTransactionIds []*AcceptedTransactionIds `protobuf:"bytes,2,rep,name=acceptedTransactionIds,proto3" json:"acceptedTransactionIds,omitempty"` +} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) Reset() { + *x = VirtualSelectedParentChainChangedNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualSelectedParentChainChangedNotificationMessage) ProtoMessage() {} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualSelectedParentChainChangedNotificationMessage.ProtoReflect.Descriptor instead. 
+func (*VirtualSelectedParentChainChangedNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{42} +} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) GetRemovedChainBlockHashes() []string { + if x != nil { + return x.RemovedChainBlockHashes + } + return nil +} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) GetAddedChainBlockHashes() []string { + if x != nil { + return x.AddedChainBlockHashes + } + return nil +} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) GetAcceptedTransactionIds() []*AcceptedTransactionIds { + if x != nil { + return x.AcceptedTransactionIds + } + return nil +} + +// GetBlockRequestMessage requests information about a specific block +type GetBlockRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The hash of the requested block + Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + // Whether to include transaction data in the response + IncludeTransactions bool `protobuf:"varint,3,opt,name=includeTransactions,proto3" json:"includeTransactions,omitempty"` +} + +func (x *GetBlockRequestMessage) Reset() { + *x = GetBlockRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockRequestMessage) ProtoMessage() {} + +func (x *GetBlockRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlockRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{43} +} + +func (x *GetBlockRequestMessage) GetHash() string { + if x != nil { + return x.Hash + } + return "" +} + +func (x *GetBlockRequestMessage) GetIncludeTransactions() bool { + if x != nil { + return x.IncludeTransactions + } + return false +} + +type GetBlockResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Block *RpcBlock `protobuf:"bytes,3,opt,name=block,proto3" json:"block,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBlockResponseMessage) Reset() { + *x = GetBlockResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockResponseMessage) ProtoMessage() {} + +func (x *GetBlockResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlockResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{44} +} + +func (x *GetBlockResponseMessage) GetBlock() *RpcBlock { + if x != nil { + return x.Block + } + return nil +} + +func (x *GetBlockResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetSubnetworkRequestMessage requests information about a specific subnetwork +// +// Currently unimplemented +type GetSubnetworkRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubnetworkId string `protobuf:"bytes,1,opt,name=subnetworkId,proto3" json:"subnetworkId,omitempty"` +} + +func (x *GetSubnetworkRequestMessage) Reset() { + *x = GetSubnetworkRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSubnetworkRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubnetworkRequestMessage) ProtoMessage() {} + +func (x *GetSubnetworkRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubnetworkRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetSubnetworkRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{45} +} + +func (x *GetSubnetworkRequestMessage) GetSubnetworkId() string { + if x != nil { + return x.SubnetworkId + } + return "" +} + +type GetSubnetworkResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GasLimit uint64 `protobuf:"varint,1,opt,name=gasLimit,proto3" json:"gasLimit,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetSubnetworkResponseMessage) Reset() { + *x = GetSubnetworkResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSubnetworkResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubnetworkResponseMessage) ProtoMessage() {} + +func (x *GetSubnetworkResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubnetworkResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetSubnetworkResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{46} +} + +func (x *GetSubnetworkResponseMessage) GetGasLimit() uint64 { + if x != nil { + return x.GasLimit + } + return 0 +} + +func (x *GetSubnetworkResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetVirtualSelectedParentChainFromBlockRequestMessage requests the virtual selected +// parent chain from some startHash to this spectred's current virtual +type GetVirtualSelectedParentChainFromBlockRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartHash string `protobuf:"bytes,1,opt,name=startHash,proto3" json:"startHash,omitempty"` + IncludeAcceptedTransactionIds bool `protobuf:"varint,2,opt,name=includeAcceptedTransactionIds,proto3" json:"includeAcceptedTransactionIds,omitempty"` +} + +func (x *GetVirtualSelectedParentChainFromBlockRequestMessage) Reset() { + *x = GetVirtualSelectedParentChainFromBlockRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVirtualSelectedParentChainFromBlockRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVirtualSelectedParentChainFromBlockRequestMessage) ProtoMessage() {} + +func (x *GetVirtualSelectedParentChainFromBlockRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVirtualSelectedParentChainFromBlockRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetVirtualSelectedParentChainFromBlockRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{47} +} + +func (x *GetVirtualSelectedParentChainFromBlockRequestMessage) GetStartHash() string { + if x != nil { + return x.StartHash + } + return "" +} + +func (x *GetVirtualSelectedParentChainFromBlockRequestMessage) GetIncludeAcceptedTransactionIds() bool { + if x != nil { + return x.IncludeAcceptedTransactionIds + } + return false +} + +type AcceptedTransactionIds struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AcceptingBlockHash string `protobuf:"bytes,1,opt,name=acceptingBlockHash,proto3" json:"acceptingBlockHash,omitempty"` + AcceptedTransactionIds []string `protobuf:"bytes,2,rep,name=acceptedTransactionIds,proto3" json:"acceptedTransactionIds,omitempty"` +} + +func (x *AcceptedTransactionIds) Reset() { + *x = AcceptedTransactionIds{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AcceptedTransactionIds) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AcceptedTransactionIds) ProtoMessage() {} + +func (x *AcceptedTransactionIds) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AcceptedTransactionIds.ProtoReflect.Descriptor instead. 
+func (*AcceptedTransactionIds) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{48} +} + +func (x *AcceptedTransactionIds) GetAcceptingBlockHash() string { + if x != nil { + return x.AcceptingBlockHash + } + return "" +} + +func (x *AcceptedTransactionIds) GetAcceptedTransactionIds() []string { + if x != nil { + return x.AcceptedTransactionIds + } + return nil +} + +type GetVirtualSelectedParentChainFromBlockResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain blocks that were removed, in high-to-low order + RemovedChainBlockHashes []string `protobuf:"bytes,1,rep,name=removedChainBlockHashes,proto3" json:"removedChainBlockHashes,omitempty"` + // The chain blocks that were added, in low-to-high order + AddedChainBlockHashes []string `protobuf:"bytes,3,rep,name=addedChainBlockHashes,proto3" json:"addedChainBlockHashes,omitempty"` + // The transactions accepted by each block in addedChainBlockHashes. + // Will be filled only if `includeAcceptedTransactionIds = true` in the request. 
+ AcceptedTransactionIds []*AcceptedTransactionIds `protobuf:"bytes,2,rep,name=acceptedTransactionIds,proto3" json:"acceptedTransactionIds,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) Reset() { + *x = GetVirtualSelectedParentChainFromBlockResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVirtualSelectedParentChainFromBlockResponseMessage) ProtoMessage() {} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVirtualSelectedParentChainFromBlockResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetVirtualSelectedParentChainFromBlockResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{49} +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) GetRemovedChainBlockHashes() []string { + if x != nil { + return x.RemovedChainBlockHashes + } + return nil +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) GetAddedChainBlockHashes() []string { + if x != nil { + return x.AddedChainBlockHashes + } + return nil +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) GetAcceptedTransactionIds() []*AcceptedTransactionIds { + if x != nil { + return x.AcceptedTransactionIds + } + return nil +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetBlocksRequestMessage requests blocks between a certain block lowHash up to this +// spectred's current virtual. +type GetBlocksRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LowHash string `protobuf:"bytes,1,opt,name=lowHash,proto3" json:"lowHash,omitempty"` + IncludeBlocks bool `protobuf:"varint,2,opt,name=includeBlocks,proto3" json:"includeBlocks,omitempty"` + IncludeTransactions bool `protobuf:"varint,3,opt,name=includeTransactions,proto3" json:"includeTransactions,omitempty"` +} + +func (x *GetBlocksRequestMessage) Reset() { + *x = GetBlocksRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlocksRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlocksRequestMessage) ProtoMessage() {} + +func (x *GetBlocksRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlocksRequestMessage.ProtoReflect.Descriptor instead. +func (*GetBlocksRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{50} +} + +func (x *GetBlocksRequestMessage) GetLowHash() string { + if x != nil { + return x.LowHash + } + return "" +} + +func (x *GetBlocksRequestMessage) GetIncludeBlocks() bool { + if x != nil { + return x.IncludeBlocks + } + return false +} + +func (x *GetBlocksRequestMessage) GetIncludeTransactions() bool { + if x != nil { + return x.IncludeTransactions + } + return false +} + +type GetBlocksResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockHashes []string `protobuf:"bytes,4,rep,name=blockHashes,proto3" json:"blockHashes,omitempty"` + Blocks []*RpcBlock `protobuf:"bytes,3,rep,name=blocks,proto3" json:"blocks,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBlocksResponseMessage) Reset() { + *x = GetBlocksResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlocksResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlocksResponseMessage) ProtoMessage() {} + +func (x *GetBlocksResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlocksResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlocksResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{51} +} + +func (x *GetBlocksResponseMessage) GetBlockHashes() []string { + if x != nil { + return x.BlockHashes + } + return nil +} + +func (x *GetBlocksResponseMessage) GetBlocks() []*RpcBlock { + if x != nil { + return x.Blocks + } + return nil +} + +func (x *GetBlocksResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetBlockCountRequestMessage requests the current number of blocks in this spectred. +// Note that this number may decrease as pruning occurs. +type GetBlockCountRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetBlockCountRequestMessage) Reset() { + *x = GetBlockCountRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockCountRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockCountRequestMessage) ProtoMessage() {} + +func (x *GetBlockCountRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockCountRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlockCountRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{52} +} + +type GetBlockCountResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockCount uint64 `protobuf:"varint,1,opt,name=blockCount,proto3" json:"blockCount,omitempty"` + HeaderCount uint64 `protobuf:"varint,2,opt,name=headerCount,proto3" json:"headerCount,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBlockCountResponseMessage) Reset() { + *x = GetBlockCountResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockCountResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockCountResponseMessage) ProtoMessage() {} + +func (x *GetBlockCountResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockCountResponseMessage.ProtoReflect.Descriptor instead. +func (*GetBlockCountResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{53} +} + +func (x *GetBlockCountResponseMessage) GetBlockCount() uint64 { + if x != nil { + return x.BlockCount + } + return 0 +} + +func (x *GetBlockCountResponseMessage) GetHeaderCount() uint64 { + if x != nil { + return x.HeaderCount + } + return 0 +} + +func (x *GetBlockCountResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetBlockDagInfoRequestMessage requests general information about the current state +// of this spectred's DAG. 
+type GetBlockDagInfoRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetBlockDagInfoRequestMessage) Reset() { + *x = GetBlockDagInfoRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockDagInfoRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockDagInfoRequestMessage) ProtoMessage() {} + +func (x *GetBlockDagInfoRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockDagInfoRequestMessage.ProtoReflect.Descriptor instead. +func (*GetBlockDagInfoRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{54} +} + +type GetBlockDagInfoResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NetworkName string `protobuf:"bytes,1,opt,name=networkName,proto3" json:"networkName,omitempty"` + BlockCount uint64 `protobuf:"varint,2,opt,name=blockCount,proto3" json:"blockCount,omitempty"` + HeaderCount uint64 `protobuf:"varint,3,opt,name=headerCount,proto3" json:"headerCount,omitempty"` + TipHashes []string `protobuf:"bytes,4,rep,name=tipHashes,proto3" json:"tipHashes,omitempty"` + Difficulty float64 `protobuf:"fixed64,5,opt,name=difficulty,proto3" json:"difficulty,omitempty"` + PastMedianTime int64 `protobuf:"varint,6,opt,name=pastMedianTime,proto3" json:"pastMedianTime,omitempty"` + VirtualParentHashes []string `protobuf:"bytes,7,rep,name=virtualParentHashes,proto3" json:"virtualParentHashes,omitempty"` + PruningPointHash string 
`protobuf:"bytes,8,opt,name=pruningPointHash,proto3" json:"pruningPointHash,omitempty"` + VirtualDaaScore uint64 `protobuf:"varint,9,opt,name=virtualDaaScore,proto3" json:"virtualDaaScore,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBlockDagInfoResponseMessage) Reset() { + *x = GetBlockDagInfoResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockDagInfoResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockDagInfoResponseMessage) ProtoMessage() {} + +func (x *GetBlockDagInfoResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockDagInfoResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetBlockDagInfoResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{55} +} + +func (x *GetBlockDagInfoResponseMessage) GetNetworkName() string { + if x != nil { + return x.NetworkName + } + return "" +} + +func (x *GetBlockDagInfoResponseMessage) GetBlockCount() uint64 { + if x != nil { + return x.BlockCount + } + return 0 +} + +func (x *GetBlockDagInfoResponseMessage) GetHeaderCount() uint64 { + if x != nil { + return x.HeaderCount + } + return 0 +} + +func (x *GetBlockDagInfoResponseMessage) GetTipHashes() []string { + if x != nil { + return x.TipHashes + } + return nil +} + +func (x *GetBlockDagInfoResponseMessage) GetDifficulty() float64 { + if x != nil { + return x.Difficulty + } + return 0 +} + +func (x *GetBlockDagInfoResponseMessage) GetPastMedianTime() int64 { + if x != nil { + return x.PastMedianTime + } + return 0 +} + +func (x *GetBlockDagInfoResponseMessage) GetVirtualParentHashes() []string { + if x != nil { + return x.VirtualParentHashes + } + return nil +} + +func (x *GetBlockDagInfoResponseMessage) GetPruningPointHash() string { + if x != nil { + return x.PruningPointHash + } + return "" +} + +func (x *GetBlockDagInfoResponseMessage) GetVirtualDaaScore() uint64 { + if x != nil { + return x.VirtualDaaScore + } + return 0 +} + +func (x *GetBlockDagInfoResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type ResolveFinalityConflictRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FinalityBlockHash string `protobuf:"bytes,1,opt,name=finalityBlockHash,proto3" json:"finalityBlockHash,omitempty"` +} + +func (x *ResolveFinalityConflictRequestMessage) Reset() { + *x = ResolveFinalityConflictRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*ResolveFinalityConflictRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResolveFinalityConflictRequestMessage) ProtoMessage() {} + +func (x *ResolveFinalityConflictRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResolveFinalityConflictRequestMessage.ProtoReflect.Descriptor instead. +func (*ResolveFinalityConflictRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{56} +} + +func (x *ResolveFinalityConflictRequestMessage) GetFinalityBlockHash() string { + if x != nil { + return x.FinalityBlockHash + } + return "" +} + +type ResolveFinalityConflictResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ResolveFinalityConflictResponseMessage) Reset() { + *x = ResolveFinalityConflictResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResolveFinalityConflictResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResolveFinalityConflictResponseMessage) ProtoMessage() {} + +func (x *ResolveFinalityConflictResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResolveFinalityConflictResponseMessage.ProtoReflect.Descriptor 
instead. +func (*ResolveFinalityConflictResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{57} +} + +func (x *ResolveFinalityConflictResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type NotifyFinalityConflictsRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyFinalityConflictsRequestMessage) Reset() { + *x = NotifyFinalityConflictsRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyFinalityConflictsRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyFinalityConflictsRequestMessage) ProtoMessage() {} + +func (x *NotifyFinalityConflictsRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyFinalityConflictsRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyFinalityConflictsRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{58} +} + +type NotifyFinalityConflictsResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyFinalityConflictsResponseMessage) Reset() { + *x = NotifyFinalityConflictsResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyFinalityConflictsResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyFinalityConflictsResponseMessage) ProtoMessage() {} + +func (x *NotifyFinalityConflictsResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyFinalityConflictsResponseMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyFinalityConflictsResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{59} +} + +func (x *NotifyFinalityConflictsResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type FinalityConflictNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ViolatingBlockHash string `protobuf:"bytes,1,opt,name=violatingBlockHash,proto3" json:"violatingBlockHash,omitempty"` +} + +func (x *FinalityConflictNotificationMessage) Reset() { + *x = FinalityConflictNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FinalityConflictNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FinalityConflictNotificationMessage) ProtoMessage() {} + +func (x *FinalityConflictNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FinalityConflictNotificationMessage.ProtoReflect.Descriptor instead. 
+func (*FinalityConflictNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{60} +} + +func (x *FinalityConflictNotificationMessage) GetViolatingBlockHash() string { + if x != nil { + return x.ViolatingBlockHash + } + return "" +} + +type FinalityConflictResolvedNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FinalityBlockHash string `protobuf:"bytes,1,opt,name=finalityBlockHash,proto3" json:"finalityBlockHash,omitempty"` +} + +func (x *FinalityConflictResolvedNotificationMessage) Reset() { + *x = FinalityConflictResolvedNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FinalityConflictResolvedNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FinalityConflictResolvedNotificationMessage) ProtoMessage() {} + +func (x *FinalityConflictResolvedNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FinalityConflictResolvedNotificationMessage.ProtoReflect.Descriptor instead. +func (*FinalityConflictResolvedNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{61} +} + +func (x *FinalityConflictResolvedNotificationMessage) GetFinalityBlockHash() string { + if x != nil { + return x.FinalityBlockHash + } + return "" +} + +// ShutDownRequestMessage shuts down this spectred. 
+type ShutDownRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShutDownRequestMessage) Reset() { + *x = ShutDownRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutDownRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutDownRequestMessage) ProtoMessage() {} + +func (x *ShutDownRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutDownRequestMessage.ProtoReflect.Descriptor instead. +func (*ShutDownRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{62} +} + +type ShutDownResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ShutDownResponseMessage) Reset() { + *x = ShutDownResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutDownResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutDownResponseMessage) ProtoMessage() {} + +func (x *ShutDownResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use ShutDownResponseMessage.ProtoReflect.Descriptor instead. +func (*ShutDownResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{63} +} + +func (x *ShutDownResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetHeadersRequestMessage requests headers between the given startHash and the +// current virtual, up to the given limit. +type GetHeadersRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartHash string `protobuf:"bytes,1,opt,name=startHash,proto3" json:"startHash,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + IsAscending bool `protobuf:"varint,3,opt,name=isAscending,proto3" json:"isAscending,omitempty"` +} + +func (x *GetHeadersRequestMessage) Reset() { + *x = GetHeadersRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHeadersRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHeadersRequestMessage) ProtoMessage() {} + +func (x *GetHeadersRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHeadersRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetHeadersRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{64} +} + +func (x *GetHeadersRequestMessage) GetStartHash() string { + if x != nil { + return x.StartHash + } + return "" +} + +func (x *GetHeadersRequestMessage) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *GetHeadersRequestMessage) GetIsAscending() bool { + if x != nil { + return x.IsAscending + } + return false +} + +type GetHeadersResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Headers []string `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetHeadersResponseMessage) Reset() { + *x = GetHeadersResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetHeadersResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetHeadersResponseMessage) ProtoMessage() {} + +func (x *GetHeadersResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetHeadersResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetHeadersResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{65} +} + +func (x *GetHeadersResponseMessage) GetHeaders() []string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *GetHeadersResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// NotifyUtxosChangedRequestMessage registers this connection for utxoChanged notifications +// for the given addresses. +// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: UtxosChangedNotificationMessage +type NotifyUtxosChangedRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` // Leave empty to get all updates +} + +func (x *NotifyUtxosChangedRequestMessage) Reset() { + *x = NotifyUtxosChangedRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyUtxosChangedRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyUtxosChangedRequestMessage) ProtoMessage() {} + +func (x *NotifyUtxosChangedRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyUtxosChangedRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyUtxosChangedRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{66} +} + +func (x *NotifyUtxosChangedRequestMessage) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} + +type NotifyUtxosChangedResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyUtxosChangedResponseMessage) Reset() { + *x = NotifyUtxosChangedResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyUtxosChangedResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyUtxosChangedResponseMessage) ProtoMessage() {} + +func (x *NotifyUtxosChangedResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyUtxosChangedResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyUtxosChangedResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{67} +} + +func (x *NotifyUtxosChangedResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// UtxosChangedNotificationMessage is sent whenever the UTXO index had been updated. 
+// +// See: NotifyUtxosChangedRequestMessage +type UtxosChangedNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Added []*UtxosByAddressesEntry `protobuf:"bytes,1,rep,name=added,proto3" json:"added,omitempty"` + Removed []*UtxosByAddressesEntry `protobuf:"bytes,2,rep,name=removed,proto3" json:"removed,omitempty"` +} + +func (x *UtxosChangedNotificationMessage) Reset() { + *x = UtxosChangedNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UtxosChangedNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UtxosChangedNotificationMessage) ProtoMessage() {} + +func (x *UtxosChangedNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UtxosChangedNotificationMessage.ProtoReflect.Descriptor instead. 
+func (*UtxosChangedNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{68} +} + +func (x *UtxosChangedNotificationMessage) GetAdded() []*UtxosByAddressesEntry { + if x != nil { + return x.Added + } + return nil +} + +func (x *UtxosChangedNotificationMessage) GetRemoved() []*UtxosByAddressesEntry { + if x != nil { + return x.Removed + } + return nil +} + +type UtxosByAddressesEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Outpoint *RpcOutpoint `protobuf:"bytes,2,opt,name=outpoint,proto3" json:"outpoint,omitempty"` + UtxoEntry *RpcUtxoEntry `protobuf:"bytes,3,opt,name=utxoEntry,proto3" json:"utxoEntry,omitempty"` +} + +func (x *UtxosByAddressesEntry) Reset() { + *x = UtxosByAddressesEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UtxosByAddressesEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UtxosByAddressesEntry) ProtoMessage() {} + +func (x *UtxosByAddressesEntry) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UtxosByAddressesEntry.ProtoReflect.Descriptor instead. 
+func (*UtxosByAddressesEntry) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{69} +} + +func (x *UtxosByAddressesEntry) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *UtxosByAddressesEntry) GetOutpoint() *RpcOutpoint { + if x != nil { + return x.Outpoint + } + return nil +} + +func (x *UtxosByAddressesEntry) GetUtxoEntry() *RpcUtxoEntry { + if x != nil { + return x.UtxoEntry + } + return nil +} + +// StopNotifyingUtxosChangedRequestMessage unregisters this connection for utxoChanged notifications +// for the given addresses. +// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: UtxosChangedNotificationMessage +type StopNotifyingUtxosChangedRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (x *StopNotifyingUtxosChangedRequestMessage) Reset() { + *x = StopNotifyingUtxosChangedRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopNotifyingUtxosChangedRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopNotifyingUtxosChangedRequestMessage) ProtoMessage() {} + +func (x *StopNotifyingUtxosChangedRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopNotifyingUtxosChangedRequestMessage.ProtoReflect.Descriptor instead. 
+func (*StopNotifyingUtxosChangedRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{70} +} + +func (x *StopNotifyingUtxosChangedRequestMessage) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} + +type StopNotifyingUtxosChangedResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *StopNotifyingUtxosChangedResponseMessage) Reset() { + *x = StopNotifyingUtxosChangedResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopNotifyingUtxosChangedResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopNotifyingUtxosChangedResponseMessage) ProtoMessage() {} + +func (x *StopNotifyingUtxosChangedResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopNotifyingUtxosChangedResponseMessage.ProtoReflect.Descriptor instead. 
+func (*StopNotifyingUtxosChangedResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{71} +} + +func (x *StopNotifyingUtxosChangedResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetUtxosByAddressesRequestMessage requests all current UTXOs for the given spectred addresses +// +// This call is only available when this spectred was started with `--utxoindex` +type GetUtxosByAddressesRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (x *GetUtxosByAddressesRequestMessage) Reset() { + *x = GetUtxosByAddressesRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUtxosByAddressesRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUtxosByAddressesRequestMessage) ProtoMessage() {} + +func (x *GetUtxosByAddressesRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUtxosByAddressesRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetUtxosByAddressesRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{72} +} + +func (x *GetUtxosByAddressesRequestMessage) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} + +type GetUtxosByAddressesResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*UtxosByAddressesEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetUtxosByAddressesResponseMessage) Reset() { + *x = GetUtxosByAddressesResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUtxosByAddressesResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUtxosByAddressesResponseMessage) ProtoMessage() {} + +func (x *GetUtxosByAddressesResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUtxosByAddressesResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetUtxosByAddressesResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{73} +} + +func (x *GetUtxosByAddressesResponseMessage) GetEntries() []*UtxosByAddressesEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *GetUtxosByAddressesResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetBalanceByAddressRequest returns the total balance in unspent transactions towards a given address +// +// This call is only available when this spectred was started with `--utxoindex` +type GetBalanceByAddressRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *GetBalanceByAddressRequestMessage) Reset() { + *x = GetBalanceByAddressRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBalanceByAddressRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBalanceByAddressRequestMessage) ProtoMessage() {} + +func (x *GetBalanceByAddressRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBalanceByAddressRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetBalanceByAddressRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{74} +} + +func (x *GetBalanceByAddressRequestMessage) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type GetBalanceByAddressResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Balance uint64 `protobuf:"varint,1,opt,name=balance,proto3" json:"balance,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBalanceByAddressResponseMessage) Reset() { + *x = GetBalanceByAddressResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBalanceByAddressResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBalanceByAddressResponseMessage) ProtoMessage() {} + +func (x *GetBalanceByAddressResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBalanceByAddressResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetBalanceByAddressResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{75} +} + +func (x *GetBalanceByAddressResponseMessage) GetBalance() uint64 { + if x != nil { + return x.Balance + } + return 0 +} + +func (x *GetBalanceByAddressResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type GetBalancesByAddressesRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (x *GetBalancesByAddressesRequestMessage) Reset() { + *x = GetBalancesByAddressesRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBalancesByAddressesRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBalancesByAddressesRequestMessage) ProtoMessage() {} + +func (x *GetBalancesByAddressesRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBalancesByAddressesRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetBalancesByAddressesRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{76} +} + +func (x *GetBalancesByAddressesRequestMessage) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} + +type BalancesByAddressEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Balance uint64 `protobuf:"varint,2,opt,name=balance,proto3" json:"balance,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *BalancesByAddressEntry) Reset() { + *x = BalancesByAddressEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BalancesByAddressEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BalancesByAddressEntry) ProtoMessage() {} + +func (x *BalancesByAddressEntry) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BalancesByAddressEntry.ProtoReflect.Descriptor instead. 
+func (*BalancesByAddressEntry) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{77} +} + +func (x *BalancesByAddressEntry) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *BalancesByAddressEntry) GetBalance() uint64 { + if x != nil { + return x.Balance + } + return 0 +} + +func (x *BalancesByAddressEntry) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type GetBalancesByAddressesResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*BalancesByAddressEntry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetBalancesByAddressesResponseMessage) Reset() { + *x = GetBalancesByAddressesResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBalancesByAddressesResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBalancesByAddressesResponseMessage) ProtoMessage() {} + +func (x *GetBalancesByAddressesResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBalancesByAddressesResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetBalancesByAddressesResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{78} +} + +func (x *GetBalancesByAddressesResponseMessage) GetEntries() []*BalancesByAddressEntry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *GetBalancesByAddressesResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetVirtualSelectedParentBlueScoreRequestMessage requests the blue score of the current selected parent +// of the virtual block. +type GetVirtualSelectedParentBlueScoreRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetVirtualSelectedParentBlueScoreRequestMessage) Reset() { + *x = GetVirtualSelectedParentBlueScoreRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVirtualSelectedParentBlueScoreRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVirtualSelectedParentBlueScoreRequestMessage) ProtoMessage() {} + +func (x *GetVirtualSelectedParentBlueScoreRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVirtualSelectedParentBlueScoreRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetVirtualSelectedParentBlueScoreRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{79} +} + +type GetVirtualSelectedParentBlueScoreResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlueScore uint64 `protobuf:"varint,1,opt,name=blueScore,proto3" json:"blueScore,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetVirtualSelectedParentBlueScoreResponseMessage) Reset() { + *x = GetVirtualSelectedParentBlueScoreResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetVirtualSelectedParentBlueScoreResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVirtualSelectedParentBlueScoreResponseMessage) ProtoMessage() {} + +func (x *GetVirtualSelectedParentBlueScoreResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[80] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVirtualSelectedParentBlueScoreResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetVirtualSelectedParentBlueScoreResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{80} +} + +func (x *GetVirtualSelectedParentBlueScoreResponseMessage) GetBlueScore() uint64 { + if x != nil { + return x.BlueScore + } + return 0 +} + +func (x *GetVirtualSelectedParentBlueScoreResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// NotifyVirtualSelectedParentBlueScoreChangedRequestMessage registers this connection for +// virtualSelectedParentBlueScoreChanged notifications. +// +// See: VirtualSelectedParentBlueScoreChangedNotificationMessage +type NotifyVirtualSelectedParentBlueScoreChangedRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) Reset() { + *x = NotifyVirtualSelectedParentBlueScoreChangedRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) ProtoMessage() {} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[81] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyVirtualSelectedParentBlueScoreChangedRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{81} +} + +type NotifyVirtualSelectedParentBlueScoreChangedResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) Reset() { + *x = NotifyVirtualSelectedParentBlueScoreChangedResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) ProtoMessage() {} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[82] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyVirtualSelectedParentBlueScoreChangedResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{82} +} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// VirtualSelectedParentBlueScoreChangedNotificationMessage is sent whenever the blue score +// of the virtual's selected parent changes. 
+// +// See NotifyVirtualSelectedParentBlueScoreChangedRequestMessage +type VirtualSelectedParentBlueScoreChangedNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VirtualSelectedParentBlueScore uint64 `protobuf:"varint,1,opt,name=virtualSelectedParentBlueScore,proto3" json:"virtualSelectedParentBlueScore,omitempty"` +} + +func (x *VirtualSelectedParentBlueScoreChangedNotificationMessage) Reset() { + *x = VirtualSelectedParentBlueScoreChangedNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualSelectedParentBlueScoreChangedNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualSelectedParentBlueScoreChangedNotificationMessage) ProtoMessage() {} + +func (x *VirtualSelectedParentBlueScoreChangedNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[83] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualSelectedParentBlueScoreChangedNotificationMessage.ProtoReflect.Descriptor instead. +func (*VirtualSelectedParentBlueScoreChangedNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{83} +} + +func (x *VirtualSelectedParentBlueScoreChangedNotificationMessage) GetVirtualSelectedParentBlueScore() uint64 { + if x != nil { + return x.VirtualSelectedParentBlueScore + } + return 0 +} + +// NotifyVirtualDaaScoreChangedRequestMessage registers this connection for +// virtualDaaScoreChanged notifications. 
+// +// See: VirtualDaaScoreChangedNotificationMessage +type NotifyVirtualDaaScoreChangedRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyVirtualDaaScoreChangedRequestMessage) Reset() { + *x = NotifyVirtualDaaScoreChangedRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyVirtualDaaScoreChangedRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyVirtualDaaScoreChangedRequestMessage) ProtoMessage() {} + +func (x *NotifyVirtualDaaScoreChangedRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[84] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyVirtualDaaScoreChangedRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyVirtualDaaScoreChangedRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{84} +} + +type NotifyVirtualDaaScoreChangedResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyVirtualDaaScoreChangedResponseMessage) Reset() { + *x = NotifyVirtualDaaScoreChangedResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyVirtualDaaScoreChangedResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyVirtualDaaScoreChangedResponseMessage) ProtoMessage() {} + +func (x *NotifyVirtualDaaScoreChangedResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[85] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyVirtualDaaScoreChangedResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyVirtualDaaScoreChangedResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{85} +} + +func (x *NotifyVirtualDaaScoreChangedResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// VirtualDaaScoreChangedNotificationMessage is sent whenever the DAA score +// of the virtual changes. 
+// +// See NotifyVirtualDaaScoreChangedRequestMessage +type VirtualDaaScoreChangedNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VirtualDaaScore uint64 `protobuf:"varint,1,opt,name=virtualDaaScore,proto3" json:"virtualDaaScore,omitempty"` +} + +func (x *VirtualDaaScoreChangedNotificationMessage) Reset() { + *x = VirtualDaaScoreChangedNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VirtualDaaScoreChangedNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VirtualDaaScoreChangedNotificationMessage) ProtoMessage() {} + +func (x *VirtualDaaScoreChangedNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[86] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VirtualDaaScoreChangedNotificationMessage.ProtoReflect.Descriptor instead. +func (*VirtualDaaScoreChangedNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{86} +} + +func (x *VirtualDaaScoreChangedNotificationMessage) GetVirtualDaaScore() uint64 { + if x != nil { + return x.VirtualDaaScore + } + return 0 +} + +// NotifyPruningPointUTXOSetOverrideRequestMessage registers this connection for +// pruning point UTXO set override notifications. 
+// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: NotifyPruningPointUTXOSetOverrideResponseMessage +type NotifyPruningPointUTXOSetOverrideRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyPruningPointUTXOSetOverrideRequestMessage) Reset() { + *x = NotifyPruningPointUTXOSetOverrideRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyPruningPointUTXOSetOverrideRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyPruningPointUTXOSetOverrideRequestMessage) ProtoMessage() {} + +func (x *NotifyPruningPointUTXOSetOverrideRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[87] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyPruningPointUTXOSetOverrideRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyPruningPointUTXOSetOverrideRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{87} +} + +type NotifyPruningPointUTXOSetOverrideResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyPruningPointUTXOSetOverrideResponseMessage) Reset() { + *x = NotifyPruningPointUTXOSetOverrideResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyPruningPointUTXOSetOverrideResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyPruningPointUTXOSetOverrideResponseMessage) ProtoMessage() {} + +func (x *NotifyPruningPointUTXOSetOverrideResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[88] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyPruningPointUTXOSetOverrideResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyPruningPointUTXOSetOverrideResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{88} +} + +func (x *NotifyPruningPointUTXOSetOverrideResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// PruningPointUTXOSetOverrideNotificationMessage is sent whenever the UTXO index +// resets due to pruning point change via IBD. 
+// +// See NotifyPruningPointUTXOSetOverrideRequestMessage +type PruningPointUTXOSetOverrideNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PruningPointUTXOSetOverrideNotificationMessage) Reset() { + *x = PruningPointUTXOSetOverrideNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PruningPointUTXOSetOverrideNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PruningPointUTXOSetOverrideNotificationMessage) ProtoMessage() {} + +func (x *PruningPointUTXOSetOverrideNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[89] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PruningPointUTXOSetOverrideNotificationMessage.ProtoReflect.Descriptor instead. +func (*PruningPointUTXOSetOverrideNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{89} +} + +// StopNotifyingPruningPointUTXOSetOverrideRequestMessage unregisters this connection for +// pruning point UTXO set override notifications. 
+// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: PruningPointUTXOSetOverrideNotificationMessage +type StopNotifyingPruningPointUTXOSetOverrideRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Reset() { + *x = StopNotifyingPruningPointUTXOSetOverrideRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[90] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopNotifyingPruningPointUTXOSetOverrideRequestMessage) ProtoMessage() {} + +func (x *StopNotifyingPruningPointUTXOSetOverrideRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[90] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopNotifyingPruningPointUTXOSetOverrideRequestMessage.ProtoReflect.Descriptor instead. 
+func (*StopNotifyingPruningPointUTXOSetOverrideRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{90} +} + +type StopNotifyingPruningPointUTXOSetOverrideResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Reset() { + *x = StopNotifyingPruningPointUTXOSetOverrideResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[91] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopNotifyingPruningPointUTXOSetOverrideResponseMessage) ProtoMessage() {} + +func (x *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[91] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopNotifyingPruningPointUTXOSetOverrideResponseMessage.ProtoReflect.Descriptor instead. +func (*StopNotifyingPruningPointUTXOSetOverrideResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{91} +} + +func (x *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// BanRequestMessage bans the given ip. 
+type BanRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` +} + +func (x *BanRequestMessage) Reset() { + *x = BanRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[92] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BanRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BanRequestMessage) ProtoMessage() {} + +func (x *BanRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[92] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BanRequestMessage.ProtoReflect.Descriptor instead. +func (*BanRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{92} +} + +func (x *BanRequestMessage) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +type BanResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *BanResponseMessage) Reset() { + *x = BanResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[93] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BanResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BanResponseMessage) ProtoMessage() {} + +func (x *BanResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[93] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BanResponseMessage.ProtoReflect.Descriptor instead. +func (*BanResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{93} +} + +func (x *BanResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// UnbanRequestMessage unbans the given ip. +type UnbanRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` +} + +func (x *UnbanRequestMessage) Reset() { + *x = UnbanRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[94] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnbanRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnbanRequestMessage) ProtoMessage() {} + +func (x *UnbanRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[94] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnbanRequestMessage.ProtoReflect.Descriptor instead. 
+func (*UnbanRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{94} +} + +func (x *UnbanRequestMessage) GetIp() string { + if x != nil { + return x.Ip + } + return "" +} + +type UnbanResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *UnbanResponseMessage) Reset() { + *x = UnbanResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[95] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnbanResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnbanResponseMessage) ProtoMessage() {} + +func (x *UnbanResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[95] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnbanResponseMessage.ProtoReflect.Descriptor instead. +func (*UnbanResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{95} +} + +func (x *UnbanResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// GetInfoRequestMessage returns info about the node. 
+type GetInfoRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetInfoRequestMessage) Reset() { + *x = GetInfoRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[96] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetInfoRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInfoRequestMessage) ProtoMessage() {} + +func (x *GetInfoRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[96] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInfoRequestMessage.ProtoReflect.Descriptor instead. +func (*GetInfoRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{96} +} + +type GetInfoResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + P2PId string `protobuf:"bytes,1,opt,name=p2pId,proto3" json:"p2pId,omitempty"` + MempoolSize uint64 `protobuf:"varint,2,opt,name=mempoolSize,proto3" json:"mempoolSize,omitempty"` + ServerVersion string `protobuf:"bytes,3,opt,name=serverVersion,proto3" json:"serverVersion,omitempty"` + IsUtxoIndexed bool `protobuf:"varint,4,opt,name=isUtxoIndexed,proto3" json:"isUtxoIndexed,omitempty"` + IsSynced bool `protobuf:"varint,5,opt,name=isSynced,proto3" json:"isSynced,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetInfoResponseMessage) Reset() { + *x = GetInfoResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[97] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*GetInfoResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetInfoResponseMessage) ProtoMessage() {} + +func (x *GetInfoResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[97] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetInfoResponseMessage.ProtoReflect.Descriptor instead. +func (*GetInfoResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{97} +} + +func (x *GetInfoResponseMessage) GetP2PId() string { + if x != nil { + return x.P2PId + } + return "" +} + +func (x *GetInfoResponseMessage) GetMempoolSize() uint64 { + if x != nil { + return x.MempoolSize + } + return 0 +} + +func (x *GetInfoResponseMessage) GetServerVersion() string { + if x != nil { + return x.ServerVersion + } + return "" +} + +func (x *GetInfoResponseMessage) GetIsUtxoIndexed() bool { + if x != nil { + return x.IsUtxoIndexed + } + return false +} + +func (x *GetInfoResponseMessage) GetIsSynced() bool { + if x != nil { + return x.IsSynced + } + return false +} + +func (x *GetInfoResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type EstimateNetworkHashesPerSecondRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WindowSize uint32 `protobuf:"varint,1,opt,name=windowSize,proto3" json:"windowSize,omitempty"` + StartHash string `protobuf:"bytes,2,opt,name=startHash,proto3" json:"startHash,omitempty"` +} + +func (x *EstimateNetworkHashesPerSecondRequestMessage) Reset() { + *x = EstimateNetworkHashesPerSecondRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[98] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + 
+func (x *EstimateNetworkHashesPerSecondRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EstimateNetworkHashesPerSecondRequestMessage) ProtoMessage() {} + +func (x *EstimateNetworkHashesPerSecondRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[98] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EstimateNetworkHashesPerSecondRequestMessage.ProtoReflect.Descriptor instead. +func (*EstimateNetworkHashesPerSecondRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{98} +} + +func (x *EstimateNetworkHashesPerSecondRequestMessage) GetWindowSize() uint32 { + if x != nil { + return x.WindowSize + } + return 0 +} + +func (x *EstimateNetworkHashesPerSecondRequestMessage) GetStartHash() string { + if x != nil { + return x.StartHash + } + return "" +} + +type EstimateNetworkHashesPerSecondResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NetworkHashesPerSecond uint64 `protobuf:"varint,1,opt,name=networkHashesPerSecond,proto3" json:"networkHashesPerSecond,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *EstimateNetworkHashesPerSecondResponseMessage) Reset() { + *x = EstimateNetworkHashesPerSecondResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[99] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EstimateNetworkHashesPerSecondResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EstimateNetworkHashesPerSecondResponseMessage) ProtoMessage() {} + +func (x *EstimateNetworkHashesPerSecondResponseMessage) ProtoReflect() 
protoreflect.Message { + mi := &file_rpc_proto_msgTypes[99] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EstimateNetworkHashesPerSecondResponseMessage.ProtoReflect.Descriptor instead. +func (*EstimateNetworkHashesPerSecondResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{99} +} + +func (x *EstimateNetworkHashesPerSecondResponseMessage) GetNetworkHashesPerSecond() uint64 { + if x != nil { + return x.NetworkHashesPerSecond + } + return 0 +} + +func (x *EstimateNetworkHashesPerSecondResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// NotifyNewBlockTemplateRequestMessage registers this connection for +// NewBlockTemplate notifications. +// +// See: NewBlockTemplateNotificationMessage +type NotifyNewBlockTemplateRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NotifyNewBlockTemplateRequestMessage) Reset() { + *x = NotifyNewBlockTemplateRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[100] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyNewBlockTemplateRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyNewBlockTemplateRequestMessage) ProtoMessage() {} + +func (x *NotifyNewBlockTemplateRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[100] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyNewBlockTemplateRequestMessage.ProtoReflect.Descriptor instead. 
+func (*NotifyNewBlockTemplateRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{100} +} + +type NotifyNewBlockTemplateResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *NotifyNewBlockTemplateResponseMessage) Reset() { + *x = NotifyNewBlockTemplateResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[101] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NotifyNewBlockTemplateResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NotifyNewBlockTemplateResponseMessage) ProtoMessage() {} + +func (x *NotifyNewBlockTemplateResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[101] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NotifyNewBlockTemplateResponseMessage.ProtoReflect.Descriptor instead. +func (*NotifyNewBlockTemplateResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{101} +} + +func (x *NotifyNewBlockTemplateResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +// NewBlockTemplateNotificationMessage is sent whenever a new updated block template is +// available for miners. 
+// +// See NotifyNewBlockTemplateRequestMessage +type NewBlockTemplateNotificationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *NewBlockTemplateNotificationMessage) Reset() { + *x = NewBlockTemplateNotificationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[102] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewBlockTemplateNotificationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewBlockTemplateNotificationMessage) ProtoMessage() {} + +func (x *NewBlockTemplateNotificationMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[102] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewBlockTemplateNotificationMessage.ProtoReflect.Descriptor instead. 
+func (*NewBlockTemplateNotificationMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{102} +} + +type MempoolEntryByAddress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Sending []*MempoolEntry `protobuf:"bytes,2,rep,name=sending,proto3" json:"sending,omitempty"` + Receiving []*MempoolEntry `protobuf:"bytes,3,rep,name=receiving,proto3" json:"receiving,omitempty"` +} + +func (x *MempoolEntryByAddress) Reset() { + *x = MempoolEntryByAddress{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MempoolEntryByAddress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MempoolEntryByAddress) ProtoMessage() {} + +func (x *MempoolEntryByAddress) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[103] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MempoolEntryByAddress.ProtoReflect.Descriptor instead. 
+func (*MempoolEntryByAddress) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{103} +} + +func (x *MempoolEntryByAddress) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *MempoolEntryByAddress) GetSending() []*MempoolEntry { + if x != nil { + return x.Sending + } + return nil +} + +func (x *MempoolEntryByAddress) GetReceiving() []*MempoolEntry { + if x != nil { + return x.Receiving + } + return nil +} + +type GetMempoolEntriesByAddressesRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` + IncludeOrphanPool bool `protobuf:"varint,2,opt,name=includeOrphanPool,proto3" json:"includeOrphanPool,omitempty"` + FilterTransactionPool bool `protobuf:"varint,3,opt,name=filterTransactionPool,proto3" json:"filterTransactionPool,omitempty"` +} + +func (x *GetMempoolEntriesByAddressesRequestMessage) Reset() { + *x = GetMempoolEntriesByAddressesRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[104] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMempoolEntriesByAddressesRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMempoolEntriesByAddressesRequestMessage) ProtoMessage() {} + +func (x *GetMempoolEntriesByAddressesRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[104] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMempoolEntriesByAddressesRequestMessage.ProtoReflect.Descriptor instead. 
+func (*GetMempoolEntriesByAddressesRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{104} +} + +func (x *GetMempoolEntriesByAddressesRequestMessage) GetAddresses() []string { + if x != nil { + return x.Addresses + } + return nil +} + +func (x *GetMempoolEntriesByAddressesRequestMessage) GetIncludeOrphanPool() bool { + if x != nil { + return x.IncludeOrphanPool + } + return false +} + +func (x *GetMempoolEntriesByAddressesRequestMessage) GetFilterTransactionPool() bool { + if x != nil { + return x.FilterTransactionPool + } + return false +} + +type GetMempoolEntriesByAddressesResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MempoolEntryByAddress `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetMempoolEntriesByAddressesResponseMessage) Reset() { + *x = GetMempoolEntriesByAddressesResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[105] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMempoolEntriesByAddressesResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMempoolEntriesByAddressesResponseMessage) ProtoMessage() {} + +func (x *GetMempoolEntriesByAddressesResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[105] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMempoolEntriesByAddressesResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetMempoolEntriesByAddressesResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{105} +} + +func (x *GetMempoolEntriesByAddressesResponseMessage) GetEntries() []*MempoolEntryByAddress { + if x != nil { + return x.Entries + } + return nil +} + +func (x *GetMempoolEntriesByAddressesResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +type GetCoinSupplyRequestMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetCoinSupplyRequestMessage) Reset() { + *x = GetCoinSupplyRequestMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[106] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCoinSupplyRequestMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCoinSupplyRequestMessage) ProtoMessage() {} + +func (x *GetCoinSupplyRequestMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[106] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCoinSupplyRequestMessage.ProtoReflect.Descriptor instead. +func (*GetCoinSupplyRequestMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{106} +} + +type GetCoinSupplyResponseMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxSompi uint64 `protobuf:"varint,1,opt,name=maxSompi,proto3" json:"maxSompi,omitempty"` // note: this is a hard coded maxSupply, actual maxSupply is expected to deviate by upto -5%, but cannot be measured exactly. 
+ CirculatingSompi uint64 `protobuf:"varint,2,opt,name=circulatingSompi,proto3" json:"circulatingSompi,omitempty"` + Error *RPCError `protobuf:"bytes,1000,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *GetCoinSupplyResponseMessage) Reset() { + *x = GetCoinSupplyResponseMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_rpc_proto_msgTypes[107] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCoinSupplyResponseMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCoinSupplyResponseMessage) ProtoMessage() {} + +func (x *GetCoinSupplyResponseMessage) ProtoReflect() protoreflect.Message { + mi := &file_rpc_proto_msgTypes[107] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCoinSupplyResponseMessage.ProtoReflect.Descriptor instead. 
+func (*GetCoinSupplyResponseMessage) Descriptor() ([]byte, []int) { + return file_rpc_proto_rawDescGZIP(), []int{107} +} + +func (x *GetCoinSupplyResponseMessage) GetMaxSompi() uint64 { + if x != nil { + return x.MaxSompi + } + return 0 +} + +func (x *GetCoinSupplyResponseMessage) GetCirculatingSompi() uint64 { + if x != nil { + return x.CirculatingSompi + } + return 0 +} + +func (x *GetCoinSupplyResponseMessage) GetError() *RPCError { + if x != nil { + return x.Error + } + return nil +} + +var File_rpc_proto protoreflect.FileDescriptor + +var file_rpc_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x22, 0x24, 0x0a, 0x08, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xbe, 0x01, 0x0a, + 0x08, 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x76, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 
0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x52, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x22, 0xab, 0x03, + 0x0a, 0x0e, 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x07, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x07, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x68, 0x4d, 0x65, 0x72, + 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x68, + 0x61, 0x73, 0x68, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x32, 0x0a, + 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x4d, 0x65, 0x72, 0x6b, 0x6c, + 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x4d, 0x65, 0x72, 0x6b, 0x6c, 0x65, 0x52, 0x6f, 0x6f, + 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x75, 0x74, 0x78, 0x6f, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x75, 0x74, 0x78, 0x6f, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x69, 0x74, 0x73, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x62, 0x69, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, + 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 
0x6f, 0x6e, 0x63, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x62, 0x6c, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x72, 0x75, + 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x3a, 0x0a, 0x14, 0x52, + 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x91, 0x03, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x61, 0x73, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, + 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, + 0x6c, 0x74, 0x79, 0x12, 0x2e, 0x0a, 0x12, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x26, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 
0x64, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x69, + 0x73, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x6e, 0x6c, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x69, 0x73, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4f, 0x6e, 0x6c, 0x79, 0x12, + 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x62, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x26, 0x0a, + 0x0e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, + 0x11, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x48, + 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, + 0x74, 0x42, 0x6c, 0x75, 0x65, 0x73, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x12, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x13, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x42, 0x6c, 0x75, 0x65, + 0x73, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x6d, 0x65, 0x72, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, 0x73, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x13, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x12, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x64, + 0x73, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x73, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, + 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0xd1, 0x02, 0x0a, 0x0e, + 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, + 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, + 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6c, + 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, + 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, + 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, + 0x61, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x46, 0x0a, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, + 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x22, + 0x8c, 0x02, 0x0a, 0x13, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x42, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, + 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 
0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, + 0x63, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, + 0x6f, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x4f, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x4f, 0x70, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x4b, 0x0a, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x22, 0x58, + 0x0a, 0x12, 0x52, 0x70, 0x63, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, + 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xc5, 0x01, 0x0a, 0x14, 0x52, 0x70, 0x63, + 0x54, 0x72, 0x61, 0x6e, 0x73, 
0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0f, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x70, 0x63, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x12, 0x4c, 0x0a, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x22, 0x49, 0x0a, 0x0b, 0x52, 0x70, 0x63, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xb5, 0x01, 0x0a, 0x0c, + 0x52, 0x70, 0x63, 0x55, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, + 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x61, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x53, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0f, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, + 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x61, 0x53, 0x63, + 0x6f, 0x72, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x43, 0x6f, 0x69, 0x6e, 0x62, + 0x61, 0x73, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x19, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6d, + 0x61, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x6d, 0x61, 0x73, 0x73, 0x12, + 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1c, 0x0a, + 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x20, 0x0a, 0x1e, 0x52, + 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, + 0x0a, 
0x1f, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x56, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x16, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x4b, 0x65, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x21, 0x0a, 0x1f, 0x47, + 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x76, + 0x0a, 0x20, 0x47, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x74, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x05, 
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, + 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x6e, 0x44, 0x41, 0x41, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x4e, 0x6f, 0x6e, 0x44, 0x41, 0x41, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0xdc, 0x01, 0x0a, + 0x1a, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x56, 0x0a, 0x0c, 0x72, + 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x75, + 0x62, 0x6d, 0x69, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x0c, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, + 0x3a, 0x0a, 0x0c, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x42, 0x4c, 0x4f, + 0x43, 0x4b, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x49, 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x49, 0x42, 0x44, 0x10, 0x02, 0x22, 0x5e, 0x0a, 0x1e, 0x47, + 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 
0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, + 0x0a, 0x70, 0x61, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x70, 0x61, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1c, 0x0a, + 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x22, 0x94, 0x01, 0x0a, 0x1f, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x29, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, + 0x53, 0x79, 0x6e, 0x63, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, + 0x53, 0x79, 0x6e, 0x63, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x20, 0x0a, 0x1e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x4d, 0x0a, 0x1f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 
0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x4a, 0x0a, 0x1d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x64, 0x64, 0x65, + 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, + 0x20, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0xf5, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x4c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x62, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 
0x73, 0x73, 0x65, 0x73, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x39, 0x0a, 0x23, 0x47, 0x65, 0x74, + 0x50, 0x65, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x41, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x41, 0x64, 0x64, 0x72, 0x22, 0x22, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x79, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, + 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x54, 0x69, 0x70, 0x48, 0x61, 0x73, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x70, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x97, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, + 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x78, 0x49, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x69, 0x6e, 
0x63, + 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x72, 0x70, + 0x68, 0x61, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x34, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0x7b, 0x0a, + 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x6d, 0x70, 0x6f, + 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2a, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x85, 0x01, 0x0a, 0x1f, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, + 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x50, + 0x6f, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x4f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x34, 0x0a, 0x15, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x6f, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, + 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x79, 0x0a, 0x0c, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, + 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x66, 0x65, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x03, 0x66, 0x65, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x4f, 0x72, 0x70, 0x68, 0x61, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4f, 0x72, 0x70, 0x68, 0x61, + 0x6e, 0x22, 0x24, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x8f, 0x01, 0x0a, 
0x23, 0x47, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x3c, 0x0a, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xd3, 0x02, 0x0a, 0x1b, 0x47, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, + 0x66, 0x6f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x69, 0x6e, 0x67, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6c, + 0x61, 0x73, 0x74, 0x50, 0x69, 0x6e, 0x67, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1e, 0x0a, 0x0a, 0x69, 0x73, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x1c, 
0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, + 0x19, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x19, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x73, 0x49, 0x62, 0x64, 0x50, 0x65, 0x65, 0x72, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x49, 0x62, 0x64, 0x50, 0x65, 0x65, 0x72, 0x22, + 0x53, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x69, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x61, 0x6e, 0x65, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x61, + 0x6e, 0x65, 0x6e, 0x74, 0x22, 0x44, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x80, 0x01, 0x0a, 0x1f, 0x53, + 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 
0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3b, + 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x52, 0x70, 0x63, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x22, 0x74, 0x0a, + 0x20, 0x53, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x7d, 0x0a, 0x35, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x1d, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 
0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x73, 0x22, 0x64, 0x0a, 0x36, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, + 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x81, 0x02, 0x0a, 0x34, 0x56, 0x69, 0x72, + 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x38, 0x0a, 0x17, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x17, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x61, 0x64, 0x64, 0x65, + 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x12, 0x59, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x73, 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x22, 0x5e, 0x0a, 0x16, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x70, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x41, + 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, + 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 
0x72, 0x6b, 0x49, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, + 0x64, 0x22, 0x66, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9a, 0x01, 0x0a, 0x34, 0x47, 0x65, + 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x44, 0x0a, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x16, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x69, 0x6e, 0x67, 0x42, 
0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, + 0x68, 0x12, 0x36, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x22, 0xae, 0x02, 0x0a, 0x35, 0x47, 0x65, + 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x17, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x34, 0x0a, + 0x15, 0x61, 0x64, 0x64, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x61, 0x64, + 0x64, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 
0x73, 0x52, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x73, 0x12, 0x2a, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x47, + 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x6f, 0x77, 0x48, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x77, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x06, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 
0x6f, 0x72, 0x18, 0xe8, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x8c, 0x01, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x20, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x1f, + 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x67, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x9e, 0x03, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x44, 0x61, 0x67, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x75, + 0x6e, 
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x70, 0x48, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x74, 0x69, 0x70, 0x48, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x70, 0x61, 0x73, 0x74, 0x4d, 0x65, 0x64, 0x69, + 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x70, 0x61, + 0x73, 0x74, 0x4d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x13, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x76, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x2a, + 0x0a, 0x10, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x72, 0x75, 0x6e, 0x69, 0x6e, + 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x28, 0x0a, 0x0f, 0x76, 0x69, + 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, + 0x63, 0x6f, 0x72, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, + 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 
0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x55, 0x0a, 0x25, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x46, 0x69, 0x6e, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x66, 0x69, 0x6e, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x22, 0x54, 0x0a, 0x26, 0x52, 0x65, 0x73, 0x6f, 0x6c, + 0x76, 0x65, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, + 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x27, 0x0a, + 0x25, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x54, 0x0a, 0x26, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x55, 0x0a, 0x23, + 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 
0x69, 0x63, 0x74, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6e, 0x67, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, + 0x61, 0x73, 0x68, 0x22, 0x5b, 0x0a, 0x2b, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x43, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x66, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x18, 0x0a, 0x16, 0x53, 0x68, 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x45, 0x0a, 0x17, 0x53, 0x68, + 0x75, 0x74, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x70, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x69, 0x73, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x22, 0x61, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x40, 0x0a, 0x20, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x21, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x95, 0x01, 0x0a, 0x1f, 0x55, 0x74, + 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 
0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, + 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, + 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x64, 0x22, 0x9c, 0x01, 0x0a, 0x15, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x4f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x08, 0x6f, 0x75, 0x74, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x75, 0x74, 0x78, + 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x55, 0x74, 0x78, 0x6f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x22, 0x47, 0x0a, 0x27, 0x53, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, + 0x67, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 
0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x56, 0x0a, 0x28, 0x53, 0x74, 0x6f, + 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x41, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x55, 0x74, 0x78, 0x6f, + 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x55, 0x74, 0x78, 0x6f, 0x73, 0x42, 0x79, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, + 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x3d, 
0x0a, 0x21, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x22, 0x6a, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x44, + 0x0a, 0x24, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x22, 0x78, 0x0a, 0x16, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x62, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 
0x18, 0xe8, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, + 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x90, + 0x01, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x42, 0x79, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x31, 0x0a, 0x2f, 0x47, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, + 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x7c, 0x0a, 0x30, 0x47, 0x65, 0x74, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x6c, 0x75, 0x65, + 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x6c, 0x75, + 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0xe8, 
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, + 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x3b, 0x0a, 0x39, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, + 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x68, 0x0a, 0x3a, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, + 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x82, 0x01, 0x0a, 0x38, 0x56, 0x69, + 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x42, 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x46, 0x0a, 0x1e, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, + 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, + 0x6c, 0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1e, + 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x50, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x6c, 
0x75, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x2c, + 0x0a, 0x2a, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, + 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x59, 0x0a, 0x2b, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, + 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x55, 0x0a, 0x29, 0x56, 0x69, 0x72, 0x74, 0x75, + 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, + 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, + 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x44, 0x61, 0x61, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x31, + 0x0a, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x22, 0x5e, 0x0a, 0x30, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x50, 0x72, 0x75, 0x6e, 0x69, + 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, + 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x30, 0x0a, 0x2e, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x38, 0x0a, 0x36, 0x53, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x79, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, + 0x37, 0x53, 0x74, 0x6f, 0x70, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x75, 0x6e, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x54, 0x58, 0x4f, 0x53, 0x65, + 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x23, 0x0a, 0x11, 0x42, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x22, 0x40, 0x0a, 0x12, 0x42, 0x61, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2a, 0x0a, 0x05, 
0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x25, 0x0a, 0x13, 0x55, + 0x6e, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x70, 0x22, 0x42, 0x0a, 0x14, 0x55, 0x6e, 0x62, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0xe4, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x32, + 0x70, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x70, 0x32, 0x70, 0x49, 0x64, + 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x73, 0x55, 0x74, + 0x78, 0x6f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x69, 0x73, 0x55, 0x74, 0x78, 0x6f, 0x49, 0x6e, 
0x64, 0x65, 0x78, 0x65, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x69, 0x73, 0x53, 0x79, 0x6e, 0x63, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x69, 0x73, 0x53, 0x79, 0x6e, 0x63, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6c, 0x0a, 0x2c, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, + 0x74, 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, + 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, + 0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x77, 0x69, 0x6e, 0x64, + 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x48, 0x61, 0x73, 0x68, 0x22, 0x93, 0x01, 0x0a, 0x2d, 0x45, 0x73, 0x74, 0x69, 0x6d, 0x61, 0x74, + 0x65, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, + 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x48, + 0x61, 0x73, 0x68, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2a, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 
0x72, + 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x26, 0x0a, 0x24, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x79, 0x4e, 0x65, 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x53, 0x0a, 0x25, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x4e, 0x65, 0x77, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x25, 0x0a, 0x23, 0x4e, 0x65, 0x77, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x9b, + 0x01, 0x0a, 0x15, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, + 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, + 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x73, 0x65, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x69, + 0x6e, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x09, 0x72, 0x65, 
0x63, 0x65, 0x69, 0x76, 0x69, 0x6e, 0x67, 0x22, 0xae, 0x01, 0x0a, + 0x2a, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x72, 0x70, + 0x68, 0x61, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x12, 0x34, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0x95, 0x01, + 0x0a, 0x2b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x6d, 0x70, 0x6f, 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3a, 0x0a, + 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x4d, 0x65, 0x6d, 0x70, 0x6f, + 0x6f, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 
0x72, 0x6f, 0x72, 0x52, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x69, 0x6e, + 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x69, 0x6e, + 0x53, 0x75, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x53, 0x6f, 0x6d, 0x70, + 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x53, 0x6f, 0x6d, 0x70, + 0x69, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x6e, 0x67, + 0x53, 0x6f, 0x6d, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x63, 0x69, 0x72, + 0x63, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x6f, 0x6d, 0x70, 0x69, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x2d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x72, 0x65, 0x64, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x77, 0x69, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_rpc_proto_rawDescOnce sync.Once + file_rpc_proto_rawDescData = file_rpc_proto_rawDesc +) + +func file_rpc_proto_rawDescGZIP() []byte { + file_rpc_proto_rawDescOnce.Do(func() { + file_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(file_rpc_proto_rawDescData) + }) + return file_rpc_proto_rawDescData +} + +var file_rpc_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 108) +var file_rpc_proto_goTypes = 
[]interface{}{ + (SubmitBlockResponseMessage_RejectReason)(0), // 0: protowire.SubmitBlockResponseMessage.RejectReason + (*RPCError)(nil), // 1: protowire.RPCError + (*RpcBlock)(nil), // 2: protowire.RpcBlock + (*RpcBlockHeader)(nil), // 3: protowire.RpcBlockHeader + (*RpcBlockLevelParents)(nil), // 4: protowire.RpcBlockLevelParents + (*RpcBlockVerboseData)(nil), // 5: protowire.RpcBlockVerboseData + (*RpcTransaction)(nil), // 6: protowire.RpcTransaction + (*RpcTransactionInput)(nil), // 7: protowire.RpcTransactionInput + (*RpcScriptPublicKey)(nil), // 8: protowire.RpcScriptPublicKey + (*RpcTransactionOutput)(nil), // 9: protowire.RpcTransactionOutput + (*RpcOutpoint)(nil), // 10: protowire.RpcOutpoint + (*RpcUtxoEntry)(nil), // 11: protowire.RpcUtxoEntry + (*RpcTransactionVerboseData)(nil), // 12: protowire.RpcTransactionVerboseData + (*RpcTransactionInputVerboseData)(nil), // 13: protowire.RpcTransactionInputVerboseData + (*RpcTransactionOutputVerboseData)(nil), // 14: protowire.RpcTransactionOutputVerboseData + (*GetCurrentNetworkRequestMessage)(nil), // 15: protowire.GetCurrentNetworkRequestMessage + (*GetCurrentNetworkResponseMessage)(nil), // 16: protowire.GetCurrentNetworkResponseMessage + (*SubmitBlockRequestMessage)(nil), // 17: protowire.SubmitBlockRequestMessage + (*SubmitBlockResponseMessage)(nil), // 18: protowire.SubmitBlockResponseMessage + (*GetBlockTemplateRequestMessage)(nil), // 19: protowire.GetBlockTemplateRequestMessage + (*GetBlockTemplateResponseMessage)(nil), // 20: protowire.GetBlockTemplateResponseMessage + (*NotifyBlockAddedRequestMessage)(nil), // 21: protowire.NotifyBlockAddedRequestMessage + (*NotifyBlockAddedResponseMessage)(nil), // 22: protowire.NotifyBlockAddedResponseMessage + (*BlockAddedNotificationMessage)(nil), // 23: protowire.BlockAddedNotificationMessage + (*GetPeerAddressesRequestMessage)(nil), // 24: protowire.GetPeerAddressesRequestMessage + (*GetPeerAddressesResponseMessage)(nil), // 25: 
protowire.GetPeerAddressesResponseMessage + (*GetPeerAddressesKnownAddressMessage)(nil), // 26: protowire.GetPeerAddressesKnownAddressMessage + (*GetSelectedTipHashRequestMessage)(nil), // 27: protowire.GetSelectedTipHashRequestMessage + (*GetSelectedTipHashResponseMessage)(nil), // 28: protowire.GetSelectedTipHashResponseMessage + (*GetMempoolEntryRequestMessage)(nil), // 29: protowire.GetMempoolEntryRequestMessage + (*GetMempoolEntryResponseMessage)(nil), // 30: protowire.GetMempoolEntryResponseMessage + (*GetMempoolEntriesRequestMessage)(nil), // 31: protowire.GetMempoolEntriesRequestMessage + (*GetMempoolEntriesResponseMessage)(nil), // 32: protowire.GetMempoolEntriesResponseMessage + (*MempoolEntry)(nil), // 33: protowire.MempoolEntry + (*GetConnectedPeerInfoRequestMessage)(nil), // 34: protowire.GetConnectedPeerInfoRequestMessage + (*GetConnectedPeerInfoResponseMessage)(nil), // 35: protowire.GetConnectedPeerInfoResponseMessage + (*GetConnectedPeerInfoMessage)(nil), // 36: protowire.GetConnectedPeerInfoMessage + (*AddPeerRequestMessage)(nil), // 37: protowire.AddPeerRequestMessage + (*AddPeerResponseMessage)(nil), // 38: protowire.AddPeerResponseMessage + (*SubmitTransactionRequestMessage)(nil), // 39: protowire.SubmitTransactionRequestMessage + (*SubmitTransactionResponseMessage)(nil), // 40: protowire.SubmitTransactionResponseMessage + (*NotifyVirtualSelectedParentChainChangedRequestMessage)(nil), // 41: protowire.NotifyVirtualSelectedParentChainChangedRequestMessage + (*NotifyVirtualSelectedParentChainChangedResponseMessage)(nil), // 42: protowire.NotifyVirtualSelectedParentChainChangedResponseMessage + (*VirtualSelectedParentChainChangedNotificationMessage)(nil), // 43: protowire.VirtualSelectedParentChainChangedNotificationMessage + (*GetBlockRequestMessage)(nil), // 44: protowire.GetBlockRequestMessage + (*GetBlockResponseMessage)(nil), // 45: protowire.GetBlockResponseMessage + (*GetSubnetworkRequestMessage)(nil), // 46: 
protowire.GetSubnetworkRequestMessage + (*GetSubnetworkResponseMessage)(nil), // 47: protowire.GetSubnetworkResponseMessage + (*GetVirtualSelectedParentChainFromBlockRequestMessage)(nil), // 48: protowire.GetVirtualSelectedParentChainFromBlockRequestMessage + (*AcceptedTransactionIds)(nil), // 49: protowire.AcceptedTransactionIds + (*GetVirtualSelectedParentChainFromBlockResponseMessage)(nil), // 50: protowire.GetVirtualSelectedParentChainFromBlockResponseMessage + (*GetBlocksRequestMessage)(nil), // 51: protowire.GetBlocksRequestMessage + (*GetBlocksResponseMessage)(nil), // 52: protowire.GetBlocksResponseMessage + (*GetBlockCountRequestMessage)(nil), // 53: protowire.GetBlockCountRequestMessage + (*GetBlockCountResponseMessage)(nil), // 54: protowire.GetBlockCountResponseMessage + (*GetBlockDagInfoRequestMessage)(nil), // 55: protowire.GetBlockDagInfoRequestMessage + (*GetBlockDagInfoResponseMessage)(nil), // 56: protowire.GetBlockDagInfoResponseMessage + (*ResolveFinalityConflictRequestMessage)(nil), // 57: protowire.ResolveFinalityConflictRequestMessage + (*ResolveFinalityConflictResponseMessage)(nil), // 58: protowire.ResolveFinalityConflictResponseMessage + (*NotifyFinalityConflictsRequestMessage)(nil), // 59: protowire.NotifyFinalityConflictsRequestMessage + (*NotifyFinalityConflictsResponseMessage)(nil), // 60: protowire.NotifyFinalityConflictsResponseMessage + (*FinalityConflictNotificationMessage)(nil), // 61: protowire.FinalityConflictNotificationMessage + (*FinalityConflictResolvedNotificationMessage)(nil), // 62: protowire.FinalityConflictResolvedNotificationMessage + (*ShutDownRequestMessage)(nil), // 63: protowire.ShutDownRequestMessage + (*ShutDownResponseMessage)(nil), // 64: protowire.ShutDownResponseMessage + (*GetHeadersRequestMessage)(nil), // 65: protowire.GetHeadersRequestMessage + (*GetHeadersResponseMessage)(nil), // 66: protowire.GetHeadersResponseMessage + (*NotifyUtxosChangedRequestMessage)(nil), // 67: 
protowire.NotifyUtxosChangedRequestMessage + (*NotifyUtxosChangedResponseMessage)(nil), // 68: protowire.NotifyUtxosChangedResponseMessage + (*UtxosChangedNotificationMessage)(nil), // 69: protowire.UtxosChangedNotificationMessage + (*UtxosByAddressesEntry)(nil), // 70: protowire.UtxosByAddressesEntry + (*StopNotifyingUtxosChangedRequestMessage)(nil), // 71: protowire.StopNotifyingUtxosChangedRequestMessage + (*StopNotifyingUtxosChangedResponseMessage)(nil), // 72: protowire.StopNotifyingUtxosChangedResponseMessage + (*GetUtxosByAddressesRequestMessage)(nil), // 73: protowire.GetUtxosByAddressesRequestMessage + (*GetUtxosByAddressesResponseMessage)(nil), // 74: protowire.GetUtxosByAddressesResponseMessage + (*GetBalanceByAddressRequestMessage)(nil), // 75: protowire.GetBalanceByAddressRequestMessage + (*GetBalanceByAddressResponseMessage)(nil), // 76: protowire.GetBalanceByAddressResponseMessage + (*GetBalancesByAddressesRequestMessage)(nil), // 77: protowire.GetBalancesByAddressesRequestMessage + (*BalancesByAddressEntry)(nil), // 78: protowire.BalancesByAddressEntry + (*GetBalancesByAddressesResponseMessage)(nil), // 79: protowire.GetBalancesByAddressesResponseMessage + (*GetVirtualSelectedParentBlueScoreRequestMessage)(nil), // 80: protowire.GetVirtualSelectedParentBlueScoreRequestMessage + (*GetVirtualSelectedParentBlueScoreResponseMessage)(nil), // 81: protowire.GetVirtualSelectedParentBlueScoreResponseMessage + (*NotifyVirtualSelectedParentBlueScoreChangedRequestMessage)(nil), // 82: protowire.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage + (*NotifyVirtualSelectedParentBlueScoreChangedResponseMessage)(nil), // 83: protowire.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage + (*VirtualSelectedParentBlueScoreChangedNotificationMessage)(nil), // 84: protowire.VirtualSelectedParentBlueScoreChangedNotificationMessage + (*NotifyVirtualDaaScoreChangedRequestMessage)(nil), // 85: protowire.NotifyVirtualDaaScoreChangedRequestMessage + 
(*NotifyVirtualDaaScoreChangedResponseMessage)(nil), // 86: protowire.NotifyVirtualDaaScoreChangedResponseMessage + (*VirtualDaaScoreChangedNotificationMessage)(nil), // 87: protowire.VirtualDaaScoreChangedNotificationMessage + (*NotifyPruningPointUTXOSetOverrideRequestMessage)(nil), // 88: protowire.NotifyPruningPointUTXOSetOverrideRequestMessage + (*NotifyPruningPointUTXOSetOverrideResponseMessage)(nil), // 89: protowire.NotifyPruningPointUTXOSetOverrideResponseMessage + (*PruningPointUTXOSetOverrideNotificationMessage)(nil), // 90: protowire.PruningPointUTXOSetOverrideNotificationMessage + (*StopNotifyingPruningPointUTXOSetOverrideRequestMessage)(nil), // 91: protowire.StopNotifyingPruningPointUTXOSetOverrideRequestMessage + (*StopNotifyingPruningPointUTXOSetOverrideResponseMessage)(nil), // 92: protowire.StopNotifyingPruningPointUTXOSetOverrideResponseMessage + (*BanRequestMessage)(nil), // 93: protowire.BanRequestMessage + (*BanResponseMessage)(nil), // 94: protowire.BanResponseMessage + (*UnbanRequestMessage)(nil), // 95: protowire.UnbanRequestMessage + (*UnbanResponseMessage)(nil), // 96: protowire.UnbanResponseMessage + (*GetInfoRequestMessage)(nil), // 97: protowire.GetInfoRequestMessage + (*GetInfoResponseMessage)(nil), // 98: protowire.GetInfoResponseMessage + (*EstimateNetworkHashesPerSecondRequestMessage)(nil), // 99: protowire.EstimateNetworkHashesPerSecondRequestMessage + (*EstimateNetworkHashesPerSecondResponseMessage)(nil), // 100: protowire.EstimateNetworkHashesPerSecondResponseMessage + (*NotifyNewBlockTemplateRequestMessage)(nil), // 101: protowire.NotifyNewBlockTemplateRequestMessage + (*NotifyNewBlockTemplateResponseMessage)(nil), // 102: protowire.NotifyNewBlockTemplateResponseMessage + (*NewBlockTemplateNotificationMessage)(nil), // 103: protowire.NewBlockTemplateNotificationMessage + (*MempoolEntryByAddress)(nil), // 104: protowire.MempoolEntryByAddress + (*GetMempoolEntriesByAddressesRequestMessage)(nil), // 105: 
protowire.GetMempoolEntriesByAddressesRequestMessage + (*GetMempoolEntriesByAddressesResponseMessage)(nil), // 106: protowire.GetMempoolEntriesByAddressesResponseMessage + (*GetCoinSupplyRequestMessage)(nil), // 107: protowire.GetCoinSupplyRequestMessage + (*GetCoinSupplyResponseMessage)(nil), // 108: protowire.GetCoinSupplyResponseMessage +} +var file_rpc_proto_depIdxs = []int32{ + 3, // 0: protowire.RpcBlock.header:type_name -> protowire.RpcBlockHeader + 6, // 1: protowire.RpcBlock.transactions:type_name -> protowire.RpcTransaction + 5, // 2: protowire.RpcBlock.verboseData:type_name -> protowire.RpcBlockVerboseData + 4, // 3: protowire.RpcBlockHeader.parents:type_name -> protowire.RpcBlockLevelParents + 7, // 4: protowire.RpcTransaction.inputs:type_name -> protowire.RpcTransactionInput + 9, // 5: protowire.RpcTransaction.outputs:type_name -> protowire.RpcTransactionOutput + 12, // 6: protowire.RpcTransaction.verboseData:type_name -> protowire.RpcTransactionVerboseData + 10, // 7: protowire.RpcTransactionInput.previousOutpoint:type_name -> protowire.RpcOutpoint + 13, // 8: protowire.RpcTransactionInput.verboseData:type_name -> protowire.RpcTransactionInputVerboseData + 8, // 9: protowire.RpcTransactionOutput.scriptPublicKey:type_name -> protowire.RpcScriptPublicKey + 14, // 10: protowire.RpcTransactionOutput.verboseData:type_name -> protowire.RpcTransactionOutputVerboseData + 8, // 11: protowire.RpcUtxoEntry.scriptPublicKey:type_name -> protowire.RpcScriptPublicKey + 1, // 12: protowire.GetCurrentNetworkResponseMessage.error:type_name -> protowire.RPCError + 2, // 13: protowire.SubmitBlockRequestMessage.block:type_name -> protowire.RpcBlock + 0, // 14: protowire.SubmitBlockResponseMessage.rejectReason:type_name -> protowire.SubmitBlockResponseMessage.RejectReason + 1, // 15: protowire.SubmitBlockResponseMessage.error:type_name -> protowire.RPCError + 2, // 16: protowire.GetBlockTemplateResponseMessage.block:type_name -> protowire.RpcBlock + 1, // 17: 
protowire.GetBlockTemplateResponseMessage.error:type_name -> protowire.RPCError + 1, // 18: protowire.NotifyBlockAddedResponseMessage.error:type_name -> protowire.RPCError + 2, // 19: protowire.BlockAddedNotificationMessage.block:type_name -> protowire.RpcBlock + 26, // 20: protowire.GetPeerAddressesResponseMessage.addresses:type_name -> protowire.GetPeerAddressesKnownAddressMessage + 26, // 21: protowire.GetPeerAddressesResponseMessage.bannedAddresses:type_name -> protowire.GetPeerAddressesKnownAddressMessage + 1, // 22: protowire.GetPeerAddressesResponseMessage.error:type_name -> protowire.RPCError + 1, // 23: protowire.GetSelectedTipHashResponseMessage.error:type_name -> protowire.RPCError + 33, // 24: protowire.GetMempoolEntryResponseMessage.entry:type_name -> protowire.MempoolEntry + 1, // 25: protowire.GetMempoolEntryResponseMessage.error:type_name -> protowire.RPCError + 33, // 26: protowire.GetMempoolEntriesResponseMessage.entries:type_name -> protowire.MempoolEntry + 1, // 27: protowire.GetMempoolEntriesResponseMessage.error:type_name -> protowire.RPCError + 6, // 28: protowire.MempoolEntry.transaction:type_name -> protowire.RpcTransaction + 36, // 29: protowire.GetConnectedPeerInfoResponseMessage.infos:type_name -> protowire.GetConnectedPeerInfoMessage + 1, // 30: protowire.GetConnectedPeerInfoResponseMessage.error:type_name -> protowire.RPCError + 1, // 31: protowire.AddPeerResponseMessage.error:type_name -> protowire.RPCError + 6, // 32: protowire.SubmitTransactionRequestMessage.transaction:type_name -> protowire.RpcTransaction + 1, // 33: protowire.SubmitTransactionResponseMessage.error:type_name -> protowire.RPCError + 1, // 34: protowire.NotifyVirtualSelectedParentChainChangedResponseMessage.error:type_name -> protowire.RPCError + 49, // 35: protowire.VirtualSelectedParentChainChangedNotificationMessage.acceptedTransactionIds:type_name -> protowire.AcceptedTransactionIds + 2, // 36: protowire.GetBlockResponseMessage.block:type_name -> 
protowire.RpcBlock + 1, // 37: protowire.GetBlockResponseMessage.error:type_name -> protowire.RPCError + 1, // 38: protowire.GetSubnetworkResponseMessage.error:type_name -> protowire.RPCError + 49, // 39: protowire.GetVirtualSelectedParentChainFromBlockResponseMessage.acceptedTransactionIds:type_name -> protowire.AcceptedTransactionIds + 1, // 40: protowire.GetVirtualSelectedParentChainFromBlockResponseMessage.error:type_name -> protowire.RPCError + 2, // 41: protowire.GetBlocksResponseMessage.blocks:type_name -> protowire.RpcBlock + 1, // 42: protowire.GetBlocksResponseMessage.error:type_name -> protowire.RPCError + 1, // 43: protowire.GetBlockCountResponseMessage.error:type_name -> protowire.RPCError + 1, // 44: protowire.GetBlockDagInfoResponseMessage.error:type_name -> protowire.RPCError + 1, // 45: protowire.ResolveFinalityConflictResponseMessage.error:type_name -> protowire.RPCError + 1, // 46: protowire.NotifyFinalityConflictsResponseMessage.error:type_name -> protowire.RPCError + 1, // 47: protowire.ShutDownResponseMessage.error:type_name -> protowire.RPCError + 1, // 48: protowire.GetHeadersResponseMessage.error:type_name -> protowire.RPCError + 1, // 49: protowire.NotifyUtxosChangedResponseMessage.error:type_name -> protowire.RPCError + 70, // 50: protowire.UtxosChangedNotificationMessage.added:type_name -> protowire.UtxosByAddressesEntry + 70, // 51: protowire.UtxosChangedNotificationMessage.removed:type_name -> protowire.UtxosByAddressesEntry + 10, // 52: protowire.UtxosByAddressesEntry.outpoint:type_name -> protowire.RpcOutpoint + 11, // 53: protowire.UtxosByAddressesEntry.utxoEntry:type_name -> protowire.RpcUtxoEntry + 1, // 54: protowire.StopNotifyingUtxosChangedResponseMessage.error:type_name -> protowire.RPCError + 70, // 55: protowire.GetUtxosByAddressesResponseMessage.entries:type_name -> protowire.UtxosByAddressesEntry + 1, // 56: protowire.GetUtxosByAddressesResponseMessage.error:type_name -> protowire.RPCError + 1, // 57: 
protowire.GetBalanceByAddressResponseMessage.error:type_name -> protowire.RPCError + 1, // 58: protowire.BalancesByAddressEntry.error:type_name -> protowire.RPCError + 78, // 59: protowire.GetBalancesByAddressesResponseMessage.entries:type_name -> protowire.BalancesByAddressEntry + 1, // 60: protowire.GetBalancesByAddressesResponseMessage.error:type_name -> protowire.RPCError + 1, // 61: protowire.GetVirtualSelectedParentBlueScoreResponseMessage.error:type_name -> protowire.RPCError + 1, // 62: protowire.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage.error:type_name -> protowire.RPCError + 1, // 63: protowire.NotifyVirtualDaaScoreChangedResponseMessage.error:type_name -> protowire.RPCError + 1, // 64: protowire.NotifyPruningPointUTXOSetOverrideResponseMessage.error:type_name -> protowire.RPCError + 1, // 65: protowire.StopNotifyingPruningPointUTXOSetOverrideResponseMessage.error:type_name -> protowire.RPCError + 1, // 66: protowire.BanResponseMessage.error:type_name -> protowire.RPCError + 1, // 67: protowire.UnbanResponseMessage.error:type_name -> protowire.RPCError + 1, // 68: protowire.GetInfoResponseMessage.error:type_name -> protowire.RPCError + 1, // 69: protowire.EstimateNetworkHashesPerSecondResponseMessage.error:type_name -> protowire.RPCError + 1, // 70: protowire.NotifyNewBlockTemplateResponseMessage.error:type_name -> protowire.RPCError + 33, // 71: protowire.MempoolEntryByAddress.sending:type_name -> protowire.MempoolEntry + 33, // 72: protowire.MempoolEntryByAddress.receiving:type_name -> protowire.MempoolEntry + 104, // 73: protowire.GetMempoolEntriesByAddressesResponseMessage.entries:type_name -> protowire.MempoolEntryByAddress + 1, // 74: protowire.GetMempoolEntriesByAddressesResponseMessage.error:type_name -> protowire.RPCError + 1, // 75: protowire.GetCoinSupplyResponseMessage.error:type_name -> protowire.RPCError + 76, // [76:76] is the sub-list for method output_type + 76, // [76:76] is the sub-list for method input_type + 76, // 
[76:76] is the sub-list for extension type_name + 76, // [76:76] is the sub-list for extension extendee + 0, // [0:76] is the sub-list for field type_name +} + +func init() { file_rpc_proto_init() } +func file_rpc_proto_init() { + if File_rpc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_rpc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RPCError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcBlock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcBlockHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcBlockLevelParents); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcBlockVerboseData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcTransaction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcTransactionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcScriptPublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcTransactionOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcOutpoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcUtxoEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcTransactionVerboseData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcTransactionInputVerboseData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RpcTransactionOutputVerboseData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCurrentNetworkRequestMessage); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCurrentNetworkResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubmitBlockRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubmitBlockResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockTemplateRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockTemplateResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyBlockAddedRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyBlockAddedResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[22].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*BlockAddedNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPeerAddressesRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPeerAddressesResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPeerAddressesKnownAddressMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSelectedTipHashRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSelectedTipHashResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMempoolEntryRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMempoolEntryResponseMessage); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMempoolEntriesRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMempoolEntriesResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MempoolEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetConnectedPeerInfoRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetConnectedPeerInfoResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetConnectedPeerInfoMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddPeerRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[37].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*AddPeerResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubmitTransactionRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubmitTransactionResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyVirtualSelectedParentChainChangedRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyVirtualSelectedParentChainChangedResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualSelectedParentChainChangedNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockResponseMessage); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubnetworkRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubnetworkResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVirtualSelectedParentChainFromBlockRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AcceptedTransactionIds); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVirtualSelectedParentChainFromBlockResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlocksRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlocksResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_rpc_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockCountRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockCountResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockDagInfoRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBlockDagInfoResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResolveFinalityConflictRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResolveFinalityConflictResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyFinalityConflictsRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*NotifyFinalityConflictsResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FinalityConflictNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FinalityConflictResolvedNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutDownRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutDownResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHeadersRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetHeadersResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyUtxosChangedRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_rpc_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyUtxosChangedResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UtxosChangedNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UtxosByAddressesEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopNotifyingUtxosChangedRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopNotifyingUtxosChangedResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetUtxosByAddressesRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetUtxosByAddressesResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*GetBalanceByAddressRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBalanceByAddressResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBalancesByAddressesRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BalancesByAddressEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBalancesByAddressesResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVirtualSelectedParentBlueScoreRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVirtualSelectedParentBlueScoreResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyVirtualSelectedParentBlueScoreChangedRequestMessage); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyVirtualSelectedParentBlueScoreChangedResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualSelectedParentBlueScoreChangedNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyVirtualDaaScoreChangedRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyVirtualDaaScoreChangedResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VirtualDaaScoreChangedNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyPruningPointUTXOSetOverrideRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyPruningPointUTXOSetOverrideResponseMessage); i { + case 0: + return &v.state + case 1: 
+ return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PruningPointUTXOSetOverrideNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopNotifyingPruningPointUTXOSetOverrideRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopNotifyingPruningPointUTXOSetOverrideResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BanRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BanResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnbanRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnbanResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_rpc_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetInfoRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetInfoResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EstimateNetworkHashesPerSecondRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EstimateNetworkHashesPerSecondResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyNewBlockTemplateRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NotifyNewBlockTemplateResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NewBlockTemplateNotificationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*MempoolEntryByAddress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMempoolEntriesByAddressesRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMempoolEntriesByAddressesResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCoinSupplyRequestMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_rpc_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCoinSupplyResponseMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_rpc_proto_rawDesc, + NumEnums: 1, + NumMessages: 108, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_rpc_proto_goTypes, + DependencyIndexes: file_rpc_proto_depIdxs, + EnumInfos: file_rpc_proto_enumTypes, + MessageInfos: file_rpc_proto_msgTypes, + }.Build() + File_rpc_proto = out.File + file_rpc_proto_rawDesc = nil + file_rpc_proto_goTypes = nil + file_rpc_proto_depIdxs = nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.proto b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.proto new file mode 100644 index 0000000..f43b77d --- 
/dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc.proto @@ -0,0 +1,723 @@ +// RPC-related types. Request messages, response messages, and dependant types. +// +// Clients are expected to build RequestMessages and wrap them in SpectredMessage. (see messages.proto) +// +// Having received a RequestMessage, (wrapped in a SpectredMessage) the RPC server will respond with a +// ResponseMessage (likewise wrapped in a SpectredMessage) respective to the original RequestMessage. +// +// **IMPORTANT:** This API is a work in progress and is subject to break between versions. +// +syntax = "proto3"; +package protowire; + +option go_package = "github.com/spectre-project/spectred/protowire"; + +// RPCError represents a generic non-internal error. +// +// Receivers of any ResponseMessage are expected to check whether its error field is not null. +message RPCError{ + string message = 1; +} + +message RpcBlock { + RpcBlockHeader header = 1; + repeated RpcTransaction transactions = 2; + RpcBlockVerboseData verboseData = 3; +} + +message RpcBlockHeader { + uint32 version = 1; + repeated RpcBlockLevelParents parents = 12; + string hashMerkleRoot = 3; + string acceptedIdMerkleRoot = 4; + string utxoCommitment = 5; + int64 timestamp = 6; + uint32 bits = 7; + uint64 nonce = 8; + uint64 daaScore = 9; + string blueWork = 10; + string pruningPoint = 14; + uint64 blueScore = 13; +} + +message RpcBlockLevelParents { + repeated string parentHashes = 1; +} + +message RpcBlockVerboseData{ + string hash = 1; + double difficulty = 11; + string selectedParentHash = 13; + repeated string transactionIds = 14; + bool isHeaderOnly = 15; + uint64 blueScore = 16; + repeated string childrenHashes = 17; + repeated string mergeSetBluesHashes = 18; + repeated string mergeSetRedsHashes = 19; + bool isChainBlock = 20; +} + +message RpcTransaction { + uint32 version = 1; + repeated RpcTransactionInput inputs = 2; + repeated RpcTransactionOutput outputs = 3; + uint64 lockTime = 4; 
+ string subnetworkId = 5; + uint64 gas = 6; + string payload = 8; + RpcTransactionVerboseData verboseData = 9; +} + +message RpcTransactionInput { + RpcOutpoint previousOutpoint = 1; + string signatureScript = 2; + uint64 sequence = 3; + uint32 sigOpCount = 5; + RpcTransactionInputVerboseData verboseData = 4; +} + +message RpcScriptPublicKey { + uint32 version = 1; + string scriptPublicKey = 2; +} + +message RpcTransactionOutput { + uint64 amount = 1; + RpcScriptPublicKey scriptPublicKey = 2; + RpcTransactionOutputVerboseData verboseData = 3; +} + +message RpcOutpoint { + string transactionId = 1; + uint32 index = 2; +} + +message RpcUtxoEntry { + uint64 amount = 1; + RpcScriptPublicKey scriptPublicKey = 2; + uint64 blockDaaScore = 3; + bool isCoinbase = 4; +} + +message RpcTransactionVerboseData{ + string transactionId = 1; + string hash = 2; + uint64 mass = 4; + string blockHash = 12; + uint64 blockTime = 14; +} + +message RpcTransactionInputVerboseData{ +} + +message RpcTransactionOutputVerboseData{ + string scriptPublicKeyType = 5; + string scriptPublicKeyAddress = 6; +} + +// GetCurrentNetworkRequestMessage requests the network spectred is currently running against. +// +// Possible networks are: Mainnet, Testnet, Simnet, Devnet +message GetCurrentNetworkRequestMessage{ +} + +message GetCurrentNetworkResponseMessage{ + string currentNetwork = 1; + RPCError error = 1000; +} + +// SubmitBlockRequestMessage requests to submit a block into the DAG. +// Blocks are generally expected to have been generated using the getBlockTemplate call. +// +// See: GetBlockTemplateRequestMessage +message SubmitBlockRequestMessage{ + RpcBlock block = 2; + bool allowNonDAABlocks = 3; +} + +message SubmitBlockResponseMessage{ + enum RejectReason { + NONE = 0; + BLOCK_INVALID = 1; + IS_IN_IBD = 2; + } + RejectReason rejectReason = 1; + RPCError error = 1000; +} + +// GetBlockTemplateRequestMessage requests a current block template. 
+// Callers are expected to solve the block template and submit it using the submitBlock call +// +// See: SubmitBlockRequestMessage +message GetBlockTemplateRequestMessage{ + // Which spectre address should the coinbase block reward transaction pay into + string payAddress = 1; + string extraData = 2; +} + +message GetBlockTemplateResponseMessage{ + RpcBlock block = 3; + + // Whether spectred thinks that it's synced. + // Callers are discouraged (but not forbidden) from solving blocks when spectred is not synced. + // That is because when spectred isn't in sync with the rest of the network there's a high + // chance the block will never be accepted, thus the solving effort would have been wasted. + bool isSynced = 2; + + RPCError error = 1000; +} + +// NotifyBlockAddedRequestMessage registers this connection for blockAdded notifications. +// +// See: BlockAddedNotificationMessage +message NotifyBlockAddedRequestMessage{ +} + +message NotifyBlockAddedResponseMessage{ + RPCError error = 1000; +} + +// BlockAddedNotificationMessage is sent whenever a blocks has been added (NOT accepted) +// into the DAG. +// +// See: NotifyBlockAddedRequestMessage +message BlockAddedNotificationMessage{ + RpcBlock block = 3; +} + +// GetPeerAddressesRequestMessage requests the list of known spectred addresses in the +// current network. (mainnet, testnet, etc.) +message GetPeerAddressesRequestMessage{ +} + +message GetPeerAddressesResponseMessage{ + repeated GetPeerAddressesKnownAddressMessage addresses = 1; + repeated GetPeerAddressesKnownAddressMessage bannedAddresses = 2; + RPCError error = 1000; +} + +message GetPeerAddressesKnownAddressMessage { + string Addr = 1; +} + +// GetSelectedTipHashRequestMessage requests the hash of the current virtual's +// selected parent. 
+message GetSelectedTipHashRequestMessage{ +} + +message GetSelectedTipHashResponseMessage{ + string selectedTipHash = 1; + RPCError error = 1000; +} + +// GetMempoolEntryRequestMessage requests information about a specific transaction +// in the mempool. +message GetMempoolEntryRequestMessage{ + // The transaction's TransactionID. + string txId = 1; + bool includeOrphanPool = 2; + bool filterTransactionPool = 3; +} + +message GetMempoolEntryResponseMessage{ + MempoolEntry entry = 1; + + RPCError error = 1000; +} + +// GetMempoolEntriesRequestMessage requests information about all the transactions +// currently in the mempool. +message GetMempoolEntriesRequestMessage{ + bool includeOrphanPool = 1; + bool filterTransactionPool = 2; +} + +message GetMempoolEntriesResponseMessage{ + repeated MempoolEntry entries = 1; + + RPCError error = 1000; +} + +message MempoolEntry{ + uint64 fee = 1; + RpcTransaction transaction = 3; + bool isOrphan = 4; +} + +// GetConnectedPeerInfoRequestMessage requests information about all the p2p peers +// currently connected to this spectred. +message GetConnectedPeerInfoRequestMessage{ +} + +message GetConnectedPeerInfoResponseMessage{ + repeated GetConnectedPeerInfoMessage infos = 1; + RPCError error = 1000; +} + +message GetConnectedPeerInfoMessage{ + string id = 1; + string address = 2; + + // How long did the last ping/pong exchange take + int64 lastPingDuration = 3; + + // Whether this spectred initiated the connection + bool isOutbound = 6; + int64 timeOffset = 7; + string userAgent = 8; + + // The protocol version that this peer claims to support + uint32 advertisedProtocolVersion = 9; + + // The timestamp of when this peer connected to this spectred + int64 timeConnected = 10; + + // Whether this peer is the IBD peer (if IBD is running) + bool isIbdPeer = 11; +} + +// AddPeerRequestMessage adds a peer to spectred's outgoing connection list. +// This will, in most cases, result in spectred connecting to said peer. 
+message AddPeerRequestMessage{ + string address = 1; + + // Whether to keep attempting to connect to this peer after disconnection + bool isPermanent = 2; +} + +message AddPeerResponseMessage{ + RPCError error = 1000; +} + +// SubmitTransactionRequestMessage submits a transaction to the mempool +message SubmitTransactionRequestMessage{ + RpcTransaction transaction = 1; + bool allowOrphan = 2; +} + +message SubmitTransactionResponseMessage{ + // The transaction ID of the submitted transaction + string transactionId = 1; + + RPCError error = 1000; +} + +// NotifyVirtualSelectedParentChainChangedRequestMessage registers this connection for virtualSelectedParentChainChanged notifications. +// +// See: VirtualSelectedParentChainChangedNotificationMessage +message NotifyVirtualSelectedParentChainChangedRequestMessage{ + bool includeAcceptedTransactionIds = 1; +} + +message NotifyVirtualSelectedParentChainChangedResponseMessage{ + RPCError error = 1000; +} + +// VirtualSelectedParentChainChangedNotificationMessage is sent whenever the DAG's selected parent +// chain had changed. +// +// See: NotifyVirtualSelectedParentChainChangedRequestMessage +message VirtualSelectedParentChainChangedNotificationMessage{ + // The chain blocks that were removed, in high-to-low order + repeated string removedChainBlockHashes = 1; + + // The chain blocks that were added, in low-to-high order + repeated string addedChainBlockHashes = 3; + + // Will be filled only if `includeAcceptedTransactionIds = true` in the notify request. 
+ repeated AcceptedTransactionIds acceptedTransactionIds = 2; +} + +// GetBlockRequestMessage requests information about a specific block +message GetBlockRequestMessage{ + // The hash of the requested block + string hash = 1; + + // Whether to include transaction data in the response + bool includeTransactions = 3; +} + +message GetBlockResponseMessage{ + RpcBlock block = 3; + RPCError error = 1000; +} + +// GetSubnetworkRequestMessage requests information about a specific subnetwork +// +// Currently unimplemented +message GetSubnetworkRequestMessage{ + string subnetworkId = 1; +} + +message GetSubnetworkResponseMessage{ + uint64 gasLimit = 1; + RPCError error = 1000; +} + +// GetVirtualSelectedParentChainFromBlockRequestMessage requests the virtual selected +// parent chain from some startHash to this spectred's current virtual +message GetVirtualSelectedParentChainFromBlockRequestMessage{ + string startHash = 1; + bool includeAcceptedTransactionIds = 2; +} + +message AcceptedTransactionIds{ + string acceptingBlockHash = 1; + repeated string acceptedTransactionIds = 2; +} + +message GetVirtualSelectedParentChainFromBlockResponseMessage{ + // The chain blocks that were removed, in high-to-low order + repeated string removedChainBlockHashes = 1; + + // The chain blocks that were added, in low-to-high order + repeated string addedChainBlockHashes = 3; + + // The transactions accepted by each block in addedChainBlockHashes. + // Will be filled only if `includeAcceptedTransactionIds = true` in the request. + repeated AcceptedTransactionIds acceptedTransactionIds = 2; + + RPCError error = 1000; +} + +// GetBlocksRequestMessage requests blocks between a certain block lowHash up to this +// spectred's current virtual. 
+message GetBlocksRequestMessage{ + string lowHash = 1; + bool includeBlocks = 2; + bool includeTransactions = 3; +} + +message GetBlocksResponseMessage{ + repeated string blockHashes = 4; + repeated RpcBlock blocks = 3; + RPCError error = 1000; +} + +// GetBlockCountRequestMessage requests the current number of blocks in this spectred. +// Note that this number may decrease as pruning occurs. +message GetBlockCountRequestMessage{ +} + +message GetBlockCountResponseMessage{ + uint64 blockCount = 1; + uint64 headerCount = 2; + RPCError error = 1000; +} + +// GetBlockDagInfoRequestMessage requests general information about the current state +// of this spectred's DAG. +message GetBlockDagInfoRequestMessage{ +} + +message GetBlockDagInfoResponseMessage{ + string networkName = 1; + uint64 blockCount = 2; + uint64 headerCount = 3; + repeated string tipHashes = 4; + double difficulty = 5; + int64 pastMedianTime = 6; + repeated string virtualParentHashes = 7; + string pruningPointHash = 8; + uint64 virtualDaaScore = 9; + RPCError error = 1000; +} + +message ResolveFinalityConflictRequestMessage{ + string finalityBlockHash = 1; +} + +message ResolveFinalityConflictResponseMessage{ + RPCError error = 1000; +} + +message NotifyFinalityConflictsRequestMessage{ +} + +message NotifyFinalityConflictsResponseMessage{ + RPCError error = 1000; +} + +message FinalityConflictNotificationMessage{ + string violatingBlockHash = 1; +} + +message FinalityConflictResolvedNotificationMessage{ + string finalityBlockHash = 1; +} + +// ShutDownRequestMessage shuts down this spectred. +message ShutDownRequestMessage{ +} + +message ShutDownResponseMessage{ + RPCError error = 1000; +} + +// GetHeadersRequestMessage requests headers between the given startHash and the +// current virtual, up to the given limit. 
+message GetHeadersRequestMessage{ + string startHash = 1; + uint64 limit = 2; + bool isAscending = 3; +} + +message GetHeadersResponseMessage{ + repeated string headers = 1; + RPCError error = 1000; +} + +// NotifyUtxosChangedRequestMessage registers this connection for utxoChanged notifications +// for the given addresses. +// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: UtxosChangedNotificationMessage +message NotifyUtxosChangedRequestMessage { + repeated string addresses = 1; // Leave empty to get all updates +} + +message NotifyUtxosChangedResponseMessage { + RPCError error = 1000; +} + +// UtxosChangedNotificationMessage is sent whenever the UTXO index had been updated. +// +// See: NotifyUtxosChangedRequestMessage +message UtxosChangedNotificationMessage { + repeated UtxosByAddressesEntry added = 1; + repeated UtxosByAddressesEntry removed = 2; +} + +message UtxosByAddressesEntry { + string address = 1; + RpcOutpoint outpoint = 2; + RpcUtxoEntry utxoEntry = 3; +} + +// StopNotifyingUtxosChangedRequestMessage unregisters this connection for utxoChanged notifications +// for the given addresses. 
+// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: UtxosChangedNotificationMessage +message StopNotifyingUtxosChangedRequestMessage { + repeated string addresses = 1; +} + +message StopNotifyingUtxosChangedResponseMessage { + RPCError error = 1000; +} + +// GetUtxosByAddressesRequestMessage requests all current UTXOs for the given spectred addresses +// +// This call is only available when this spectred was started with `--utxoindex` +message GetUtxosByAddressesRequestMessage { + repeated string addresses = 1; +} + +message GetUtxosByAddressesResponseMessage { + repeated UtxosByAddressesEntry entries = 1; + + RPCError error = 1000; +} + +// GetBalanceByAddressRequest returns the total balance in unspent transactions towards a given address +// +// This call is only available when this spectred was started with `--utxoindex` +message GetBalanceByAddressRequestMessage { + string address = 1; +} + +message GetBalanceByAddressResponseMessage { + uint64 balance = 1; + + RPCError error = 1000; +} + +message GetBalancesByAddressesRequestMessage { + repeated string addresses = 1; +} + +message BalancesByAddressEntry{ + string address = 1; + uint64 balance = 2; + + RPCError error = 1000; +} + +message GetBalancesByAddressesResponseMessage { + repeated BalancesByAddressEntry entries = 1; + + RPCError error = 1000; +} + +// GetVirtualSelectedParentBlueScoreRequestMessage requests the blue score of the current selected parent +// of the virtual block. +message GetVirtualSelectedParentBlueScoreRequestMessage { +} + +message GetVirtualSelectedParentBlueScoreResponseMessage { + uint64 blueScore = 1; + + RPCError error = 1000; +} + +// NotifyVirtualSelectedParentBlueScoreChangedRequestMessage registers this connection for +// virtualSelectedParentBlueScoreChanged notifications. 
+// +// See: VirtualSelectedParentBlueScoreChangedNotificationMessage +message NotifyVirtualSelectedParentBlueScoreChangedRequestMessage { +} + +message NotifyVirtualSelectedParentBlueScoreChangedResponseMessage { + RPCError error = 1000; +} + +// VirtualSelectedParentBlueScoreChangedNotificationMessage is sent whenever the blue score +// of the virtual's selected parent changes. +// +// See NotifyVirtualSelectedParentBlueScoreChangedRequestMessage +message VirtualSelectedParentBlueScoreChangedNotificationMessage { + uint64 virtualSelectedParentBlueScore = 1; +} + +// NotifyVirtualDaaScoreChangedRequestMessage registers this connection for +// virtualDaaScoreChanged notifications. +// +// See: VirtualDaaScoreChangedNotificationMessage +message NotifyVirtualDaaScoreChangedRequestMessage { +} + +message NotifyVirtualDaaScoreChangedResponseMessage { + RPCError error = 1000; +} + +// VirtualDaaScoreChangedNotificationMessage is sent whenever the DAA score +// of the virtual changes. +// +// See NotifyVirtualDaaScoreChangedRequestMessage +message VirtualDaaScoreChangedNotificationMessage { + uint64 virtualDaaScore = 1; +} + +// NotifyPruningPointUTXOSetOverrideRequestMessage registers this connection for +// pruning point UTXO set override notifications. +// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: NotifyPruningPointUTXOSetOverrideResponseMessage +message NotifyPruningPointUTXOSetOverrideRequestMessage { +} + + +message NotifyPruningPointUTXOSetOverrideResponseMessage { + RPCError error = 1000; +} + +// PruningPointUTXOSetOverrideNotificationMessage is sent whenever the UTXO index +// resets due to pruning point change via IBD. +// +// See NotifyPruningPointUTXOSetOverrideRequestMessage +message PruningPointUTXOSetOverrideNotificationMessage { +} + +// StopNotifyingPruningPointUTXOSetOverrideRequestMessage unregisters this connection for +// pruning point UTXO set override notifications. 
+// +// This call is only available when this spectred was started with `--utxoindex` +// +// See: PruningPointUTXOSetOverrideNotificationMessage +message StopNotifyingPruningPointUTXOSetOverrideRequestMessage { +} + +message StopNotifyingPruningPointUTXOSetOverrideResponseMessage { + RPCError error = 1000; +} + +// BanRequestMessage bans the given ip. +message BanRequestMessage{ + string ip = 1; +} + +message BanResponseMessage{ + RPCError error = 1000; +} + +// UnbanRequestMessage unbans the given ip. +message UnbanRequestMessage{ + string ip = 1; +} + +message UnbanResponseMessage{ + RPCError error = 1000; +} + +// GetInfoRequestMessage returns info about the node. +message GetInfoRequestMessage{ +} + +message GetInfoResponseMessage{ + string p2pId = 1; + uint64 mempoolSize = 2; + string serverVersion = 3; + bool isUtxoIndexed = 4; + bool isSynced = 5; + RPCError error = 1000; +} + +message EstimateNetworkHashesPerSecondRequestMessage{ + uint32 windowSize = 1; + string startHash = 2; +} + +message EstimateNetworkHashesPerSecondResponseMessage{ + uint64 networkHashesPerSecond = 1; + RPCError error = 1000; +} + +// NotifyNewBlockTemplateRequestMessage registers this connection for +// NewBlockTemplate notifications. +// +// See: NewBlockTemplateNotificationMessage +message NotifyNewBlockTemplateRequestMessage { +} + +message NotifyNewBlockTemplateResponseMessage { + RPCError error = 1000; +} + +// NewBlockTemplateNotificationMessage is sent whenever a new updated block template is +// available for miners. 
+// +// See NotifyNewBlockTemplateRequestMessage +message NewBlockTemplateNotificationMessage { +} + +message MempoolEntryByAddress{ + string address = 1; + repeated MempoolEntry sending = 2; + repeated MempoolEntry receiving = 3; +} + +message GetMempoolEntriesByAddressesRequestMessage{ + repeated string addresses = 1; + bool includeOrphanPool = 2; + bool filterTransactionPool = 3; +} + +message GetMempoolEntriesByAddressesResponseMessage{ + repeated MempoolEntryByAddress entries = 1; + + RPCError error = 1000; +} + +message GetCoinSupplyRequestMessage{ +} + +message GetCoinSupplyResponseMessage{ + uint64 maxSompi = 1; // note: this is a hard coded maxSupply, actual maxSupply is expected to deviate by upto -5%, but cannot be measured exactly. + uint64 circulatingSompi = 2; + + RPCError error = 1000; +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_add_peer.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_add_peer.go new file mode 100644 index 0000000..cd27d2a --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_add_peer.go @@ -0,0 +1,63 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_AddPeerRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_AddPeerRequest is nil") + } + return x.AddPeerRequest.toAppMessage() +} + +func (x *SpectredMessage_AddPeerRequest) fromAppMessage(message *appmessage.AddPeerRequestMessage) error { + x.AddPeerRequest = &AddPeerRequestMessage{ + Address: message.Address, + IsPermanent: message.IsPermanent, + } + return nil +} + +func (x *AddPeerRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "AddPeerRequestMessage is nil") + } + return &appmessage.AddPeerRequestMessage{ + Address: x.Address, + IsPermanent: x.IsPermanent, + }, 
nil +} + +func (x *SpectredMessage_AddPeerResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_AddPeerResponse is nil") + } + return x.AddPeerResponse.toAppMessage() +} + +func (x *SpectredMessage_AddPeerResponse) fromAppMessage(message *appmessage.AddPeerResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.AddPeerResponse = &AddPeerResponseMessage{ + Error: err, + } + return nil +} + +func (x *AddPeerResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "AddPeerResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.AddPeerResponseMessage{ + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_ban.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_ban.go new file mode 100644 index 0000000..89663e9 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_ban.go @@ -0,0 +1,59 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_BanRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_BanRequest is nil") + } + return x.BanRequest.toAppMessage() +} + +func (x *BanRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "BanRequestMessage is nil") + } + return &appmessage.BanRequestMessage{ + IP: x.Ip, + }, nil +} + +func (x *SpectredMessage_BanRequest) fromAppMessage(message *appmessage.BanRequestMessage) error { + x.BanRequest = &BanRequestMessage{Ip: message.IP} + return nil +} + +func (x 
*SpectredMessage_BanResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_BanResponse is nil") + } + return x.BanResponse.toAppMessage() +} + +func (x *BanResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "BanResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.BanResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_BanResponse) fromAppMessage(message *appmessage.BanResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.BanResponse = &BanResponseMessage{ + Error: err, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_error.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_error.go new file mode 100644 index 0000000..33f6c8c --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_error.go @@ -0,0 +1,13 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *RPCError) toAppMessage() (*appmessage.RPCError, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RPCError is nil") + } + return &appmessage.RPCError{Message: x.Message}, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_estimate_network_hashes_per_second.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_estimate_network_hashes_per_second.go new file mode 100644 index 0000000..55746b9 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_estimate_network_hashes_per_second.go @@ -0,0 +1,70 @@ +package protowire + +import ( + "github.com/pkg/errors" + 
"github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_EstimateNetworkHashesPerSecondRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_EstimateNetworkHashesPerSecondRequest is nil") + } + return x.EstimateNetworkHashesPerSecondRequest.toAppMessage() +} + +func (x *SpectredMessage_EstimateNetworkHashesPerSecondRequest) fromAppMessage(message *appmessage.EstimateNetworkHashesPerSecondRequestMessage) error { + x.EstimateNetworkHashesPerSecondRequest = &EstimateNetworkHashesPerSecondRequestMessage{ + WindowSize: message.WindowSize, + StartHash: message.StartHash, + } + return nil +} + +func (x *EstimateNetworkHashesPerSecondRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "EstimateNetworkHashesPerSecondRequestMessage is nil") + } + return &appmessage.EstimateNetworkHashesPerSecondRequestMessage{ + WindowSize: x.WindowSize, + StartHash: x.StartHash, + }, nil +} + +func (x *SpectredMessage_EstimateNetworkHashesPerSecondResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_EstimateNetworkHashesPerSecondResponse is nil") + } + return x.EstimateNetworkHashesPerSecondResponse.toAppMessage() +} + +func (x *SpectredMessage_EstimateNetworkHashesPerSecondResponse) fromAppMessage(message *appmessage.EstimateNetworkHashesPerSecondResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.EstimateNetworkHashesPerSecondResponse = &EstimateNetworkHashesPerSecondResponseMessage{ + NetworkHashesPerSecond: message.NetworkHashesPerSecond, + Error: err, + } + return nil +} + +func (x *EstimateNetworkHashesPerSecondResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "EstimateNetworkHashesPerSecondResponseMessage is nil") + 
} + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && x.NetworkHashesPerSecond != 0 { + return nil, errors.New("EstimateNetworkHashesPerSecondResponseMessage contains both an error and a response") + } + + return &appmessage.EstimateNetworkHashesPerSecondResponseMessage{ + NetworkHashesPerSecond: x.NetworkHashesPerSecond, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_balance_by_address.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_balance_by_address.go new file mode 100644 index 0000000..22211b9 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_balance_by_address.go @@ -0,0 +1,69 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBalanceByAddressRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBalanceByAddressRequest is nil") + } + return x.GetBalanceByAddressRequest.toAppMessage() +} + +func (x *SpectredMessage_GetBalanceByAddressRequest) fromAppMessage(message *appmessage.GetBalanceByAddressRequestMessage) error { + x.GetBalanceByAddressRequest = &GetBalanceByAddressRequestMessage{ + Address: message.Address, + } + return nil +} + +func (x *GetBalanceByAddressRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBalanceByAddressRequest is nil") + } + return &appmessage.GetBalanceByAddressRequestMessage{ + Address: x.Address, + }, nil +} + +func (x *SpectredMessage_GetBalanceByAddressResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBalanceByAddressResponse is nil") + } + return x.GetBalanceByAddressResponse.toAppMessage() 
+} + +func (x *SpectredMessage_GetBalanceByAddressResponse) fromAppMessage(message *appmessage.GetBalanceByAddressResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetBalanceByAddressResponse = &GetBalanceByAddressResponseMessage{ + Balance: message.Balance, + + Error: err, + } + return nil +} + +func (x *GetBalanceByAddressResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBalanceByAddressResponse is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && x.Balance != 1 { + return nil, errors.New("GetBalanceByAddressResponse contains both an error and a response") + } + + return &appmessage.GetBalanceByAddressResponseMessage{ + Balance: x.Balance, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_balances_by_addresses.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_balances_by_addresses.go new file mode 100644 index 0000000..9377219 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_balances_by_addresses.go @@ -0,0 +1,99 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBalancesByAddressesRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBalanceByAddressRequest is nil") + } + return x.GetBalancesByAddressesRequest.toAppMessage() +} + +func (x *SpectredMessage_GetBalancesByAddressesRequest) fromAppMessage(message *appmessage.GetBalancesByAddressesRequestMessage) error { + x.GetBalancesByAddressesRequest = &GetBalancesByAddressesRequestMessage{ + Addresses: message.Addresses, + } + return nil +} + +func 
(x *GetBalancesByAddressesRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBalanceByAddressRequest is nil") + } + return &appmessage.GetBalancesByAddressesRequestMessage{ + Addresses: x.Addresses, + }, nil +} + +func (x *SpectredMessage_GetBalancesByAddressesResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBalanceByAddressResponse is nil") + } + return x.GetBalancesByAddressesResponse.toAppMessage() +} + +func (x *SpectredMessage_GetBalancesByAddressesResponse) fromAppMessage(message *appmessage.GetBalancesByAddressesResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + entries := make([]*BalancesByAddressEntry, len(message.Entries)) + for i, entry := range message.Entries { + entries[i] = &BalancesByAddressEntry{} + entries[i].fromAppMessage(entry) + } + x.GetBalancesByAddressesResponse = &GetBalancesByAddressesResponseMessage{ + Entries: entries, + Error: err, + } + return nil +} + +func (x *GetBalancesByAddressesResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBalancesByAddressesResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.Entries) != 0 { + return nil, errors.New("GetBalancesByAddressesResponseMessage contains both an error and a response") + } + + entries := make([]*appmessage.BalancesByAddressesEntry, len(x.Entries)) + for i, entry := range x.Entries { + entryAsAppMessage, err := entry.toAppMessage() + if err != nil { + return nil, err + } + entries[i] = entryAsAppMessage + } + + return &appmessage.GetBalancesByAddressesResponseMessage{ + Entries: entries, + Error: rpcErr, + }, nil +} + +func (x *BalancesByAddressEntry) toAppMessage() 
(*appmessage.BalancesByAddressesEntry, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "BalancesByAddressesEntry is nil") + } + return &appmessage.BalancesByAddressesEntry{ + Address: x.Address, + Balance: x.Balance, + }, nil +} + +func (x *BalancesByAddressEntry) fromAppMessage(message *appmessage.BalancesByAddressesEntry) { + *x = BalancesByAddressEntry{ + Address: message.Address, + Balance: message.Balance, + } +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block.go new file mode 100644 index 0000000..41949a4 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block.go @@ -0,0 +1,85 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBlockRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockRequest is nil") + } + return x.GetBlockRequest.toAppMessage() +} + +func (x *GetBlockRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlockRequestMessage is nil") + } + return &appmessage.GetBlockRequestMessage{ + Hash: x.Hash, + IncludeTransactions: x.IncludeTransactions, + }, nil +} + +func (x *SpectredMessage_GetBlockRequest) fromAppMessage(message *appmessage.GetBlockRequestMessage) error { + x.GetBlockRequest = &GetBlockRequestMessage{ + Hash: message.Hash, + IncludeTransactions: message.IncludeTransactions, + } + return nil +} + +func (x *SpectredMessage_GetBlockResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockResponse is nil") + } + return x.GetBlockResponse.toAppMessage() +} + +func (x *GetBlockResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + 
return nil, errors.Wrapf(errorNil, "GetBlockResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + var block *appmessage.RPCBlock + // Return verbose data only if there's no error + if rpcErr != nil && x.Block != nil { + return nil, errors.New("GetBlockResponseMessage contains both an error and a response") + } + if rpcErr == nil { + block, err = x.Block.toAppMessage() + if err != nil { + return nil, err + } + } + return &appmessage.GetBlockResponseMessage{ + Block: block, + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_GetBlockResponse) fromAppMessage(message *appmessage.GetBlockResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + var block *RpcBlock + if message.Block != nil { + protoBlock := &RpcBlock{} + err := protoBlock.fromAppMessage(message.Block) + if err != nil { + return err + } + block = protoBlock + } + x.GetBlockResponse = &GetBlockResponseMessage{ + Block: block, + Error: err, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_count.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_count.go new file mode 100644 index 0000000..b0416f3 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_count.go @@ -0,0 +1,57 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBlockCountRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockCountRequest is nil") + } + return &appmessage.GetBlockCountRequestMessage{}, nil +} + +func (x *SpectredMessage_GetBlockCountRequest) fromAppMessage(_ *appmessage.GetBlockCountRequestMessage) error { + x.GetBlockCountRequest = 
&GetBlockCountRequestMessage{} + return nil +} + +func (x *SpectredMessage_GetBlockCountResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockCountResponse is nil") + } + return x.GetBlockCountResponse.toAppMessage() +} + +func (x *SpectredMessage_GetBlockCountResponse) fromAppMessage(message *appmessage.GetBlockCountResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetBlockCountResponse = &GetBlockCountResponseMessage{ + BlockCount: message.BlockCount, + HeaderCount: message.HeaderCount, + Error: err, + } + return nil +} + +func (x *GetBlockCountResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlockCountResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + if rpcErr != nil && (x.BlockCount != 0 || x.HeaderCount != 0) { + return nil, errors.New("GetBlockCountResponseMessage contains both an error and a response") + } + return &appmessage.GetBlockCountResponseMessage{ + BlockCount: x.BlockCount, + HeaderCount: x.HeaderCount, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_dag_info.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_dag_info.go new file mode 100644 index 0000000..613f3a9 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_dag_info.go @@ -0,0 +1,71 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBlockDagInfoRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockDagInfoRequest is nil") + } + 
return &appmessage.GetBlockDAGInfoRequestMessage{}, nil +} + +func (x *SpectredMessage_GetBlockDagInfoRequest) fromAppMessage(_ *appmessage.GetBlockDAGInfoRequestMessage) error { + x.GetBlockDagInfoRequest = &GetBlockDagInfoRequestMessage{} + return nil +} + +func (x *SpectredMessage_GetBlockDagInfoResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockDagInfoResponse is nil") + } + return x.GetBlockDagInfoResponse.toAppMessage() +} + +func (x *GetBlockDagInfoResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlockDagInfoResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + if rpcErr != nil && (x.NetworkName != "" || x.BlockCount != 0 || x.HeaderCount != 0 || len(x.TipHashes) != 0 || len(x.VirtualParentHashes) != 0 || x.Difficulty != 0 || x.PastMedianTime != 0 || x.PruningPointHash != "") { + return nil, errors.New("GetBlockDagInfoResponseMessage contains both an error and a response") + } + return &appmessage.GetBlockDAGInfoResponseMessage{ + NetworkName: x.NetworkName, + BlockCount: x.BlockCount, + HeaderCount: x.HeaderCount, + TipHashes: x.TipHashes, + VirtualParentHashes: x.VirtualParentHashes, + Difficulty: x.Difficulty, + PastMedianTime: x.PastMedianTime, + PruningPointHash: x.PruningPointHash, + VirtualDAAScore: x.VirtualDaaScore, + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_GetBlockDagInfoResponse) fromAppMessage(message *appmessage.GetBlockDAGInfoResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetBlockDagInfoResponse = &GetBlockDagInfoResponseMessage{ + NetworkName: message.NetworkName, + BlockCount: message.BlockCount, + HeaderCount: message.HeaderCount, + TipHashes: message.TipHashes, + 
VirtualParentHashes: message.VirtualParentHashes, + Difficulty: message.Difficulty, + PastMedianTime: message.PastMedianTime, + PruningPointHash: message.PruningPointHash, + VirtualDaaScore: message.VirtualDAAScore, + Error: err, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_template.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_template.go new file mode 100644 index 0000000..bf85e0d --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_block_template.go @@ -0,0 +1,89 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBlockTemplateRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockTemplateRequest is nil") + } + return x.GetBlockTemplateRequest.toAppMessage() +} + +func (x *SpectredMessage_GetBlockTemplateRequest) fromAppMessage(message *appmessage.GetBlockTemplateRequestMessage) error { + x.GetBlockTemplateRequest = &GetBlockTemplateRequestMessage{ + PayAddress: message.PayAddress, + ExtraData: message.ExtraData, + } + return nil +} + +func (x *GetBlockTemplateRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlockTemplateRequestMessage is nil") + } + return &appmessage.GetBlockTemplateRequestMessage{ + PayAddress: x.PayAddress, + ExtraData: x.ExtraData, + }, nil +} + +func (x *SpectredMessage_GetBlockTemplateResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlockTemplateResponse is nil") + } + return x.GetBlockTemplateResponse.toAppMessage() +} + +func (x *SpectredMessage_GetBlockTemplateResponse) fromAppMessage(message *appmessage.GetBlockTemplateResponseMessage) error { + var err *RPCError + if message.Error != 
nil { + err = &RPCError{Message: message.Error.Message} + } + + var block *RpcBlock + if message.Block != nil { + protoBlock := &RpcBlock{} + err := protoBlock.fromAppMessage(message.Block) + if err != nil { + return err + } + block = protoBlock + } + + x.GetBlockTemplateResponse = &GetBlockTemplateResponseMessage{ + Block: block, + IsSynced: message.IsSynced, + Error: err, + } + return nil +} + +func (x *GetBlockTemplateResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlockTemplateResponseMessage is nil") + } + var msgBlock *appmessage.RPCBlock + if x.Block != nil { + var err error + msgBlock, err = x.Block.toAppMessage() + if err != nil { + return nil, err + } + } + var rpcError *appmessage.RPCError + if x.Error != nil { + var err error + rpcError, err = x.Error.toAppMessage() + if err != nil { + return nil, err + } + } + return &appmessage.GetBlockTemplateResponseMessage{ + Block: msgBlock, + IsSynced: x.IsSynced, + Error: rpcError, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_blocks.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_blocks.go new file mode 100644 index 0000000..955fec6 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_blocks.go @@ -0,0 +1,89 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetBlocksRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlocksRequest is nil") + } + return x.GetBlocksRequest.toAppMessage() +} + +func (x *SpectredMessage_GetBlocksRequest) fromAppMessage(message *appmessage.GetBlocksRequestMessage) error { + x.GetBlocksRequest = &GetBlocksRequestMessage{ + LowHash: message.LowHash, + IncludeBlocks: message.IncludeBlocks, + IncludeTransactions: 
message.IncludeTransactions, + } + return nil +} + +func (x *GetBlocksRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlocksRequestMessage is nil") + } + return &appmessage.GetBlocksRequestMessage{ + LowHash: x.LowHash, + IncludeBlocks: x.IncludeBlocks, + IncludeTransactions: x.IncludeTransactions, + }, nil +} + +func (x *SpectredMessage_GetBlocksResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetBlocksResponse is nil") + } + return x.GetBlocksResponse.toAppMessage() +} + +func (x *SpectredMessage_GetBlocksResponse) fromAppMessage(message *appmessage.GetBlocksResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetBlocksResponse = &GetBlocksResponseMessage{ + Error: err, + } + x.GetBlocksResponse.BlockHashes = message.BlockHashes + x.GetBlocksResponse.Blocks = make([]*RpcBlock, len(message.Blocks)) + for i, block := range message.Blocks { + protoBlock := &RpcBlock{} + err := protoBlock.fromAppMessage(block) + if err != nil { + return err + } + x.GetBlocksResponse.Blocks[i] = protoBlock + } + return nil +} + +func (x *GetBlocksResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetBlocksResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + // Return data only if there's no error + if rpcErr != nil && len(x.Blocks) != 0 { + return nil, errors.New("GetBlocksResponseMessage contains both an error and a response") + } + blocks := make([]*appmessage.RPCBlock, len(x.Blocks)) + for i, block := range x.Blocks { + appMessageBlock, err := block.toAppMessage() + if err != nil { + return nil, err + } + blocks[i] = appMessageBlock + } + return 
&appmessage.GetBlocksResponseMessage{ + BlockHashes: x.BlockHashes, + Blocks: blocks, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_connected_peer_info.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_connected_peer_info.go new file mode 100644 index 0000000..52fc3af --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_connected_peer_info.go @@ -0,0 +1,92 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetConnectedPeerInfoRequest) toAppMessage() (appmessage.Message, error) { + return &appmessage.GetConnectedPeerInfoRequestMessage{}, nil +} + +func (x *SpectredMessage_GetConnectedPeerInfoRequest) fromAppMessage(_ *appmessage.GetConnectedPeerInfoRequestMessage) error { + return nil +} + +func (x *SpectredMessage_GetConnectedPeerInfoResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetConnectedPeerInfoResponse is nil") + } + return x.GetConnectedPeerInfoResponse.toAppMessage() +} + +func (x *SpectredMessage_GetConnectedPeerInfoResponse) fromAppMessage(message *appmessage.GetConnectedPeerInfoResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + infos := make([]*GetConnectedPeerInfoMessage, len(message.Infos)) + for i, info := range message.Infos { + infos[i] = &GetConnectedPeerInfoMessage{ + Id: info.ID, + Address: info.Address, + LastPingDuration: info.LastPingDuration, + IsOutbound: info.IsOutbound, + TimeOffset: info.TimeOffset, + UserAgent: info.UserAgent, + AdvertisedProtocolVersion: info.AdvertisedProtocolVersion, + TimeConnected: info.TimeConnected, + IsIbdPeer: info.IsIBDPeer, + } + } + x.GetConnectedPeerInfoResponse = &GetConnectedPeerInfoResponseMessage{ + Infos: infos, + Error: err, + } + 
return nil +} + +func (x *GetConnectedPeerInfoResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetConnectedPeerInfoResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + // Return verbose data only if there's no error + if rpcErr != nil && len(x.Infos) != 0 { + return nil, errors.New("GetConnectedPeerInfoResponseMessage contains both an error and a response") + } + infos := make([]*appmessage.GetConnectedPeerInfoMessage, len(x.Infos)) + for i, info := range x.Infos { + appInfo, err := info.toAppMessage() + if err != nil { + return nil, err + } + infos[i] = appInfo + } + + return &appmessage.GetConnectedPeerInfoResponseMessage{ + Infos: infos, + Error: rpcErr, + }, nil +} + +func (x *GetConnectedPeerInfoMessage) toAppMessage() (*appmessage.GetConnectedPeerInfoMessage, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetConnectedPeerInfoMessage is nil") + } + return &appmessage.GetConnectedPeerInfoMessage{ + ID: x.Id, + Address: x.Address, + LastPingDuration: x.LastPingDuration, + IsOutbound: x.IsOutbound, + TimeOffset: x.TimeOffset, + UserAgent: x.UserAgent, + AdvertisedProtocolVersion: x.AdvertisedProtocolVersion, + TimeConnected: x.TimeOffset, + IsIBDPeer: x.IsIbdPeer, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_current_network.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_current_network.go new file mode 100644 index 0000000..1db2359 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_current_network.go @@ -0,0 +1,53 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetCurrentNetworkRequest) toAppMessage() (appmessage.Message, error) { + return 
&appmessage.GetCurrentNetworkRequestMessage{}, nil +} + +func (x *SpectredMessage_GetCurrentNetworkRequest) fromAppMessage(_ *appmessage.GetCurrentNetworkRequestMessage) error { + return nil +} + +func (x *SpectredMessage_GetCurrentNetworkResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetCurrentNetworkResponse is nil") + } + return x.toAppMessage() +} + +func (x *SpectredMessage_GetCurrentNetworkResponse) fromAppMessage(message *appmessage.GetCurrentNetworkResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetCurrentNetworkResponse = &GetCurrentNetworkResponseMessage{ + CurrentNetwork: message.CurrentNetwork, + Error: err, + } + return nil +} + +func (x *GetCurrentNetworkResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetCurrentNetworkResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.CurrentNetwork) != 0 { + return nil, errors.New("GetCurrentNetworkResponseMessage contains both an error and a response") + } + + return &appmessage.GetCurrentNetworkResponseMessage{ + CurrentNetwork: x.CurrentNetwork, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_headers.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_headers.go new file mode 100644 index 0000000..dc8f2e3 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_headers.go @@ -0,0 +1,72 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetHeadersRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_GetHeadersRequest is nil") + } + return x.GetHeadersRequest.toAppMessage() +} + +func (x *SpectredMessage_GetHeadersRequest) fromAppMessage(message *appmessage.GetHeadersRequestMessage) error { + x.GetHeadersRequest = &GetHeadersRequestMessage{ + StartHash: message.StartHash, + Limit: message.Limit, + IsAscending: message.IsAscending, + } + return nil +} + +func (x *GetHeadersRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetHeadersRequestMessage is nil") + } + return &appmessage.GetHeadersRequestMessage{ + StartHash: x.StartHash, + Limit: x.Limit, + IsAscending: x.IsAscending, + }, nil +} + +func (x *SpectredMessage_GetHeadersResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetHeadersResponse is nil") + } + return x.GetHeadersResponse.toAppMessage() +} + +func (x *SpectredMessage_GetHeadersResponse) fromAppMessage(message *appmessage.GetHeadersResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetHeadersResponse = &GetHeadersResponseMessage{ + Headers: message.Headers, + Error: err, + } + return nil +} + +func (x *GetHeadersResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetHeadersResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.Headers) != 0 { + return nil, errors.New("GetHeadersResponseMessage contains both an error and a response") + } + + return &appmessage.GetHeadersResponseMessage{ + Headers: x.Headers, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_info.go 
b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_info.go new file mode 100644 index 0000000..bddf338 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_info.go @@ -0,0 +1,63 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetInfoRequest) toAppMessage() (appmessage.Message, error) { + return &appmessage.GetInfoRequestMessage{}, nil +} + +func (x *SpectredMessage_GetInfoRequest) fromAppMessage(_ *appmessage.GetInfoRequestMessage) error { + x.GetInfoRequest = &GetInfoRequestMessage{} + return nil +} + +func (x *SpectredMessage_GetInfoResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetInfoResponse is nil") + } + return x.GetInfoResponse.toAppMessage() +} + +func (x *SpectredMessage_GetInfoResponse) fromAppMessage(message *appmessage.GetInfoResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetInfoResponse = &GetInfoResponseMessage{ + P2PId: message.P2PID, + ServerVersion: message.ServerVersion, + MempoolSize: message.MempoolSize, + IsUtxoIndexed: message.IsUtxoIndexed, + IsSynced: message.IsSynced, + Error: err, + } + return nil +} + +func (x *GetInfoResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetInfoResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.P2PId) != 0 { + return nil, errors.New("GetInfoResponseMessage contains both an error and a response") + } + + return &appmessage.GetInfoResponseMessage{ + P2PID: x.P2PId, + MempoolSize: x.MempoolSize, + ServerVersion: x.ServerVersion, + IsUtxoIndexed: x.IsUtxoIndexed, + IsSynced: x.IsSynced, + 
+ Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entries.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entries.go new file mode 100644 index 0000000..11aab50 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entries.go @@ -0,0 +1,85 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetMempoolEntriesRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetMempoolEntriesRequest is nil") + } + return x.GetMempoolEntriesRequest.toAppMessage() +} + +func (x *SpectredMessage_GetMempoolEntriesRequest) fromAppMessage(message *appmessage.GetMempoolEntriesRequestMessage) error { + x.GetMempoolEntriesRequest = &GetMempoolEntriesRequestMessage{ + IncludeOrphanPool: message.IncludeOrphanPool, + FilterTransactionPool: message.FilterTransactionPool, + } + return nil +} + +func (x *GetMempoolEntriesRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetMempoolEntryRequestMessage is nil") + } + return &appmessage.GetMempoolEntriesRequestMessage{ + IncludeOrphanPool: x.IncludeOrphanPool, + FilterTransactionPool: x.FilterTransactionPool, + }, nil +} + +func (x *SpectredMessage_GetMempoolEntriesResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetMempoolEntriesResponse is nil") + } + return x.GetMempoolEntriesResponse.toAppMessage() +} + +func (x *SpectredMessage_GetMempoolEntriesResponse) fromAppMessage(message *appmessage.GetMempoolEntriesResponseMessage) error { + var rpcErr *RPCError + if message.Error != nil { + rpcErr = &RPCError{Message: message.Error.Message} + } + entries := make([]*MempoolEntry, len(message.Entries)) + for 
i, entry := range message.Entries { + entries[i] = new(MempoolEntry) + err := entries[i].fromAppMessage(entry) + if err != nil { + return err + } + } + x.GetMempoolEntriesResponse = &GetMempoolEntriesResponseMessage{ + Entries: entries, + Error: rpcErr, + } + return nil +} + +func (x *GetMempoolEntriesResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetMempoolEntriesResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.Entries) != 0 { + return nil, errors.New("GetMempoolEntriesResponseMessage contains both an error and a response") + } + entries := make([]*appmessage.MempoolEntry, len(x.Entries)) + for i, entry := range x.Entries { + entries[i], err = entry.toAppMessage() + if err != nil { + return nil, err + } + } + + return &appmessage.GetMempoolEntriesResponseMessage{ + Entries: entries, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entries_by_addresses.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entries_by_addresses.go new file mode 100644 index 0000000..7b2d2bc --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entries_by_addresses.go @@ -0,0 +1,141 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetMempoolEntriesByAddressesRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetMempoolEntriesByAddressesRequest is nil") + } + return x.GetMempoolEntriesByAddressesRequest.toAppMessage() +} + +func (x *SpectredMessage_GetMempoolEntriesByAddressesRequest) fromAppMessage(message 
*appmessage.GetMempoolEntriesByAddressesRequestMessage) error { + x.GetMempoolEntriesByAddressesRequest = &GetMempoolEntriesByAddressesRequestMessage{ + Addresses: message.Addresses, + IncludeOrphanPool: message.IncludeOrphanPool, + FilterTransactionPool: message.FilterTransactionPool, + } + return nil +} + +func (x *GetMempoolEntriesByAddressesRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetMempoolEntriesByAddressesRequestMessage is nil") + } + return &appmessage.GetMempoolEntriesByAddressesRequestMessage{ + Addresses: x.Addresses, + IncludeOrphanPool: x.IncludeOrphanPool, + FilterTransactionPool: x.FilterTransactionPool, + }, nil +} + +func (x *SpectredMessage_GetMempoolEntriesByAddressesResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetMempoolEntriesByAddressesResponse is nil") + } + return x.GetMempoolEntriesByAddressesResponse.toAppMessage() +} + +func (x *SpectredMessage_GetMempoolEntriesByAddressesResponse) fromAppMessage(message *appmessage.GetMempoolEntriesByAddressesResponseMessage) error { + var rpcErr *RPCError + if message.Error != nil { + rpcErr = &RPCError{Message: message.Error.Message} + } + entries := make([]*MempoolEntryByAddress, len(message.Entries)) + for i, entry := range message.Entries { + entries[i] = &MempoolEntryByAddress{} + if err := entries[i].fromAppMessage(entry); err != nil { return err } + } + x.GetMempoolEntriesByAddressesResponse = &GetMempoolEntriesByAddressesResponseMessage{ + Entries: entries, + Error: rpcErr, + } + return nil +} + +func (x *GetMempoolEntriesByAddressesResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetMempoolEntriesByAddressesResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.Entries) != 0 { + 
return nil, errors.New("GetMempoolEntriesByAddressesResponseMessage contains both an error and a response") + } + entries := make([]*appmessage.MempoolEntryByAddress, len(x.Entries)) + for i, entry := range x.Entries { + entries[i], err = entry.toAppMessage() + if err != nil { + return nil, err + } + } + + return &appmessage.GetMempoolEntriesByAddressesResponseMessage{ + Entries: entries, + Error: rpcErr, + }, nil +} + +func (x *MempoolEntryByAddress) toAppMessage() (*appmessage.MempoolEntryByAddress, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "MempoolEntryByAddress is nil") + } + + var err error + + sending := make([]*appmessage.MempoolEntry, len(x.Sending)) + for i, mempoolEntry := range x.Sending { + sending[i], err = mempoolEntry.toAppMessage() + if err != nil { + return nil, err + } + } + + receiving := make([]*appmessage.MempoolEntry, len(x.Receiving)) + for i, mempoolEntry := range x.Receiving { + receiving[i], err = mempoolEntry.toAppMessage() + if err != nil { + return nil, err + } + } + + return &appmessage.MempoolEntryByAddress{ + Address: x.Address, + Sending: sending, + Receiving: receiving, + }, nil +} + +func (x *MempoolEntryByAddress) fromAppMessage(message *appmessage.MempoolEntryByAddress) error { + + sending := make([]*MempoolEntry, len(message.Sending)) + for i, mempoolEntry := range message.Sending { + sending[i] = &MempoolEntry{} + err := sending[i].fromAppMessage(mempoolEntry) + if err != nil { + return err + } + } + receiving := make([]*MempoolEntry, len(message.Receiving)) + for i, mempoolEntry := range message.Receiving { + receiving[i] = &MempoolEntry{} + err := receiving[i].fromAppMessage(mempoolEntry) + if err != nil { + return err + } + } + + *x = MempoolEntryByAddress{ + Address: message.Address, + Sending: sending, + Receiving: receiving, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entry.go 
b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entry.go new file mode 100644 index 0000000..0335a10 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_mempool_entry.go @@ -0,0 +1,114 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetMempoolEntryRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetMempoolEntryRequest is nil") + } + return x.GetMempoolEntryRequest.toAppMessage() +} + +func (x *SpectredMessage_GetMempoolEntryRequest) fromAppMessage(message *appmessage.GetMempoolEntryRequestMessage) error { + x.GetMempoolEntryRequest = &GetMempoolEntryRequestMessage{ + TxId: message.TxID, + IncludeOrphanPool: message.IncludeOrphanPool, + FilterTransactionPool: message.FilterTransactionPool, + } + return nil +} + +func (x *GetMempoolEntryRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetMempoolEntryRequestMessage is nil") + } + return &appmessage.GetMempoolEntryRequestMessage{ + TxID: x.TxId, + IncludeOrphanPool: x.IncludeOrphanPool, + FilterTransactionPool: x.FilterTransactionPool, + }, nil +} + +func (x *SpectredMessage_GetMempoolEntryResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetMempoolEntryResponse is nil") + } + return x.GetMempoolEntryResponse.toAppMessage() +} + +func (x *SpectredMessage_GetMempoolEntryResponse) fromAppMessage(message *appmessage.GetMempoolEntryResponseMessage) error { + var rpcErr *RPCError + if message.Error != nil { + rpcErr = &RPCError{Message: message.Error.Message} + } + var entry *MempoolEntry + if message.Entry != nil { + entry = new(MempoolEntry) + err := entry.fromAppMessage(message.Entry) + if err != nil { + return err + } + } + 
x.GetMempoolEntryResponse = &GetMempoolEntryResponseMessage{ + Entry: entry, + Error: rpcErr, + } + return nil +} + +func (x *GetMempoolEntryResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetMempoolEntryResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + entry, err := x.Entry.toAppMessage() + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && entry != nil { + return nil, errors.New("GetMempoolEntryResponseMessage contains both an error and a response") + } + + return &appmessage.GetMempoolEntryResponseMessage{ + Entry: entry, + Error: rpcErr, + }, nil +} + +func (x *MempoolEntry) toAppMessage() (*appmessage.MempoolEntry, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "MempoolEntry is nil") + } + transaction, err := x.Transaction.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.MempoolEntry{ + Fee: x.Fee, + Transaction: transaction, + IsOrphan: x.IsOrphan, + }, nil +} + +func (x *MempoolEntry) fromAppMessage(message *appmessage.MempoolEntry) error { + var transaction *RpcTransaction + if message.Transaction != nil { + transaction = new(RpcTransaction) + transaction.fromAppMessage(message.Transaction) + } + *x = MempoolEntry{ + Fee: message.Fee, + Transaction: transaction, + IsOrphan: message.IsOrphan, + } + return nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_peer_addresses.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_peer_addresses.go new file mode 100644 index 0000000..0c33ff4 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_peer_addresses.go @@ -0,0 +1,89 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x 
*SpectredMessage_GetPeerAddressesRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetPeerAddressesRequest is nil") + } + return &appmessage.GetPeerAddressesRequestMessage{}, nil +} + +func (x *SpectredMessage_GetPeerAddressesRequest) fromAppMessage(_ *appmessage.GetPeerAddressesRequestMessage) error { + return nil +} + +func (x *SpectredMessage_GetPeerAddressesResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetPeerAddressesResponse is nil") + } + return x.GetPeerAddressesResponse.toAppMessage() +} + +func (x *SpectredMessage_GetPeerAddressesResponse) fromAppMessage(message *appmessage.GetPeerAddressesResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + addresses := make([]*GetPeerAddressesKnownAddressMessage, len(message.Addresses)) + for i, address := range message.Addresses { + addresses[i] = &GetPeerAddressesKnownAddressMessage{Addr: address.Addr} + } + bannedAddresses := make([]*GetPeerAddressesKnownAddressMessage, len(message.BannedAddresses)) + for i, address := range message.BannedAddresses { + bannedAddresses[i] = &GetPeerAddressesKnownAddressMessage{Addr: address.Addr} + } + x.GetPeerAddressesResponse = &GetPeerAddressesResponseMessage{ + Addresses: addresses, + BannedAddresses: bannedAddresses, + Error: err, + } + return nil +} + +func (x *GetPeerAddressesResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetPeerAddressesResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + addresses := make([]*appmessage.GetPeerAddressesKnownAddressMessage, len(x.Addresses)) + for i, address := range x.Addresses { + appAddress, err := address.toAppMessage() + if err 
!= nil { + return nil, err + } + addresses[i] = appAddress + } + bannedAddresses := make([]*appmessage.GetPeerAddressesKnownAddressMessage, len(x.BannedAddresses)) + for i, address := range x.BannedAddresses { + bannedAddress, err := address.toAppMessage() + if err != nil { + return nil, err + } + bannedAddresses[i] = bannedAddress + } + + if rpcErr != nil && (len(addresses) != 0 || len(bannedAddresses) != 0) { + return nil, errors.New("GetPeerAddressesResponseMessage contains both an error and a response") + } + return &appmessage.GetPeerAddressesResponseMessage{ + Addresses: addresses, + BannedAddresses: bannedAddresses, + Error: rpcErr, + }, nil +} + +func (x *GetPeerAddressesKnownAddressMessage) toAppMessage() (*appmessage.GetPeerAddressesKnownAddressMessage, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetPeerAddressesKnownAddressMessage is nil") + } + return &appmessage.GetPeerAddressesKnownAddressMessage{Addr: x.Addr}, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_selected_tip_hash.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_selected_tip_hash.go new file mode 100644 index 0000000..2222c65 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_selected_tip_hash.go @@ -0,0 +1,53 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetSelectedTipHashRequest) toAppMessage() (appmessage.Message, error) { + return &appmessage.GetSelectedTipHashRequestMessage{}, nil +} + +func (x *SpectredMessage_GetSelectedTipHashRequest) fromAppMessage(_ *appmessage.GetSelectedTipHashRequestMessage) error { + return nil +} + +func (x *SpectredMessage_GetSelectedTipHashResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetSelectedTipHashResponse is nil") + } + return 
x.GetSelectedTipHashResponse.toAppMessage() +} + +func (x *SpectredMessage_GetSelectedTipHashResponse) fromAppMessage(message *appmessage.GetSelectedTipHashResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetSelectedTipHashResponse = &GetSelectedTipHashResponseMessage{ + SelectedTipHash: message.SelectedTipHash, + Error: err, + } + return nil +} + +func (x *GetSelectedTipHashResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetSelectedTipHashResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.SelectedTipHash) != 0 { + return nil, errors.New("GetSelectedTipHashResponseMessage contains both an error and a response") + } + + return &appmessage.GetSelectedTipHashResponseMessage{ + SelectedTipHash: x.SelectedTipHash, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_subnetwork.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_subnetwork.go new file mode 100644 index 0000000..d2f78cb --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_subnetwork.go @@ -0,0 +1,68 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetSubnetworkRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetSubnetworkRequest is nil") + } + return x.GetSubnetworkRequest.toAppMessage() +} + +func (x *SpectredMessage_GetSubnetworkRequest) fromAppMessage(message *appmessage.GetSubnetworkRequestMessage) error { + x.GetSubnetworkRequest = &GetSubnetworkRequestMessage{ + SubnetworkId: message.SubnetworkID, + } + return nil +} + 
+func (x *GetSubnetworkRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetSubnetworkRequestMessage is nil") + } + return &appmessage.GetSubnetworkRequestMessage{ + SubnetworkID: x.SubnetworkId, + }, nil +} + +func (x *SpectredMessage_GetSubnetworkResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetSubnetworkResponse is nil") + } + return x.GetSubnetworkResponse.toAppMessage() +} + +func (x *SpectredMessage_GetSubnetworkResponse) fromAppMessage(message *appmessage.GetSubnetworkResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetSubnetworkResponse = &GetSubnetworkResponseMessage{ + GasLimit: message.GasLimit, + Error: err, + } + return nil +} + +func (x *GetSubnetworkResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetSubnetworkResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && x.GasLimit != 0 { + return nil, errors.New("GetSubnetworkResponseMessage contains both an error and a response") + } + + return &appmessage.GetSubnetworkResponseMessage{ + GasLimit: x.GasLimit, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_supply.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_supply.go new file mode 100644 index 0000000..2900d89 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_supply.go @@ -0,0 +1,54 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetCoinSupplyRequest) toAppMessage() (appmessage.Message, error) { + return 
&appmessage.GetCoinSupplyRequestMessage{}, nil +} + +func (x *SpectredMessage_GetCoinSupplyRequest) fromAppMessage(_ *appmessage.GetCoinSupplyRequestMessage) error { + x.GetCoinSupplyRequest = &GetCoinSupplyRequestMessage{} + return nil +} + +func (x *SpectredMessage_GetCoinSupplyResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetCoinSupplyResponse is nil") + } + return x.GetCoinSupplyResponse.toAppMessage() +} + +func (x *SpectredMessage_GetCoinSupplyResponse) fromAppMessage(message *appmessage.GetCoinSupplyResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetCoinSupplyResponse = &GetCoinSupplyResponseMessage{ + MaxSompi: message.MaxSompi, + CirculatingSompi: message.CirculatingSompi, + + Error: err, + } + return nil +} + +func (x *GetCoinSupplyResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetCoinSupplyResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + return &appmessage.GetCoinSupplyResponseMessage{ + MaxSompi: x.MaxSompi, + CirculatingSompi: x.CirculatingSompi, + + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_utxos_by_addresses.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_utxos_by_addresses.go new file mode 100644 index 0000000..6494f0d --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_utxos_by_addresses.go @@ -0,0 +1,82 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetUtxosByAddressesRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_GetUtxosByAddressesRequest is nil") + } + return x.GetUtxosByAddressesRequest.toAppMessage() +} + +func (x *SpectredMessage_GetUtxosByAddressesRequest) fromAppMessage(message *appmessage.GetUTXOsByAddressesRequestMessage) error { + x.GetUtxosByAddressesRequest = &GetUtxosByAddressesRequestMessage{ + Addresses: message.Addresses, + } + return nil +} + +func (x *GetUtxosByAddressesRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetUtxosByAddressesRequestMessage is nil") + } + return &appmessage.GetUTXOsByAddressesRequestMessage{ + Addresses: x.Addresses, + }, nil +} + +func (x *SpectredMessage_GetUtxosByAddressesResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetUtxosByAddressesResponseMessage is nil") + } + return x.GetUtxosByAddressesResponse.toAppMessage() +} + +func (x *SpectredMessage_GetUtxosByAddressesResponse) fromAppMessage(message *appmessage.GetUTXOsByAddressesResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + entries := make([]*UtxosByAddressesEntry, len(message.Entries)) + for i, entry := range message.Entries { + entries[i] = &UtxosByAddressesEntry{} + entries[i].fromAppMessage(entry) + } + x.GetUtxosByAddressesResponse = &GetUtxosByAddressesResponseMessage{ + Entries: entries, + Error: err, + } + return nil +} + +func (x *GetUtxosByAddressesResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetUtxosByAddressesResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && len(x.Entries) != 0 { + return nil, errors.New("GetUtxosByAddressesResponseMessage contains both an error and a response") + } + + entries := 
make([]*appmessage.UTXOsByAddressesEntry, len(x.Entries)) + for i, entry := range x.Entries { + entryAsAppMessage, err := entry.toAppMessage() + if err != nil { + return nil, err + } + entries[i] = entryAsAppMessage + } + + return &appmessage.GetUTXOsByAddressesResponseMessage{ + Entries: entries, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_virtual_selected_parent_blue_score.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_virtual_selected_parent_blue_score.go new file mode 100644 index 0000000..47e27df --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_virtual_selected_parent_blue_score.go @@ -0,0 +1,57 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetVirtualSelectedParentBlueScoreRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetVirtualSelectedParentBlueScoreRequest is nil") + } + return &appmessage.GetVirtualSelectedParentBlueScoreRequestMessage{}, nil +} + +func (x *SpectredMessage_GetVirtualSelectedParentBlueScoreRequest) fromAppMessage(message *appmessage.GetVirtualSelectedParentBlueScoreRequestMessage) error { + x.GetVirtualSelectedParentBlueScoreRequest = &GetVirtualSelectedParentBlueScoreRequestMessage{} + return nil +} + +func (x *SpectredMessage_GetVirtualSelectedParentBlueScoreResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetVirtualSelectedParentBlueScoreResponse is nil") + } + return x.GetVirtualSelectedParentBlueScoreResponse.toAppMessage() +} + +func (x *SpectredMessage_GetVirtualSelectedParentBlueScoreResponse) fromAppMessage(message *appmessage.GetVirtualSelectedParentBlueScoreResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = 
&RPCError{Message: message.Error.Message} + } + x.GetVirtualSelectedParentBlueScoreResponse = &GetVirtualSelectedParentBlueScoreResponseMessage{ + BlueScore: message.BlueScore, + Error: err, + } + return nil +} + +func (x *GetVirtualSelectedParentBlueScoreResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetVirtualSelectedParentBlueScoreResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && x.BlueScore != 0 { + return nil, errors.New("GetVirtualSelectedParentBlueScoreResponseMessage contains both an error and a response") + } + + return &appmessage.GetVirtualSelectedParentBlueScoreResponseMessage{ + BlueScore: x.BlueScore, + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_virtual_selected_parent_chain_from_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_virtual_selected_parent_chain_from_block.go new file mode 100644 index 0000000..a17185e --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_get_virtual_selected_parent_chain_from_block.go @@ -0,0 +1,96 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest is nil") + } + return x.GetVirtualSelectedParentChainFromBlockRequest.toAppMessage() +} + +func (x *SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest) fromAppMessage(message *appmessage.GetVirtualSelectedParentChainFromBlockRequestMessage) error { + x.GetVirtualSelectedParentChainFromBlockRequest = 
&GetVirtualSelectedParentChainFromBlockRequestMessage{ + StartHash: message.StartHash, + IncludeAcceptedTransactionIds: message.IncludeAcceptedTransactionIDs, + } + return nil +} + +func (x *GetVirtualSelectedParentChainFromBlockRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "GetVirtualSelectedParentChainFromBlockRequestMessage is nil") + } + return &appmessage.GetVirtualSelectedParentChainFromBlockRequestMessage{ + StartHash: x.StartHash, + IncludeAcceptedTransactionIDs: x.IncludeAcceptedTransactionIds, + }, nil +} + +func (x *SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse is nil") + } + return x.GetVirtualSelectedParentChainFromBlockResponse.toAppMessage() +} + +func (x *SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse) fromAppMessage(message *appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.GetVirtualSelectedParentChainFromBlockResponse = &GetVirtualSelectedParentChainFromBlockResponseMessage{ + RemovedChainBlockHashes: message.RemovedChainBlockHashes, + AddedChainBlockHashes: message.AddedChainBlockHashes, + AcceptedTransactionIds: make([]*AcceptedTransactionIds, len(message.AcceptedTransactionIDs)), + Error: err, + } + for i, acceptedTransactionIDs := range message.AcceptedTransactionIDs { + x.GetVirtualSelectedParentChainFromBlockResponse.AcceptedTransactionIds[i] = &AcceptedTransactionIds{} + x.GetVirtualSelectedParentChainFromBlockResponse.AcceptedTransactionIds[i].fromAppMessage(acceptedTransactionIDs) + } + return nil +} + +func (x *GetVirtualSelectedParentChainFromBlockResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "GetVirtualSelectedParentChainFromBlockResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + + if rpcErr != nil && (len(x.AddedChainBlockHashes) != 0 || len(x.RemovedChainBlockHashes) != 0) { + return nil, errors.New("GetVirtualSelectedParentChainFromBlockResponseMessage contains both an error and a response") + } + + message := &appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage{ + RemovedChainBlockHashes: x.RemovedChainBlockHashes, + AddedChainBlockHashes: x.AddedChainBlockHashes, + AcceptedTransactionIDs: make([]*appmessage.AcceptedTransactionIDs, len(x.AcceptedTransactionIds)), + Error: rpcErr, + } + + for i, acceptedTransactionIds := range x.AcceptedTransactionIds { + message.AcceptedTransactionIDs[i] = acceptedTransactionIds.toAppMessage() + } + + return message, nil +} + +func (x *AcceptedTransactionIds) fromAppMessage(acceptedTransactionIDs *appmessage.AcceptedTransactionIDs) { + x.AcceptingBlockHash = acceptedTransactionIDs.AcceptingBlockHash + x.AcceptedTransactionIds = acceptedTransactionIDs.AcceptedTransactionIDs +} + +func (x *AcceptedTransactionIds) toAppMessage() *appmessage.AcceptedTransactionIDs { + return &appmessage.AcceptedTransactionIDs{ + AcceptingBlockHash: x.AcceptingBlockHash, + AcceptedTransactionIDs: x.AcceptedTransactionIds, + } +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_block_added.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_block_added.go new file mode 100644 index 0000000..42234a8 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_block_added.go @@ -0,0 +1,79 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyBlockAddedRequest) toAppMessage() (appmessage.Message, 
error) { + return &appmessage.NotifyBlockAddedRequestMessage{}, nil +} + +func (x *SpectredMessage_NotifyBlockAddedRequest) fromAppMessage(_ *appmessage.NotifyBlockAddedRequestMessage) error { + x.NotifyBlockAddedRequest = &NotifyBlockAddedRequestMessage{} + return nil +} + +func (x *SpectredMessage_NotifyBlockAddedResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyBlockAddedResponse is nil") + } + return x.NotifyBlockAddedResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyBlockAddedResponse) fromAppMessage(message *appmessage.NotifyBlockAddedResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyBlockAddedResponse = &NotifyBlockAddedResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyBlockAddedResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyBlockAddedResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyBlockAddedResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_BlockAddedNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_BlockAddedNotification is nil") + } + return x.BlockAddedNotification.toAppMessage() +} + +func (x *SpectredMessage_BlockAddedNotification) fromAppMessage(message *appmessage.BlockAddedNotificationMessage) error { + block := &RpcBlock{} + err := block.fromAppMessage(message.Block) + if err != nil { + return err + } + x.BlockAddedNotification = &BlockAddedNotificationMessage{ + Block: block, + } + return nil +} + +func (x *BlockAddedNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, 
"BlockAddedNotificationMessage is nil") + } + block, err := x.Block.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.BlockAddedNotificationMessage{ + Block: block, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_finality_conflicts.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_finality_conflicts.go new file mode 100644 index 0000000..5f23038 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_finality_conflicts.go @@ -0,0 +1,93 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyFinalityConflictsRequest) toAppMessage() (appmessage.Message, error) { + return &appmessage.NotifyFinalityConflictsRequestMessage{}, nil +} + +func (x *SpectredMessage_NotifyFinalityConflictsRequest) fromAppMessage(_ *appmessage.NotifyFinalityConflictsRequestMessage) error { + x.NotifyFinalityConflictsRequest = &NotifyFinalityConflictsRequestMessage{} + return nil +} + +func (x *SpectredMessage_NotifyFinalityConflictsResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyFinalityConflictsResponse is nil") + } + return x.NotifyFinalityConflictsResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyFinalityConflictsResponse) fromAppMessage(message *appmessage.NotifyFinalityConflictsResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyFinalityConflictsResponse = &NotifyFinalityConflictsResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyFinalityConflictsResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyFinalityConflictsResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error 
is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyFinalityConflictsResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_FinalityConflictNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_FinalityConflictNotification is nil") + } + return x.FinalityConflictNotification.toAppMessage() +} + +func (x *SpectredMessage_FinalityConflictNotification) fromAppMessage(message *appmessage.FinalityConflictNotificationMessage) error { + x.FinalityConflictNotification = &FinalityConflictNotificationMessage{ + ViolatingBlockHash: message.ViolatingBlockHash, + } + return nil +} + +func (x *FinalityConflictNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "FinalityConflictNotificationMessage is nil") + } + return &appmessage.FinalityConflictNotificationMessage{ + ViolatingBlockHash: x.ViolatingBlockHash, + }, nil +} + +func (x *SpectredMessage_FinalityConflictResolvedNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_FinalityConflictResolvedNotification is nil") + } + return x.FinalityConflictResolvedNotification.toAppMessage() +} + +func (x *SpectredMessage_FinalityConflictResolvedNotification) fromAppMessage(message *appmessage.FinalityConflictResolvedNotificationMessage) error { + x.FinalityConflictResolvedNotification = &FinalityConflictResolvedNotificationMessage{ + FinalityBlockHash: message.FinalityBlockHash, + } + return nil +} + +func (x *FinalityConflictResolvedNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "FinalityConflictResolvedNotificationMessage is nil") + } + return &appmessage.FinalityConflictResolvedNotificationMessage{ + FinalityBlockHash: x.FinalityBlockHash, + }, nil +} diff --git 
a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_new_block_template.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_new_block_template.go new file mode 100644 index 0000000..0816ca0 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_new_block_template.go @@ -0,0 +1,66 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyNewBlockTemplateRequest) toAppMessage() (appmessage.Message, error) { + return &appmessage.NotifyNewBlockTemplateRequestMessage{}, nil +} + +func (x *SpectredMessage_NotifyNewBlockTemplateRequest) fromAppMessage(_ *appmessage.NotifyNewBlockTemplateRequestMessage) error { + x.NotifyNewBlockTemplateRequest = &NotifyNewBlockTemplateRequestMessage{} + return nil +} + +func (x *SpectredMessage_NotifyNewBlockTemplateResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyNewBlockTemplateResponse is nil") + } + return x.NotifyNewBlockTemplateResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyNewBlockTemplateResponse) fromAppMessage(message *appmessage.NotifyNewBlockTemplateResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyNewBlockTemplateResponse = &NotifyNewBlockTemplateResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyNewBlockTemplateResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyNewBlockTemplateResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyNewBlockTemplateResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x 
*SpectredMessage_NewBlockTemplateNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NewBlockTemplateNotification is nil") + } + return x.NewBlockTemplateNotification.toAppMessage() +} + +func (x *SpectredMessage_NewBlockTemplateNotification) fromAppMessage(message *appmessage.NewBlockTemplateNotificationMessage) error { + x.NewBlockTemplateNotification = &NewBlockTemplateNotificationMessage{} + return nil +} + +func (x *NewBlockTemplateNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NewBlockTemplateNotificationMessage is nil") + } + return &appmessage.NewBlockTemplateNotificationMessage{}, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_pruning_point_utxo_set_override.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_pruning_point_utxo_set_override.go new file mode 100644 index 0000000..5566ea8 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_pruning_point_utxo_set_override.go @@ -0,0 +1,108 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest is nil") + } + return &appmessage.NotifyPruningPointUTXOSetOverrideRequestMessage{}, nil +} + +func (x *SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest) fromAppMessage(_ *appmessage.NotifyPruningPointUTXOSetOverrideRequestMessage) error { + x.NotifyPruningPointUTXOSetOverrideRequest = &NotifyPruningPointUTXOSetOverrideRequestMessage{} + return nil +} + +func (x *SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse) toAppMessage() (appmessage.Message, error) { + if 
x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse is nil") + } + return x.NotifyPruningPointUTXOSetOverrideResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse) fromAppMessage(message *appmessage.NotifyPruningPointUTXOSetOverrideResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyPruningPointUTXOSetOverrideResponse = &NotifyPruningPointUTXOSetOverrideResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyPruningPointUTXOSetOverrideResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyPruningPointUTXOSetOverrideResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyPruningPointUTXOSetOverrideResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_PruningPointUTXOSetOverrideNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_PruningPointUTXOSetOverrideNotification is nil") + } + return &appmessage.PruningPointUTXOSetOverrideNotificationMessage{}, nil +} + +func (x *SpectredMessage_PruningPointUTXOSetOverrideNotification) fromAppMessage(_ *appmessage.PruningPointUTXOSetOverrideNotificationMessage) error { + x.PruningPointUTXOSetOverrideNotification = &PruningPointUTXOSetOverrideNotificationMessage{} + return nil +} + +func (x *SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest is nil") + } + return &appmessage.StopNotifyingPruningPointUTXOSetOverrideRequestMessage{}, nil +} + +func (x 
*SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest) fromAppMessage(_ *appmessage.StopNotifyingPruningPointUTXOSetOverrideRequestMessage) error { + x.StopNotifyingPruningPointUTXOSetOverrideRequest = &StopNotifyingPruningPointUTXOSetOverrideRequestMessage{} + return nil +} + +func (x *SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse is nil") + } + return x.StopNotifyingPruningPointUTXOSetOverrideResponse.toAppMessage() +} + +func (x *SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideResponse) fromAppMessage( + message *appmessage.StopNotifyingPruningPointUTXOSetOverrideResponseMessage) error { + + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.StopNotifyingPruningPointUTXOSetOverrideResponse = &StopNotifyingPruningPointUTXOSetOverrideResponseMessage{ + Error: err, + } + return nil +} + +func (x *StopNotifyingPruningPointUTXOSetOverrideResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "StopNotifyingPruningPointUTXOSetOverrideResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.StopNotifyingPruningPointUTXOSetOverrideResponseMessage{ + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_utxos_changed.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_utxos_changed.go new file mode 100644 index 0000000..e0e0d09 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_utxos_changed.go @@ -0,0 +1,155 @@ +package protowire + +import ( + "github.com/pkg/errors" + 
"github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyUtxosChangedRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyUtxosChangedRequest is nil") + } + return x.NotifyUtxosChangedRequest.toAppMessage() +} + +func (x *SpectredMessage_NotifyUtxosChangedRequest) fromAppMessage(message *appmessage.NotifyUTXOsChangedRequestMessage) error { + x.NotifyUtxosChangedRequest = &NotifyUtxosChangedRequestMessage{ + Addresses: message.Addresses, + } + return nil +} + +func (x *NotifyUtxosChangedRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyUtxosChangedRequestMessage is nil") + } + return &appmessage.NotifyUTXOsChangedRequestMessage{ + Addresses: x.Addresses, + }, nil +} + +func (x *SpectredMessage_NotifyUtxosChangedResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyUtxosChangedResponseMessage is nil") + } + return x.NotifyUtxosChangedResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyUtxosChangedResponse) fromAppMessage(message *appmessage.NotifyUTXOsChangedResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyUtxosChangedResponse = &NotifyUtxosChangedResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyUtxosChangedResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyUtxosChangedResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyUTXOsChangedResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_UtxosChangedNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_UtxosChangedNotification is nil") + } + return x.UtxosChangedNotification.toAppMessage() +} + +func (x *SpectredMessage_UtxosChangedNotification) fromAppMessage(message *appmessage.UTXOsChangedNotificationMessage) error { + added := make([]*UtxosByAddressesEntry, len(message.Added)) + for i, entry := range message.Added { + added[i] = &UtxosByAddressesEntry{} + added[i].fromAppMessage(entry) + } + + removed := make([]*UtxosByAddressesEntry, len(message.Removed)) + for i, entry := range message.Removed { + removed[i] = &UtxosByAddressesEntry{} + removed[i].fromAppMessage(entry) + } + + x.UtxosChangedNotification = &UtxosChangedNotificationMessage{ + Added: added, + Removed: removed, + } + return nil +} + +func (x *UtxosChangedNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "UtxosChangedNotificationMessage is nil") + } + added := make([]*appmessage.UTXOsByAddressesEntry, len(x.Added)) + for i, entry := range x.Added { + entryAsAppMessage, err := entry.toAppMessage() + if err != nil { + return nil, err + } + // UTXOEntry is optional in other places, but here it's required. 
+ if entryAsAppMessage.UTXOEntry == nil { + return nil, errors.Wrapf(errorNil, "UTXOEntry is nil in UTXOsByAddressesEntry.Added") + } + added[i] = entryAsAppMessage + } + + removed := make([]*appmessage.UTXOsByAddressesEntry, len(x.Removed)) + for i, entry := range x.Removed { + entryAsAppMessage, err := entry.toAppMessage() + if err != nil { + return nil, err + } + removed[i] = entryAsAppMessage + } + + return &appmessage.UTXOsChangedNotificationMessage{ + Added: added, + Removed: removed, + }, nil +} + +func (x *UtxosByAddressesEntry) toAppMessage() (*appmessage.UTXOsByAddressesEntry, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "UtxosByAddressesEntry is nil") + } + outpoint, err := x.Outpoint.toAppMessage() + if err != nil { + return nil, err + } + entry, err := x.UtxoEntry.toAppMessage() + // entry is an optional field sometimes + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.UTXOsByAddressesEntry{ + Address: x.Address, + Outpoint: outpoint, + UTXOEntry: entry, + }, nil +} + +func (x *UtxosByAddressesEntry) fromAppMessage(message *appmessage.UTXOsByAddressesEntry) { + outpoint := &RpcOutpoint{} + outpoint.fromAppMessage(message.Outpoint) + var utxoEntry *RpcUtxoEntry + if message.UTXOEntry != nil { + utxoEntry = &RpcUtxoEntry{} + utxoEntry.fromAppMessage(message.UTXOEntry) + } + *x = UtxosByAddressesEntry{ + Address: message.Address, + Outpoint: outpoint, + UtxoEntry: utxoEntry, + } +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_daa_score_changed.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_daa_score_changed.go new file mode 100644 index 0000000..952ef2b --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_daa_score_changed.go @@ -0,0 +1,73 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x 
*SpectredMessage_NotifyVirtualDaaScoreChangedRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyVirtualDaaScoreChangedRequest is nil") + } + return &appmessage.NotifyVirtualDaaScoreChangedRequestMessage{}, nil +} + +func (x *SpectredMessage_NotifyVirtualDaaScoreChangedRequest) fromAppMessage(_ *appmessage.NotifyVirtualDaaScoreChangedRequestMessage) error { + x.NotifyVirtualDaaScoreChangedRequest = &NotifyVirtualDaaScoreChangedRequestMessage{} + return nil +} + +func (x *SpectredMessage_NotifyVirtualDaaScoreChangedResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyVirtualDaaScoreChangedResponse is nil") + } + return x.NotifyVirtualDaaScoreChangedResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyVirtualDaaScoreChangedResponse) fromAppMessage(message *appmessage.NotifyVirtualDaaScoreChangedResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyVirtualDaaScoreChangedResponse = &NotifyVirtualDaaScoreChangedResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyVirtualDaaScoreChangedResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyVirtualDaaScoreChangedResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyVirtualDaaScoreChangedResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_VirtualDaaScoreChangedNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_VirtualDaaScoreChangedNotification is nil") + } + return x.VirtualDaaScoreChangedNotification.toAppMessage() +} + +func (x 
*SpectredMessage_VirtualDaaScoreChangedNotification) fromAppMessage(message *appmessage.VirtualDaaScoreChangedNotificationMessage) error { + x.VirtualDaaScoreChangedNotification = &VirtualDaaScoreChangedNotificationMessage{ + VirtualDaaScore: message.VirtualDaaScore, + } + return nil +} + +func (x *VirtualDaaScoreChangedNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "VirtualDaaScoreChangedNotificationMessage is nil") + } + return &appmessage.VirtualDaaScoreChangedNotificationMessage{ + VirtualDaaScore: x.VirtualDaaScore, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_selected_parent_blue_score_changed.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_selected_parent_blue_score_changed.go new file mode 100644 index 0000000..85de4ae --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_selected_parent_blue_score_changed.go @@ -0,0 +1,73 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest is nil") + } + return &appmessage.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage{}, nil +} + +func (x *SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest) fromAppMessage(_ *appmessage.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage) error { + x.NotifyVirtualSelectedParentBlueScoreChangedRequest = &NotifyVirtualSelectedParentBlueScoreChangedRequestMessage{} + return nil +} + +func (x *SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse is nil") + } + return x.NotifyVirtualSelectedParentBlueScoreChangedResponse.toAppMessage() +} + +func (x *SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse) fromAppMessage(message *appmessage.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyVirtualSelectedParentBlueScoreChangedResponse = &NotifyVirtualSelectedParentBlueScoreChangedResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyVirtualSelectedParentBlueScoreChangedResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification is nil") + } + return x.VirtualSelectedParentBlueScoreChangedNotification.toAppMessage() +} + +func (x *SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification) fromAppMessage(message *appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage) error { + x.VirtualSelectedParentBlueScoreChangedNotification = &VirtualSelectedParentBlueScoreChangedNotificationMessage{ + VirtualSelectedParentBlueScore: message.VirtualSelectedParentBlueScore, + } + return nil +} + +func (x *VirtualSelectedParentBlueScoreChangedNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, 
errors.Wrapf(errorNil, "VirtualSelectedParentBlueScoreChangedNotificationMessage is nil") + } + return &appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage{ + VirtualSelectedParentBlueScore: x.VirtualSelectedParentBlueScore, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_selected_parent_chain_changed.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_selected_parent_chain_changed.go new file mode 100644 index 0000000..3abca95 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_notify_virtual_selected_parent_chain_changed.go @@ -0,0 +1,91 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest is nil") + } + return &appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage{ + IncludeAcceptedTransactionIDs: x.NotifyVirtualSelectedParentChainChangedRequest.IncludeAcceptedTransactionIds, + }, nil +} + +func (x *SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest) fromAppMessage(appmessage *appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage) error { + x.NotifyVirtualSelectedParentChainChangedRequest = &NotifyVirtualSelectedParentChainChangedRequestMessage{ + IncludeAcceptedTransactionIds: appmessage.IncludeAcceptedTransactionIDs, + } + return nil +} + +func (x *SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse is nil") + } + return x.NotifyVirtualSelectedParentChainChangedResponse.toAppMessage() +} + +func (x 
*SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse) fromAppMessage(message *appmessage.NotifyVirtualSelectedParentChainChangedResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.NotifyVirtualSelectedParentChainChangedResponse = &NotifyVirtualSelectedParentChainChangedResponseMessage{ + Error: err, + } + return nil +} + +func (x *NotifyVirtualSelectedParentChainChangedResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "NotifyVirtualSelectedParentChainChangedResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.NotifyVirtualSelectedParentChainChangedResponseMessage{ + Error: rpcErr, + }, nil +} + +func (x *SpectredMessage_VirtualSelectedParentChainChangedNotification) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_VirtualSelectedParentChainChangedNotification is nil") + } + return x.VirtualSelectedParentChainChangedNotification.toAppMessage() +} + +func (x *SpectredMessage_VirtualSelectedParentChainChangedNotification) fromAppMessage(message *appmessage.VirtualSelectedParentChainChangedNotificationMessage) error { + x.VirtualSelectedParentChainChangedNotification = &VirtualSelectedParentChainChangedNotificationMessage{ + RemovedChainBlockHashes: message.RemovedChainBlockHashes, + AddedChainBlockHashes: message.AddedChainBlockHashes, + AcceptedTransactionIds: make([]*AcceptedTransactionIds, len(message.AcceptedTransactionIDs)), + } + + for i, acceptedTransactionIDs := range message.AcceptedTransactionIDs { + x.VirtualSelectedParentChainChangedNotification.AcceptedTransactionIds[i] = &AcceptedTransactionIds{} + 
x.VirtualSelectedParentChainChangedNotification.AcceptedTransactionIds[i].fromAppMessage(acceptedTransactionIDs) + } + return nil +} + +func (x *VirtualSelectedParentChainChangedNotificationMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "VirtualSelectedParentChainChangedNotificationMessage is nil") + } + message := &appmessage.VirtualSelectedParentChainChangedNotificationMessage{ + RemovedChainBlockHashes: x.RemovedChainBlockHashes, + AddedChainBlockHashes: x.AddedChainBlockHashes, + AcceptedTransactionIDs: make([]*appmessage.AcceptedTransactionIDs, len(x.AcceptedTransactionIds)), + } + + for i, acceptedTransactionIds := range x.AcceptedTransactionIds { + message.AcceptedTransactionIDs[i] = acceptedTransactionIds.toAppMessage() + } + return message, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_resolve_finality_conflict.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_resolve_finality_conflict.go new file mode 100644 index 0000000..5af0e28 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_resolve_finality_conflict.go @@ -0,0 +1,61 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_ResolveFinalityConflictRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_ResolveFinalityConflictRequest is nil") + } + return x.ResolveFinalityConflictRequest.toAppMessage() +} + +func (x *SpectredMessage_ResolveFinalityConflictRequest) fromAppMessage(message *appmessage.ResolveFinalityConflictRequestMessage) error { + x.ResolveFinalityConflictRequest = &ResolveFinalityConflictRequestMessage{ + FinalityBlockHash: message.FinalityBlockHash, + } + return nil +} + +func (x *ResolveFinalityConflictRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil 
{ + return nil, errors.Wrapf(errorNil, "ResolveFinalityConflictRequestMessage is nil") + } + return &appmessage.ResolveFinalityConflictRequestMessage{ + FinalityBlockHash: x.FinalityBlockHash, + }, nil +} + +func (x *SpectredMessage_ResolveFinalityConflictResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_ResolveFinalityConflictResponse is nil") + } + return x.ResolveFinalityConflictResponse.toAppMessage() +} + +func (x *SpectredMessage_ResolveFinalityConflictResponse) fromAppMessage(message *appmessage.ResolveFinalityConflictResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.ResolveFinalityConflictResponse = &ResolveFinalityConflictResponseMessage{ + Error: err, + } + return nil +} + +func (x *ResolveFinalityConflictResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "ResolveFinalityConflictResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.ResolveFinalityConflictResponseMessage{ + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_shut_down.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_shut_down.go new file mode 100644 index 0000000..b6a45dc --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_shut_down.go @@ -0,0 +1,47 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_ShutDownRequest) toAppMessage() (appmessage.Message, error) { + return &appmessage.ShutDownRequestMessage{}, nil +} + +func (x *SpectredMessage_ShutDownRequest) fromAppMessage(_ *appmessage.ShutDownRequestMessage) error { + x.ShutDownRequest = 
&ShutDownRequestMessage{} + return nil +} + +func (x *SpectredMessage_ShutDownResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_ShutDownResponse is nil") + } + return x.ShutDownResponse.toAppMessage() +} + +func (x *SpectredMessage_ShutDownResponse) fromAppMessage(message *appmessage.ShutDownResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.ShutDownResponse = &ShutDownResponseMessage{ + Error: err, + } + return nil +} + +func (x *ShutDownResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "ShutDownResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.ShutDownResponseMessage{ + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_stop_notifying_utxos_changed.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_stop_notifying_utxos_changed.go new file mode 100644 index 0000000..afd9e7b --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_stop_notifying_utxos_changed.go @@ -0,0 +1,61 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_StopNotifyingUtxosChangedRequest) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_StopNotifyingUtxosChangedRequest is nil") + } + return x.StopNotifyingUtxosChangedRequest.toAppMessage() +} + +func (x *SpectredMessage_StopNotifyingUtxosChangedRequest) fromAppMessage(message *appmessage.StopNotifyingUTXOsChangedRequestMessage) error { + x.StopNotifyingUtxosChangedRequest = &StopNotifyingUtxosChangedRequestMessage{ + Addresses: 
message.Addresses, + } + return nil +} + +func (x *StopNotifyingUtxosChangedRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "StopNotifyingUtxosChangedRequestMessage is nil") + } + return &appmessage.StopNotifyingUTXOsChangedRequestMessage{ + Addresses: x.Addresses, + }, nil +} + +func (x *SpectredMessage_StopNotifyingUtxosChangedResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_StopNotifyingUtxosChangedResponse is nil") + } + return x.StopNotifyingUtxosChangedResponse.toAppMessage() +} + +func (x *SpectredMessage_StopNotifyingUtxosChangedResponse) fromAppMessage(message *appmessage.StopNotifyingUTXOsChangedResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.StopNotifyingUtxosChangedResponse = &StopNotifyingUtxosChangedResponseMessage{ + Error: err, + } + return nil +} + +func (x *StopNotifyingUtxosChangedResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "StopNotifyingUtxosChangedResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.StopNotifyingUTXOsChangedResponseMessage{ + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_submit_block.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_submit_block.go new file mode 100644 index 0000000..5827d30 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_submit_block.go @@ -0,0 +1,223 @@ +package protowire + +import ( + "math" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +func (x *SpectredMessage_SubmitBlockRequest) toAppMessage() (appmessage.Message, error) 
{ + if x == nil { + return nil, errors.Wrapf(errorNil, "SubmitBlockRequestMessage is nil") + } + return x.SubmitBlockRequest.toAppMessage() +} + +func (x *SpectredMessage_SubmitBlockRequest) fromAppMessage(message *appmessage.SubmitBlockRequestMessage) error { + x.SubmitBlockRequest = &SubmitBlockRequestMessage{Block: &RpcBlock{}} + x.SubmitBlockRequest.AllowNonDAABlocks = message.AllowNonDAABlocks + return x.SubmitBlockRequest.Block.fromAppMessage(message.Block) +} + +func (x *SubmitBlockRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SubmitBlockRequestMessage is nil") + } + blockAppMessage, err := x.Block.toAppMessage() + if err != nil { + return nil, err + } + return &appmessage.SubmitBlockRequestMessage{ + Block: blockAppMessage, + AllowNonDAABlocks: x.GetAllowNonDAABlocks(), + }, nil +} + +func (x *SpectredMessage_SubmitBlockResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_SubmitBlockResponse is nil") + } + return x.SubmitBlockResponse.toAppMessage() +} + +func (x *SpectredMessage_SubmitBlockResponse) fromAppMessage(message *appmessage.SubmitBlockResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.SubmitBlockResponse = &SubmitBlockResponseMessage{ + RejectReason: SubmitBlockResponseMessage_RejectReason(message.RejectReason), + Error: err, + } + return nil +} + +func (x *SubmitBlockResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SubmitBlockResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.SubmitBlockResponseMessage{ + RejectReason: appmessage.RejectReason(x.RejectReason), + Error: rpcErr, + }, nil +} + +func (x *RpcBlock) toAppMessage() 
(*appmessage.RPCBlock, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RpcBlock is nil") + } + header, err := x.Header.toAppMessage() + if err != nil { + return nil, err + } + transactions := make([]*appmessage.RPCTransaction, len(x.Transactions)) + for i, transaction := range x.Transactions { + appTransaction, err := transaction.toAppMessage() + if err != nil { + return nil, err + } + transactions[i] = appTransaction + } + var verboseData *appmessage.RPCBlockVerboseData + if x.VerboseData != nil { + appMessageVerboseData, err := x.VerboseData.toAppMessage() + if err != nil { + return nil, err + } + verboseData = appMessageVerboseData + } + return &appmessage.RPCBlock{ + Header: header, + Transactions: transactions, + VerboseData: verboseData, + }, nil +} + +func (x *RpcBlock) fromAppMessage(message *appmessage.RPCBlock) error { + header := &RpcBlockHeader{} + header.fromAppMessage(message.Header) + transactions := make([]*RpcTransaction, len(message.Transactions)) + for i, transaction := range message.Transactions { + rpcTransaction := &RpcTransaction{} + rpcTransaction.fromAppMessage(transaction) + transactions[i] = rpcTransaction + } + var verboseData *RpcBlockVerboseData + if message.VerboseData != nil { + verboseData = &RpcBlockVerboseData{} + verboseData.fromAppMessage(message.VerboseData) + } + *x = RpcBlock{ + Header: header, + Transactions: transactions, + VerboseData: verboseData, + } + return nil +} + +func (x *RpcBlockHeader) toAppMessage() (*appmessage.RPCBlockHeader, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RpcBlockHeader is nil") + } + if x.Version > math.MaxUint16 { + return nil, errors.Errorf("Invalid block header version - bigger then uint16") + } + parents := make([]*appmessage.RPCBlockLevelParents, len(x.Parents)) + for i, blockLevelParents := range x.Parents { + var err error + parents[i], err = blockLevelParents.toAppMessage() + if err != nil { + return nil, err + } + } + return &appmessage.RPCBlockHeader{ 
+ Version: x.Version, + Parents: parents, + HashMerkleRoot: x.HashMerkleRoot, + AcceptedIDMerkleRoot: x.AcceptedIdMerkleRoot, + UTXOCommitment: x.UtxoCommitment, + Timestamp: x.Timestamp, + Bits: x.Bits, + Nonce: x.Nonce, + DAAScore: x.DaaScore, + BlueWork: x.BlueWork, + BlueScore: x.BlueScore, + PruningPoint: x.PruningPoint, + }, nil +} + +func (x *RpcBlockHeader) fromAppMessage(message *appmessage.RPCBlockHeader) { + parents := make([]*RpcBlockLevelParents, len(message.Parents)) + for i, blockLevelParents := range message.Parents { + parents[i] = &RpcBlockLevelParents{} + parents[i].fromAppMessage(blockLevelParents) + } + *x = RpcBlockHeader{ + Version: message.Version, + Parents: parents, + HashMerkleRoot: message.HashMerkleRoot, + AcceptedIdMerkleRoot: message.AcceptedIDMerkleRoot, + UtxoCommitment: message.UTXOCommitment, + Timestamp: message.Timestamp, + Bits: message.Bits, + Nonce: message.Nonce, + DaaScore: message.DAAScore, + BlueWork: message.BlueWork, + BlueScore: message.BlueScore, + PruningPoint: message.PruningPoint, + } +} + +func (x *RpcBlockLevelParents) toAppMessage() (*appmessage.RPCBlockLevelParents, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RpcBlockLevelParents is nil") + } + return &appmessage.RPCBlockLevelParents{ + ParentHashes: x.ParentHashes, + }, nil +} + +func (x *RpcBlockLevelParents) fromAppMessage(message *appmessage.RPCBlockLevelParents) { + *x = RpcBlockLevelParents{ + ParentHashes: message.ParentHashes, + } +} + +func (x *RpcBlockVerboseData) toAppMessage() (*appmessage.RPCBlockVerboseData, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RpcBlockVerboseData is nil") + } + return &appmessage.RPCBlockVerboseData{ + Hash: x.Hash, + Difficulty: x.Difficulty, + SelectedParentHash: x.SelectedParentHash, + TransactionIDs: x.TransactionIds, + IsHeaderOnly: x.IsHeaderOnly, + BlueScore: x.BlueScore, + ChildrenHashes: x.ChildrenHashes, + MergeSetBluesHashes: x.MergeSetBluesHashes, + MergeSetRedsHashes: 
x.MergeSetRedsHashes,
+		IsChainBlock:        x.IsChainBlock,
+	}, nil
+}
+
+// fromAppMessage populates the wire verbose data from its appmessage form.
+func (x *RpcBlockVerboseData) fromAppMessage(message *appmessage.RPCBlockVerboseData) {
+	*x = RpcBlockVerboseData{
+		Hash:                message.Hash,
+		Difficulty:          message.Difficulty,
+		SelectedParentHash:  message.SelectedParentHash,
+		TransactionIds:      message.TransactionIDs,
+		IsHeaderOnly:        message.IsHeaderOnly,
+		BlueScore:           message.BlueScore,
+		ChildrenHashes:      message.ChildrenHashes,
+		MergeSetBluesHashes: message.MergeSetBluesHashes,
+		MergeSetRedsHashes:  message.MergeSetRedsHashes,
+		IsChainBlock:        message.IsChainBlock,
+	}
+}
diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_submit_transaction.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_submit_transaction.go
new file mode 100644
index 0000000..597bf29
--- /dev/null
+++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_submit_transaction.go
@@ -0,0 +1,341 @@
+package protowire
+
+import (
+	"math"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+)
+
+func (x *SpectredMessage_SubmitTransactionRequest) toAppMessage() (appmessage.Message, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "SpectredMessage_SubmitTransactionRequest is nil")
+	}
+	return x.SubmitTransactionRequest.toAppMessage()
+}
+
+// fromAppMessage populates the wrapper from an appmessage submit-transaction request.
+func (x *SpectredMessage_SubmitTransactionRequest) fromAppMessage(message *appmessage.SubmitTransactionRequestMessage) error {
+	x.SubmitTransactionRequest = &SubmitTransactionRequestMessage{
+		Transaction: &RpcTransaction{},
+		AllowOrphan: message.AllowOrphan,
+	}
+	x.SubmitTransactionRequest.Transaction.fromAppMessage(message.Transaction)
+	return nil
+}
+
+// toAppMessage converts the wire submit-transaction request into its appmessage form.
+func (x *SubmitTransactionRequestMessage) toAppMessage() (appmessage.Message, error) {
+	if x == nil {
+		// FIX: previously reported "SubmitBlockRequestMessage is nil" — a
+		// copy-paste from rpc_submit_block.go naming the wrong type.
+		return nil, errors.Wrapf(errorNil, "SubmitTransactionRequestMessage is nil")
+	}
+	rpcTransaction, err := x.Transaction.toAppMessage()
+	if err != nil {
+		return nil, err
+	}
+	return 
&appmessage.SubmitTransactionRequestMessage{ + Transaction: rpcTransaction, + AllowOrphan: x.AllowOrphan, + }, nil +} + +func (x *SpectredMessage_SubmitTransactionResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_SubmitTransactionResponse is nil") + } + return x.SubmitTransactionResponse.toAppMessage() +} + +func (x *SpectredMessage_SubmitTransactionResponse) fromAppMessage(message *appmessage.SubmitTransactionResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.SubmitTransactionResponse = &SubmitTransactionResponseMessage{ + TransactionId: message.TransactionID, + Error: err, + } + return nil +} + +func (x *SubmitTransactionResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SubmitTransactionResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.SubmitTransactionResponseMessage{ + TransactionID: x.TransactionId, + Error: rpcErr, + }, nil +} + +func (x *RpcTransaction) toAppMessage() (*appmessage.RPCTransaction, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "RpcTransaction is nil") + } + inputs := make([]*appmessage.RPCTransactionInput, len(x.Inputs)) + for i, input := range x.Inputs { + appInput, err := input.toAppMessage() + if err != nil { + return nil, err + } + inputs[i] = appInput + } + outputs := make([]*appmessage.RPCTransactionOutput, len(x.Outputs)) + for i, output := range x.Outputs { + appOutput, err := output.toAppMessage() + if err != nil { + return nil, err + } + outputs[i] = appOutput + } + if x.Version > math.MaxUint16 { + return nil, errors.Errorf("Invalid RPC transaction version - bigger then uint16") + } + var verboseData *appmessage.RPCTransactionVerboseData + if x.VerboseData != 
nil {
+		appMessageVerboseData, err := x.VerboseData.toAppMessage()
+		if err != nil {
+			return nil, err
+		}
+		verboseData = appMessageVerboseData
+	}
+	return &appmessage.RPCTransaction{
+		Version:      uint16(x.Version),
+		Inputs:       inputs,
+		Outputs:      outputs,
+		LockTime:     x.LockTime,
+		SubnetworkID: x.SubnetworkId,
+		Gas:          x.Gas,
+		Payload:      x.Payload,
+		VerboseData:  verboseData,
+	}, nil
+}
+
+// fromAppMessage populates the wire transaction from its appmessage form.
+func (x *RpcTransaction) fromAppMessage(transaction *appmessage.RPCTransaction) {
+	inputs := make([]*RpcTransactionInput, len(transaction.Inputs))
+	for i, input := range transaction.Inputs {
+		inputs[i] = &RpcTransactionInput{}
+		inputs[i].fromAppMessage(input)
+	}
+	outputs := make([]*RpcTransactionOutput, len(transaction.Outputs))
+	for i, output := range transaction.Outputs {
+		outputs[i] = &RpcTransactionOutput{}
+		outputs[i].fromAppMessage(output)
+	}
+	var verboseData *RpcTransactionVerboseData
+	if transaction.VerboseData != nil {
+		verboseData = &RpcTransactionVerboseData{}
+		verboseData.fromAppMessage(transaction.VerboseData)
+	}
+	*x = RpcTransaction{
+		Version:      uint32(transaction.Version),
+		Inputs:       inputs,
+		Outputs:      outputs,
+		LockTime:     transaction.LockTime,
+		SubnetworkId: transaction.SubnetworkID,
+		Gas:          transaction.Gas,
+		Payload:      transaction.Payload,
+		VerboseData:  verboseData,
+	}
+}
+
+// toAppMessage converts a wire transaction input, validating SigOpCount fits a byte.
+func (x *RpcTransactionInput) toAppMessage() (*appmessage.RPCTransactionInput, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcTransactionInput is nil")
+	}
+	if x.SigOpCount > math.MaxUint8 {
+		return nil, errors.New("TransactionInput SigOpCount > math.MaxUint8")
+	}
+	outpoint, err := x.PreviousOutpoint.toAppMessage()
+	if err != nil {
+		return nil, err
+	}
+	var verboseData *appmessage.RPCTransactionInputVerboseData
+	// FIX: this was `for x.VerboseData != nil`, which never terminates because
+	// the body does not change x.VerboseData; a conditional is intended, matching
+	// every other optional-VerboseData conversion in this package.
+	if x.VerboseData != nil {
+		appMessageVerboseData, err := x.VerboseData.toAppMessage()
+		if err != nil {
+			return nil, err
+		}
+		verboseData = appMessageVerboseData
+	}
+	return &appmessage.RPCTransactionInput{
+		PreviousOutpoint: outpoint,
+		SignatureScript:  x.SignatureScript,
+		Sequence:         x.Sequence,
+		VerboseData:      verboseData,
+		SigOpCount:       byte(x.SigOpCount),
+	}, nil
+}
+
+// fromAppMessage populates the wire input from its appmessage form.
+func (x *RpcTransactionInput) fromAppMessage(message *appmessage.RPCTransactionInput) {
+	previousOutpoint := &RpcOutpoint{}
+	previousOutpoint.fromAppMessage(message.PreviousOutpoint)
+	var verboseData *RpcTransactionInputVerboseData
+	if message.VerboseData != nil {
+		// FIX: `:=` here declared a new variable that shadowed the outer
+		// verboseData, so the converted value was silently dropped and the field
+		// below was always nil. Assign to the outer variable instead.
+		verboseData = &RpcTransactionInputVerboseData{}
+		verboseData.fromAppData(message.VerboseData)
+	}
+	*x = RpcTransactionInput{
+		PreviousOutpoint: previousOutpoint,
+		SignatureScript:  message.SignatureScript,
+		Sequence:         message.Sequence,
+		VerboseData:      verboseData,
+		SigOpCount:       uint32(message.SigOpCount),
+	}
+}
+
+func (x *RpcTransactionOutput) toAppMessage() (*appmessage.RPCTransactionOutput, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcTransactionOutput is nil")
+	}
+	scriptPublicKey, err := x.ScriptPublicKey.toAppMessage()
+	if err != nil {
+		return nil, err
+	}
+	var verboseData *appmessage.RPCTransactionOutputVerboseData
+	if x.VerboseData != nil {
+		appMessageVerboseData, err := x.VerboseData.toAppMessage()
+		if err != nil {
+			return nil, err
+		}
+		verboseData = appMessageVerboseData
+	}
+	return &appmessage.RPCTransactionOutput{
+		Amount:          x.Amount,
+		ScriptPublicKey: scriptPublicKey,
+		VerboseData:     verboseData,
+	}, nil
+}
+
+func (x *RpcTransactionOutput) fromAppMessage(message *appmessage.RPCTransactionOutput) {
+	scriptPublicKey := &RpcScriptPublicKey{}
+	scriptPublicKey.fromAppMessage(message.ScriptPublicKey)
+	var verboseData *RpcTransactionOutputVerboseData
+	if message.VerboseData != nil {
+		verboseData = &RpcTransactionOutputVerboseData{}
+		verboseData.fromAppMessage(message.VerboseData)
+	}
+	*x = RpcTransactionOutput{
+		Amount:          message.Amount,
+		ScriptPublicKey: scriptPublicKey,
+		VerboseData:     verboseData,
+	}
+}
+
+func (x *RpcOutpoint) toAppMessage() (*appmessage.RPCOutpoint, error) {
+	if x == nil {
+		return nil, 
errors.Wrapf(errorNil, "RpcOutpoint is nil")
+	}
+	return &appmessage.RPCOutpoint{
+		TransactionID: x.TransactionId,
+		Index:         x.Index,
+	}, nil
+}
+
+func (x *RpcOutpoint) fromAppMessage(message *appmessage.RPCOutpoint) {
+	*x = RpcOutpoint{
+		TransactionId: message.TransactionID,
+		Index:         message.Index,
+	}
+}
+
+func (x *RpcUtxoEntry) toAppMessage() (*appmessage.RPCUTXOEntry, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcUtxoEntry is nil")
+	}
+	scriptPubKey, err := x.ScriptPublicKey.toAppMessage()
+	if err != nil {
+		return nil, err
+	}
+	return &appmessage.RPCUTXOEntry{
+		Amount:          x.Amount,
+		ScriptPublicKey: scriptPubKey,
+		BlockDAAScore:   x.BlockDaaScore,
+		IsCoinbase:      x.IsCoinbase,
+	}, nil
+}
+
+func (x *RpcUtxoEntry) fromAppMessage(message *appmessage.RPCUTXOEntry) {
+	scriptPublicKey := &RpcScriptPublicKey{}
+	scriptPublicKey.fromAppMessage(message.ScriptPublicKey)
+	*x = RpcUtxoEntry{
+		Amount:          message.Amount,
+		ScriptPublicKey: scriptPublicKey,
+		BlockDaaScore:   message.BlockDAAScore,
+		IsCoinbase:      message.IsCoinbase,
+	}
+}
+
+// toAppMessage converts a wire script public key, rejecting versions that do not fit uint16.
+func (x *RpcScriptPublicKey) toAppMessage() (*appmessage.RPCScriptPublicKey, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcScriptPublicKey is nil")
+	}
+	if x.Version > math.MaxUint16 {
+		// FIX: the message previously said "Invalid header version", copied from
+		// the block-header check; this validates the script public key version.
+		return nil, errors.Errorf("Invalid script public key version - bigger then uint16")
+	}
+	return &appmessage.RPCScriptPublicKey{
+		Version: uint16(x.Version),
+		Script:  x.ScriptPublicKey,
+	}, nil
+}
+
+func (x *RpcScriptPublicKey) fromAppMessage(message *appmessage.RPCScriptPublicKey) {
+	*x = RpcScriptPublicKey{
+		Version:         uint32(message.Version),
+		ScriptPublicKey: message.Script,
+	}
+}
+
+func (x *RpcTransactionVerboseData) toAppMessage() (*appmessage.RPCTransactionVerboseData, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcTransactionVerboseData is nil")
+	}
+	return &appmessage.RPCTransactionVerboseData{
+		TransactionID: x.TransactionId,
+		Hash:          x.Hash,
+		Mass:          x.Mass,
+		BlockHash:     x.BlockHash,
+		BlockTime:     x.BlockTime,
+	}, nil
+}
+
+func (x *RpcTransactionVerboseData) fromAppMessage(message *appmessage.RPCTransactionVerboseData) {
+	*x = RpcTransactionVerboseData{
+		TransactionId: message.TransactionID,
+		Hash:          message.Hash,
+		Mass:          message.Mass,
+		BlockHash:     message.BlockHash,
+		BlockTime:     message.BlockTime,
+	}
+}
+
+func (x *RpcTransactionInputVerboseData) toAppMessage() (*appmessage.RPCTransactionInputVerboseData, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcTransactionInputVerboseData is nil")
+	}
+	return &appmessage.RPCTransactionInputVerboseData{}, nil
+}
+
+func (x *RpcTransactionInputVerboseData) fromAppData(message *appmessage.RPCTransactionInputVerboseData) {
+	*x = RpcTransactionInputVerboseData{}
+}
+
+func (x *RpcTransactionOutputVerboseData) toAppMessage() (*appmessage.RPCTransactionOutputVerboseData, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, "RpcTransactionOutputVerboseData is nil")
+	}
+	return &appmessage.RPCTransactionOutputVerboseData{
+		ScriptPublicKeyType:    x.ScriptPublicKeyType,
+		ScriptPublicKeyAddress: x.ScriptPublicKeyAddress,
+	}, nil
+}
+
+func (x *RpcTransactionOutputVerboseData) fromAppMessage(message *appmessage.RPCTransactionOutputVerboseData) {
+	*x = RpcTransactionOutputVerboseData{
+		ScriptPublicKeyType:    message.ScriptPublicKeyType,
+		ScriptPublicKeyAddress: message.ScriptPublicKeyAddress,
+	}
+}
diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_unban.go b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_unban.go
new file mode 100644
index 0000000..8f34a98
--- /dev/null
+++ b/infrastructure/network/netadapter/server/grpcserver/protowire/rpc_unban.go
@@ -0,0 +1,59 @@
+package protowire
+
+import (
+	"github.com/pkg/errors"
+	"github.com/spectre-project/spectred/app/appmessage"
+)
+
+func (x *SpectredMessage_UnbanRequest) toAppMessage() (appmessage.Message, error) {
+	if x == nil {
+		return nil, errors.Wrapf(errorNil, 
"SpectredMessage_UnbanRequest is nil") + } + return x.UnbanRequest.toAppMessage() +} + +func (x *SpectredMessage_UnbanRequest) fromAppMessage(message *appmessage.UnbanRequestMessage) error { + x.UnbanRequest = &UnbanRequestMessage{Ip: message.IP} + return nil +} + +func (x *UnbanRequestMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "UnbanRequestMessage is nil") + } + return &appmessage.UnbanRequestMessage{ + IP: x.Ip, + }, nil +} + +func (x *SpectredMessage_UnbanResponse) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage_UnbanResponse is nil") + } + return x.UnbanResponse.toAppMessage() +} + +func (x *SpectredMessage_UnbanResponse) fromAppMessage(message *appmessage.UnbanResponseMessage) error { + var err *RPCError + if message.Error != nil { + err = &RPCError{Message: message.Error.Message} + } + x.UnbanResponse = &UnbanResponseMessage{ + Error: err, + } + return nil +} + +func (x *UnbanResponseMessage) toAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "UnbanResponseMessage is nil") + } + rpcErr, err := x.Error.toAppMessage() + // Error is an optional field + if err != nil && !errors.Is(err, errorNil) { + return nil, err + } + return &appmessage.UnbanResponseMessage{ + Error: rpcErr, + }, nil +} diff --git a/infrastructure/network/netadapter/server/grpcserver/protowire/wire.go b/infrastructure/network/netadapter/server/grpcserver/protowire/wire.go new file mode 100644 index 0000000..23bed17 --- /dev/null +++ b/infrastructure/network/netadapter/server/grpcserver/protowire/wire.go @@ -0,0 +1,974 @@ +package protowire + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" +) + +type converter interface { + toAppMessage() (appmessage.Message, error) +} + +// ToAppMessage converts a SpectredMessage to its appmessage.Message representation +func (x 
*SpectredMessage) ToAppMessage() (appmessage.Message, error) { + if x == nil { + return nil, errors.Wrapf(errorNil, "SpectredMessage is nil") + } + converter, ok := x.Payload.(converter) + if !ok { + return nil, errors.Errorf("received invalid message") + } + appMessage, err := converter.toAppMessage() + if err != nil { + return nil, err + } + return appMessage, nil +} + +// FromAppMessage creates a SpectredMessage from a appmessage.Message +func FromAppMessage(message appmessage.Message) (*SpectredMessage, error) { + payload, err := toPayload(message) + if err != nil { + return nil, err + } + return &SpectredMessage{ + Payload: payload, + }, nil +} + +func toPayload(message appmessage.Message) (isSpectredMessage_Payload, error) { + p2pPayload, err := toP2PPayload(message) + if err != nil { + return nil, err + } + if p2pPayload != nil { + return p2pPayload, nil + } + + rpcPayload, err := toRPCPayload(message) + if err != nil { + return nil, err + } + if rpcPayload != nil { + return rpcPayload, nil + } + + return nil, errors.Errorf("unknown message type %T", message) +} + +func toP2PPayload(message appmessage.Message) (isSpectredMessage_Payload, error) { + switch message := message.(type) { + case *appmessage.MsgAddresses: + payload := new(SpectredMessage_Addresses) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgBlock: + payload := new(SpectredMessage_Block) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestBlockLocator: + payload := new(SpectredMessage_RequestBlockLocator) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgBlockLocator: + payload := new(SpectredMessage_BlockLocator) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestAddresses: + payload := 
new(SpectredMessage_RequestAddresses) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestIBDBlocks: + payload := new(SpectredMessage_RequestIBDBlocks) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestNextHeaders: + payload := new(SpectredMessage_RequestNextHeaders) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgDoneHeaders: + payload := new(SpectredMessage_DoneHeaders) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestRelayBlocks: + payload := new(SpectredMessage_RequestRelayBlocks) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestTransactions: + payload := new(SpectredMessage_RequestTransactions) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgTransactionNotFound: + payload := new(SpectredMessage_TransactionNotFound) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgInvRelayBlock: + payload := new(SpectredMessage_InvRelayBlock) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgInvTransaction: + payload := new(SpectredMessage_InvTransactions) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgPing: + payload := new(SpectredMessage_Ping) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgPong: + payload := new(SpectredMessage_Pong) + err := payload.fromAppMessage(message) + if err != nil { + return 
nil, err + } + return payload, nil + case *appmessage.MsgTx: + payload := new(SpectredMessage_Transaction) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgVerAck: + payload := new(SpectredMessage_Verack) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgVersion: + payload := new(SpectredMessage_Version) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgReject: + payload := new(SpectredMessage_Reject) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestPruningPointUTXOSet: + payload := new(SpectredMessage_RequestPruningPointUTXOSet) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgPruningPointUTXOSetChunk: + payload := new(SpectredMessage_PruningPointUtxoSetChunk) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgUnexpectedPruningPoint: + payload := new(SpectredMessage_UnexpectedPruningPoint) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgIBDBlockLocator: + payload := new(SpectredMessage_IbdBlockLocator) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgIBDBlockLocatorHighestHash: + payload := new(SpectredMessage_IbdBlockLocatorHighestHash) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgIBDBlockLocatorHighestHashNotFound: + payload := new(SpectredMessage_IbdBlockLocatorHighestHashNotFound) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil 
+ case *appmessage.BlockHeadersMessage: + payload := new(SpectredMessage_BlockHeaders) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestNextPruningPointUTXOSetChunk: + payload := new(SpectredMessage_RequestNextPruningPointUtxoSetChunk) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgDonePruningPointUTXOSetChunks: + payload := new(SpectredMessage_DonePruningPointUtxoSetChunks) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgBlockWithTrustedData: + payload := new(SpectredMessage_BlockWithTrustedData) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestPruningPointAndItsAnticone: + payload := new(SpectredMessage_RequestPruningPointAndItsAnticone) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgDoneBlocksWithTrustedData: + payload := new(SpectredMessage_DoneBlocksWithTrustedData) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgIBDBlock: + payload := new(SpectredMessage_IbdBlock) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestHeaders: + payload := new(SpectredMessage_RequestHeaders) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgPruningPoints: + payload := new(SpectredMessage_PruningPoints) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestPruningPointProof: + payload := new(SpectredMessage_RequestPruningPointProof) + err := payload.fromAppMessage(message) + if err != 
nil { + return nil, err + } + return payload, nil + case *appmessage.MsgPruningPointProof: + payload := new(SpectredMessage_PruningPointProof) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgReady: + payload := new(SpectredMessage_Ready) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgTrustedData: + payload := new(SpectredMessage_TrustedData) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgBlockWithTrustedDataV4: + payload := new(SpectredMessage_BlockWithTrustedDataV4) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestNextPruningPointAndItsAnticoneBlocks: + payload := new(SpectredMessage_RequestNextPruningPointAndItsAnticoneBlocks) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestIBDChainBlockLocator: + payload := new(SpectredMessage_RequestIBDChainBlockLocator) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgIBDChainBlockLocator: + payload := new(SpectredMessage_IbdChainBlockLocator) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.MsgRequestAnticone: + payload := new(SpectredMessage_RequestAnticone) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + default: + return nil, nil + } +} + +func toRPCPayload(message appmessage.Message) (isSpectredMessage_Payload, error) { + switch message := message.(type) { + case *appmessage.GetCurrentNetworkRequestMessage: + payload := new(SpectredMessage_GetCurrentNetworkRequest) + err := payload.fromAppMessage(message) + if err != nil { + 
return nil, err + } + return payload, nil + case *appmessage.GetCurrentNetworkResponseMessage: + payload := new(SpectredMessage_GetCurrentNetworkResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.SubmitBlockRequestMessage: + payload := new(SpectredMessage_SubmitBlockRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.SubmitBlockResponseMessage: + payload := new(SpectredMessage_SubmitBlockResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockTemplateRequestMessage: + payload := new(SpectredMessage_GetBlockTemplateRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockTemplateResponseMessage: + payload := new(SpectredMessage_GetBlockTemplateResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyBlockAddedRequestMessage: + payload := new(SpectredMessage_NotifyBlockAddedRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyBlockAddedResponseMessage: + payload := new(SpectredMessage_NotifyBlockAddedResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.BlockAddedNotificationMessage: + payload := new(SpectredMessage_BlockAddedNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetPeerAddressesRequestMessage: + payload := new(SpectredMessage_GetPeerAddressesRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetPeerAddressesResponseMessage: + payload 
:= new(SpectredMessage_GetPeerAddressesResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetSelectedTipHashRequestMessage: + payload := new(SpectredMessage_GetSelectedTipHashRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetSelectedTipHashResponseMessage: + payload := new(SpectredMessage_GetSelectedTipHashResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetMempoolEntryRequestMessage: + payload := new(SpectredMessage_GetMempoolEntryRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetMempoolEntryResponseMessage: + payload := new(SpectredMessage_GetMempoolEntryResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetConnectedPeerInfoRequestMessage: + payload := new(SpectredMessage_GetConnectedPeerInfoRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetConnectedPeerInfoResponseMessage: + payload := new(SpectredMessage_GetConnectedPeerInfoResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.AddPeerRequestMessage: + payload := new(SpectredMessage_AddPeerRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.AddPeerResponseMessage: + payload := new(SpectredMessage_AddPeerResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.SubmitTransactionRequestMessage: + payload := new(SpectredMessage_SubmitTransactionRequest) + err := payload.fromAppMessage(message) + if 
err != nil { + return nil, err + } + return payload, nil + case *appmessage.SubmitTransactionResponseMessage: + payload := new(SpectredMessage_SubmitTransactionResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyVirtualSelectedParentChainChangedRequestMessage: + payload := new(SpectredMessage_NotifyVirtualSelectedParentChainChangedRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyVirtualSelectedParentChainChangedResponseMessage: + payload := new(SpectredMessage_NotifyVirtualSelectedParentChainChangedResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.VirtualSelectedParentChainChangedNotificationMessage: + payload := new(SpectredMessage_VirtualSelectedParentChainChangedNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockRequestMessage: + payload := new(SpectredMessage_GetBlockRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockResponseMessage: + payload := new(SpectredMessage_GetBlockResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetSubnetworkRequestMessage: + payload := new(SpectredMessage_GetSubnetworkRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetSubnetworkResponseMessage: + payload := new(SpectredMessage_GetSubnetworkResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetVirtualSelectedParentChainFromBlockRequestMessage: + payload := 
new(SpectredMessage_GetVirtualSelectedParentChainFromBlockRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage: + payload := new(SpectredMessage_GetVirtualSelectedParentChainFromBlockResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlocksRequestMessage: + payload := new(SpectredMessage_GetBlocksRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlocksResponseMessage: + payload := new(SpectredMessage_GetBlocksResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockCountRequestMessage: + payload := new(SpectredMessage_GetBlockCountRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockCountResponseMessage: + payload := new(SpectredMessage_GetBlockCountResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockDAGInfoRequestMessage: + payload := new(SpectredMessage_GetBlockDagInfoRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBlockDAGInfoResponseMessage: + payload := new(SpectredMessage_GetBlockDagInfoResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.ResolveFinalityConflictRequestMessage: + payload := new(SpectredMessage_ResolveFinalityConflictRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.ResolveFinalityConflictResponseMessage: + payload := 
new(SpectredMessage_ResolveFinalityConflictResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyFinalityConflictsRequestMessage: + payload := new(SpectredMessage_NotifyFinalityConflictsRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyFinalityConflictsResponseMessage: + payload := new(SpectredMessage_NotifyFinalityConflictsResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.FinalityConflictNotificationMessage: + payload := new(SpectredMessage_FinalityConflictNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.FinalityConflictResolvedNotificationMessage: + payload := new(SpectredMessage_FinalityConflictResolvedNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetMempoolEntriesRequestMessage: + payload := new(SpectredMessage_GetMempoolEntriesRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetMempoolEntriesResponseMessage: + payload := new(SpectredMessage_GetMempoolEntriesResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.ShutDownRequestMessage: + payload := new(SpectredMessage_ShutDownRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.ShutDownResponseMessage: + payload := new(SpectredMessage_ShutDownResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetHeadersRequestMessage: + payload := new(SpectredMessage_GetHeadersRequest) + err := 
payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetHeadersResponseMessage: + payload := new(SpectredMessage_GetHeadersResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyUTXOsChangedRequestMessage: + payload := new(SpectredMessage_NotifyUtxosChangedRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyUTXOsChangedResponseMessage: + payload := new(SpectredMessage_NotifyUtxosChangedResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.UTXOsChangedNotificationMessage: + payload := new(SpectredMessage_UtxosChangedNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.StopNotifyingUTXOsChangedRequestMessage: + payload := new(SpectredMessage_StopNotifyingUtxosChangedRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.StopNotifyingUTXOsChangedResponseMessage: + payload := new(SpectredMessage_StopNotifyingUtxosChangedResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetUTXOsByAddressesRequestMessage: + payload := new(SpectredMessage_GetUtxosByAddressesRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetUTXOsByAddressesResponseMessage: + payload := new(SpectredMessage_GetUtxosByAddressesResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBalanceByAddressRequestMessage: + payload := new(SpectredMessage_GetBalanceByAddressRequest) + err := payload.fromAppMessage(message) 
+ if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBalanceByAddressResponseMessage: + payload := new(SpectredMessage_GetBalanceByAddressResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetVirtualSelectedParentBlueScoreRequestMessage: + payload := new(SpectredMessage_GetVirtualSelectedParentBlueScoreRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetVirtualSelectedParentBlueScoreResponseMessage: + payload := new(SpectredMessage_GetVirtualSelectedParentBlueScoreResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyVirtualSelectedParentBlueScoreChangedRequestMessage: + payload := new(SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage: + payload := new(SpectredMessage_NotifyVirtualSelectedParentBlueScoreChangedResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage: + payload := new(SpectredMessage_VirtualSelectedParentBlueScoreChangedNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.BanRequestMessage: + payload := new(SpectredMessage_BanRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.BanResponseMessage: + payload := new(SpectredMessage_BanResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.UnbanRequestMessage: + payload 
:= new(SpectredMessage_UnbanRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.UnbanResponseMessage: + payload := new(SpectredMessage_UnbanResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetInfoRequestMessage: + payload := new(SpectredMessage_GetInfoRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetInfoResponseMessage: + payload := new(SpectredMessage_GetInfoResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyPruningPointUTXOSetOverrideRequestMessage: + payload := new(SpectredMessage_NotifyPruningPointUTXOSetOverrideRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyPruningPointUTXOSetOverrideResponseMessage: + payload := new(SpectredMessage_NotifyPruningPointUTXOSetOverrideResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.PruningPointUTXOSetOverrideNotificationMessage: + payload := new(SpectredMessage_PruningPointUTXOSetOverrideNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.StopNotifyingPruningPointUTXOSetOverrideRequestMessage: + payload := new(SpectredMessage_StopNotifyingPruningPointUTXOSetOverrideRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.EstimateNetworkHashesPerSecondRequestMessage: + payload := new(SpectredMessage_EstimateNetworkHashesPerSecondRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case 
*appmessage.EstimateNetworkHashesPerSecondResponseMessage: + payload := new(SpectredMessage_EstimateNetworkHashesPerSecondResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyVirtualDaaScoreChangedRequestMessage: + payload := new(SpectredMessage_NotifyVirtualDaaScoreChangedRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyVirtualDaaScoreChangedResponseMessage: + payload := new(SpectredMessage_NotifyVirtualDaaScoreChangedResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.VirtualDaaScoreChangedNotificationMessage: + payload := new(SpectredMessage_VirtualDaaScoreChangedNotification) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBalancesByAddressesRequestMessage: + payload := new(SpectredMessage_GetBalancesByAddressesRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.GetBalancesByAddressesResponseMessage: + payload := new(SpectredMessage_GetBalancesByAddressesResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyNewBlockTemplateRequestMessage: + payload := new(SpectredMessage_NotifyNewBlockTemplateRequest) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NotifyNewBlockTemplateResponseMessage: + payload := new(SpectredMessage_NotifyNewBlockTemplateResponse) + err := payload.fromAppMessage(message) + if err != nil { + return nil, err + } + return payload, nil + case *appmessage.NewBlockTemplateNotificationMessage: + payload := new(SpectredMessage_NewBlockTemplateNotification) + err := 
payload.fromAppMessage(message)
		if err != nil {
			return nil, err
		}
		return payload, nil
	case *appmessage.GetMempoolEntriesByAddressesRequestMessage:
		payload := new(SpectredMessage_GetMempoolEntriesByAddressesRequest)
		err := payload.fromAppMessage(message)
		if err != nil {
			return nil, err
		}
		return payload, nil
	case *appmessage.GetMempoolEntriesByAddressesResponseMessage:
		payload := new(SpectredMessage_GetMempoolEntriesByAddressesResponse)
		err := payload.fromAppMessage(message)
		if err != nil {
			return nil, err
		}
		return payload, nil
	case *appmessage.GetCoinSupplyRequestMessage:
		payload := new(SpectredMessage_GetCoinSupplyRequest)
		err := payload.fromAppMessage(message)
		if err != nil {
			return nil, err
		}
		return payload, nil
	case *appmessage.GetCoinSupplyResponseMessage:
		payload := new(SpectredMessage_GetCoinSupplyResponse)
		err := payload.fromAppMessage(message)
		if err != nil {
			return nil, err
		}
		return payload, nil
	default:
		// Unrecognized app message type: returns (nil, nil) rather than an
		// error — callers must treat a nil payload as "no conversion".
		return nil, nil
	}
}
diff --git a/infrastructure/network/netadapter/server/grpcserver/rpcserver.go b/infrastructure/network/netadapter/server/grpcserver/rpcserver.go
new file mode 100644
index 0000000..f97cb3d
--- /dev/null
+++ b/infrastructure/network/netadapter/server/grpcserver/rpcserver.go
package grpcserver

import (
	"github.com/spectre-project/spectred/infrastructure/network/netadapter/server"
	"github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire"
	"github.com/spectre-project/spectred/util/panics"
)

// rpcServer implements the protowire RPC service on top of the embedded
// gRPCServer (which provides the listening/connection machinery).
type rpcServer struct {
	protowire.UnimplementedRPCServer
	gRPCServer
}

// RPCMaxMessageSize is the max message size for the RPC server to send and receive
const RPCMaxMessageSize = 1024 * 1024 * 1024 // 1 GB

// NewRPCServer creates a new RPCServer
func NewRPCServer(listeningAddresses []string, rpcMaxInboundConnections int) (server.Server, error) {
	gRPCServer := newGRPCServer(listeningAddresses, RPCMaxMessageSize, rpcMaxInboundConnections, "RPC")
	rpcServer := &rpcServer{gRPCServer: *gRPCServer}
	protowire.RegisterRPCServer(gRPCServer.server, rpcServer)
	return rpcServer, nil
}

// MessageStream is the bi-directional gRPC stream entry point; each inbound
// stream is handed to the embedded gRPCServer's connection handler.
func (r *rpcServer) MessageStream(stream protowire.RPC_MessageStreamServer) error {
	defer panics.HandlePanic(log, "rpcServer.MessageStream", nil)

	return r.handleInboundConnection(stream.Context(), stream)
}
diff --git a/infrastructure/network/netadapter/server/server.go b/infrastructure/network/netadapter/server/server.go
new file mode 100644
index 0000000..1e5d837
--- /dev/null
+++ b/infrastructure/network/netadapter/server/server.go
package server

import (
	"fmt"
	"net"

	"github.com/spectre-project/spectred/infrastructure/network/netadapter/router"
)

// OnConnectedHandler is a function that is to be called
// once a new Connection is successfully established.
type OnConnectedHandler func(connection Connection) error

// OnDisconnectedHandler is a function that is to be
// called once a Connection has been disconnected.
type OnDisconnectedHandler func()

// OnInvalidMessageHandler is a function that is to be called when
// an invalid message (cannot be parsed/doesn't have a route)
// was received from a connection.
type OnInvalidMessageHandler func(err error)

// Server represents a server.
type Server interface {
	Start() error
	Stop() error
	SetOnConnectedHandler(onConnectedHandler OnConnectedHandler)
}

// P2PServer represents a p2p server.
type P2PServer interface {
	Server
	Connect(address string) (Connection, error)
}

// Connection represents a server connection.
+type Connection interface { + fmt.Stringer + Start(router *router.Router) + Disconnect() + IsConnected() bool + IsOutbound() bool + SetOnDisconnectedHandler(onDisconnectedHandler OnDisconnectedHandler) + SetOnInvalidMessageHandler(onInvalidMessageHandler OnInvalidMessageHandler) + Address() *net.TCPAddr +} diff --git a/infrastructure/network/netadapter/standalone/log.go b/infrastructure/network/netadapter/standalone/log.go new file mode 100644 index 0000000..b552809 --- /dev/null +++ b/infrastructure/network/netadapter/standalone/log.go @@ -0,0 +1,9 @@ +package standalone + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("NTAR") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/infrastructure/network/netadapter/standalone/minimal_net_adapter.go b/infrastructure/network/netadapter/standalone/minimal_net_adapter.go new file mode 100644 index 0000000..617f916 --- /dev/null +++ b/infrastructure/network/netadapter/standalone/minimal_net_adapter.go @@ -0,0 +1,235 @@ +package standalone + +import ( + "sync" + + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/util/mstime" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter/id" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + + "github.com/pkg/errors" +) + +// MinimalNetAdapter allows tests and other tools to use a simple network adapter without implementing +// all the required supporting structures. 
type MinimalNetAdapter struct {
	cfg        *config.Config
	lock       sync.Mutex // serializes Connect calls
	netAdapter *netadapter.NetAdapter
	routesChan <-chan *Routes
}

// NewMinimalNetAdapter creates a new instance of a MinimalNetAdapter
func NewMinimalNetAdapter(cfg *config.Config) (*MinimalNetAdapter, error) {
	netAdapter, err := netadapter.NewNetAdapter(cfg)
	if err != nil {
		return nil, errors.Wrap(err, "Error starting netAdapter")
	}

	routerInitializer, routesChan := generateRouteInitializer()

	netAdapter.SetP2PRouterInitializer(routerInitializer)
	// RPC routes are deliberately left unwired — this adapter only speaks P2P.
	netAdapter.SetRPCRouterInitializer(func(_ *router.Router, _ *netadapter.NetConnection) {
	})

	err = netAdapter.Start()
	if err != nil {
		return nil, errors.Wrap(err, "Error starting netAdapter")
	}

	return &MinimalNetAdapter{
		cfg:        cfg,
		lock:       sync.Mutex{},
		netAdapter: netAdapter,
		routesChan: routesChan,
	}, nil
}

// Connect opens a connection to the given address, handles handshake, and returns the routes for this connection
// To simplify usage the return type contains only two routes:
// OutgoingRoute - for all outgoing messages
// IncomingRoute - for all incoming messages (excluding handshake messages)
func (mna *MinimalNetAdapter) Connect(address string) (*Routes, error) {
	mna.lock.Lock()
	defer mna.lock.Unlock()

	err := mna.netAdapter.P2PConnect(address)
	if err != nil {
		return nil, err
	}

	// The route initializer publishes one *Routes per established connection;
	// because Connect is serialized by mna.lock, this receive pairs with the
	// connection just made above.
	routes := <-mna.routesChan
	err = mna.handleHandshake(routes, mna.netAdapter.ID())
	if err != nil {
		return nil, errors.Wrap(err, "Error in handshake")
	}

	spawn("netAdapterMock-handlePingPong", func() {
		err := mna.handlePingPong(routes)
		if err != nil {
			panic(errors.Wrap(err, "Error from ping-pong"))
		}
	})

	return routes, nil
}

// handlePingPong makes sure that we are not disconnected due to not responding to pings.
// However, it only responds to pings, not sending its own, to conform to the minimal-ness
// of MinimalNetAdapter
func (*MinimalNetAdapter) handlePingPong(routes *Routes) error {
	for {
		message, err := routes.pingRoute.Dequeue()
		if err != nil {
			if errors.Is(err, router.ErrRouteClosed) {
				// Connection went away — a clean exit, not an error.
				return nil
			}
			return err
		}

		pingMessage := message.(*appmessage.MsgPing)

		err = routes.OutgoingRoute.Enqueue(&appmessage.MsgPong{Nonce: pingMessage.Nonce})
		if err != nil {
			return err
		}
	}
}

// handleHandshake performs the peer handshake in strict order:
// receive Version -> send Version -> receive VerAck -> send VerAck ->
// receive RequestAddresses -> send (empty) Addresses -> send RequestAddresses
// -> receive Addresses. Each receive is bounded by common.DefaultTimeout.
func (mna *MinimalNetAdapter) handleHandshake(routes *Routes, ourID *id.ID) error {
	msg, err := routes.handshakeRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}
	versionMessage, ok := msg.(*appmessage.MsgVersion)
	if !ok {
		return errors.Errorf("expected first message to be of type %s, but got %s", appmessage.CmdVersion, msg.Command())
	}
	// Echo the peer's protocol version/services so the mock always "matches".
	err = routes.OutgoingRoute.Enqueue(&appmessage.MsgVersion{
		ProtocolVersion: versionMessage.ProtocolVersion,
		Network:         mna.cfg.ActiveNetParams.Name,
		Services:        versionMessage.Services,
		Timestamp:       mstime.Now(),
		Address:         nil,
		ID:              ourID,
		UserAgent:       "/net-adapter-mock/",
		DisableRelayTx:  true,
		SubnetworkID:    nil,
	})
	if err != nil {
		return err
	}

	msg, err = routes.handshakeRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}
	_, ok = msg.(*appmessage.MsgVerAck)
	if !ok {
		return errors.Errorf("expected second message to be of type %s, but got %s", appmessage.CmdVerAck, msg.Command())
	}
	err = routes.OutgoingRoute.Enqueue(&appmessage.MsgVerAck{})
	if err != nil {
		return err
	}

	msg, err = routes.addressesRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}
	_, ok = msg.(*appmessage.MsgRequestAddresses)
	if !ok {
		return errors.Errorf("expected third message to be of type %s, but got %s", appmessage.CmdRequestAddresses, msg.Command())
	}
	err = routes.OutgoingRoute.Enqueue(&appmessage.MsgAddresses{
		AddressList: []*appmessage.NetAddress{},
	})
	if err != nil {
		return err
	}

	err = routes.OutgoingRoute.Enqueue(&appmessage.MsgRequestAddresses{
		IncludeAllSubnetworks: true,
		SubnetworkID:          nil,
	})
	if err != nil {
		return err
	}
	msg, err = routes.addressesRoute.DequeueWithTimeout(common.DefaultTimeout)
	if err != nil {
		return err
	}
	_, ok = msg.(*appmessage.MsgAddresses)
	if !ok {
		return errors.Errorf("expected fourth message to be of type %s, but got %s", appmessage.CmdAddresses, msg.Command())
	}

	return nil
}

// generateRouteInitializer builds a RouterInitializer that registers dedicated
// incoming routes for handshake/addresses/ping commands and funnels every
// other protocol command into a single catch-all route; the resulting Routes
// for each connection are delivered on the returned channel.
func generateRouteInitializer() (netadapter.RouterInitializer, <-chan *Routes) {
	cmdsWithBuiltInRoutes := []appmessage.MessageCommand{
		appmessage.CmdVersion,
		appmessage.CmdVerAck,
		appmessage.CmdRequestAddresses,
		appmessage.CmdAddresses,
		appmessage.CmdPing}

	// Everything not listed above goes to the catch-all "everything else" route.
	everythingElse := make([]appmessage.MessageCommand, 0, len(appmessage.ProtocolMessageCommandToString)-len(cmdsWithBuiltInRoutes))
outerLoop:
	for command := range appmessage.ProtocolMessageCommandToString {
		for _, cmdWithBuiltInRoute := range cmdsWithBuiltInRoutes {
			if command == cmdWithBuiltInRoute {
				continue outerLoop
			}
		}

		everythingElse = append(everythingElse, command)
	}

	routesChan := make(chan *Routes)

	routeInitializer := func(router *router.Router, netConnection *netadapter.NetConnection) {
		handshakeRoute, err := router.AddIncomingRoute("handshake", []appmessage.MessageCommand{appmessage.CmdVersion, appmessage.CmdVerAck})
		if err != nil {
			panic(errors.Wrap(err, "error registering handshake route"))
		}
		addressesRoute, err := router.AddIncomingRoute("addresses", []appmessage.MessageCommand{appmessage.CmdRequestAddresses, appmessage.CmdAddresses})
		if err != nil {
			panic(errors.Wrap(err, "error registering addresses route"))
		}
		pingRoute, err := router.AddIncomingRoute("ping", []appmessage.MessageCommand{appmessage.CmdPing})
		if err != nil {
panic(errors.Wrap(err, "error registering ping route")) + } + everythingElseRoute, err := router.AddIncomingRoute("everything else", everythingElse) + if err != nil { + panic(errors.Wrap(err, "error registering everythingElseRoute")) + } + + err = router.OutgoingRoute().Enqueue(appmessage.NewMsgReady()) + if err != nil { + panic(errors.Wrap(err, "error sending ready message")) + } + + spawn("netAdapterMock-routeInitializer-sendRoutesToChan", func() { + routesChan <- &Routes{ + netConnection: netConnection, + OutgoingRoute: router.OutgoingRoute(), + IncomingRoute: everythingElseRoute, + handshakeRoute: handshakeRoute, + addressesRoute: addressesRoute, + pingRoute: pingRoute, + } + }) + } + + return routeInitializer, routesChan +} diff --git a/infrastructure/network/netadapter/standalone/routes.go b/infrastructure/network/netadapter/standalone/routes.go new file mode 100644 index 0000000..1beeb5d --- /dev/null +++ b/infrastructure/network/netadapter/standalone/routes.go @@ -0,0 +1,69 @@ +package standalone + +import ( + "time" + + "github.com/spectre-project/spectred/infrastructure/network/netadapter" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// Routes holds the incoming and outgoing routes of a connection created by MinimalNetAdapter +type Routes struct { + netConnection *netadapter.NetConnection + IncomingRoute, OutgoingRoute *router.Route + handshakeRoute *router.Route + addressesRoute *router.Route + pingRoute *router.Route +} + +// WaitForMessageOfType waits for a message of requested type up to `timeout`, skipping all messages of any other type +// received while waiting +func (r *Routes) WaitForMessageOfType(command appmessage.MessageCommand, timeout time.Duration) (appmessage.Message, error) { + timeoutTime := time.Now().Add(timeout) + for { + route := r.chooseRouteForCommand(command) + message, err := 
route.DequeueWithTimeout(timeoutTime.Sub(time.Now())) + if err != nil { + return nil, errors.Wrapf(err, "error waiting for message of type %s", command) + } + if message.Command() == command { + return message, nil + } + } +} + +func (r *Routes) chooseRouteForCommand(command appmessage.MessageCommand) *router.Route { + switch command { + case appmessage.CmdVersion, appmessage.CmdVerAck: + return r.handshakeRoute + case appmessage.CmdRequestAddresses, appmessage.CmdAddresses: + return r.addressesRoute + case appmessage.CmdPing: + return r.pingRoute + default: + return r.IncomingRoute + } +} + +// WaitForDisconnect waits for a disconnect up to `timeout`, skipping all messages received while waiting +func (r *Routes) WaitForDisconnect(timeout time.Duration) error { + timeoutTime := time.Now().Add(timeout) + for { + _, err := r.IncomingRoute.DequeueWithTimeout(timeoutTime.Sub(time.Now())) + if errors.Is(err, router.ErrRouteClosed) { + return nil + } + if err != nil { + return errors.Wrap(err, "error waiting for disconnect") + } + } +} + +// Disconnect closes the connection behind the routes, thus closing all routes +func (r *Routes) Disconnect() { + r.netConnection.Disconnect() +} diff --git a/infrastructure/network/rpcclient/grpcclient/grpcclient.go b/infrastructure/network/rpcclient/grpcclient/grpcclient.go new file mode 100644 index 0000000..7b24f8b --- /dev/null +++ b/infrastructure/network/rpcclient/grpcclient/grpcclient.go @@ -0,0 +1,173 @@ +package grpcclient + +import ( + "context" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" + "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" +) + +// 
// OnErrorHandler defines a handler function for when errors occur
type OnErrorHandler func(err error)

// OnDisconnectedHandler defines a handler function for when the client disconnected
type OnDisconnectedHandler func()

// GRPCClient is a gRPC-based RPC client
type GRPCClient struct {
	stream                protowire.RPC_MessageStreamClient
	connection            *grpc.ClientConn
	onErrorHandler        OnErrorHandler
	onDisconnectedHandler OnDisconnectedHandler
	isError               uint32       // set to 1 (atomically) once an error was handled
	streamLock            sync.RWMutex // guards stream operations; see Disconnect
}

// Connect connects to the RPC server with the given address
func Connect(address string) (*GRPCClient, error) {
	const dialTimeout = 5 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), dialTimeout)
	defer cancel()

	gRPCConnection, err := grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		return nil, errors.Wrapf(err, "error connecting to %s", address)
	}

	grpcClient := protowire.NewRPCClient(gRPCConnection)
	stream, err := grpcClient.MessageStream(context.Background(), grpc.UseCompressor(gzip.Name),
		grpc.MaxCallRecvMsgSize(grpcserver.RPCMaxMessageSize), grpc.MaxCallSendMsgSize(grpcserver.RPCMaxMessageSize))
	if err != nil {
		return nil, errors.Wrapf(err, "error getting client stream for %s", address)
	}
	return &GRPCClient{stream: stream, connection: gRPCConnection}, nil
}

// Close closes the underlying grpc connection
func (c *GRPCClient) Close() error {
	return c.connection.Close()
}

// Disconnect disconnects from the RPC server
func (c *GRPCClient) Disconnect() error {

	// Workaround for the current gRPC bi-directional router loop and error handler
	// NOTE(review): the write-lock is taken only after an error was flagged,
	// so CloseSend waits for in-flight Send/Recv (which hold the read lock)
	// to finish in that case.
	if atomic.LoadUint32(&c.isError) == 1 {
		c.streamLock.Lock()
		defer c.streamLock.Unlock()
	}
	return c.stream.CloseSend()
}

// SetOnErrorHandler sets the client's onErrorHandler
func (c *GRPCClient) SetOnErrorHandler(onErrorHandler OnErrorHandler) {
	c.onErrorHandler = onErrorHandler
}

// SetOnDisconnectedHandler sets the client's onDisconnectedHandler
func (c *GRPCClient) SetOnDisconnectedHandler(onDisconnectedHandler OnDisconnectedHandler) {
	c.onDisconnectedHandler = onDisconnectedHandler
}

// AttachRouter attaches the given router to the client and starts
// sending/receiving messages via it
func (c *GRPCClient) AttachRouter(router *router.Router) {
	spawn("GRPCClient.AttachRouter-sendLoop", func() {
		for {
			message, err := router.OutgoingRoute().Dequeue()
			if err != nil {
				c.handleError(err)
				return
			}
			err = c.send(message)
			if err != nil {
				c.handleError(err)
				return
			}
		}
	})
	spawn("GRPCClient.AttachRouter-receiveLoop", func() {
		for {
			message, err := c.receive()
			if err != nil {
				c.handleError(err)
				return
			}
			err = router.EnqueueIncomingMessage(message)
			if err != nil {
				c.handleError(err)
				return
			}
		}
	})
}

// mutexSend sends a message on the stream while holding the read lock.
func (c *GRPCClient) mutexSend(request *protowire.SpectredMessage) error {
	c.streamLock.RLock()
	defer c.streamLock.RUnlock()

	return c.stream.Send(request)
}

// send converts an app message to protowire form and sends it.
func (c *GRPCClient) send(requestAppMessage appmessage.Message) error {
	request, err := protowire.FromAppMessage(requestAppMessage)
	if err != nil {
		return errors.Wrapf(err, "error converting the request")
	}
	return c.mutexSend(request)
}

// mutexReceive receives a message from the stream while holding the read lock.
func (c *GRPCClient) mutexReceive() (*protowire.SpectredMessage, error) {
	c.streamLock.RLock()
	defer c.streamLock.RUnlock()

	return c.stream.Recv()
}

// receive reads one protowire message and converts it to an app message.
func (c *GRPCClient) receive() (appmessage.Message, error) {
	response, err := c.mutexReceive()
	if err != nil {
		return nil, err
	}
	return response.ToAppMessage()
}

// mutexHandleError flags the error state and invokes the user's error handler
// while holding the stream read lock.
func (c *GRPCClient) mutexHandleError(err error) {

	// lock error handler
	c.streamLock.RLock()
	defer c.streamLock.RUnlock()

	atomic.StoreUint32(&c.isError, 1)
	c.onErrorHandler(err)
}

// handleError routes stream/router errors: io.EOF -> disconnected callback,
// closed route -> disconnect, otherwise the user error handler (or panic).
func (c *GRPCClient) handleError(err error) {
	if errors.Is(err, io.EOF) {
		if c.onDisconnectedHandler != nil {
			c.onDisconnectedHandler()
		}
		return
	}
	if errors.Is(err,
router.ErrRouteClosed) { + err := c.Disconnect() + if err != nil { + panic(err) + } + return + } + if c.onErrorHandler != nil { + c.mutexHandleError(err) + return + } + panic(err) +} diff --git a/infrastructure/network/rpcclient/grpcclient/log.go b/infrastructure/network/rpcclient/grpcclient/log.go new file mode 100644 index 0000000..f518965 --- /dev/null +++ b/infrastructure/network/rpcclient/grpcclient/log.go @@ -0,0 +1,9 @@ +package grpcclient + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("RPCC") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/infrastructure/network/rpcclient/grpcclient/post.go b/infrastructure/network/rpcclient/grpcclient/post.go new file mode 100644 index 0000000..6d4627c --- /dev/null +++ b/infrastructure/network/rpcclient/grpcclient/post.go @@ -0,0 +1,64 @@ +package grpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/server/grpcserver/protowire" + "google.golang.org/protobuf/encoding/protojson" +) + +// PostJSON is a helper function that converts the given requestJSON +// to protobuf, sends it to the RPC server, accepts the first response +// that arrives back, and returns the response as JSON +func (c *GRPCClient) PostJSON(requestJSON string) (string, error) { + requestBytes := []byte(requestJSON) + var parsedRequest protowire.SpectredMessage + err := protojson.Unmarshal(requestBytes, &parsedRequest) + if err != nil { + return "", errors.Wrapf(err, "error parsing the request") + } + response, err := c.Post(&parsedRequest) + if err != nil { + return "", err + } + responseBytes, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(response) + if err != nil { + return "", errors.Wrapf(err, "error parsing the response from the RPC server") + } + return string(responseBytes), nil +} 
+ +// PostAppMessage is a helper function that converts the given +// requestAppMessage to protobuf, sends it to the RPC server, +// accepts the first response that arrives back, and returns the +// response as an appMessage +func (c *GRPCClient) PostAppMessage(requestAppMessage appmessage.Message) (appmessage.Message, error) { + request, err := protowire.FromAppMessage(requestAppMessage) + if err != nil { + return nil, errors.Wrapf(err, "error converting the request") + } + response, err := c.Post(request) + if err != nil { + return nil, err + } + responseAppMessage, err := response.ToAppMessage() + if err != nil { + return nil, errors.Wrapf(err, "error converting the response") + } + return responseAppMessage, nil +} + +// Post is a helper function that sends the given request to the +// RPC server, accepts the first response that arrives back, and +// returns the response +func (c *GRPCClient) Post(request *protowire.SpectredMessage) (*protowire.SpectredMessage, error) { + err := c.stream.Send(request) + if err != nil { + return nil, errors.Wrapf(err, "error sending the request to the RPC server") + } + response, err := c.stream.Recv() + if err != nil { + return nil, errors.Wrapf(err, "error receiving the response from the RPC server") + } + return response, nil +} diff --git a/infrastructure/network/rpcclient/log.go b/infrastructure/network/rpcclient/log.go new file mode 100644 index 0000000..1c7e99a --- /dev/null +++ b/infrastructure/network/rpcclient/log.go @@ -0,0 +1,9 @@ +package rpcclient + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("RPCC") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/infrastructure/network/rpcclient/rpc_ban.go b/infrastructure/network/rpcclient/rpc_ban.go new file mode 100644 index 0000000..d72ed93 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_ban.go @@ -0,0 +1,20 @@ +package rpcclient + 
+import "github.com/spectre-project/spectred/app/appmessage" + +// Ban sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) Ban(ip string) (*appmessage.BanResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewBanRequestMessage(ip)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdBanRequestMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + banResponse := response.(*appmessage.BanResponseMessage) + if banResponse.Error != nil { + return nil, c.convertRPCError(banResponse.Error) + } + return banResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_connect_to_peer.go b/infrastructure/network/rpcclient/rpc_connect_to_peer.go new file mode 100644 index 0000000..223050a --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_connect_to_peer.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// AddPeer sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) AddPeer(address string, isPermanent bool) error { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewAddPeerRequestMessage(address, isPermanent)) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdAddPeerResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + getMempoolEntryResponse := response.(*appmessage.AddPeerResponseMessage) + if getMempoolEntryResponse.Error != nil { + return c.convertRPCError(getMempoolEntryResponse.Error) + } + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_estimate_network_hashes_per_second.go b/infrastructure/network/rpcclient/rpc_estimate_network_hashes_per_second.go new file mode 100644 index 0000000..ed2b324 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_estimate_network_hashes_per_second.go @@ -0,0 +1,20 @@ +package rpcclient 
+ +import "github.com/spectre-project/spectred/app/appmessage" + +// EstimateNetworkHashesPerSecond sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) EstimateNetworkHashesPerSecond(startHash string, windowSize uint32) (*appmessage.EstimateNetworkHashesPerSecondResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewEstimateNetworkHashesPerSecondRequestMessage(startHash, windowSize)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdEstimateNetworkHashesPerSecondResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + estimateNetworkHashesPerSecondResponse := response.(*appmessage.EstimateNetworkHashesPerSecondResponseMessage) + if estimateNetworkHashesPerSecondResponse.Error != nil { + return nil, c.convertRPCError(estimateNetworkHashesPerSecondResponse.Error) + } + return estimateNetworkHashesPerSecondResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_balance_by_address.go b/infrastructure/network/rpcclient/rpc_get_balance_by_address.go new file mode 100644 index 0000000..b1d77f5 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_balance_by_address.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetBalanceByAddress sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBalanceByAddress(address string) (*appmessage.GetBalanceByAddressResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetBalanceByAddressRequest(address)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBalanceByAddressResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getBalanceByAddressResponse := response.(*appmessage.GetBalanceByAddressResponseMessage) + if 
getBalanceByAddressResponse.Error != nil { + return nil, c.convertRPCError(getBalanceByAddressResponse.Error) + } + return getBalanceByAddressResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_balances_by_addresses.go b/infrastructure/network/rpcclient/rpc_get_balances_by_addresses.go new file mode 100644 index 0000000..fd35325 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_balances_by_addresses.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetBalancesByAddresses sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBalancesByAddresses(addresses []string) (*appmessage.GetBalancesByAddressesResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetBalancesByAddressesRequest(addresses)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBalancesByAddressesResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getBalancesByAddressesResponse := response.(*appmessage.GetBalancesByAddressesResponseMessage) + if getBalancesByAddressesResponse.Error != nil { + return nil, c.convertRPCError(getBalancesByAddressesResponse.Error) + } + return getBalancesByAddressesResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_block.go b/infrastructure/network/rpcclient/rpc_get_block.go new file mode 100644 index 0000000..025031c --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_block.go @@ -0,0 +1,23 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetBlock sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBlock(hash string, includeTransactions bool) ( + *appmessage.GetBlockResponseMessage, error) { + + err := c.rpcRouter.outgoingRoute().Enqueue( + appmessage.NewGetBlockRequestMessage(hash, 
includeTransactions)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBlockResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + GetBlockResponse := response.(*appmessage.GetBlockResponseMessage) + if GetBlockResponse.Error != nil { + return nil, c.convertRPCError(GetBlockResponse.Error) + } + return GetBlockResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_block_count.go b/infrastructure/network/rpcclient/rpc_get_block_count.go new file mode 100644 index 0000000..271ef7e --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_block_count.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetBlockCount sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBlockCount() (*appmessage.GetBlockCountResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetBlockCountRequestMessage()) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBlockCountResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getBlockCountResponse := response.(*appmessage.GetBlockCountResponseMessage) + if getBlockCountResponse.Error != nil { + return nil, c.convertRPCError(getBlockCountResponse.Error) + } + return getBlockCountResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_block_dag_info.go b/infrastructure/network/rpcclient/rpc_get_block_dag_info.go new file mode 100644 index 0000000..b03fa9c --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_block_dag_info.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetBlockDAGInfo sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBlockDAGInfo() (*appmessage.GetBlockDAGInfoResponseMessage, 
error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetBlockDAGInfoRequestMessage()) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBlockDAGInfoResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + GetBlockDAGInfoResponse := response.(*appmessage.GetBlockDAGInfoResponseMessage) + if GetBlockDAGInfoResponse.Error != nil { + return nil, c.convertRPCError(GetBlockDAGInfoResponse.Error) + } + return GetBlockDAGInfoResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_block_template.go b/infrastructure/network/rpcclient/rpc_get_block_template.go new file mode 100644 index 0000000..2c7b435 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_block_template.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetBlockTemplate sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBlockTemplate(miningAddress, extraData string) (*appmessage.GetBlockTemplateResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetBlockTemplateRequestMessage(miningAddress, extraData)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBlockTemplateResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getBlockTemplateResponse := response.(*appmessage.GetBlockTemplateResponseMessage) + if getBlockTemplateResponse.Error != nil { + return nil, c.convertRPCError(getBlockTemplateResponse.Error) + } + return getBlockTemplateResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_blocks.go b/infrastructure/network/rpcclient/rpc_get_blocks.go new file mode 100644 index 0000000..e0896ef --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_blocks.go @@ -0,0 +1,23 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// 
GetBlocks sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetBlocks(lowHash string, includeBlocks bool, + includeTransactions bool) (*appmessage.GetBlocksResponseMessage, error) { + + err := c.rpcRouter.outgoingRoute().Enqueue( + appmessage.NewGetBlocksRequestMessage(lowHash, includeBlocks, includeTransactions)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetBlocksResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + GetBlocksResponse := response.(*appmessage.GetBlocksResponseMessage) + if GetBlocksResponse.Error != nil { + return nil, c.convertRPCError(GetBlocksResponse.Error) + } + return GetBlocksResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_chain_from_block.go b/infrastructure/network/rpcclient/rpc_get_chain_from_block.go new file mode 100644 index 0000000..9240801 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_chain_from_block.go @@ -0,0 +1,22 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetVirtualSelectedParentChainFromBlock sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetVirtualSelectedParentChainFromBlock(startHash string, includeAcceptedTransactionIDs bool) ( + *appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue( + appmessage.NewGetVirtualSelectedParentChainFromBlockRequestMessage(startHash, includeAcceptedTransactionIDs)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetVirtualSelectedParentChainFromBlockResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + GetVirtualSelectedParentChainFromBlockResponse := response.(*appmessage.GetVirtualSelectedParentChainFromBlockResponseMessage) + if 
GetVirtualSelectedParentChainFromBlockResponse.Error != nil { + return nil, c.convertRPCError(GetVirtualSelectedParentChainFromBlockResponse.Error) + } + return GetVirtualSelectedParentChainFromBlockResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_coin_supply.go b/infrastructure/network/rpcclient/rpc_get_coin_supply.go new file mode 100644 index 0000000..ab3fbc7 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_coin_supply.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetCoinSupply sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetCoinSupply() (*appmessage.GetCoinSupplyResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetCoinSupplyRequestMessage()) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetCoinSupplyResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + geCoinSupplyResponse := response.(*appmessage.GetCoinSupplyResponseMessage) + if geCoinSupplyResponse.Error != nil { + return nil, c.convertRPCError(geCoinSupplyResponse.Error) + } + return geCoinSupplyResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_connected_peer_info.go b/infrastructure/network/rpcclient/rpc_get_connected_peer_info.go new file mode 100644 index 0000000..c406ba8 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_connected_peer_info.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetConnectedPeerInfo sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetConnectedPeerInfo() (*appmessage.GetConnectedPeerInfoResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetConnectedPeerInfoRequestMessage()) + if err != nil { + return nil, err + } + response, 
err := c.route(appmessage.CmdGetConnectedPeerInfoResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getConnectedPeerInfoResponse := response.(*appmessage.GetConnectedPeerInfoResponseMessage) + if getConnectedPeerInfoResponse.Error != nil { + return nil, c.convertRPCError(getConnectedPeerInfoResponse.Error) + } + return getConnectedPeerInfoResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_headers.go b/infrastructure/network/rpcclient/rpc_get_headers.go new file mode 100644 index 0000000..f97d603 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_headers.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetHeaders sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetHeaders(startHash string, limit uint64, isAscending bool) (*appmessage.GetHeadersResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetHeadersRequestMessage(startHash, limit, isAscending)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetHeadersResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getHeadersResponse := response.(*appmessage.GetHeadersResponseMessage) + if getHeadersResponse.Error != nil { + return nil, c.convertRPCError(getHeadersResponse.Error) + } + return getHeadersResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_info.go b/infrastructure/network/rpcclient/rpc_get_info.go new file mode 100644 index 0000000..2339fdc --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_info.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetInfo sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetInfo() (*appmessage.GetInfoResponseMessage, error) { + err := 
c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetInfoRequestMessage()) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetInfoResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getInfoResponse := response.(*appmessage.GetInfoResponseMessage) + if getInfoResponse.Error != nil { + return nil, c.convertRPCError(getInfoResponse.Error) + } + return getInfoResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_mempool_entries.go b/infrastructure/network/rpcclient/rpc_get_mempool_entries.go new file mode 100644 index 0000000..34948bb --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_mempool_entries.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetMempoolEntries sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetMempoolEntries(includeOrphanPool bool, filterTransactionPool bool) (*appmessage.GetMempoolEntriesResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetMempoolEntriesRequestMessage(includeOrphanPool, filterTransactionPool)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetMempoolEntriesResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getMempoolEntriesResponse := response.(*appmessage.GetMempoolEntriesResponseMessage) + if getMempoolEntriesResponse.Error != nil { + return nil, c.convertRPCError(getMempoolEntriesResponse.Error) + } + return getMempoolEntriesResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_mempool_entries_by_address.go b/infrastructure/network/rpcclient/rpc_get_mempool_entries_by_address.go new file mode 100644 index 0000000..c10feb0 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_mempool_entries_by_address.go @@ -0,0 +1,20 @@ +package rpcclient + +import 
"github.com/spectre-project/spectred/app/appmessage" + +// GetMempoolEntriesByAddresses sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetMempoolEntriesByAddresses(addresses []string, includeOrphanPool bool, filterTransactionPool bool) (*appmessage.GetMempoolEntriesByAddressesResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetMempoolEntriesByAddressesRequestMessage(addresses, includeOrphanPool, filterTransactionPool)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetMempoolEntriesByAddressesResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getMempoolEntriesByAddressesResponse := response.(*appmessage.GetMempoolEntriesByAddressesResponseMessage) + if getMempoolEntriesByAddressesResponse.Error != nil { + return nil, c.convertRPCError(getMempoolEntriesByAddressesResponse.Error) + } + return getMempoolEntriesByAddressesResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_mempool_entry.go b/infrastructure/network/rpcclient/rpc_get_mempool_entry.go new file mode 100644 index 0000000..a119b7c --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_mempool_entry.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetMempoolEntry sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetMempoolEntry(txID string, includeOrphanPool bool, filterTransactionPool bool) (*appmessage.GetMempoolEntryResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetMempoolEntryRequestMessage(txID, includeOrphanPool, filterTransactionPool)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetMempoolEntryResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getMempoolEntryResponse := 
response.(*appmessage.GetMempoolEntryResponseMessage) + if getMempoolEntryResponse.Error != nil { + return nil, c.convertRPCError(getMempoolEntryResponse.Error) + } + return getMempoolEntryResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_peer_addresses.go b/infrastructure/network/rpcclient/rpc_get_peer_addresses.go new file mode 100644 index 0000000..2834928 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_peer_addresses.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetPeerAddresses sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetPeerAddresses() (*appmessage.GetPeerAddressesResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetPeerAddressesRequestMessage()) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetPeerAddressesResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getPeerAddressesResponse := response.(*appmessage.GetPeerAddressesResponseMessage) + if getPeerAddressesResponse.Error != nil { + return nil, c.convertRPCError(getPeerAddressesResponse.Error) + } + return getPeerAddressesResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_selected_tip_hash.go b/infrastructure/network/rpcclient/rpc_get_selected_tip_hash.go new file mode 100644 index 0000000..451f255 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_selected_tip_hash.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetSelectedTipHash sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetSelectedTipHash() (*appmessage.GetSelectedTipHashResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetSelectedTipHashRequestMessage()) + if err != nil { + return nil, 
err + } + response, err := c.route(appmessage.CmdGetSelectedTipHashResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getSelectedTipHashResponse := response.(*appmessage.GetSelectedTipHashResponseMessage) + if getSelectedTipHashResponse.Error != nil { + return nil, c.convertRPCError(getSelectedTipHashResponse.Error) + } + return getSelectedTipHashResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_subnetwork.go b/infrastructure/network/rpcclient/rpc_get_subnetwork.go new file mode 100644 index 0000000..2a4f9d2 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_subnetwork.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetSubnetwork sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetSubnetwork(subnetworkID string) (*appmessage.GetSubnetworkResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetSubnetworkRequestMessage(subnetworkID)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetSubnetworkResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getSubnetworkResponse := response.(*appmessage.GetSubnetworkResponseMessage) + if getSubnetworkResponse.Error != nil { + return nil, c.convertRPCError(getSubnetworkResponse.Error) + } + return getSubnetworkResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_utxos_by_addresses.go b/infrastructure/network/rpcclient/rpc_get_utxos_by_addresses.go new file mode 100644 index 0000000..9a0b3db --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_utxos_by_addresses.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetUTXOsByAddresses sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) 
GetUTXOsByAddresses(addresses []string) (*appmessage.GetUTXOsByAddressesResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetUTXOsByAddressesRequestMessage(addresses)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetUTXOsByAddressesResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getUTXOsByAddressesResponse := response.(*appmessage.GetUTXOsByAddressesResponseMessage) + if getUTXOsByAddressesResponse.Error != nil { + return nil, c.convertRPCError(getUTXOsByAddressesResponse.Error) + } + return getUTXOsByAddressesResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_get_virtual_selected_parent_blue_score.go b/infrastructure/network/rpcclient/rpc_get_virtual_selected_parent_blue_score.go new file mode 100644 index 0000000..5e3f023 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_get_virtual_selected_parent_blue_score.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// GetVirtualSelectedParentBlueScore sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) GetVirtualSelectedParentBlueScore() (*appmessage.GetVirtualSelectedParentBlueScoreResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewGetVirtualSelectedParentBlueScoreRequestMessage()) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdGetVirtualSelectedParentBlueScoreResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + getVirtualSelectedParentBlueScoreResponse := response.(*appmessage.GetVirtualSelectedParentBlueScoreResponseMessage) + if getVirtualSelectedParentBlueScoreResponse.Error != nil { + return nil, c.convertRPCError(getVirtualSelectedParentBlueScoreResponse.Error) + } + return getVirtualSelectedParentBlueScoreResponse, nil +} diff --git 
a/infrastructure/network/rpcclient/rpc_on_block_added.go b/infrastructure/network/rpcclient/rpc_on_block_added.go new file mode 100644 index 0000000..e47bb09 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_block_added.go @@ -0,0 +1,38 @@ +package rpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForBlockAddedNotifications sends an RPC request respective to the function's name and returns the RPC server's response. +// Additionally, it starts listening for the appropriate notification using the given handler function +func (c *RPCClient) RegisterForBlockAddedNotifications(onBlockAdded func(notification *appmessage.BlockAddedNotificationMessage)) error { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyBlockAddedRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyBlockAddedResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyBlockAddedResponse := response.(*appmessage.NotifyBlockAddedResponseMessage) + if notifyBlockAddedResponse.Error != nil { + return c.convertRPCError(notifyBlockAddedResponse.Error) + } + spawn("RegisterForBlockAddedNotifications", func() { + for { + notification, err := c.route(appmessage.CmdBlockAddedNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + blockAddedNotification := notification.(*appmessage.BlockAddedNotificationMessage) + onBlockAdded(blockAddedNotification) + } + }) + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_on_chain_changed.go b/infrastructure/network/rpcclient/rpc_on_chain_changed.go new file mode 100644 index 0000000..bbccc58 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_chain_changed.go @@ -0,0 +1,41 @@ +package rpcclient + +import ( + 
"github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForVirtualSelectedParentChainChangedNotifications sends an RPC request respective to the function's name and returns the RPC server's response. +// Additionally, it starts listening for the appropriate notification using the given handler function +func (c *RPCClient) RegisterForVirtualSelectedParentChainChangedNotifications(includeAcceptedTransactionIDs bool, + onChainChanged func(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage)) error { + + err := c.rpcRouter.outgoingRoute().Enqueue( + appmessage.NewNotifyVirtualSelectedParentChainChangedRequestMessage(includeAcceptedTransactionIDs)) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyVirtualSelectedParentChainChangedResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyChainChangedResponse := response.(*appmessage.NotifyVirtualSelectedParentChainChangedResponseMessage) + if notifyChainChangedResponse.Error != nil { + return c.convertRPCError(notifyChainChangedResponse.Error) + } + spawn("RegisterForVirtualSelectedParentChainChangedNotifications", func() { + for { + notification, err := c.route(appmessage.CmdVirtualSelectedParentChainChangedNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + ChainChangedNotification := notification.(*appmessage.VirtualSelectedParentChainChangedNotificationMessage) + onChainChanged(ChainChangedNotification) + } + }) + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_on_finality_conflicts.go b/infrastructure/network/rpcclient/rpc_on_finality_conflicts.go new file mode 100644 index 0000000..694c9c5 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_finality_conflicts.go @@ -0,0 +1,54 @@ +package rpcclient + 
+import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForFinalityConflictsNotifications sends an RPC request respective to the function's name and returns the RPC server's response. +// Additionally, it starts listening for the appropriate notification using the given handler function +func (c *RPCClient) RegisterForFinalityConflictsNotifications( + onFinalityConflict func(notification *appmessage.FinalityConflictNotificationMessage), + onFinalityConflictResolved func(notification *appmessage.FinalityConflictResolvedNotificationMessage)) error { + + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyFinalityConflictsRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyFinalityConflictsResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyFinalityConflictsResponse := response.(*appmessage.NotifyFinalityConflictsResponseMessage) + if notifyFinalityConflictsResponse.Error != nil { + return c.convertRPCError(notifyFinalityConflictsResponse.Error) + } + spawn("RegisterForFinalityConflictsNotifications-finalityConflict", func() { + for { + notification, err := c.route(appmessage.CmdFinalityConflictNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + finalityConflictNotification := notification.(*appmessage.FinalityConflictNotificationMessage) + onFinalityConflict(finalityConflictNotification) + } + }) + spawn("RegisterForFinalityConflictsNotifications-finalityConflictResolved", func() { + for { + notification, err := c.route(appmessage.CmdFinalityConflictResolvedNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + finalityConflictResolvedNotification := 
notification.(*appmessage.FinalityConflictResolvedNotificationMessage) + onFinalityConflictResolved(finalityConflictResolvedNotification) + } + }) + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_on_new_block_template.go b/infrastructure/network/rpcclient/rpc_on_new_block_template.go new file mode 100644 index 0000000..ee1b43f --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_new_block_template.go @@ -0,0 +1,38 @@ +package rpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForNewBlockTemplateNotifications sends an RPC request respective to the function's name and returns the RPC server's response. +// Additionally, it starts listening for the appropriate notification using the given handler function +func (c *RPCClient) RegisterForNewBlockTemplateNotifications(onNewBlockTemplate func(notification *appmessage.NewBlockTemplateNotificationMessage)) error { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyNewBlockTemplateRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyNewBlockTemplateResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyNewBlockTemplateResponse := response.(*appmessage.NotifyNewBlockTemplateResponseMessage) + if notifyNewBlockTemplateResponse.Error != nil { + return c.convertRPCError(notifyNewBlockTemplateResponse.Error) + } + spawn("RegisterForNewBlockTemplateNotifications", func() { + for { + notification, err := c.route(appmessage.CmdNewBlockTemplateNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + NewBlockTemplateNotification := notification.(*appmessage.NewBlockTemplateNotificationMessage) + onNewBlockTemplate(NewBlockTemplateNotification) + } + }) + return nil +} diff --git 
a/infrastructure/network/rpcclient/rpc_on_pruning_point_utxo_set_override.go b/infrastructure/network/rpcclient/rpc_on_pruning_point_utxo_set_override.go new file mode 100644 index 0000000..d120271 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_pruning_point_utxo_set_override.go @@ -0,0 +1,58 @@ +package rpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterPruningPointUTXOSetNotifications sends an RPC request respective to the function's name and returns the RPC server's response. +// Additionally, it starts listening for the appropriate notification using the given handler function +func (c *RPCClient) RegisterPruningPointUTXOSetNotifications(onPruningPointUTXOSetNotifications func()) error { + + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyPruningPointUTXOSetOverrideRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyPruningPointUTXOSetOverrideResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyPruningPointUTXOSetOverrideResponse := response.(*appmessage.NotifyPruningPointUTXOSetOverrideResponseMessage) + if notifyPruningPointUTXOSetOverrideResponse.Error != nil { + return c.convertRPCError(notifyPruningPointUTXOSetOverrideResponse.Error) + } + spawn("RegisterPruningPointUTXOSetNotifications", func() { + for { + notification, err := c.route(appmessage.CmdPruningPointUTXOSetOverrideNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + _ = notification.(*appmessage.PruningPointUTXOSetOverrideNotificationMessage) // Sanity check the type + onPruningPointUTXOSetNotifications() + } + }) + return nil +} + +// UnregisterPruningPointUTXOSetNotifications sends an RPC request respective to the function's name and returns the RPC 
server's response. +// Additionally, it stops listening for the appropriate notification using the given handler function +func (c *RPCClient) UnregisterPruningPointUTXOSetNotifications() error { + + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewStopNotifyingPruningPointUTXOSetOverrideRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdStopNotifyingPruningPointUTXOSetOverrideResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + stopNotifyPruningPointUTXOSetOverrideResponse := response.(*appmessage.StopNotifyingPruningPointUTXOSetOverrideResponseMessage) + if stopNotifyPruningPointUTXOSetOverrideResponse.Error != nil { + return c.convertRPCError(stopNotifyPruningPointUTXOSetOverrideResponse.Error) + } + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_on_utxos_changed.go b/infrastructure/network/rpcclient/rpc_on_utxos_changed.go new file mode 100644 index 0000000..30f2580 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_utxos_changed.go @@ -0,0 +1,40 @@ +package rpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForUTXOsChangedNotifications sends an RPC request respective to the function's name and returns the RPC server's response. 
+// Additionally, it starts listening for the appropriate notification using the given handler function +func (c *RPCClient) RegisterForUTXOsChangedNotifications(addresses []string, + onUTXOsChanged func(notification *appmessage.UTXOsChangedNotificationMessage)) error { + + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyUTXOsChangedRequestMessage(addresses)) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyUTXOsChangedResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyUTXOsChangedResponse := response.(*appmessage.NotifyUTXOsChangedResponseMessage) + if notifyUTXOsChangedResponse.Error != nil { + return c.convertRPCError(notifyUTXOsChangedResponse.Error) + } + spawn("RegisterForUTXOsChangedNotifications", func() { + for { + notification, err := c.route(appmessage.CmdUTXOsChangedNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + UTXOsChangedNotification := notification.(*appmessage.UTXOsChangedNotificationMessage) + onUTXOsChanged(UTXOsChangedNotification) + } + }) + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_on_virtual_daa_score_changed.go b/infrastructure/network/rpcclient/rpc_on_virtual_daa_score_changed.go new file mode 100644 index 0000000..a3a0d62 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_virtual_daa_score_changed.go @@ -0,0 +1,41 @@ +package rpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForVirtualDaaScoreChangedNotifications sends an RPC request respective to the function's +// name and returns the RPC server's response. 
Additionally, it starts listening for the appropriate notification +// using the given handler function +func (c *RPCClient) RegisterForVirtualDaaScoreChangedNotifications( + onVirtualDaaScoreChanged func(notification *appmessage.VirtualDaaScoreChangedNotificationMessage)) error { + + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyVirtualDaaScoreChangedRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyVirtualDaaScoreChangedResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyVirtualDaaScoreChangedResponse := response.(*appmessage.NotifyVirtualDaaScoreChangedResponseMessage) + if notifyVirtualDaaScoreChangedResponse.Error != nil { + return c.convertRPCError(notifyVirtualDaaScoreChangedResponse.Error) + } + spawn("RegisterForVirtualDaaScoreChangedNotifications", func() { + for { + notification, err := c.route(appmessage.CmdVirtualDaaScoreChangedNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + VirtualDaaScoreChangedNotification := notification.(*appmessage.VirtualDaaScoreChangedNotificationMessage) + onVirtualDaaScoreChanged(VirtualDaaScoreChangedNotification) + } + }) + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_on_virtual_selected_parent_blue_score_changed.go b/infrastructure/network/rpcclient/rpc_on_virtual_selected_parent_blue_score_changed.go new file mode 100644 index 0000000..b3c5510 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_on_virtual_selected_parent_blue_score_changed.go @@ -0,0 +1,41 @@ +package rpcclient + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +// RegisterForVirtualSelectedParentBlueScoreChangedNotifications sends an RPC request respective to the function's +// name and returns the RPC server's 
response. Additionally, it starts listening for the appropriate notification +// using the given handler function +func (c *RPCClient) RegisterForVirtualSelectedParentBlueScoreChangedNotifications( + onVirtualSelectedParentBlueScoreChanged func(notification *appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage)) error { + + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewNotifyVirtualSelectedParentBlueScoreChangedRequestMessage()) + if err != nil { + return err + } + response, err := c.route(appmessage.CmdNotifyVirtualSelectedParentBlueScoreChangedResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return err + } + notifyVirtualSelectedParentBlueScoreChangedResponse := response.(*appmessage.NotifyVirtualSelectedParentBlueScoreChangedResponseMessage) + if notifyVirtualSelectedParentBlueScoreChangedResponse.Error != nil { + return c.convertRPCError(notifyVirtualSelectedParentBlueScoreChangedResponse.Error) + } + spawn("RegisterForVirtualSelectedParentBlueScoreChangedNotifications", func() { + for { + notification, err := c.route(appmessage.CmdVirtualSelectedParentBlueScoreChangedNotificationMessage).Dequeue() + if err != nil { + if errors.Is(err, routerpkg.ErrRouteClosed) { + break + } + panic(err) + } + VirtualSelectedParentBlueScoreChangedNotification := notification.(*appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage) + onVirtualSelectedParentBlueScoreChanged(VirtualSelectedParentBlueScoreChangedNotification) + } + }) + return nil +} diff --git a/infrastructure/network/rpcclient/rpc_resolve_finality_conflict.go b/infrastructure/network/rpcclient/rpc_resolve_finality_conflict.go new file mode 100644 index 0000000..2b1bbe4 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_resolve_finality_conflict.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// ResolveFinalityConflict sends an RPC request respective to the function's name and returns the RPC 
server's response +func (c *RPCClient) ResolveFinalityConflict(finalityBlockHash string) (*appmessage.ResolveFinalityConflictResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewResolveFinalityConflictRequestMessage(finalityBlockHash)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdResolveFinalityConflictResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + resolveFinalityConflictResponse := response.(*appmessage.ResolveFinalityConflictResponseMessage) + if resolveFinalityConflictResponse.Error != nil { + return nil, c.convertRPCError(resolveFinalityConflictResponse.Error) + } + return resolveFinalityConflictResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_send_raw_transaction.go b/infrastructure/network/rpcclient/rpc_send_raw_transaction.go new file mode 100644 index 0000000..41cff0b --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_send_raw_transaction.go @@ -0,0 +1,23 @@ +package rpcclient + +import ( + "github.com/spectre-project/spectred/app/appmessage" +) + +// SubmitTransaction sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) SubmitTransaction(transaction *appmessage.RPCTransaction, allowOrphan bool) (*appmessage.SubmitTransactionResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewSubmitTransactionRequestMessage(transaction, allowOrphan)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdSubmitTransactionResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + submitTransactionResponse := response.(*appmessage.SubmitTransactionResponseMessage) + if submitTransactionResponse.Error != nil { + return nil, c.convertRPCError(submitTransactionResponse.Error) + } + + return submitTransactionResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpc_submit_block.go 
b/infrastructure/network/rpcclient/rpc_submit_block.go new file mode 100644 index 0000000..bbf78fa --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_submit_block.go @@ -0,0 +1,33 @@ +package rpcclient + +import ( + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" +) + +func (c *RPCClient) submitBlock(block *externalapi.DomainBlock, allowNonDAABlocks bool) (appmessage.RejectReason, error) { + err := c.rpcRouter.outgoingRoute().Enqueue( + appmessage.NewSubmitBlockRequestMessage(appmessage.DomainBlockToRPCBlock(block), allowNonDAABlocks)) + if err != nil { + return appmessage.RejectReasonNone, err + } + response, err := c.route(appmessage.CmdSubmitBlockResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return appmessage.RejectReasonNone, err + } + submitBlockResponse := response.(*appmessage.SubmitBlockResponseMessage) + if submitBlockResponse.Error != nil { + return submitBlockResponse.RejectReason, c.convertRPCError(submitBlockResponse.Error) + } + return appmessage.RejectReasonNone, nil +} + +// SubmitBlock sends an RPC request respective to the function's name and returns the RPC server's response +func (c *RPCClient) SubmitBlock(block *externalapi.DomainBlock) (appmessage.RejectReason, error) { + return c.submitBlock(block, false) +} + +// SubmitBlockAlsoIfNonDAA operates the same as SubmitBlock with the exception that `allowNonDAABlocks` is set to true +func (c *RPCClient) SubmitBlockAlsoIfNonDAA(block *externalapi.DomainBlock) (appmessage.RejectReason, error) { + return c.submitBlock(block, true) +} diff --git a/infrastructure/network/rpcclient/rpc_unban.go b/infrastructure/network/rpcclient/rpc_unban.go new file mode 100644 index 0000000..ebbf3d4 --- /dev/null +++ b/infrastructure/network/rpcclient/rpc_unban.go @@ -0,0 +1,20 @@ +package rpcclient + +import "github.com/spectre-project/spectred/app/appmessage" + +// Unban sends an RPC request respective to the 
function's name and returns the RPC server's response +func (c *RPCClient) Unban(ip string) (*appmessage.UnbanResponseMessage, error) { + err := c.rpcRouter.outgoingRoute().Enqueue(appmessage.NewUnbanRequestMessage(ip)) + if err != nil { + return nil, err + } + response, err := c.route(appmessage.CmdUnbanResponseMessage).DequeueWithTimeout(c.timeout) + if err != nil { + return nil, err + } + unbanResponse := response.(*appmessage.UnbanResponseMessage) + if unbanResponse.Error != nil { + return nil, c.convertRPCError(unbanResponse.Error) + } + return unbanResponse, nil +} diff --git a/infrastructure/network/rpcclient/rpcclient.go b/infrastructure/network/rpcclient/rpcclient.go new file mode 100644 index 0000000..c15cfa8 --- /dev/null +++ b/infrastructure/network/rpcclient/rpcclient.go @@ -0,0 +1,190 @@ +package rpcclient + +import ( + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/logger" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient/grpcclient" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/version" +) + +const defaultTimeout = 30 * time.Second + +// RPCClient is an RPC client +type RPCClient struct { + *grpcclient.GRPCClient + + rpcAddress string + rpcRouter *rpcRouter + isConnected uint32 + isClosed uint32 + isReconnecting uint32 + lastDisconnectedTime time.Time + + timeout time.Duration +} + +// NewRPCClient creates a new RPC client with a default call timeout value +func NewRPCClient(rpcAddress string) (*RPCClient, error) { + rpcClient := &RPCClient{ + rpcAddress: rpcAddress, + timeout: defaultTimeout, + } + err := rpcClient.connect() + if err != nil { + return nil, err + } + + return rpcClient, nil +} + +func (c *RPCClient) connect() error { + rpcClient, err :=
grpcclient.Connect(c.rpcAddress) + if err != nil { + return errors.Wrapf(err, "error connecting to address %s", c.rpcAddress) + } + rpcClient.SetOnDisconnectedHandler(c.handleClientDisconnected) + rpcClient.SetOnErrorHandler(c.handleClientError) + rpcRouter, err := buildRPCRouter() + if err != nil { + return errors.Wrapf(err, "error creating the RPC router") + } + + atomic.StoreUint32(&c.isConnected, 1) + rpcClient.AttachRouter(rpcRouter.router) + + c.GRPCClient = rpcClient + c.rpcRouter = rpcRouter + + log.Infof("Connected to %s", c.rpcAddress) + + getInfoResponse, err := c.GetInfo() + if err != nil { + return errors.Wrapf(err, "error making GetInfo request") + } + + localVersion := version.Version() + remoteVersion := getInfoResponse.ServerVersion + + if localVersion != remoteVersion { + log.Warnf("version mismatch, client: %s, server: %s - expected responses and requests may deviate", localVersion, remoteVersion) + } + + return nil +} + +func (c *RPCClient) disconnect() error { + err := c.GRPCClient.Disconnect() + if err != nil { + return err + } + log.Infof("Disconnected from %s", c.rpcAddress) + return nil +} + +// Reconnect forces the client to attempt to reconnect to the address +// this client initially was connected to +func (c *RPCClient) Reconnect() error { + if atomic.LoadUint32(&c.isClosed) == 1 { + return errors.Errorf("Cannot reconnect from a closed client") + } + + // Protect against multiple threads attempting to reconnect at the same time + swapped := atomic.CompareAndSwapUint32(&c.isReconnecting, 0, 1) + if !swapped { + // Already reconnecting + return nil + } + defer atomic.StoreUint32(&c.isReconnecting, 0) + + log.Warnf("Attempting to reconnect to %s", c.rpcAddress) + + // Disconnect if we're connected + if atomic.LoadUint32(&c.isConnected) == 1 { + err := c.disconnect() + if err != nil { + return err + } + } + + // Attempt to connect until we succeed + for { + const retryDelay = 10 * time.Second + if time.Since(c.lastDisconnectedTime) > 
retryDelay { + err := c.connect() + if err == nil { + return nil + } + log.Warnf("Could not automatically reconnect to %s: %s", c.rpcAddress, err) + log.Warnf("Retrying in %s", retryDelay) + } + time.Sleep(retryDelay) + } +} + +func (c *RPCClient) handleClientDisconnected() { + atomic.StoreUint32(&c.isConnected, 0) + if atomic.LoadUint32(&c.isClosed) == 0 { + err := c.disconnect() + if err != nil { + panic(err) + } + c.lastDisconnectedTime = time.Now() + err = c.Reconnect() + if err != nil { + panic(err) + } + } +} + +func (c *RPCClient) handleClientError(err error) { + if atomic.LoadUint32(&c.isClosed) == 1 { + return + } + log.Warnf("Received error from client: %s", err) + c.handleClientDisconnected() +} + +// SetTimeout sets the timeout by which to wait for RPC responses +func (c *RPCClient) SetTimeout(timeout time.Duration) { + c.timeout = timeout +} + +// Close closes the RPC client +func (c *RPCClient) Close() error { + swapped := atomic.CompareAndSwapUint32(&c.isClosed, 0, 1) + if !swapped { + return errors.Errorf("Cannot close a client that had already been closed") + } + c.rpcRouter.router.Close() + return c.GRPCClient.Close() +} + +// Address returns the address the RPC client connected to +func (c *RPCClient) Address() string { + return c.rpcAddress +} + +func (c *RPCClient) route(command appmessage.MessageCommand) *routerpkg.Route { + return c.rpcRouter.routes[command] +} + +// ErrRPC is an error in the RPC protocol +var ErrRPC = errors.New("rpc error") + +func (c *RPCClient) convertRPCError(rpcError *appmessage.RPCError) error { + return errors.Wrap(ErrRPC, rpcError.Message) +} + +// SetLogger uses a specified Logger to output package logging info +func (c *RPCClient) SetLogger(backend *logger.Backend, level logger.Level) { + const logSubsystem = "RPCC" + log = backend.Logger(logSubsystem) + log.SetLevel(level) + spawn = panics.GoroutineWrapperFunc(log) +} diff --git a/infrastructure/network/rpcclient/rpcrouter.go 
b/infrastructure/network/rpcclient/rpcrouter.go new file mode 100644 index 0000000..5f56dcf --- /dev/null +++ b/infrastructure/network/rpcclient/rpcrouter.go @@ -0,0 +1,32 @@ +package rpcclient + +import ( + "github.com/spectre-project/spectred/app/appmessage" + routerpkg "github.com/spectre-project/spectred/infrastructure/network/netadapter/router" +) + +type rpcRouter struct { + router *routerpkg.Router + routes map[appmessage.MessageCommand]*routerpkg.Route +} + +func buildRPCRouter() (*rpcRouter, error) { + router := routerpkg.NewRouter("RPC server") + routes := make(map[appmessage.MessageCommand]*routerpkg.Route, len(appmessage.RPCMessageCommandToString)) + for messageType := range appmessage.RPCMessageCommandToString { + route, err := router.AddIncomingRoute("rpc client", []appmessage.MessageCommand{messageType}) + if err != nil { + return nil, err + } + routes[messageType] = route + } + + return &rpcRouter{ + router: router, + routes: routes, + }, nil +} + +func (r *rpcRouter) outgoingRoute() *routerpkg.Route { + return r.router.OutgoingRoute() +} diff --git a/infrastructure/os/execenv/initialize.go b/infrastructure/os/execenv/initialize.go new file mode 100644 index 0000000..e53a481 --- /dev/null +++ b/infrastructure/os/execenv/initialize.go @@ -0,0 +1,22 @@ +package execenv + +import ( + "fmt" + "os" + "runtime" + + "github.com/spectre-project/spectred/infrastructure/os/limits" +) + +// Initialize initializes the execution environment required to run spectred +func Initialize(desiredLimits *limits.DesiredLimits) { + // Use all processor cores. + runtime.GOMAXPROCS(runtime.NumCPU()) + + // Up some limits. 
+ if err := limits.SetLimits(desiredLimits); err != nil { + fmt.Fprintf(os.Stderr, "failed to set limits: %s\n", err) + os.Exit(1) + } + +} diff --git a/infrastructure/os/limits/desired_limits.go b/infrastructure/os/limits/desired_limits.go new file mode 100644 index 0000000..94e4a10 --- /dev/null +++ b/infrastructure/os/limits/desired_limits.go @@ -0,0 +1,7 @@ +package limits + +// DesiredLimits is a structure that specifies the limits desired by a running application +type DesiredLimits struct { + FileLimitWant uint64 + FileLimitMin uint64 +} diff --git a/infrastructure/os/limits/limits_plan9.go b/infrastructure/os/limits/limits_plan9.go new file mode 100644 index 0000000..9897e7d --- /dev/null +++ b/infrastructure/os/limits/limits_plan9.go @@ -0,0 +1,10 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package limits + +// SetLimits is a no-op on Plan 9 due to the lack of process accounting. +func SetLimits(*DesiredLimits) error { + return nil +} diff --git a/infrastructure/os/limits/limits_unix.go b/infrastructure/os/limits/limits_unix.go new file mode 100644 index 0000000..7bb7d63 --- /dev/null +++ b/infrastructure/os/limits/limits_unix.go @@ -0,0 +1,49 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package limits + +import ( + "syscall" + + "github.com/pkg/errors" +) + +// SetLimits raises some process limits to values which allow spectred and +// associated utilities to run. 
+func SetLimits(desiredLimits *DesiredLimits) error { + var rLimit syscall.Rlimit + + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) + if err != nil { + return err + } + if rLimit.Cur > desiredLimits.FileLimitWant { + return nil + } + if rLimit.Max < desiredLimits.FileLimitMin { + err = errors.Errorf("need at least %d file descriptors", + desiredLimits.FileLimitMin) + return err + } + if rLimit.Max < desiredLimits.FileLimitWant { + rLimit.Cur = rLimit.Max + } else { + rLimit.Cur = desiredLimits.FileLimitWant + } + err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit) + if err != nil { + // try min value + rLimit.Cur = desiredLimits.FileLimitMin + err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit) + if err != nil { + return err + } + } + + return nil +} diff --git a/infrastructure/os/limits/limits_windows.go b/infrastructure/os/limits/limits_windows.go new file mode 100644 index 0000000..e0ef3da --- /dev/null +++ b/infrastructure/os/limits/limits_windows.go @@ -0,0 +1,10 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package limits + +// SetLimits is a no-op on Windows since it's not required there. +func SetLimits(*DesiredLimits) error { + return nil +} diff --git a/infrastructure/os/signal/log.go b/infrastructure/os/signal/log.go new file mode 100644 index 0000000..d3bfb9a --- /dev/null +++ b/infrastructure/os/signal/log.go @@ -0,0 +1,11 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package signal + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var sprdLog = logger.RegisterSubSystem("SPRD") diff --git a/infrastructure/os/signal/signal.go b/infrastructure/os/signal/signal.go new file mode 100644 index 0000000..20d28e3 --- /dev/null +++ b/infrastructure/os/signal/signal.go @@ -0,0 +1,71 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package signal + +import ( + "os" + "os/signal" +) + +// ShutdownRequestChannel is used to initiate shutdown from one of the +// subsystems using the same code paths as when an interrupt signal is received. +var ShutdownRequestChannel = make(chan struct{}) + +// interruptSignals defines the default signals to catch in order to do a proper +// shutdown. This may be modified during init depending on the platform. +var interruptSignals = []os.Signal{os.Interrupt} + +// InterruptListener listens for OS Signals such as SIGINT (Ctrl+C) and shutdown +// requests from shutdownRequestChannel. It returns a channel that is closed +// when either signal is received. +func InterruptListener() chan struct{} { + c := make(chan struct{}) + go func() { + interruptChannel := make(chan os.Signal, 1) + signal.Notify(interruptChannel, interruptSignals...) + + // Listen for initial shutdown signal and close the returned + // channel to notify the caller. + select { + case sig := <-interruptChannel: + sprdLog.Infof("Received signal (%s). Shutting down...", + sig) + + case <-ShutdownRequestChannel: + sprdLog.Info("Shutdown requested. Shutting down...") + } + close(c) + + // Listen for repeated signals and display a message so the user + // knows the shutdown is in progress and the process is not + // hung. + for { + select { + case sig := <-interruptChannel: + sprdLog.Infof("Received signal (%s). 
Already "+ + "shutting down...", sig) + + case <-ShutdownRequestChannel: + sprdLog.Info("Shutdown requested. Already " + + "shutting down...") + } + } + }() + + return c +} + +// InterruptRequested returns true when the channel returned by +// InterruptListener was closed. This simplifies early shutdown slightly since +// the caller can just use an if statement instead of a select. +func InterruptRequested(interrupted <-chan struct{}) bool { + select { + case <-interrupted: + return true + default: + } + + return false +} diff --git a/infrastructure/os/signal/signalsigterm.go b/infrastructure/os/signal/signalsigterm.go new file mode 100644 index 0000000..38ddcf3 --- /dev/null +++ b/infrastructure/os/signal/signalsigterm.go @@ -0,0 +1,17 @@ +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package signal + +import ( + "os" + "syscall" +) + +func init() { + interruptSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} +} diff --git a/infrastructure/os/winservice/common.go b/infrastructure/os/winservice/common.go new file mode 100644 index 0000000..c431171 --- /dev/null +++ b/infrastructure/os/winservice/common.go @@ -0,0 +1,17 @@ +package winservice + +import "github.com/spectre-project/spectred/infrastructure/config" + +// ServiceDescription contains information about a service, needed to administer it +type ServiceDescription struct { + Name string + DisplayName string + Description string +} + +// MainFunc specifies the signature of an application's main function to be able to run as a windows service +type MainFunc func(startedChan chan<- struct{}) error + +// WinServiceMain is only invoked on Windows. It detects when spectred is running +// as a service and reacts accordingly. 
+var WinServiceMain = func(MainFunc, *ServiceDescription, *config.Config) (bool, error) { return false, nil } diff --git a/infrastructure/os/winservice/log.go b/infrastructure/os/winservice/log.go new file mode 100644 index 0000000..fb571af --- /dev/null +++ b/infrastructure/os/winservice/log.go @@ -0,0 +1,13 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package winservice + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("CNFG") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/infrastructure/os/winservice/service_command_windows.go b/infrastructure/os/winservice/service_command_windows.go new file mode 100644 index 0000000..232e5a5 --- /dev/null +++ b/infrastructure/os/winservice/service_command_windows.go @@ -0,0 +1,178 @@ +package winservice + +import ( + "os" + "path/filepath" + "time" + + "github.com/btcsuite/winsvc/eventlog" + + "github.com/btcsuite/winsvc/mgr" + + "github.com/btcsuite/winsvc/svc" + "github.com/pkg/errors" +) + +// performServiceCommand attempts to run one of the supported service commands +// provided on the command line via the service command flag. An appropriate +// error is returned if an invalid command is specified. +func (s *Service) performServiceCommand() error { + var err error + command := s.cfg.ServiceOptions.ServiceCommand + switch command { + case "install": + err = s.installService() + + case "remove": + err = s.removeService() + + case "start": + err = s.startService() + + case "stop": + err = s.controlService(svc.Stop, svc.Stopped) + + default: + err = errors.Errorf("invalid service command [%s]", command) + } + + return err +} + +// installService attempts to install the spectred service. 
Typically this should +// be done by the msi installer, but it is provided here since it can be useful +// for development. +func (s *Service) installService() error { + // Get the path of the current executable. This is needed because + // os.Args[0] can vary depending on how the application was launched. + // For example, under cmd.exe it will only be the name of the app + // without the path or extension, but under mingw it will be the full + // path including the extension. + exePath, err := filepath.Abs(os.Args[0]) + if err != nil { + return err + } + if filepath.Ext(exePath) == "" { + exePath += ".exe" + } + + // Connect to the windows service manager. + serviceManager, err := mgr.Connect() + if err != nil { + return err + } + defer serviceManager.Disconnect() + + // Ensure the service doesn't already exist. + service, err := serviceManager.OpenService(s.description.Name) + if err == nil { + service.Close() + return errors.Errorf("service %s already exists", s.description.Name) + } + + // Install the service. + service, err = serviceManager.CreateService(s.description.Name, exePath, mgr.Config{ + DisplayName: s.description.DisplayName, + Description: s.description.Description, + }) + if err != nil { + return err + } + defer service.Close() + + // Support events to the event log using the standard "standard" Windows + // EventCreate.exe message file. This allows easy logging of custom + // messges instead of needing to create our own message catalog. + err = eventlog.Remove(s.description.Name) + if err != nil { + return err + } + eventsSupported := uint32(eventlog.Error | eventlog.Warning | eventlog.Info) + return eventlog.InstallAsEventCreate(s.description.Name, eventsSupported) +} + +// removeService attempts to uninstall the spectred service. Typically this should +// be done by the msi uninstaller, but it is provided here since it can be +// useful for development. 
Not the eventlog entry is intentionally not removed +// since it would invalidate any existing event log messages. +func (s *Service) removeService() error { + // Connect to the windows service manager. + serviceManager, err := mgr.Connect() + if err != nil { + return err + } + defer serviceManager.Disconnect() + + // Ensure the service exists. + service, err := serviceManager.OpenService(s.description.Name) + if err != nil { + return errors.Errorf("service %s is not installed", s.description.Name) + } + defer service.Close() + + // Remove the service. + return service.Delete() +} + +// startService attempts to Start the spectred service. +func (s *Service) startService() error { + // Connect to the windows service manager. + serviceManager, err := mgr.Connect() + if err != nil { + return err + } + defer serviceManager.Disconnect() + + service, err := serviceManager.OpenService(s.description.Name) + if err != nil { + return errors.Errorf("could not access service: %s", err) + } + defer service.Close() + + err = service.Start(os.Args) + if err != nil { + return errors.Errorf("could not start service: %s", err) + } + + return nil +} + +// controlService allows commands which change the status of the service. It +// also waits for up to 10 seconds for the service to change to the passed +// state. +func (s *Service) controlService(c svc.Cmd, to svc.State) error { + // Connect to the windows service manager. + serviceManager, err := mgr.Connect() + if err != nil { + return err + } + defer serviceManager.Disconnect() + + service, err := serviceManager.OpenService(s.description.Name) + if err != nil { + return errors.Errorf("could not access service: %s", err) + } + defer service.Close() + + status, err := service.Control(c) + if err != nil { + return errors.Errorf("could not send control=%d: %s", c, err) + } + + // Send the control message. 
+ timeout := time.Now().Add(10 * time.Second) + for status.State != to { + if timeout.Before(time.Now()) { + return errors.Errorf("timeout waiting for service to go "+ + "to state=%d", to) + } + time.Sleep(300 * time.Millisecond) + status, err = service.Query() + if err != nil { + return errors.Errorf("could not retrieve service "+ + "status: %s", err) + } + } + + return nil +} diff --git a/infrastructure/os/winservice/service_main_windows.go b/infrastructure/os/winservice/service_main_windows.go new file mode 100644 index 0000000..5506fd9 --- /dev/null +++ b/infrastructure/os/winservice/service_main_windows.go @@ -0,0 +1,40 @@ +package winservice + +import ( + "github.com/btcsuite/winsvc/svc" + "github.com/spectre-project/spectred/infrastructure/config" +) + +// serviceMain checks whether we're being invoked as a service, and if so uses +// the service control manager to start the long-running server. A flag is +// returned to the caller so the application can determine whether to exit (when +// running as a service) or launch in normal interactive mode. +func serviceMain(main MainFunc, description *ServiceDescription, cfg *config.Config) (bool, error) { + service := newService(main, description, cfg) + + if cfg.ServiceOptions.ServiceCommand != "" { + return true, service.performServiceCommand() + } + + // Don't run as a service if we're running interactively (or that can't + // be determined due to an error). + isInteractive, err := svc.IsAnInteractiveSession() + if err != nil { + return false, err + } + if isInteractive { + return false, nil + } + + err = service.Start() + if err != nil { + return true, err + } + + return true, nil +} + +// Set windows specific functions to real functions. 
+func init() { + WinServiceMain = serviceMain +} diff --git a/infrastructure/os/winservice/service_windows.go b/infrastructure/os/winservice/service_windows.go new file mode 100644 index 0000000..a330519 --- /dev/null +++ b/infrastructure/os/winservice/service_windows.go @@ -0,0 +1,119 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package winservice + +import ( + "fmt" + + "github.com/btcsuite/winsvc/eventlog" + "github.com/btcsuite/winsvc/svc" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/os/signal" + "github.com/spectre-project/spectred/version" +) + +// Service houses the main service handler which handles all service +// updates and launching the application's main. +type Service struct { + main MainFunc + description *ServiceDescription + cfg *config.Config + eventLog *eventlog.Log +} + +func newService(main MainFunc, description *ServiceDescription, cfg *config.Config) *Service { + return &Service{ + main: main, + description: description, + cfg: cfg, + } +} + +// Start starts the srevice +func (s *Service) Start() error { + elog, err := eventlog.Open(s.description.Name) + if err != nil { + return err + } + s.eventLog = elog + defer s.eventLog.Close() + + err = svc.Run(s.description.Name, &Service{}) + if err != nil { + s.eventLog.Error(1, fmt.Sprintf("Service start failed: %s", err)) + return err + } + + return nil +} + +// Execute is the main entry point the winsvc package calls when receiving +// information from the Windows service control manager. It launches the +// long-running spectredMain (which is the real meat of spectred), handles service +// change requests, and notifies the service control manager of changes. +func (s *Service) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) { + // Service start is pending. 
+ const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown + changes <- svc.Status{State: svc.StartPending} + + // Start spectredMain in a separate goroutine so the service can start + // quickly. Shutdown (along with a potential error) is reported via + // doneChan. startedChan is notified once spectred is started so this can + // be properly logged + doneChan := make(chan error) + startedChan := make(chan struct{}) + spawn("spectredMain-windows", func() { + err := s.main(startedChan) + doneChan <- err + }) + + // Service is now started. + changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted} +loop: + for { + select { + case c := <-r: + switch c.Cmd { + case svc.Interrogate: + changes <- c.CurrentStatus + + case svc.Stop, svc.Shutdown: + // Service stop is pending. Don't accept any + // more commands while pending. + changes <- svc.Status{State: svc.StopPending} + + // Signal the main function to exit. + signal.ShutdownRequestChannel <- struct{}{} + + default: + s.eventLog.Error(1, fmt.Sprintf("Unexpected control "+ + "request #%d.", c)) + } + + case <-startedChan: + s.logServiceStart() + + case err := <-doneChan: + if err != nil { + s.eventLog.Error(1, err.Error()) + } + break loop + } + } + + // Service is now stopped. + changes <- svc.Status{State: svc.Stopped} + return false, 0 +} + +// logServiceStart logs information about spectred when the main server has +// been started to the Windows event log. 
+func (s *Service) logServiceStart() { + var message string + message += fmt.Sprintf("%s version %s\n", s.description.DisplayName, version.Version()) + message += fmt.Sprintf("Configuration file: %s\n", s.cfg.ConfigFile) + message += fmt.Sprintf("Application directory: %s\n", s.cfg.AppDir) + message += fmt.Sprintf("Logs directory: %s\n", s.cfg.LogDir) +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..f5d28cb --- /dev/null +++ b/main.go @@ -0,0 +1,18 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + _ "net/http/pprof" + "os" + + "github.com/spectre-project/spectred/app" +) + +func main() { + if err := app.StartApp(); err != nil { + os.Exit(1) + } +} diff --git a/stability-tests/README.md b/stability-tests/README.md new file mode 100644 index 0000000..35bd597 --- /dev/null +++ b/stability-tests/README.md @@ -0,0 +1,12 @@ +# Stability-Test Tools + +This package provides some higher-level tests for spectred. These are +tests that are beyond the scope of unit-tests, and some of them might +take long time to run. + +# Running + +* To run only the fast running tests call `./install_and_test.sh` +* To include all tests call `SLOW=1 ./install_and_test.sh` (Note this + will take many hours to finish) +* To run a single test cd `[test-name]/run` and call `./run.sh` diff --git a/stability-tests/application-level-garbage/README.md b/stability-tests/application-level-garbage/README.md new file mode 100644 index 0000000..98734d3 --- /dev/null +++ b/stability-tests/application-level-garbage/README.md @@ -0,0 +1,10 @@ +# Application Level Garbage Generator + +This tool sends invalid blocks to a node, making sure the node +responds with a reject, and does not crash as a result. + +## Running + +1. `go install` spectred and application-level-garbage. +2. `cd run` +3. 
`./run.sh` diff --git a/stability-tests/application-level-garbage/config.go b/stability-tests/application-level-garbage/config.go new file mode 100644 index 0000000..05af44d --- /dev/null +++ b/stability-tests/application-level-garbage/config.go @@ -0,0 +1,60 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/infrastructure/config" +) + +const ( + defaultLogFilename = "application_level_garbage.log" + defaultErrLogFilename = "application_level_garbage_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + NodeP2PAddress string `long:"addr" short:"a" description:"node's P2P address"` + BlocksFilePath string `long:"blocks" short:"b" description:"path of file containing malformed blocks"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + + _, err := parser.Parse() + + if err != nil { + if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp { + os.Exit(0) + } + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/application-level-garbage/log.go b/stability-tests/application-level-garbage/log.go new file mode 100644 index 0000000..7229b1b --- /dev/null +++ 
b/stability-tests/application-level-garbage/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("APLG") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/application-level-garbage/main.go b/stability-tests/application-level-garbage/main.go new file mode 100644 index 0000000..2e1806f --- /dev/null +++ b/stability-tests/application-level-garbage/main.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/standalone" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + defer panics.HandlePanic(log, "applicationLevelGarbage-main", nil) + err := parseConfig() + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err) + os.Exit(1) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + spectredConfig := config.DefaultConfig() + spectredConfig.NetworkFlags = cfg.NetworkFlags + + minimalNetAdapter, err := standalone.NewMinimalNetAdapter(spectredConfig) + if err != nil { + fmt.Fprintf(os.Stderr, "Error creating minimalNetAdapter: %+v", err) + backendLog.Close() + os.Exit(1) + } + + blocksChan, err := readBlocks() + if err != nil { + log.Errorf("Error reading blocks: %+v", err) + backendLog.Close() + os.Exit(1) + } + + err = sendBlocks(cfg.NodeP2PAddress, minimalNetAdapter, blocksChan) + if err != nil { + log.Errorf("Error sending blocks: %+v", err) + backendLog.Close() + os.Exit(1) + } +} diff --git a/stability-tests/application-level-garbage/read.go 
b/stability-tests/application-level-garbage/read.go new file mode 100644 index 0000000..3418140 --- /dev/null +++ b/stability-tests/application-level-garbage/read.go @@ -0,0 +1,32 @@ +package main + +import ( + "encoding/json" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/stability-tests/common" +) + +var blockBuffer []byte + +func readBlocks() (<-chan *externalapi.DomainBlock, error) { + c := make(chan *externalapi.DomainBlock) + + spawn("applicationLevelGarbage-readBlocks", func() { + lineNum := 0 + for blockJSON := range common.ScanFile(activeConfig().BlocksFilePath) { + domainBlock := &externalapi.DomainBlock{} + + err := json.Unmarshal(blockJSON, domainBlock) + if err != nil { + panic(errors.Wrapf(err, "error deserializing line No. %d with json %s", lineNum, blockJSON)) + } + + c <- domainBlock + } + close(c) + }) + + return c, nil +} diff --git a/stability-tests/application-level-garbage/run/blocks.dat b/stability-tests/application-level-garbage/run/blocks.dat new file mode 100644 index 0000000..f4f1d67 --- /dev/null +++ b/stability-tests/application-level-garbage/run/blocks.dat @@ -0,0 +1,6 @@ +// Genesis block +{"Header":{"Version":1,"ParentHashes":[],"HashMerkleRoot":[0,148,253,255,77,178,77,24,149,33,54,42,20,251,25,122,153,81,126,63,68,246,46,11,231,179,192,187,0,59,11,189],"AcceptedIDMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UTXOCommitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"TimeInMilliseconds":1593528309396,"Bits":511705087,"Nonce":282367},"Transactions":[{"Version":1,"Inputs":[],"Outputs":[],"LockTime":0,"SubnetworkID":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Gas":0,"Payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ=","Fee":0,"Mass":0}]} +// Bad hasMerkleRoot 
+{"Header":{"Version":268435456,"ParentHashes":[[19,45,252,243,207,3,251,33,48,184,103,121,188,46,24,78,8,215,235,247,182,134,60,62,224,140,141,2,60,253,254,102]],"HashMerkleRoot":[1,2,3,4,5],"AcceptedIDMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UTXOCommitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Timestamp":{},"Bits":511705087,"Nonce":4283},"Transactions":[{"Version":1,"TxIn":[],"TxOut":[],"LockTime":0,"SubnetworkID":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Gas":0,"Payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}]} +// Zeroed hashMerkleRoot +{"Header":{"Version":268435456,"ParentHashes":[[19,45,252,243,207,3,251,33,48,184,103,121,188,46,24,78,8,215,235,247,182,134,60,62,224,140,141,2,60,253,254,102]],"HashMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"AcceptedIDMerkleRoot":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"UTXOCommitment":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Timestamp":{},"Bits":511705087,"Nonce":4283},"Transactions":[{"Version":1,"TxIn":[],"TxOut":[],"LockTime":0,"SubnetworkID":[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"Gas":0,"Payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}]} diff --git a/stability-tests/application-level-garbage/run/run.sh b/stability-tests/application-level-garbage/run/run.sh new file mode 100755 index 0000000..80f245b --- /dev/null +++ b/stability-tests/application-level-garbage/run/run.sh @@ -0,0 +1,33 @@ +#!/bin/bash +rm -rf /tmp/spectred-temp + +spectred --devnet --appdir=/tmp/spectred-temp --profile=6061 --loglevel=debug & +SPECTRED_PID=$! +SPECTRED_KILLED=0 +function killSpectredIfNotKilled() { + if [ $SPECTRED_KILLED -eq 0 ]; then + kill $SPECTRED_PID + fi +} +trap "killSpectredIfNotKilled" EXIT + +sleep 1 + +application-level-garbage --devnet -alocalhost:18611 -b blocks.dat --profile=7000 +TEST_EXIT_CODE=$? 
+ +kill $SPECTRED_PID + +wait $SPECTRED_PID +SPECTRED_KILLED=1 +SPECTRED_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" +echo "Spectred exit code: $SPECTRED_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ] && [ $SPECTRED_EXIT_CODE -eq 0 ]; then + echo "application-level-garbage test: PASSED" + exit 0 +fi +echo "application-level-garbage test: FAILED" +exit 1 diff --git a/stability-tests/application-level-garbage/send.go b/stability-tests/application-level-garbage/send.go new file mode 100644 index 0000000..056992f --- /dev/null +++ b/stability-tests/application-level-garbage/send.go @@ -0,0 +1,51 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/app/protocol/common" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/standalone" +) + +func sendBlocks(address string, minimalNetAdapter *standalone.MinimalNetAdapter, blocksChan <-chan *externalapi.DomainBlock) error { + for block := range blocksChan { + routes, err := minimalNetAdapter.Connect(address) + if err != nil { + return err + } + + blockHash := consensushashing.BlockHash(block) + log.Infof("Sending block %s", blockHash) + + err = routes.OutgoingRoute.Enqueue(&appmessage.MsgInvRelayBlock{ + Hash: blockHash, + }) + if err != nil { + return err + } + + message, err := routes.WaitForMessageOfType(appmessage.CmdRequestRelayBlocks, common.DefaultTimeout) + if err != nil { + return err + } + requestRelayBlockMessage := message.(*appmessage.MsgRequestRelayBlocks) + if len(requestRelayBlockMessage.Hashes) != 1 || *requestRelayBlockMessage.Hashes[0] != *blockHash { + return errors.Errorf("Expecting requested hashes to be [%s], but got %v", + blockHash, requestRelayBlockMessage.Hashes) + } + + err = 
routes.OutgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block)) + if err != nil { + return err + } + + // TODO(libp2p): Wait for reject message once it has been implemented + err = routes.WaitForDisconnect(common.DefaultTimeout) + if err != nil { + return err + } + } + return nil +} diff --git a/stability-tests/common/cmd.go b/stability-tests/common/cmd.go new file mode 100644 index 0000000..ea03866 --- /dev/null +++ b/stability-tests/common/cmd.go @@ -0,0 +1,32 @@ +package common + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// StartCmd runs a command as a separate process. +// The `name` parameter is used for logs. +// The command executable should be in args[0] +func StartCmd(name string, args ...string) (*exec.Cmd, error) { + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdout = NewLogWriter(log, logger.LevelTrace, fmt.Sprintf("%s-STDOUT", name)) + cmd.Stderr = NewLogWriter(log, logger.LevelWarn, fmt.Sprintf("%s-STDERR", name)) + log.Infof("Starting command %s: %s", name, cmd) + err := cmd.Start() + if err != nil { + return nil, errors.WithStack(err) + } + + return cmd, nil +} + +// NetworkCliArgumentFromNetParams returns the spectred command line argument that starts the given network. 
+func NetworkCliArgumentFromNetParams(params *dagconfig.Params) string { + return fmt.Sprintf("--%s", strings.TrimPrefix(params.Name, "spectre-")) +} diff --git a/stability-tests/common/file_scanner.go b/stability-tests/common/file_scanner.go new file mode 100644 index 0000000..fd8921c --- /dev/null +++ b/stability-tests/common/file_scanner.go @@ -0,0 +1,67 @@ +package common + +import ( + "bufio" + "bytes" + "encoding/hex" + "os" + + "github.com/pkg/errors" +) + +// ScanFile opens the file in the specified path, and returns a channel that +// sends the contents of the file line-by-line, ignoring lines beggining with // +func ScanFile(filePath string) <-chan []byte { + c := make(chan []byte) + + spawn("ScanFile", func() { + file, err := os.Open(filePath) + if err != nil { + panic(errors.Wrapf(err, "error opening file %s", filePath)) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + panic(errors.Wrap(err, "error reading line")) + } + + line := scanner.Bytes() + if bytes.HasPrefix(line, []byte("//")) { + continue + } + + c <- line + } + close(c) + }) + + return c +} + +// ScanHexFile opens the file in the specified path, and returns a channel that +// sends the contents of the file line-by-line, ignoring lines beggining with //, +// parsing the hex data in all other lines +func ScanHexFile(filePath string) <-chan []byte { + c := make(chan []byte) + + spawn("ScanHexFile", func() { + lineNum := 1 + for lineHex := range ScanFile(filePath) { + lineBytes := make([]byte, hex.DecodedLen(len(lineHex))) + _, err := hex.Decode(lineBytes, lineHex) + if err != nil { + panic(errors.Wrapf(err, "error decoding line No. 
%d with hex %s", lineNum, lineHex)) + } + + c <- lineBytes + + lineNum++ + } + close(c) + }) + + return c +} diff --git a/stability-tests/common/log-writer.go b/stability-tests/common/log-writer.go new file mode 100644 index 0000000..9558283 --- /dev/null +++ b/stability-tests/common/log-writer.go @@ -0,0 +1,29 @@ +package common + +import ( + "strings" + + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// LogWriter writes to the given log with the given log level and prefix +type LogWriter struct { + log *logger.Logger + level logger.Level + prefix string +} + +func (clw LogWriter) Write(p []byte) (n int, err error) { + logWithoutNewLine := strings.TrimSuffix(string(p), "\n") + clw.log.Writef(clw.level, "%s: %s", clw.prefix, logWithoutNewLine) + return len(p), nil +} + +// NewLogWriter returns a new LogWriter that forwards to `log` all data written to it using at `level` level +func NewLogWriter(log *logger.Logger, level logger.Level, prefix string) LogWriter { + return LogWriter{ + log: log, + level: level, + prefix: prefix, + } +} diff --git a/stability-tests/common/log.go b/stability-tests/common/log.go new file mode 100644 index 0000000..76fdebd --- /dev/null +++ b/stability-tests/common/log.go @@ -0,0 +1,72 @@ +package common + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common/mine" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util/panics" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log *logger.Logger +var spawn func(name string, spawnedFunction func()) + +const logSubsytem = "STCM" + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. 
Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + backend := logger.NewBackend() + log = backend.Logger(logSubsytem) + log.SetLevel(logger.LevelOff) + spawn = panics.GoroutineWrapperFunc(log) + logger.SetLogLevels(logger.LevelOff) + logger.InitLogStdout(logger.LevelInfo) +} + +// UseLogger uses a specified Logger to output package logging info. +func UseLogger(backend *logger.Backend, level logger.Level) { + log = backend.Logger(logSubsytem) + log.SetLevel(level) + spawn = panics.GoroutineWrapperFunc(log) + + mine.UseLogger(backend, level) + rpc.UseLogger(backend, level) + logger.SetLogLevels(level) +} + +// InitBackend initializes the test log backend +func InitBackend(backendLog *logger.Backend, logFile, errLogFile string) { + err := backendLog.AddLogFile(logFile, logger.LevelTrace) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %+v\n", logFile, logger.LevelTrace, err) + os.Exit(1) + } + err = backendLog.AddLogFile(errLogFile, logger.LevelWarn) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error adding log file %s as log rotator for level %s: %+v\n", errLogFile, logger.LevelWarn, err) + os.Exit(1) + } + + err = backendLog.AddLogWriter(os.Stdout, logger.LevelDebug) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error adding stdout to the loggerfor level %s: %+v\n", logger.LevelInfo, err) + os.Exit(1) + } + + err = backendLog.Run() + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error starting the logger: %s ", err) + os.Exit(1) + } +} diff --git a/stability-tests/common/mine/db.go b/stability-tests/common/mine/db.go new file mode 100644 index 0000000..7e8a8f0 --- /dev/null +++ b/stability-tests/common/mine/db.go @@ -0,0 +1,95 @@ +package mine + +import ( + "path/filepath" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/infrastructure/db/database" + 
"github.com/spectre-project/spectred/infrastructure/db/database/ldb" +) + +const leveldbCacheSizeMiB = 256 + +var blockIDToHashBucket = database.MakeBucket([]byte("id-to-block-hash")) +var lastMinedBlockKey = database.MakeBucket(nil).Key([]byte("last-sent-block")) + +type miningDB struct { + idToBlockHash map[string]*externalapi.DomainHash + hashToBlockID map[externalapi.DomainHash]string + db *ldb.LevelDB +} + +func (mdb *miningDB) hashByID(id string) *externalapi.DomainHash { + return mdb.idToBlockHash[id] +} + +func (mdb *miningDB) putID(id string, hash *externalapi.DomainHash) error { + mdb.idToBlockHash[id] = hash + mdb.hashToBlockID[*hash] = id + return mdb.db.Put(blockIDToHashBucket.Key([]byte(id)), hash.ByteSlice()) +} + +func (mdb *miningDB) updateLastMinedBlock(id string) error { + return mdb.db.Put(lastMinedBlockKey, []byte(id)) +} + +func (mdb *miningDB) lastMinedBlock() (string, error) { + has, err := mdb.db.Has(lastMinedBlockKey) + if err != nil { + return "", err + } + + if !has { + return "0", nil + } + + blockID, err := mdb.db.Get(lastMinedBlockKey) + if err != nil { + return "", err + } + + return string(blockID), nil +} + +func newMiningDB(dataDir string) (*miningDB, error) { + idToBlockHash := make(map[string]*externalapi.DomainHash) + hashToBlockID := make(map[externalapi.DomainHash]string) + + dbPath := filepath.Join(dataDir, "minedb") + db, err := ldb.NewLevelDB(dbPath, leveldbCacheSizeMiB) + if err != nil { + return nil, err + } + + cursor, err := db.Cursor(blockIDToHashBucket) + if err != nil { + return nil, err + } + + for cursor.Next() { + key, err := cursor.Key() + if err != nil { + return nil, err + } + + value, err := cursor.Value() + if err != nil { + return nil, err + } + + hash, err := externalapi.NewDomainHashFromByteSlice(value) + if err != nil { + return nil, err + } + + id := string(key.Suffix()) + idToBlockHash[id] = hash + hashToBlockID[*hash] = id + } + + return &miningDB{ + idToBlockHash: idToBlockHash, + hashToBlockID: 
hashToBlockID, + db: db, + }, nil +} diff --git a/stability-tests/common/mine/log.go b/stability-tests/common/mine/log.go new file mode 100644 index 0000000..042625e --- /dev/null +++ b/stability-tests/common/mine/log.go @@ -0,0 +1,35 @@ +package mine + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log *logger.Logger +var spawn func(name string, spawnedFunction func()) + +const logSubsytem = "MFJS" + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + backend := logger.NewBackend() + log = backend.Logger(logSubsytem) + log.SetLevel(logger.LevelOff) + spawn = panics.GoroutineWrapperFunc(log) +} + +// UseLogger uses a specified Logger to output package logging info. 
+func UseLogger(backend *logger.Backend, level logger.Level) { + log = backend.Logger(logSubsytem) + log.SetLevel(level) + spawn = panics.GoroutineWrapperFunc(log) +} diff --git a/stability-tests/common/mine/mine.go b/stability-tests/common/mine/mine.go new file mode 100644 index 0000000..ed6604d --- /dev/null +++ b/stability-tests/common/mine/mine.go @@ -0,0 +1,158 @@ +package mine + +import ( + "math/rand" + "path/filepath" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/mining" + "github.com/spectre-project/spectred/stability-tests/common/rpc" +) + +// FromFile mines all blocks as described by `jsonFile` +func FromFile(jsonFile string, consensusConfig *consensus.Config, rpcClient *rpc.Client, dataDir string) error { + log.Infof("Mining blocks from JSON file %s from data directory %s", jsonFile, dataDir) + blockChan, err := readBlocks(jsonFile) + if err != nil { + return err + } + + return mineBlocks(consensusConfig, rpcClient, blockChan, dataDir) +} + +func mineBlocks(consensusConfig *consensus.Config, rpcClient *rpc.Client, blockChan <-chan JSONBlock, dataDir string) error { + mdb, err := newMiningDB(dataDir) + if err != nil { + return err + } + + dbPath := filepath.Join(dataDir, "db") + factory := consensus.NewFactory() + factory.SetTestDataDir(dbPath) + testConsensus, tearDownFunc, err := factory.NewTestConsensus(consensusConfig, "minejson") + if err != nil { + return err + } + defer tearDownFunc(true) + + info, err := testConsensus.GetSyncInfo() + if err != nil { + return err + } + + log.Infof("Starting with data directory with %d headers and %d blocks", 
info.HeaderCount, info.BlockCount) + + err = mdb.putID("0", consensusConfig.GenesisHash) + if err != nil { + return err + } + + totalBlocksSubmitted := 0 + lastLogTime := time.Now() + rpcWaitInInterval := 0 * time.Second + for blockData := range blockChan { + if hash := mdb.hashByID(blockData.ID); hash != nil { + _, err := rpcClient.GetBlock(hash.String(), false) + if err == nil { + continue + } + + if !strings.Contains(err.Error(), "not found") { + return err + } + } + + block, err := mineOrFetchBlock(blockData, mdb, testConsensus) + if err != nil { + return err + } + + beforeSubmitBlockTime := time.Now() + rejectReason, err := rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + return errors.Wrap(err, "error in SubmitBlock") + } + if rejectReason != appmessage.RejectReasonNone { + return errors.Errorf("block rejected in SubmitBlock") + } + rpcWaitInInterval += time.Since(beforeSubmitBlockTime) + + totalBlocksSubmitted++ + const logInterval = 1000 + if totalBlocksSubmitted%logInterval == 0 { + intervalDuration := time.Since(lastLogTime) + blocksPerSecond := logInterval / intervalDuration.Seconds() + log.Infof("It took %s to submit %d blocks (%f blocks/sec) while %s of it it waited for RPC response"+ + " (total blocks sent %d)", intervalDuration, logInterval, blocksPerSecond, rpcWaitInInterval, + totalBlocksSubmitted) + rpcWaitInInterval = 0 + lastLogTime = time.Now() + } + + blockHash := consensushashing.BlockHash(block) + log.Tracef("Submitted block %s with hash %s", blockData.ID, blockHash) + } + return nil +} + +func mineOrFetchBlock(blockData JSONBlock, mdb *miningDB, testConsensus testapi.TestConsensus) (*externalapi.DomainBlock, error) { + hash := mdb.hashByID(blockData.ID) + if mdb.hashByID(blockData.ID) != nil { + block, found, err := testConsensus.GetBlock(hash) + if err != nil { + return nil, err + } + + if !found { + return nil, errors.Errorf("block %s is missing", hash) + } + + return block, nil + } + + parentHashes := 
make([]*externalapi.DomainHash, len(blockData.Parents)) + for i, parentID := range blockData.Parents { + parentHashes[i] = mdb.hashByID(parentID) + } + block, _, err := testConsensus.BuildBlockWithParents(parentHashes, + &externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{}}, []*externalapi.DomainTransaction{}) + if err != nil { + return nil, errors.Wrap(err, "error in BuildBlockWithParents") + } + + if !testConsensus.DAGParams().SkipProofOfWork { + SolveBlock(block) + } + + err = testConsensus.ValidateAndInsertBlock(block, true) + if err != nil { + return nil, errors.Wrap(err, "error in ValidateAndInsertBlock") + } + + blockHash := consensushashing.BlockHash(block) + err = mdb.putID(blockData.ID, blockHash) + if err != nil { + return nil, err + } + + err = mdb.updateLastMinedBlock(blockData.ID) + if err != nil { + return nil, err + } + + return block, nil +} + +var random = rand.New(rand.NewSource(time.Now().UnixNano())) + +// SolveBlock increments the given block's nonce until it matches the difficulty requirements in its bits field +func SolveBlock(block *externalapi.DomainBlock) { + mining.SolveBlock(block, random) +} diff --git a/stability-tests/common/mine/read.go b/stability-tests/common/mine/read.go new file mode 100644 index 0000000..c3cf242 --- /dev/null +++ b/stability-tests/common/mine/read.go @@ -0,0 +1,58 @@ +package mine + +import ( + "compress/gzip" + "encoding/json" + "os" +) + +// JSONBlock is a json representation of a block in mine format +type JSONBlock struct { + ID string `json:"id"` + Parents []string `json:"parents"` +} + +func readBlocks(jsonFile string) (<-chan JSONBlock, error) { + f, err := os.Open(jsonFile) + if err != nil { + return nil, err + } + + gzipReader, err := gzip.NewReader(f) + if err != nil { + panic(err) + } + defer gzipReader.Close() + + decoder := json.NewDecoder(gzipReader) + + blockChan := make(chan JSONBlock) + spawn("mineFromJson.readBlocks", func() { + // read open bracket + _, err := 
decoder.Token() + if err != nil { + panic(err) + } + + // while the array contains values + for decoder.More() { + var block JSONBlock + // decode an array value (Message) + err := decoder.Decode(&block) + if err != nil { + panic(err) + } + + blockChan <- block + } + + // read closing bracket + _, err = decoder.Token() + if err != nil { + panic(err) + } + + close(blockChan) + }) + return blockChan, nil +} diff --git a/stability-tests/common/paths.go b/stability-tests/common/paths.go new file mode 100644 index 0000000..282483f --- /dev/null +++ b/stability-tests/common/paths.go @@ -0,0 +1,6 @@ +package common + +import "github.com/spectre-project/spectred/util" + +// DefaultAppDir is the default app directory to be used by all tests +var DefaultAppDir = util.AppDir("stability-tests", false) diff --git a/stability-tests/common/rpc/log.go b/stability-tests/common/rpc/log.go new file mode 100644 index 0000000..ad66f41 --- /dev/null +++ b/stability-tests/common/rpc/log.go @@ -0,0 +1,35 @@ +package rpc + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log *logger.Logger +var spawn func(name string, spawnedFunction func()) + +const logSubsytem = "CRPC" + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + backend := logger.NewBackend() + log = backend.Logger(logSubsytem) + log.SetLevel(logger.LevelOff) + spawn = panics.GoroutineWrapperFunc(log) +} + +// UseLogger uses a specified Logger to output package logging info. 
+func UseLogger(backend *logger.Backend, level logger.Level) { + log = backend.Logger(logSubsytem) + log.SetLevel(level) + spawn = panics.GoroutineWrapperFunc(log) +} diff --git a/stability-tests/common/rpc/rpc.go b/stability-tests/common/rpc/rpc.go new file mode 100644 index 0000000..12e030b --- /dev/null +++ b/stability-tests/common/rpc/rpc.go @@ -0,0 +1,61 @@ +package rpc + +import ( + "time" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" + + "github.com/spectre-project/spectred/domain/dagconfig" +) + +const defaultRPCServer = "localhost" + +// Config are configurations common to all tests that need to connect to json-rpc +type Config struct { + RPCServer string `short:"s" long:"rpcserver" description:"RPC server to connect to"` +} + +// ValidateRPCConfig makes sure that provided Config is valid or returns an error otherwise +func ValidateRPCConfig(config *Config) error { + if config.RPCServer == "" { + config.RPCServer = defaultRPCServer + } + return nil +} + +// Client wraps rpcclient.RPCClient with extra functionality needed for stability-tests +type Client struct { + *rpcclient.RPCClient + OnBlockAdded chan struct{} +} + +// ConnectToRPC connects to JSON-RPC server specified in the provided config +func ConnectToRPC(config *Config, dagParams *dagconfig.Params) (*Client, error) { + rpcAddress, err := dagParams.NormalizeRPCServerAddress(config.RPCServer) + if err != nil { + return nil, err + } + rpcClient, err := rpcclient.NewRPCClient(rpcAddress) + if err != nil { + return nil, err + } + rpcClient.SetTimeout(time.Second * 120) + rpcClient.SetOnErrorHandler(func(err error) { + log.Errorf("Error from Client: %+v", err) + }) + + client := &Client{ + RPCClient: rpcClient, + OnBlockAdded: make(chan struct{}), + } + + return client, nil +} + +// RegisterForBlockAddedNotifications registers for block added notifications pushed by the node +func (c *Client) 
RegisterForBlockAddedNotifications() error { + return c.RPCClient.RegisterForBlockAddedNotifications(func(_ *appmessage.BlockAddedNotificationMessage) { + c.OnBlockAdded <- struct{}{} + }) +} diff --git a/stability-tests/common/run-spectred.go b/stability-tests/common/run-spectred.go new file mode 100644 index 0000000..d1c99de --- /dev/null +++ b/stability-tests/common/run-spectred.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "os" + "sync/atomic" + "syscall" + "testing" + + "github.com/spectre-project/spectred/domain/dagconfig" +) + +// RunSpectredForTesting runs spectred for testing purposes +func RunSpectredForTesting(t *testing.T, testName string, rpcAddress string) func() { + appDir, err := TempDir(testName) + if err != nil { + t.Fatalf("TempDir: %s", err) + } + + spectredRunCommand, err := StartCmd("SPECTRED", + "spectred", + NetworkCliArgumentFromNetParams(&dagconfig.DevnetParams), + "--appdir", appDir, + "--rpclisten", rpcAddress, + "--loglevel", "debug", + ) + if err != nil { + t.Fatalf("StartCmd: %s", err) + } + t.Logf("Spectred started with --appdir=%s", appDir) + + isShutdown := uint64(0) + go func() { + err := spectredRunCommand.Wait() + if err != nil { + if atomic.LoadUint64(&isShutdown) == 0 { + panic(fmt.Sprintf("Spectred closed unexpectedly: %s. 
See logs at: %s", err, appDir)) + } + } + }() + + return func() { + err := spectredRunCommand.Process.Signal(syscall.SIGTERM) + if err != nil { + t.Fatalf("Signal: %s", err) + } + err = os.RemoveAll(appDir) + if err != nil { + t.Fatalf("RemoveAll: %s", err) + } + atomic.StoreUint64(&isShutdown, 1) + t.Logf("Spectred stopped") + } +} diff --git a/stability-tests/common/tempdir.go b/stability-tests/common/tempdir.go new file mode 100644 index 0000000..50266dd --- /dev/null +++ b/stability-tests/common/tempdir.go @@ -0,0 +1,9 @@ +package common + +import "io/ioutil" + +// TempDir returns a temporary directory with the given pattern, prefixed with STABILITY_TEMP_DIR_ +func TempDir(pattern string) (string, error) { + const prefix = "STABILITY_TEMP_DIR_" + return ioutil.TempDir("", prefix+pattern) +} diff --git a/stability-tests/daa/README.md b/stability-tests/daa/README.md new file mode 100644 index 0000000..a3d57d3 --- /dev/null +++ b/stability-tests/daa/README.md @@ -0,0 +1,10 @@ +# DAA Tool + +This tool simulates various hashrate patterns to stress-test the DAA +algorithm. + +## Running + +1. `go install` spectred and daa. +2. `cd run` +3. 
`./run.sh` diff --git a/stability-tests/daa/average_duration.go b/stability-tests/daa/average_duration.go new file mode 100644 index 0000000..b10c767 --- /dev/null +++ b/stability-tests/daa/average_duration.go @@ -0,0 +1,37 @@ +package daa + +import "time" + +type averageDuration struct { + average float64 + count uint64 + sampleSize uint64 +} + +func newAverageDuration(sampleSize uint64) *averageDuration { + return &averageDuration{ + average: 0, + count: 0, + sampleSize: sampleSize, + } +} + +func (ad *averageDuration) add(duration time.Duration) { + durationNanoseconds := float64(duration.Nanoseconds()) + + ad.count++ + if ad.count > ad.sampleSize { + ad.count = ad.sampleSize + } + + if ad.count == 1 { + ad.average = durationNanoseconds + return + } + + ad.average = ad.average + ((durationNanoseconds - ad.average) / float64(ad.count)) +} + +func (ad *averageDuration) toDuration() time.Duration { + return time.Duration(ad.average) +} diff --git a/stability-tests/daa/daa_test.go b/stability-tests/daa/daa_test.go new file mode 100644 index 0000000..15f7ed5 --- /dev/null +++ b/stability-tests/daa/daa_test.go @@ -0,0 +1,320 @@ +package daa + +import ( + "math" + "math/rand" + "os" + "testing" + "time" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/pow" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" + "github.com/spectre-project/spectred/stability-tests/common" +) + +const rpcAddress = "localhost:9000" +const miningAddress = "spectredev:qrcqat6l9zcjsu7swnaztqzrv0s7hu04skpaezxk43y4etj8ncwfk9keeuf4y" +const blockRateDeviationThreshold = 0.5 +const averageBlockRateSampleSize = 60 +const averageHashRateSampleSize = 100_000 + +func TestDAA(t *testing.T) { + if os.Getenv("RUN_STABILITY_TESTS") == "" { + t.Skip() + } + + machineHashNanoseconds := 
measureMachineHashNanoseconds(t) + t.Logf("Machine hashes per second: %d", hashNanosecondsToHashesPerSecond(machineHashNanoseconds)) + + tests := []struct { + name string + runDuration time.Duration + + // targetHashNanosecondsFunction receives the duration of time between now and the start + // of the run (moments before the first hash has been calculated). It returns the target + // duration of a single hash operation in nanoseconds (greater return value = lower hash rate) + targetHashNanosecondsFunction func(totalElapsedDuration time.Duration) int64 + }{ + { + name: "constant hash rate", + runDuration: 10 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + return machineHashNanoseconds * 2 + }, + }, + { + name: "sudden hash rate drop", + runDuration: 45 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + if totalElapsedDuration < 5*time.Minute { + return machineHashNanoseconds * 2 + } + return machineHashNanoseconds * 10 + }, + }, + { + name: "sudden hash rate jump", + runDuration: 15 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + if totalElapsedDuration < 5*time.Minute { + return machineHashNanoseconds * 10 + } + return machineHashNanoseconds * 2 + }, + }, + { + name: "hash rate peak", + runDuration: 10 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + if totalElapsedDuration > 4*time.Minute && totalElapsedDuration < 5*time.Minute { + return machineHashNanoseconds * 2 + } + return machineHashNanoseconds * 10 + }, + }, + { + name: "hash rate valley", + runDuration: 10 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + if totalElapsedDuration > 4*time.Minute && totalElapsedDuration < 5*time.Minute { + return machineHashNanoseconds * 10 + } + return machineHashNanoseconds * 2 + }, + }, + { + name: "periodic hash rate peaks", 
+ runDuration: 10 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + if int(totalElapsedDuration.Seconds())%30 == 0 { + return machineHashNanoseconds * 2 + } + return machineHashNanoseconds * 10 + }, + }, + { + name: "periodic hash rate valleys", + runDuration: 10 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + if int(totalElapsedDuration.Seconds())%30 == 0 { + return machineHashNanoseconds * 10 + } + return machineHashNanoseconds * 2 + }, + }, + { + name: "constant exponential hash rate increase", + runDuration: 15 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + fromHashNanoseconds := machineHashNanoseconds * 10 + toHashNanoseconds := machineHashNanoseconds * 2 + + if totalElapsedDuration < 10*time.Minute { + exponentialIncreaseDuration := 10 * time.Minute + timeElapsedFraction := float64(totalElapsedDuration.Nanoseconds()) / float64(exponentialIncreaseDuration.Nanoseconds()) + + return fromHashNanoseconds - + int64(math.Pow(float64(fromHashNanoseconds-toHashNanoseconds), timeElapsedFraction)) + } + + // 5 minute cooldown. 
We expect the DAA to still be "catching up" at the end + // of the exponential increase so, for the sake of testing, we wait a while for + // the hash rate to stabilize + + return toHashNanoseconds + }, + }, + { + name: "constant exponential hash rate decrease", + runDuration: 45 * time.Minute, + targetHashNanosecondsFunction: func(totalElapsedDuration time.Duration) int64 { + fromHashNanoseconds := machineHashNanoseconds * 2 + toHashNanoseconds := machineHashNanoseconds * 10 + + if totalElapsedDuration < 10*time.Minute { + exponentialDecreaseDuration := 10 * time.Minute + timeElapsedFraction := float64(totalElapsedDuration.Nanoseconds()) / float64(exponentialDecreaseDuration.Nanoseconds()) + + return fromHashNanoseconds + + int64(math.Pow(float64(toHashNanoseconds-fromHashNanoseconds), timeElapsedFraction)) + } + + // 5 minute cooldown. We expect the DAA to still be "catching up" at the end + // of the exponential decrease so, for the sake of testing, we wait a while for + // the hash rate to stabilize + + return toHashNanoseconds + }, + }, + } + + for _, test := range tests { + runDAATest(t, test.name, test.runDuration, test.targetHashNanosecondsFunction) + } +} + +func measureMachineHashNanoseconds(t *testing.T) int64 { + t.Logf("Measuring machine hash rate") + defer t.Logf("Finished measuring machine hash rate") + + genesisBlock := dagconfig.DevnetParams.GenesisBlock + state := pow.NewState(genesisBlock.Header.ToMutable()) + + machineHashesPerSecondMeasurementDuration := 10 * time.Second + hashes := int64(0) + state.Nonce = rand.Uint64() + loopForDuration(machineHashesPerSecondMeasurementDuration, func(isFinished *bool) { + state.CheckProofOfWork() + hashes++ + state.IncrementNonce() + }) + + return machineHashesPerSecondMeasurementDuration.Nanoseconds() / hashes +} + +func runDAATest(t *testing.T, testName string, runDuration time.Duration, + targetHashNanosecondsFunction func(totalElapsedDuration time.Duration) int64) { + + t.Logf("DAA TEST STARTED: %s", 
testName) + defer t.Logf("DAA TEST FINISHED: %s", testName) + + tearDownSpectred := common.RunSpectredForTesting(t, "spectred-daa-test", rpcAddress) + defer tearDownSpectred() + + rpcClient, err := rpcclient.NewRPCClient(rpcAddress) + if err != nil { + t.Fatalf("NewRPCClient: %s", err) + } + + // These variables are for gathering stats. Useful mostly for debugging + averageHashDuration := newAverageDuration(averageHashRateSampleSize) + averageMiningDuration := newAverageDuration(averageBlockRateSampleSize) + previousDifficulty := float64(0) + blocksMined := 0 + + // Mine blocks the same way a CPU miner mines blocks until `runDuration` elapses + startTime := time.Now() + loopForDuration(runDuration, func(isFinished *bool) { + templateBlock := fetchBlockForMining(t, rpcClient) + headerForMining := templateBlock.Header.ToMutable() + minerState := pow.NewState(headerForMining) + + // Try hashes until we find a valid block + miningStartTime := time.Now() + minerState.Nonce = rand.Uint64() + for { + hashStartTime := time.Now() + + if minerState.CheckProofOfWork() { + headerForMining.SetNonce(minerState.Nonce) + templateBlock.Header = headerForMining.ToImmutable() + break + } + + // Throttle the hash rate by waiting until the target hash duration elapses + waitUntilTargetHashDurationHadElapsed(startTime, hashStartTime, targetHashNanosecondsFunction) + + // Collect stats about hash rate + hashDuration := time.Since(hashStartTime) + averageHashDuration.add(hashDuration) + + // Exit early if the test is finished + if *isFinished { + return + } + minerState.IncrementNonce() + } + + // Collect stats about block rate + miningDuration := time.Since(miningStartTime) + averageMiningDuration.add(miningDuration) + + logMinedBlockStatsAndUpdateStatFields(t, rpcClient, averageMiningDuration, averageHashDuration, startTime, + miningDuration, &previousDifficulty, &blocksMined) + + // Exit early if the test is finished + if *isFinished { + return + } + + submitMinedBlock(t, rpcClient, 
templateBlock) + }) + + averageMiningDurationInSeconds := averageMiningDuration.toDuration().Seconds() + expectedAverageMiningDurationInSeconds := float64(1) + deviation := math.Abs(expectedAverageMiningDurationInSeconds - averageMiningDurationInSeconds) + if deviation > blockRateDeviationThreshold { + t.Errorf("Block rate deviation %f is higher than threshold %f. Want: %f, got: %f", + deviation, blockRateDeviationThreshold, expectedAverageMiningDurationInSeconds, averageMiningDurationInSeconds) + } +} + +func fetchBlockForMining(t *testing.T, rpcClient *rpcclient.RPCClient) *externalapi.DomainBlock { + getBlockTemplateResponse, err := rpcClient.GetBlockTemplate(miningAddress, "") + if err != nil { + t.Fatalf("GetBlockTemplate: %s", err) + } + templateBlock, err := appmessage.RPCBlockToDomainBlock(getBlockTemplateResponse.Block) + if err != nil { + t.Fatalf("RPCBlockToDomainBlock: %s", err) + } + return templateBlock +} + +func waitUntilTargetHashDurationHadElapsed(startTime time.Time, hashStartTime time.Time, + targetHashNanosecondsFunction func(totalElapsedDuration time.Duration) int64) { + + // Yielding a thread in Go takes up to a few milliseconds whereas hashing once + // takes a few hundred nanoseconds, so we spin in place instead of e.g. 
calling time.Sleep() + for { + targetHashNanoseconds := targetHashNanosecondsFunction(time.Since(startTime)) + hashElapsedDurationNanoseconds := time.Since(hashStartTime).Nanoseconds() + if hashElapsedDurationNanoseconds >= targetHashNanoseconds { + break + } + } +} + +func logMinedBlockStatsAndUpdateStatFields(t *testing.T, rpcClient *rpcclient.RPCClient, + averageMiningDuration *averageDuration, averageHashDurations *averageDuration, + startTime time.Time, miningDuration time.Duration, previousDifficulty *float64, blocksMined *int) { + + averageMiningDurationAsDuration := averageMiningDuration.toDuration() + averageHashNanoseconds := averageHashDurations.toDuration().Nanoseconds() + averageHashesPerSecond := hashNanosecondsToHashesPerSecond(averageHashNanoseconds) + blockDAGInfoResponse, err := rpcClient.GetBlockDAGInfo() + if err != nil { + t.Fatalf("GetBlockDAGInfo: %s", err) + } + difficultyDelta := blockDAGInfoResponse.Difficulty - *previousDifficulty + *previousDifficulty = blockDAGInfoResponse.Difficulty + *blocksMined++ + t.Logf("Mined block. 
Took: %s, average block mining duration: %s, "+ + "average hashes per second: %d, difficulty delta: %f, time elapsed: %s, blocks mined: %d", + miningDuration, averageMiningDurationAsDuration, averageHashesPerSecond, difficultyDelta, time.Since(startTime), *blocksMined) +} + +func submitMinedBlock(t *testing.T, rpcClient *rpcclient.RPCClient, block *externalapi.DomainBlock) { + _, err := rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + t.Fatalf("SubmitBlock: %s", err) + } +} + +func hashNanosecondsToHashesPerSecond(hashNanoseconds int64) int64 { + return time.Second.Nanoseconds() / hashNanoseconds +} + +func loopForDuration(duration time.Duration, runFunction func(isFinished *bool)) { + isFinished := false + go func() { + for !isFinished { + runFunction(&isFinished) + } + }() + time.Sleep(duration) + isFinished = true +} diff --git a/stability-tests/daa/run/run.sh b/stability-tests/daa/run/run.sh new file mode 100755 index 0000000..5416024 --- /dev/null +++ b/stability-tests/daa/run/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +RUN_STABILITY_TESTS=true go test ../ -v -timeout 86400s +TEST_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "daa test: PASSED" + exit 0 +fi +echo "daa test: FAILED" +exit 1 diff --git a/stability-tests/docker/Dockerfile b/stability-tests/docker/Dockerfile new file mode 100644 index 0000000..f82a0bb --- /dev/null +++ b/stability-tests/docker/Dockerfile @@ -0,0 +1,30 @@ +ARG SPECTRED_IMAGE +ARG SPECTREMINER_IMAGE + +FROM ${SPECTRED_IMAGE} as spectred +FROM ${SPECTREMINER_IMAGE} as spectreminer + +FROM golang:1.19-alpine + +RUN mkdir -p /go/src/github.com/spectre-project/spectred/ + +WORKDIR /go/src/github.com/spectre-project/spectred/ + +RUN apk add bash build-base git + +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . 
+ +COPY --from=spectred /app/ /app/ +COPY --from=spectreminer /app/ /app/ +ENV PATH="/app:${PATH}" + +WORKDIR /go/src/github.com/spectre-project/spectred/stability-tests + +RUN go install ./... + +ENTRYPOINT ["./run/run.sh"] diff --git a/stability-tests/infra-level-garbage/README.md b/stability-tests/infra-level-garbage/README.md new file mode 100644 index 0000000..ec4847a --- /dev/null +++ b/stability-tests/infra-level-garbage/README.md @@ -0,0 +1,10 @@ +# Infra Level Garbage Generator + +This tool sends invalid messages to a node, making sure the node does +not crash as a result. + +## Running + +1. `go install` spectred and infra-level-garbage. +2. `cd run` +3. `./run.sh` diff --git a/stability-tests/infra-level-garbage/config.go b/stability-tests/infra-level-garbage/config.go new file mode 100644 index 0000000..4401314 --- /dev/null +++ b/stability-tests/infra-level-garbage/config.go @@ -0,0 +1,60 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + + "github.com/jessevdk/go-flags" +) + +const ( + defaultLogFilename = "infra_level_garbage.log" + defaultErrLogFilename = "infra_level_garbage_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + NodeP2PAddress string `long:"addr" short:"a" description:"node's P2P address"` + MessagesFilePath string `long:"messages" short:"m" description:"path of file containing malformed messages"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { 
+ cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + + _, err := parser.Parse() + + if err != nil { + if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp { + os.Exit(0) + } + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/infra-level-garbage/log.go b/stability-tests/infra-level-garbage/log.go new file mode 100644 index 0000000..3fadda8 --- /dev/null +++ b/stability-tests/infra-level-garbage/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("IFLG") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/infra-level-garbage/main.go b/stability-tests/infra-level-garbage/main.go new file mode 100644 index 0000000..3fb852c --- /dev/null +++ b/stability-tests/infra-level-garbage/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/profiling" +) + +const timeout = 5 * time.Second + +func main() { + err := parseConfig() + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err) + os.Exit(1) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + messagesChan := common.ScanHexFile(cfg.MessagesFilePath) + + err = sendMessages(cfg.NodeP2PAddress, messagesChan) + if err != nil { + log.Errorf("Error sending messages: %+v", err) + backendLog.Close() + os.Exit(1) + } +} diff --git a/stability-tests/infra-level-garbage/run/messages.dat 
b/stability-tests/infra-level-garbage/run/messages.dat new file mode 100644 index 0000000..99524f9 --- /dev/null +++ b/stability-tests/infra-level-garbage/run/messages.dat @@ -0,0 +1,6 @@ +// Some block message without proper headers +0100000000b01c3b9e0d9ac0800a08425002a3eadbedc8d0ad3503d80e113c7bb2b520e584000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001cd3155e00000000ffff7f2003000000000000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0e00000b2f503253482f627463642fffffffffffffffff000000000000000000010000000000000000000000000000000000000000000000000000008948d3239cf9882b63c7330fa364f2db39735f2ba8d57b5c3168c963375ce7412417a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b876b617370612d73696d6e6574 +// Yet another block message without proper headers +00000010011269cc4545745bf9d54e43564f1bdf3109b776aa2a3335c9a180e092bbaecd4989d3c593eaefeae6a6565d23fbf690baa3ca6d57879b891ff9b4a9e100f325d1f51149f1ce2eb20d5756cc018fa8fb33c30c5b22bee6e9f1115d91e664c21cac00000000000000000000000000000000000000000000000000000000000000001dd3155e00000000ffff7f200100000000000000010100000001ff69cc4545745bf9d54e43564f1bdf3109b776aa2a3335c9a180e092bbaecd49ffffffff00ffffffffffffffff0100f2052a0100000017a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b870000000000000000010000000000000000000000000000000000000000000000000000009d41187eeda4734f163dbcea2049cdbfcac7818ce24f81f1307c7fb67b9057e22817a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b8701000000000000002f6b61737061642f +// Something silly +4352ab48e65214869465465798713187437834576457854798436757946539738469873456983456983457684567 diff --git a/stability-tests/infra-level-garbage/run/run.sh b/stability-tests/infra-level-garbage/run/run.sh new file mode 100755 index 0000000..eee4a29 --- /dev/null +++ b/stability-tests/infra-level-garbage/run/run.sh @@ -0,0 +1,25 @@ +#!/bin/bash +rm -rf /tmp/spectred-temp + +spectred --devnet 
--appdir=/tmp/spectred-temp --profile=6061 & +SPECTRED_PID=$! + +sleep 1 + +infra-level-garbage --devnet -alocalhost:18611 -m messages.dat --profile=7000 +TEST_EXIT_CODE=$? + +kill $SPECTRED_PID + +wait $SPECTRED_PID +SPECTRED_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" +echo "Spectred exit code: $SPECTRED_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ] && [ $SPECTRED_EXIT_CODE -eq 0 ]; then + echo "infra-level-garbage test: PASSED" + exit 0 +fi +echo "infra-level-garbage test: FAILED" +exit 1 diff --git a/stability-tests/infra-level-garbage/send.go b/stability-tests/infra-level-garbage/send.go new file mode 100644 index 0000000..c353e08 --- /dev/null +++ b/stability-tests/infra-level-garbage/send.go @@ -0,0 +1,56 @@ +package main + +import ( + "encoding/hex" + "net" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/protocol/common" +) + +func sendMessages(address string, messagesChan <-chan []byte) error { + connection, err := dialToNode(address) + if err != nil { + return err + } + for message := range messagesChan { + messageHex := make([]byte, hex.EncodedLen(len(message))) + hex.Encode(messageHex, message) + log.Infof("Sending message %s", messageHex) + + err := sendMessage(connection, message) + if err != nil { + // if failed once, we might have been disconnected because of a previous message, + // so re-connect and retry before reporting error + connection, err = dialToNode(address) + if err != nil { + return err + } + err = sendMessage(connection, message) + if err != nil { + return err + } + } + } + + return nil +} + +func sendMessage(connection net.Conn, message []byte) error { + err := connection.SetDeadline(time.Now().Add(common.DefaultTimeout)) + if err != nil { + return errors.Wrap(err, "Error setting connection deadline") + } + + _, err = connection.Write(message) + return err +} + +func dialToNode(address string) (net.Conn, error) { + connection, err := net.Dial("tcp", address) + if err != nil { + return nil, 
errors.Wrap(err, "Error connecting to node") + } + return connection, nil +} diff --git a/stability-tests/install_and_test.sh b/stability-tests/install_and_test.sh new file mode 100755 index 0000000..7c9ff94 --- /dev/null +++ b/stability-tests/install_and_test.sh @@ -0,0 +1,33 @@ +#!/bin/bash -ex + +# add go binary path. +export PATH="${PATH}:$(go env GOPATH)/bin" + +# go preparation. +go version +go get -v -t -d ../... +go get -v -u golang.org/x/lint/golint +go install -v golang.org/x/lint/golint +go install -v honnef.co/go/tools/cmd/staticcheck@latest + +# list files whose formatting differs. +test -z "$(go fmt ./...)" + +# static analysis. +staticcheck -checks SA4006,SA4008,SA4009,SA4010,SA5003,SA1004,SA1014,SA1021,SA1023,SA1024,SA1025,SA1026,SA1027,SA1028,SA2000,SA2001,SA2003,SA4000,SA4001,SA4003,SA4004,SA4011,SA4012,SA4013,SA4014,SA4015,SA4016,SA4017,SA4018,SA4019,SA4020,SA4021,SA4022,SA4023,SA5000,SA5002,SA5004,SA5005,SA5007,SA5008,SA5009,SA5010,SA5011,SA5012,SA6001,SA6002,SA9001,SA9002,SA9003,SA9004,SA9005,SA9006,ST1019 ./... + +# vet checks. +go vet -composites=false -v ./... + +# look for style mistakes. +golint -set_exit_status ./... + +# install. +go install -v ../... + +# tests. 
+[ -n "${SLOW}" ] && { + ./run/run.sh slow +} || { + ./run/run.sh +} diff --git a/stability-tests/many-tips/config.go b/stability-tests/many-tips/config.go new file mode 100644 index 0000000..0b03e0f --- /dev/null +++ b/stability-tests/many-tips/config.go @@ -0,0 +1,51 @@ +package main + +import ( + "path/filepath" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/stability-tests/common" +) + +const ( + defaultLogFilename = "many_tips.log" + defaultErrLogFilename = "many_tips_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"` + NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine" required:"false"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + if err != nil { + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + initLog(defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/many-tips/log.go b/stability-tests/many-tips/log.go new file mode 100644 index 0000000..8ce022f --- /dev/null +++ b/stability-tests/many-tips/log.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + 
"github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("MATS") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, errLogFile string) { + level := logger.LevelDebug + if activeConfig().LogLevel != "" { + var ok bool + level, ok = logger.LevelFromString(activeConfig().LogLevel) + if !ok { + fmt.Fprintf(os.Stderr, "Log level %s doesn't exists", activeConfig().LogLevel) + os.Exit(1) + } + } + log.SetLevel(level) + common.InitBackend(backendLog, logFile, errLogFile) +} diff --git a/stability-tests/many-tips/main.go b/stability-tests/many-tips/main.go new file mode 100644 index 0000000..365ad0d --- /dev/null +++ b/stability-tests/many-tips/main.go @@ -0,0 +1,305 @@ +package main + +import ( + "fmt" + "math/rand" + "os" + "os/exec" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/utils/mining" + "github.com/spectre-project/spectred/util" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +const rpcAddress = "localhost:9000" + +func main() { + err := realMain() + if err != nil { + log.Criticalf("An error occurred: %+v", err) + backendLog.Close() + os.Exit(1) + } + backendLog.Close() +} + +func realMain() error { + defer panics.HandlePanic(log, "many-tips-main", nil) + err := parseConfig() + if err != nil { + return errors.Wrap(err, "Error in parseConfig") + } + common.UseLogger(backendLog, log.Level()) + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + teardown, err := startNode() + if err != nil { + return errors.Wrap(err, "Error in startNode") + } + defer teardown() + 
+ miningAddress, err := generateAddress() + if err != nil { + return errors.Wrap(err, "Failed generate a mining address") + } + rpcClient, err := rpc.ConnectToRPC(&rpc.Config{ + RPCServer: rpcAddress, + }, activeConfig().NetParams()) + if err != nil { + return errors.Wrap(err, "Error connecting to RPC server") + } + defer rpcClient.Disconnect() + + // Mine block that its timestamp is one second after the genesis timestamp. + blockTemplate, err := rpcClient.GetBlockTemplate(miningAddress.EncodeAddress(), "") + if err != nil { + return err + } + block, err := appmessage.RPCBlockToDomainBlock(blockTemplate.Block) + if err != nil { + return err + } + mutableHeader := block.Header.ToMutable() + genesisTimestamp := activeConfig().NetParams().GenesisBlock.Header.TimeInMilliseconds() + mutableHeader.SetTimeInMilliseconds(genesisTimestamp + 1000) + block.Header = mutableHeader.ToImmutable() + mining.SolveBlock(block, rand.New(rand.NewSource(time.Now().UnixNano()))) + _, err = rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + return err + } + // mine block at the current time + err = mineBlock(rpcClient, miningAddress) + if err != nil { + return errors.Wrap(err, "Error in mineBlock") + } + // Mine on top of it 10k tips. + numOfTips := 10000 + err = mineTips(numOfTips, miningAddress, rpcClient) + if err != nil { + return errors.Wrap(err, "Error in mineTips") + } + // Mines until the DAG will have only one tip. 
+ err = mineLoopUntilHavingOnlyOneTipInDAG(rpcClient, miningAddress) + if err != nil { + return errors.Wrap(err, "Error in mineLoop") + } + return nil +} + +func startNode() (teardown func(), err error) { + log.Infof("Starting node") + dataDir, err := common.TempDir("spectred-datadir") + if err != nil { + panic(errors.Wrapf(err, "Error in Tempdir")) + } + log.Infof("spectred datadir: %s", dataDir) + + spectredCmd, err := common.StartCmd("SPECTRED", + "spectred", + common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()), + "--appdir", dataDir, + "--logdir", dataDir, + "--rpclisten", rpcAddress, + "--loglevel", "debug", + "--allow-submit-block-when-not-synced", + ) + if err != nil { + return nil, err + } + shutdown := uint64(0) + + processesStoppedWg := sync.WaitGroup{} + processesStoppedWg.Add(1) + spawn("startNode-spectredCmd.Wait", func() { + err := spectredCmd.Wait() + if err != nil { + if atomic.LoadUint64(&shutdown) == 0 { + panics.Exit(log, fmt.Sprintf("spectredCmd closed unexpectedly: %s. See logs at: %s", err, dataDir)) + } + if !strings.Contains(err.Error(), "signal: killed") { + // TODO: Panic here and check why sometimes spectred closes ungracefully + log.Errorf("spectredCmd closed with an error: %s. 
See logs at: %s", err, dataDir) + } + } + processesStoppedWg.Done() + }) + return func() { + log.Infof("defer start-node") + atomic.StoreUint64(&shutdown, 1) + killWithSigterm(spectredCmd, "spectredCmd") + + processesStoppedChan := make(chan struct{}) + spawn("startNode-processStoppedWg.Wait", func() { + processesStoppedWg.Wait() + processesStoppedChan <- struct{}{} + }) + + const timeout = 10 * time.Second + select { + case <-processesStoppedChan: + case <-time.After(timeout): + panics.Exit(log, fmt.Sprintf("Processes couldn't be closed after %s", timeout)) + } + }, nil +} + +func generateAddress() (util.Address, error) { + privateKey, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + return nil, err + } + pubKey, err := privateKey.SchnorrPublicKey() + if err != nil { + return nil, err + } + pubKeySerialized, err := pubKey.Serialize() + if err != nil { + return nil, err + } + return util.NewAddressPublicKey(pubKeySerialized[:], activeConfig().ActiveNetParams.Prefix) +} + +func mineBlock(rpcClient *rpc.Client, miningAddress util.Address) error { + blockTemplate, err := rpcClient.GetBlockTemplate(miningAddress.EncodeAddress(), "") + if err != nil { + return err + } + block, err := appmessage.RPCBlockToDomainBlock(blockTemplate.Block) + if err != nil { + return err + } + mining.SolveBlock(block, rand.New(rand.NewSource(time.Now().UnixNano()))) + _, err = rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + return err + } + return nil +} + +func mineTips(numOfTips int, miningAddress util.Address, rpcClient *rpc.Client) error { + blockTemplate, err := rpcClient.GetBlockTemplate(miningAddress.EncodeAddress(), "") + if err != nil { + return err + } + block, err := appmessage.RPCBlockToDomainBlock(blockTemplate.Block) + if err != nil { + return err + } + rd := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < numOfTips; i++ { + mining.SolveBlock(block, rd) + _, err = rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + return 
err + } + if (i%1000 == 0) && (i != 0) { + log.Infof("Mined %d blocks.", i) + } + } + dagInfo, err := rpcClient.GetBlockDAGInfo() + if err != nil { + return err + } + log.Infof("There are %d tips in the DAG", len(dagInfo.TipHashes)) + return nil +} + +// Checks how many blocks were mined and how long it took to get only one tip in the DAG (after having 10k tips in the DAG). +func mineLoopUntilHavingOnlyOneTipInDAG(rpcClient *rpc.Client, miningAddress util.Address) error { + dagInfo, err := rpcClient.GetBlockDAGInfo() + if err != nil { + return errors.Wrapf(err, "error in GetBlockDAGInfo") + } + numOfBlocksBeforeMining := dagInfo.BlockCount + + spectreminerCmd, err := common.StartCmd("MINER", + "spectreminer", + common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()), + "-s", rpcAddress, + "--mine-when-not-synced", + "--miningaddr", miningAddress.EncodeAddress(), + "--target-blocks-per-second=0", + ) + if err != nil { + return err + } + startMiningTime := time.Now() + shutdown := uint64(0) + + spawn("spectre-miner-Cmd.Wait", func() { + err := spectreminerCmd.Wait() + if err != nil { + if atomic.LoadUint64(&shutdown) == 0 { + panics.Exit(log, fmt.Sprintf("minerCmd closed unexpectedly: %s.", err)) + } + if !strings.Contains(err.Error(), "signal: killed") { + // TODO: Panic here and check why sometimes miner closes ungracefully + log.Errorf("minerCmd closed with an error: %s", err) + } + } + }) + + numOfTips, err := getCurrentTipsLength(rpcClient) + if err != nil { + return errors.Wrapf(err, "Error in getCurrentTipsLength") + } + hasTimedOut := false + spawn("ChecksIfTimeIsUp", func() { + timer := time.NewTimer(30 * time.Minute) + <-timer.C + hasTimedOut = true + }) + for numOfTips > 1 && !hasTimedOut { + time.Sleep(1 * time.Second) + numOfTips, err = getCurrentTipsLength(rpcClient) + if err != nil { + return errors.Wrapf(err, "Error in getCurrentTipsLength") + } + } + + if hasTimedOut { + return errors.Errorf("Out of time - the graph still has more than one 
tip.") + } + duration := time.Since(startMiningTime) + log.Infof("It took %s until there was only one tip in the DAG after having 10k tips.", duration) + dagInfo, err = rpcClient.GetBlockDAGInfo() + if err != nil { + return errors.Wrapf(err, "Failed in GetBlockDAGInfo") + } + numOfAddedBlocks := dagInfo.BlockCount - numOfBlocksBeforeMining + log.Infof("Added %d blocks to reach this.", numOfAddedBlocks) + atomic.StoreUint64(&shutdown, 1) + killWithSigterm(spectreminerCmd, "spectreminerCmd") + return nil +} + +func getCurrentTipsLength(rpcClient *rpc.Client) (int, error) { + dagInfo, err := rpcClient.GetBlockDAGInfo() + if err != nil { + return 0, err + } + log.Infof("Current number of tips is %d", len(dagInfo.TipHashes)) + return len(dagInfo.TipHashes), nil +} + +func killWithSigterm(cmd *exec.Cmd, commandName string) { + err := cmd.Process.Signal(syscall.SIGTERM) + if err != nil { + log.Criticalf("Error sending SIGKILL to %s", commandName) + } +} diff --git a/stability-tests/many-tips/run/run.sh b/stability-tests/many-tips/run/run.sh new file mode 100755 index 0000000..604deaa --- /dev/null +++ b/stability-tests/many-tips/run/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -e +many-tips --devnet -n=1000 --profile=7000 +TEST_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "many-tips test: PASSED" + exit 0 +fi +echo "many-tips test: FAILED" +exit 1 diff --git a/stability-tests/mempool-limits/README.md b/stability-tests/mempool-limits/README.md new file mode 100644 index 0000000..fcf24d6 --- /dev/null +++ b/stability-tests/mempool-limits/README.md @@ -0,0 +1,13 @@ +# Mempool Limits Tool + +This tool: + +1. Fills up the mempool beyond its transaction limit to make sure + eviction works correctly. +2. Mines blocks until the mempool is expected to become empty. + +## Running + +1. `go install` spectred and mempool-limits. +2. `cd run` +3. 
`./run.sh` diff --git a/stability-tests/mempool-limits/config.go b/stability-tests/mempool-limits/config.go new file mode 100644 index 0000000..6727578 --- /dev/null +++ b/stability-tests/mempool-limits/config.go @@ -0,0 +1,44 @@ +package mempoollimits + +import ( + "path/filepath" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/stability-tests/common" +) + +const ( + defaultLogFilename = "mempool-limits.log" + defaultErrLogFilename = "mempool-limits_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + LogLevel string `long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + SpectredRPCAddress string `long:"rpc-address" description:"RPC address of the spectred node"` +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag|flags.IgnoreUnknown) + _, err := parser.Parse() + if err != nil { + return err + } + + initLog(defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/mempool-limits/log.go b/stability-tests/mempool-limits/log.go new file mode 100644 index 0000000..e0853f8 --- /dev/null +++ b/stability-tests/mempool-limits/log.go @@ -0,0 +1,30 @@ +package mempoollimits + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("MPLM") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, 
errLogFile string) { + level := logger.LevelInfo + if activeConfig().LogLevel != "" { + var ok bool + level, ok = logger.LevelFromString(activeConfig().LogLevel) + if !ok { + fmt.Fprintf(os.Stderr, "Log level %s doesn't exists", activeConfig().LogLevel) + os.Exit(1) + } + } + log.SetLevel(level) + common.InitBackend(backendLog, logFile, errLogFile) +} diff --git a/stability-tests/mempool-limits/main_test.go b/stability-tests/mempool-limits/main_test.go new file mode 100644 index 0000000..e61330f --- /dev/null +++ b/stability-tests/mempool-limits/main_test.go @@ -0,0 +1,112 @@ +package mempoollimits + +import ( + "os" + "testing" + + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +const ( + mempoolSizeLimit = 1_000_000 + overfillMempoolByAmount = 1_000 +) + +func TestMempoolLimits(t *testing.T) { + if os.Getenv("RUN_STABILITY_TESTS") == "" { + t.Skip() + } + + defer panics.HandlePanic(log, "mempool-limits-main", nil) + err := parseConfig() + if err != nil { + t.Fatalf("error in parseConfig: %s", err) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + payAddressKeyPair := decodePayAddressKeyPair(t) + payToPayAddressScript := buildPayToPayAddressScript(t) + rpcClient := buildRPCClient(t) + + // Create enough funds for the test + fundingTransactions := generateFundingCoinbaseTransactions(t, rpcClient) + + // Fill up the mempool to the brim + submitAnAmountOfTransactionsToTheMempool(t, rpcClient, payAddressKeyPair, + payToPayAddressScript, fundingTransactions, mempoolSizeLimit, false) + + // Make sure that the mempool size is exactly the limit + mempoolSize := getMempoolSize(t, rpcClient) + if mempoolSize != mempoolSizeLimit { + t.Fatalf("Unexpected mempool 
size. Want: %d, got: %d", + mempoolSizeLimit, mempoolSize) + } + + // Add some more transactions to the mempool. We expect the + // mempool to either not grow or even to shrink, since an eviction + // may also remove any dependant (chained) transactions. + // Note that we pass ignoreOrphanRejects: true because we + // expect some of the submitted transactions to depend on + // transactions that had been evicted from the mempool + submitAnAmountOfTransactionsToTheMempool(t, rpcClient, payAddressKeyPair, + payToPayAddressScript, fundingTransactions, overfillMempoolByAmount, true) + + // Make sure that the mempool size is the limit or smaller + mempoolSize = getMempoolSize(t, rpcClient) + if mempoolSize > mempoolSizeLimit { + t.Fatalf("Unexpected mempool size. Want at most: %d, got: %d", + mempoolSizeLimit, mempoolSize) + } + + // Empty mempool out by continuously adding blocks to the DAG + emptyOutMempool(t, rpcClient) + + log.Infof("mempool-limits passed") +} + +func buildRPCClient(t *testing.T) *rpcclient.RPCClient { + client, err := rpcclient.NewRPCClient(activeConfig().SpectredRPCAddress) + if err != nil { + t.Fatalf("error connecting to %s: %s", activeConfig().SpectredRPCAddress, err) + } + return client +} + +func getMempoolSize(t *testing.T, rpcClient *rpcclient.RPCClient) uint64 { + getInfoResponse, err := rpcClient.GetInfo() + if err != nil { + t.Fatalf("GetInfo: %+v", err) + } + return getInfoResponse.MempoolSize +} + +func emptyOutMempool(t *testing.T, rpcClient *rpcclient.RPCClient) { + log.Infof("Adding blocks until mempool shrinks to 0 transactions") + getInfoResponse, err := rpcClient.GetInfo() + if err != nil { + t.Fatalf("GetInfo: %+v", err) + } + currentMempoolSize := getInfoResponse.MempoolSize + for currentMempoolSize > 0 { + mineBlockAndGetCoinbaseTransaction(t, rpcClient) + getInfoResponse, err := rpcClient.GetInfo() + if err != nil { + t.Fatalf("GetInfo: %+v", err) + } + if getInfoResponse.MempoolSize == currentMempoolSize { + t.Fatalf("Mempool 
did not shrink after a block was added to the DAG") + } + log.Infof("Mempool shrank from %d transactions to %d transactions", + currentMempoolSize, getInfoResponse.MempoolSize) + currentMempoolSize = getInfoResponse.MempoolSize + } +} diff --git a/stability-tests/mempool-limits/run/run.sh b/stability-tests/mempool-limits/run/run.sh new file mode 100755 index 0000000..e5142ce --- /dev/null +++ b/stability-tests/mempool-limits/run/run.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +APPDIR=/tmp/spectred-temp +SPECTRED_RPC_PORT=29587 + +rm -rf "${APPDIR}" + +spectred --simnet --appdir="${APPDIR}" --rpclisten=0.0.0.0:"${SPECTRED_RPC_PORT}" --profile=6061 & +SPECTRED_PID=$! + +sleep 1 + +RUN_STABILITY_TESTS=true go test ../ -v -timeout 86400s -- --rpc-address=127.0.0.1:"${SPECTRED_RPC_PORT}" --profile=7000 +TEST_EXIT_CODE=$? + +kill $SPECTRED_PID + +wait $SPECTRED_PID +SPECTRED_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" +echo "Spectred exit code: $SPECTRED_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ] && [ $SPECTRED_EXIT_CODE -eq 0 ]; then + echo "mempool-limits test: PASSED" + exit 0 +fi +echo "mempool-limits test: FAILED" +exit 1 diff --git a/stability-tests/mempool-limits/transactions.go b/stability-tests/mempool-limits/transactions.go new file mode 100644 index 0000000..47a1f7d --- /dev/null +++ b/stability-tests/mempool-limits/transactions.go @@ -0,0 +1,201 @@ +package mempoollimits + +import ( + "encoding/hex" + "strings" + "testing" + + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/subnetworks" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + 
"github.com/spectre-project/spectred/domain/consensus/utils/txscript" + utxopkg "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" + "github.com/spectre-project/spectred/stability-tests/common/mine" + "github.com/spectre-project/spectred/util" +) + +const ( + payAddress = "spectresim:qzuax2jhawd354e54thhpd9m9wg03pdzwjlpr4vtq3k7xrpumhhtwyt2qar3r" + payAddressPrivateKey = "05d8f681e954a550395ee2297fc1a14f6e801f554c0b9d48cd7165a7ea72ff77" + fundingCoinbaseTransactionAmount = 1000 + outputsPerTransaction = 3 + transactionFee = 1000 + coinbaseMaturity = 100 +) + +// fundingCoinbaseTransactions contains a collection of transactions +// to be utilized when generating further transactions to fill up +// the mempool. +// It's a separate type because we modify the transactions in place +// whenever we pass an instance of this type into +// submitAnAmountOfTransactionsToTheMempool. 
+type fundingCoinbaseTransactions struct { + transactions []*externalapi.DomainTransaction +} + +func generateFundingCoinbaseTransactions(t *testing.T, rpcClient *rpcclient.RPCClient) *fundingCoinbaseTransactions { + // Mine a block, since we need at least one block above the genesis + // to create a spendable UTXO + mineBlockAndGetCoinbaseTransaction(t, rpcClient) + + log.Infof("Generating funding coinbase transactions") + fundingCoinbaseTransactions := &fundingCoinbaseTransactions{ + transactions: make([]*externalapi.DomainTransaction, fundingCoinbaseTransactionAmount), + } + for i := 0; i < fundingCoinbaseTransactionAmount; i++ { + fundingCoinbaseTransactions.transactions[i] = mineBlockAndGetCoinbaseTransaction(t, rpcClient) + } + + log.Infof("Maturing funding coinbase transactions") + for i := 0; i < coinbaseMaturity; i++ { + mineBlockAndGetCoinbaseTransaction(t, rpcClient) + } + + return fundingCoinbaseTransactions +} + +func submitAnAmountOfTransactionsToTheMempool(t *testing.T, rpcClient *rpcclient.RPCClient, + payAddressKeyPair *secp256k1.SchnorrKeyPair, payToPayAddressScript *externalapi.ScriptPublicKey, + fundingTransactions *fundingCoinbaseTransactions, amountToSubmit int, ignoreOrphanRejects bool) { + + log.Infof("Generating %d transactions", amountToSubmit) + transactions := make([]*externalapi.DomainTransaction, 0) + for len(transactions) < amountToSubmit { + var coinbaseTransaction *externalapi.DomainTransaction + coinbaseTransaction, fundingTransactions.transactions = fundingTransactions.transactions[0], fundingTransactions.transactions[1:] + + unspentTransactions := []*externalapi.DomainTransaction{coinbaseTransaction} + for len(transactions) < amountToSubmit && len(unspentTransactions) > 0 { + var transactionToSpend *externalapi.DomainTransaction + transactionToSpend, unspentTransactions = unspentTransactions[0], unspentTransactions[1:] + spendingTransactions := generateTransactionsWithMultipleOutputs(t, payAddressKeyPair, payToPayAddressScript, 
transactionToSpend) + transactions = append(transactions, spendingTransactions...) + unspentTransactions = append(unspentTransactions, spendingTransactions...) + } + log.Infof("Generated %d transactions", len(transactions)) + } + + transactions = transactions[:amountToSubmit] + log.Infof("Submitting %d transactions", len(transactions)) + + for i, transaction := range transactions { + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(transaction) + _, err := rpcClient.SubmitTransaction(rpcTransaction, false) + if err != nil { + if ignoreOrphanRejects && strings.Contains(err.Error(), "orphan") { + continue + } + t.Fatalf("SubmitTransaction: %+v", err) + } + log.Infof("Submitted %d transactions", i+1) + } +} + +func mineBlockAndGetCoinbaseTransaction(t *testing.T, rpcClient *rpcclient.RPCClient) *externalapi.DomainTransaction { + getBlockTemplateResponse, err := rpcClient.GetBlockTemplate(payAddress, "") + if err != nil { + t.Fatalf("GetBlockTemplate: %+v", err) + } + templateBlock, err := appmessage.RPCBlockToDomainBlock(getBlockTemplateResponse.Block) + if err != nil { + t.Fatalf("RPCBlockToDomainBlock: %+v", err) + } + mine.SolveBlock(templateBlock) + _, err = rpcClient.SubmitBlockAlsoIfNonDAA(templateBlock) + if err != nil { + t.Fatalf("SubmitBlock: %+v", err) + } + return templateBlock.Transactions[transactionhelper.CoinbaseTransactionIndex] +} + +func generateTransactionsWithMultipleOutputs(t *testing.T, + payAddressKeyPair *secp256k1.SchnorrKeyPair, payToPayAddressScript *externalapi.ScriptPublicKey, + fundingTransaction *externalapi.DomainTransaction) []*externalapi.DomainTransaction { + + var transactions []*externalapi.DomainTransaction + for fundingTransactionOutputIndex, fundingTransactionOutput := range fundingTransaction.Outputs { + if fundingTransactionOutput.Value < transactionFee { + continue + } + outputValue := (fundingTransactionOutput.Value - transactionFee) / outputsPerTransaction + + fundingTransactionID := 
consensushashing.TransactionID(fundingTransaction) + spendingTransactionInputs := []*externalapi.DomainTransactionInput{ + { + PreviousOutpoint: externalapi.DomainOutpoint{ + TransactionID: *fundingTransactionID, + Index: uint32(fundingTransactionOutputIndex), + }, + UTXOEntry: utxopkg.NewUTXOEntry( + fundingTransactionOutput.Value, + payToPayAddressScript, + false, + 0), + }, + } + + spendingTransactionOutputs := make([]*externalapi.DomainTransactionOutput, outputsPerTransaction) + for i := 0; i < outputsPerTransaction; i++ { + spendingTransactionOutputs[i] = &externalapi.DomainTransactionOutput{ + Value: outputValue, + ScriptPublicKey: payToPayAddressScript, + } + } + + spendingTransaction := &externalapi.DomainTransaction{ + Version: constants.MaxTransactionVersion, + Inputs: spendingTransactionInputs, + Outputs: spendingTransactionOutputs, + LockTime: 0, + SubnetworkID: subnetworks.SubnetworkIDNative, + Gas: 0, + Payload: nil, + } + + for spendingTransactionInputIndex, spendingTransactionInput := range spendingTransactionInputs { + signatureScript, err := txscript.SignatureScript( + spendingTransaction, + spendingTransactionInputIndex, + consensushashing.SigHashAll, + payAddressKeyPair, + &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("SignatureScript: %+v", err) + } + spendingTransactionInput.SignatureScript = signatureScript + } + + transactions = append(transactions, spendingTransaction) + } + return transactions +} + +func decodePayAddressKeyPair(t *testing.T) *secp256k1.SchnorrKeyPair { + privateKeyBytes, err := hex.DecodeString(payAddressPrivateKey) + if err != nil { + t.Fatalf("DecodeString: %+v", err) + } + keyPair, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes) + if err != nil { + t.Fatalf("DeserializeSchnorrPrivateKeyFromSlice: %+v", err) + } + return keyPair +} + +func buildPayToPayAddressScript(t *testing.T) *externalapi.ScriptPublicKey { + address, err := util.DecodeAddress(payAddress, 
dagconfig.SimnetParams.Prefix) + if err != nil { + t.Fatalf("DecodeAddress: %+v", err) + } + script, err := txscript.PayToAddrScript(address) + if err != nil { + t.Fatalf("PayToAddrScript: %+v", err) + } + return script +} diff --git a/stability-tests/minejson/config.go b/stability-tests/minejson/config.go new file mode 100644 index 0000000..8e95084 --- /dev/null +++ b/stability-tests/minejson/config.go @@ -0,0 +1,61 @@ +package main + +import ( + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + + "github.com/jessevdk/go-flags" +) + +const ( + defaultLogFilename = "minejson.log" + defaultErrLogFilename = "minejson_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + rpc.Config + DAGFile string `long:"dag-file" description:"Path to DAG JSON file"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + + if err != nil { + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + err = rpc.ValidateRPCConfig(&cfg.Config) + if err != nil { + return err + } + + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + return nil +} diff --git a/stability-tests/minejson/example.json b/stability-tests/minejson/example.json new file mode 100644 index 0000000..3af03a4 
--- /dev/null +++ b/stability-tests/minejson/example.json @@ -0,0 +1,30 @@ +[ + { + "id": "0" + }, + { + "id": "1", + "parents": [ + "0" + ] + }, + { + "id": "2", + "parents": [ + "0" + ] + }, + { + "id": "3", + "parents": [ + "1", + "2" + ] + }, + { + "id": "4", + "parents": [ + "1" + ] + } +] \ No newline at end of file diff --git a/stability-tests/minejson/log.go b/stability-tests/minejson/log.go new file mode 100644 index 0000000..30a985e --- /dev/null +++ b/stability-tests/minejson/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("MNJS") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/minejson/main.go b/stability-tests/minejson/main.go new file mode 100644 index 0000000..003f8a8 --- /dev/null +++ b/stability-tests/minejson/main.go @@ -0,0 +1,43 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/mine" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + defer panics.HandlePanic(log, "minejson-main", nil) + err := parseConfig() + if err != nil { + panic(errors.Wrap(err, "error parsing configuration")) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + rpcClient, err := rpc.ConnectToRPC(&cfg.Config, cfg.NetParams()) + if err != nil { + panic(errors.Wrap(err, "error connecting to JSON-RPC server")) + } + defer rpcClient.Disconnect() + + dataDir, err := common.TempDir("minejson") + if err != nil { + panic(err) + } + 
+ consensusConfig := consensus.Config{Params: *cfg.NetParams()} + + err = mine.FromFile(cfg.DAGFile, &consensusConfig, rpcClient, dataDir) + if err != nil { + panic(errors.Wrap(err, "error in mine.FromFile")) + } +} diff --git a/stability-tests/netsync/README.md b/stability-tests/netsync/README.md new file mode 100644 index 0000000..d41bbb8 --- /dev/null +++ b/stability-tests/netsync/README.md @@ -0,0 +1,14 @@ +# Netsync Stability Tester + +This tests that the netsync is at least 5 blocks per second. + +Note: the test doesn't delete spectred's data directory and it's the +user responsibility to delete the data directories that appear in the +log. + +## Running + +1. `go install spectred` +2. `go install ./...` +3. `cd run` +4. `./run.sh` diff --git a/stability-tests/netsync/chain_generator/config.go b/stability-tests/netsync/chain_generator/config.go new file mode 100644 index 0000000..5bf53f3 --- /dev/null +++ b/stability-tests/netsync/chain_generator/config.go @@ -0,0 +1,44 @@ +package main + +import ( + "path/filepath" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/stability-tests/common" +) + +const ( + defaultLogFilename = "netsync.log" + defaultErrLogFilename = "netsync_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"` + NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine" required:"true"` + TargetFile string `short:"f" long:"targetfile" description:"The target file for the JSON" required:"true"` +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, 
flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + if err != nil { + return err + } + + initLog(defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/netsync/chain_generator/log.go b/stability-tests/netsync/chain_generator/log.go new file mode 100644 index 0000000..3a3196b --- /dev/null +++ b/stability-tests/netsync/chain_generator/log.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("CHGN") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, errLogFile string) { + level := logger.LevelInfo + if activeConfig().LogLevel != "" { + var ok bool + level, ok = logger.LevelFromString(activeConfig().LogLevel) + if !ok { + fmt.Fprintf(os.Stderr, "Log level %s doesn't exists", activeConfig().LogLevel) + os.Exit(1) + } + } + log.SetLevel(level) + common.InitBackend(backendLog, logFile, errLogFile) +} diff --git a/stability-tests/netsync/chain_generator/main.go b/stability-tests/netsync/chain_generator/main.go new file mode 100644 index 0000000..e08aa2f --- /dev/null +++ b/stability-tests/netsync/chain_generator/main.go @@ -0,0 +1,57 @@ +package main + +import ( + "encoding/json" + "os" + "strconv" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/mine" +) + +func main() { + err := parseConfig() + if err != nil { + panic(errors.Wrap(err, "error in parseConfig")) + } + common.UseLogger(backendLog, log.Level()) + + blocks := generateBlocks() + err = writeJSONToFile(blocks, cfg.TargetFile) + if err != nil { + panic(errors.Wrap(err, "error in writeJSONToFile")) + } +} + +func generateBlocks() []mine.JSONBlock { + numBlocks := int(activeConfig().NumberOfBlocks) + 
blocks := make([]mine.JSONBlock, 0, numBlocks) + blocks = append(blocks, mine.JSONBlock{ + ID: "0", + }) + for i := 1; i < numBlocks; i++ { + blocks = append(blocks, mine.JSONBlock{ + ID: strconv.Itoa(i), + Parents: []string{strconv.Itoa(i - 1)}, + }) + } + + return blocks +} + +func writeJSONToFile(blocks []mine.JSONBlock, fileName string) error { + f, err := openFile(fileName) + if err != nil { + return errors.Wrap(err, "error in openFile") + } + encoder := json.NewEncoder(f) + err = encoder.Encode(blocks) + return errors.Wrap(err, "error in Encode") +} + +func openFile(name string) (*os.File, error) { + os.Remove(name) + f, err := os.Create(name) + return f, errors.WithStack(err) +} diff --git a/stability-tests/netsync/check_resolve_virtual.go b/stability-tests/netsync/check_resolve_virtual.go new file mode 100644 index 0000000..158761c --- /dev/null +++ b/stability-tests/netsync/check_resolve_virtual.go @@ -0,0 +1,41 @@ +package main + +import ( + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/stability-tests/common/rpc" +) + +func checkResolveVirtual(syncerClient, syncedClient *rpc.Client) error { + err := syncedClient.RegisterForBlockAddedNotifications() + if err != nil { + return errors.Wrap(err, "error registering for blockAdded notifications") + } + + syncedBlockCountResponse, err := syncedClient.GetBlockCount() + if err != nil { + return err + } + + rejectReason, err := mineOnTips(syncerClient) + if err != nil { + panic(err) + } + if rejectReason != appmessage.RejectReasonNone { + panic(fmt.Sprintf("mined block rejected: %s", rejectReason)) + } + + expectedDuration := time.Duration(syncedBlockCountResponse.BlockCount) * 100 * time.Millisecond + start := time.Now() + select { + case <-time.After(expectedDuration): + return errors.Errorf("it took more than %s to resolve the virtual", expectedDuration) + case <-syncedClient.OnBlockAdded: + } + + log.Infof("It took %s to 
resolve the virtual", time.Since(start)) + return nil +} diff --git a/stability-tests/netsync/check_sync_rate.go b/stability-tests/netsync/check_sync_rate.go new file mode 100644 index 0000000..74841e7 --- /dev/null +++ b/stability-tests/netsync/check_sync_rate.go @@ -0,0 +1,55 @@ +package main + +import ( + "time" + + "github.com/spectre-project/spectred/stability-tests/common/rpc" + + "github.com/pkg/errors" +) + +func checkSyncRate(syncerClient, syncedClient *rpc.Client) error { + log.Info("Checking the sync rate") + syncerBlockCountResponse, err := syncerClient.GetBlockCount() + if err != nil { + return err + } + + syncerGetSelectedTipHashResponse, err := syncerClient.GetSelectedTipHash() + if err != nil { + return err + } + + syncerHeadersCount := syncerBlockCountResponse.HeaderCount + syncerBlockCount := syncerBlockCountResponse.BlockCount + log.Infof("SYNCER block count: %d headers and %d blocks", syncerHeadersCount, syncerBlockCount) + // We give 5 seconds for IBD to start and then 100 milliseconds for each block. 
+ expectedTime := time.Now().Add(5*time.Second + time.Duration(syncerHeadersCount)*100*time.Millisecond) + start := time.Now() + const tickDuration = 10 * time.Second + ticker := time.NewTicker(tickDuration) + defer ticker.Stop() + for range ticker.C { + log.Info("Getting SYNCED block count") + syncedBlockCountResponse, err := syncedClient.GetBlockCount() + if err != nil { + return err + } + log.Infof("SYNCED block count: %d headers and %d blocks", syncedBlockCountResponse.HeaderCount, + syncedBlockCountResponse.BlockCount) + + syncedGetSelectedTipHashResponse, err := syncedClient.GetSelectedTipHash() + if err != nil { + return err + } + + if syncedGetSelectedTipHashResponse.SelectedTipHash == syncerGetSelectedTipHashResponse.SelectedTipHash { + break + } + if time.Now().After(expectedTime) { + return errors.Errorf("SYNCED is not synced in the expected rate") + } + } + log.Infof("IBD took approximately %s", time.Since(start)) + return nil +} diff --git a/stability-tests/netsync/config.go b/stability-tests/netsync/config.go new file mode 100644 index 0000000..be735ae --- /dev/null +++ b/stability-tests/netsync/config.go @@ -0,0 +1,55 @@ +package main + +import ( + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/stability-tests/common" + + "github.com/jessevdk/go-flags" +) + +const ( + defaultLogFilename = "netsync.log" + defaultErrLogFilename = "netsync_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"` + DAGFile string `long:"dag-file" description:"Path to DAG JSON file"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 
65536"` + MiningDataDirectory string `long:"mining-data-dir" description:"Mining Data directory (will generate a random one if omitted)"` + SyncerDataDirectory string `long:"syncer-data-dir" description:"Syncer Data directory (will generate a random one if omitted)"` + SynceeDataDirectory string `long:"syncee-data-dir" description:"Syncee Data directory (will generate a random one if omitted)"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + if err != nil { + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + initLog(defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/netsync/dags-fast/000170-chain.json.gz b/stability-tests/netsync/dags-fast/000170-chain.json.gz new file mode 100644 index 0000000..c0f1c0a Binary files /dev/null and b/stability-tests/netsync/dags-fast/000170-chain.json.gz differ diff --git a/stability-tests/netsync/dags-fast/100-chain.json.gz b/stability-tests/netsync/dags-fast/100-chain.json.gz new file mode 100644 index 0000000..f780b8b Binary files /dev/null and b/stability-tests/netsync/dags-fast/100-chain.json.gz differ diff --git a/stability-tests/netsync/dags-fast/wide-dag-blocks--2^12-delay-factor--1-k--18.json.gz b/stability-tests/netsync/dags-fast/wide-dag-blocks--2^12-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..9701968 Binary files /dev/null and b/stability-tests/netsync/dags-fast/wide-dag-blocks--2^12-delay-factor--1-k--18.json.gz differ diff --git a/stability-tests/netsync/dags-slow/100000-chain.json.gz b/stability-tests/netsync/dags-slow/100000-chain.json.gz new file mode 100644 index 0000000..31f52f5 Binary files /dev/null and b/stability-tests/netsync/dags-slow/100000-chain.json.gz differ diff --git 
a/stability-tests/netsync/dags-slow/100070-chain.json.gz b/stability-tests/netsync/dags-slow/100070-chain.json.gz new file mode 100644 index 0000000..9a523ac Binary files /dev/null and b/stability-tests/netsync/dags-slow/100070-chain.json.gz differ diff --git a/stability-tests/netsync/dags-slow/many-small-chains-and-one-big-chain.json.gz b/stability-tests/netsync/dags-slow/many-small-chains-and-one-big-chain.json.gz new file mode 100644 index 0000000..7c2cccf Binary files /dev/null and b/stability-tests/netsync/dags-slow/many-small-chains-and-one-big-chain.json.gz differ diff --git a/stability-tests/netsync/dags-slow/wide-dag-blocks--2^16-delay-factor--1-k--18.json.gz b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^16-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..fcd885f Binary files /dev/null and b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^16-delay-factor--1-k--18.json.gz differ diff --git a/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--1-k--18.json.gz b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--1-k--18.json.gz new file mode 100644 index 0000000..141ebdf Binary files /dev/null and b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--1-k--18.json.gz differ diff --git a/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--2-k--18.json.gz b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--2-k--18.json.gz new file mode 100644 index 0000000..66becd5 Binary files /dev/null and b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--2-k--18.json.gz differ diff --git a/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--4-k--18.json.gz b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--4-k--18.json.gz new file mode 100644 index 0000000..71dedc6 Binary files /dev/null and b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--4-k--18.json.gz differ diff 
--git a/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--6-k--18.json.gz b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--6-k--18.json.gz new file mode 100644 index 0000000..a3909c9 Binary files /dev/null and b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--6-k--18.json.gz differ diff --git a/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--8-k--18.json.gz b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--8-k--18.json.gz new file mode 100644 index 0000000..518c1b0 Binary files /dev/null and b/stability-tests/netsync/dags-slow/wide-dag-blocks--2^20-delay-factor--8-k--18.json.gz differ diff --git a/stability-tests/netsync/fast-pruning-ibd-test/dag-for-fast-pruning-ibd-test.json.gz b/stability-tests/netsync/fast-pruning-ibd-test/dag-for-fast-pruning-ibd-test.json.gz new file mode 100644 index 0000000..91a3f24 Binary files /dev/null and b/stability-tests/netsync/fast-pruning-ibd-test/dag-for-fast-pruning-ibd-test.json.gz differ diff --git a/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json b/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json new file mode 100644 index 0000000..44ced89 --- /dev/null +++ b/stability-tests/netsync/fast-pruning-ibd-test/fast-pruning-ibd-test-params.json @@ -0,0 +1,7 @@ +{ + "skipProofOfWork": true, + "mergeSetSizeLimit": 30, + "finalityDuration": 200000, + "hardForkOmitGenesisFromParentsDaaScore": 2505, + "k": 0 +} diff --git a/stability-tests/netsync/fast-pruning-ibd-test/generate_test.go b/stability-tests/netsync/fast-pruning-ibd-test/generate_test.go new file mode 100644 index 0000000..c2a72ec --- /dev/null +++ b/stability-tests/netsync/fast-pruning-ibd-test/generate_test.go @@ -0,0 +1,149 @@ +package fast_pruning_ibd_test + +import ( + "io/ioutil" + "os" + "path" + "testing" + "time" + + "github.com/spectre-project/spectred/domain/consensus" + 
"github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +// TestGenerateFastPruningIBDTest generates the json needed for dag-for-fast-pruning-ibd-test.json.gz +func TestGenerateFastPruningIBDTest(t *testing.T) { + t.Skip() + testutils.ForAllNets(t, true, func(t *testing.T, consensusConfig *consensus.Config) { + if consensusConfig.Name != dagconfig.DevnetParams.Name { + return + } + + factory := consensus.NewFactory() + + // This is done to reduce the pruning depth to 6 blocks + finalityDepth := 200 + consensusConfig.FinalityDuration = time.Duration(finalityDepth) * consensusConfig.TargetTimePerBlock + consensusConfig.K = 0 + consensusConfig.PruningProofM = 1 + consensusConfig.MergeSetSizeLimit = 30 + + tc, teardownSyncer, err := factory.NewTestConsensus(consensusConfig, "TestValidateAndInsertPruningPointSyncer") + if err != nil { + t.Fatalf("Error setting up tc: %+v", err) + } + defer teardownSyncer(false) + + numBlocks := finalityDepth + tipHash := consensusConfig.GenesisHash + for i := 0; i < numBlocks; i++ { + tipHash, _, err = tc.AddBlock([]*externalapi.DomainHash{tipHash}, nil, nil) + if err != nil { + t.Fatal(err) + } + } + + tip, _, err := tc.GetBlock(tipHash) + if err != nil { + t.Fatal(err) + } + + header := tip.Header.ToMutable() + + for i := uint64(1); i < 1000; i++ { + if i%100 == 0 { + t.Logf("Added %d tips", i) + } + header.SetNonce(tip.Header.Nonce() + i) + block := &externalapi.DomainBlock{Header: header.ToImmutable(), Transactions: tip.Transactions} + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + } + + emptyCoinbase := &externalapi.DomainCoinbaseData{ + ScriptPublicKey: &externalapi.ScriptPublicKey{ + Script: nil, + Version: 0, + }, + } + + pruningPoint, err := tc.PruningPoint() + if err != nil { + t.Fatal(err) + } + + for i := 0; 
; i++ { + currentPruningPoint, err := tc.PruningPoint() + if err != nil { + t.Fatal(err) + } + + if !pruningPoint.Equal(currentPruningPoint) { + t.Fatalf("Pruning point unexpectedly changed") + } + + tips, err := tc.Tips() + if err != nil { + t.Fatal(err) + } + + if len(tips) == 1 { + break + } + + if i%10 == 0 { + t.Logf("Number of tips: %d", len(tips)) + } + + block, err := tc.BuildBlock(emptyCoinbase, nil) + if err != nil { + t.Fatal(err) + } + + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + } + + for { + currentPruningPoint, err := tc.PruningPoint() + if err != nil { + t.Fatal(err) + } + + if !pruningPoint.Equal(currentPruningPoint) { + break + } + + block, err := tc.BuildBlock(emptyCoinbase, nil) + if err != nil { + t.Fatal(err) + } + + err = tc.ValidateAndInsertBlock(block, true) + if err != nil { + t.Fatalf("ValidateAndInsertBlock: %+v", err) + } + } + + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + + err = tc.ToJSON(file) + if err != nil { + t.Fatal(err) + } + stat, err := file.Stat() + if err != nil { + t.Fatal(err) + } + t.Logf("DAG saved at %s", path.Join(os.TempDir(), stat.Name())) + }) +} diff --git a/stability-tests/netsync/log.go b/stability-tests/netsync/log.go new file mode 100644 index 0000000..5a7a134 --- /dev/null +++ b/stability-tests/netsync/log.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("NTSN") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, errLogFile string) { + level := logger.LevelInfo + if activeConfig().LogLevel != "" { + var ok bool + level, ok = logger.LevelFromString(activeConfig().LogLevel) + if !ok { + fmt.Fprintf(os.Stderr, "Log level %s doesn't 
exists", activeConfig().LogLevel) + os.Exit(1) + } + } + log.SetLevel(level) + common.InitBackend(backendLog, logFile, errLogFile) +} diff --git a/stability-tests/netsync/main.go b/stability-tests/netsync/main.go new file mode 100644 index 0000000..6559cb1 --- /dev/null +++ b/stability-tests/netsync/main.go @@ -0,0 +1,66 @@ +package main + +import ( + "sync/atomic" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + defer panics.HandlePanic(log, "netsync-main", nil) + err := parseConfig() + if err != nil { + panic(errors.Wrap(err, "error in parseConfig")) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + shutdown := uint64(0) + + syncerClient, syncerTeardown, err := setupSyncer() + if err != nil { + panic(errors.Wrap(err, "error in setupSyncer")) + } + syncerClient.SetOnErrorHandler(func(err error) { + if atomic.LoadUint64(&shutdown) == 0 { + log.Debugf("received error from SYNCER: %s", err) + } + }) + defer func() { + syncerClient.Disconnect() + syncerTeardown() + }() + + syncedClient, syncedTeardown, err := setupSyncee() + if err != nil { + panic(errors.Wrap(err, "error in setupSyncee")) + } + syncedClient.SetOnErrorHandler(func(err error) { + if atomic.LoadUint64(&shutdown) == 0 { + log.Debugf("received error from SYNCEE: %s", err) + } + }) + defer func() { + syncedClient.Disconnect() + syncedTeardown() + }() + + err = checkSyncRate(syncerClient, syncedClient) + if err != nil { + panic(errors.Wrap(err, "error in checkSyncRate")) + } + + err = checkResolveVirtual(syncerClient, syncedClient) + if err != nil { + panic(errors.Wrap(err, "error in checkResolveVirtual")) + } + + atomic.StoreUint64(&shutdown, 1) +} diff --git a/stability-tests/netsync/node.go 
b/stability-tests/netsync/node.go new file mode 100644 index 0000000..f9fc29d --- /dev/null +++ b/stability-tests/netsync/node.go @@ -0,0 +1,204 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/spectre-project/spectred/domain/consensus" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/mine" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util" + "github.com/spectre-project/spectred/util/panics" +) + +const ( + syncerRPCAddress = "localhost:9000" + syncedRPCAddress = "localhost:9100" + + syncerListen = "localhost:9001" + syncedListen = "localhost:9101" +) + +func startNode(name string, rpcAddress, listen, connect, profilePort, dataDir string) (*exec.Cmd, func(), error) { + log.Infof("Data directory for %s is %s", name, dataDir) + + args := []string{ + "spectred", + common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()), + "--appdir", dataDir, + "--logdir", dataDir, + "--rpclisten", rpcAddress, + "--listen", listen, + "--profile", profilePort, + "--loglevel", "debug", + "--allow-submit-block-when-not-synced", + } + if connect != "" { + args = append(args, "--connect", connect) + } + + if activeConfig().OverrideDAGParamsFile != "" { + args = append(args, "--override-dag-params-file", activeConfig().OverrideDAGParamsFile) + } + + cmd, err := common.StartCmd(name, + args..., + ) + if err != nil { + return nil, nil, err + } + + var shutdown uint32 + stopped := make(chan struct{}) + spawn("startNode-cmd.Wait", func() { + err := cmd.Wait() + if err != nil { + if atomic.LoadUint32(&shutdown) == 0 { + panics.Exit(log, fmt.Sprintf("%s ( %s ) closed unexpectedly: %s", name, cmd, err)) + } + if !strings.Contains(err.Error(), "signal: killed") { + panics.Exit(log, fmt.Sprintf("%s ( %s ) 
closed with an error: %s", name, cmd, err)) + } + } + stopped <- struct{}{} + }) + + return cmd, func() { + atomic.StoreUint32(&shutdown, 1) + killWithSigkill(cmd, name) + const timeout = time.Second + select { + case <-stopped: + case <-time.After(timeout): + panics.Exit(log, fmt.Sprintf("%s couldn't be closed after %s", name, timeout)) + } + }, nil +} + +func killWithSigkill(cmd *exec.Cmd, commandName string) { + log.Error("SIGKILLED") + err := cmd.Process.Signal(syscall.SIGKILL) + if err != nil { + log.Criticalf("error sending SIGKILL to %s", commandName) + } +} + +func setupNodeWithRPC(name, listen, rpcListen, connect, profilePort, dataDir string) (*rpc.Client, func(), error) { + _, teardown, err := startNode(name, rpcListen, listen, connect, profilePort, dataDir) + if err != nil { + return nil, nil, errors.Wrap(err, "error in startNode") + } + defer func() { + if r := recover(); r != nil { + teardown() + panic(r) + } + }() + + log.Infof("Waiting for node %s to start...", name) + const initTime = 2 * time.Second + time.Sleep(initTime) + + rpcClient, err := rpc.ConnectToRPC(&rpc.Config{ + RPCServer: rpcListen, + }, activeConfig().NetParams()) + if err != nil { + return nil, nil, errors.Wrap(err, "error connecting to JSON-RPC server") + } + + return rpcClient, teardown, nil +} + +func setupSyncee() (*rpc.Client, func(), error) { + const syncedProfilePort = "6061" + + synceeDataDir, err := useDirOrCreateTemp(activeConfig().SynceeDataDirectory, "syncee-spectred-data-dir") + if err != nil { + return nil, nil, err + } + + return setupNodeWithRPC("SYNCEE", syncedListen, syncedRPCAddress, syncerListen, syncedProfilePort, + synceeDataDir) +} + +func setupSyncer() (*rpc.Client, func(), error) { + const syncerProfilePort = "6062" + + syncerDataDir, err := useDirOrCreateTemp(activeConfig().SyncerDataDirectory, "syncer-spectred-data-dir") + if err != nil { + return nil, nil, err + } + + rpcClient, teardown, err := setupNodeWithRPC("SYNCER", syncerListen, syncerRPCAddress, 
"", + syncerProfilePort, syncerDataDir) + if err != nil { + return nil, nil, err + } + defer func() { + if r := recover(); r != nil { + teardown() + panic(r) + } + }() + + miningDataDir, err := useDirOrCreateTemp(activeConfig().MiningDataDirectory, "syncer-mining-data-dir") + if err != nil { + return nil, nil, err + } + + err = mine.FromFile(cfg.DAGFile, &consensus.Config{Params: *activeConfig().NetParams()}, rpcClient, miningDataDir) + if err != nil { + return nil, nil, errors.Wrap(err, "error in mine.FromFile") + } + + log.Info("Mining on top of syncer tips") + rejectReason, err := mineOnTips(rpcClient) + if err != nil { + panic(err) + } + if rejectReason != appmessage.RejectReasonNone { + panic(fmt.Sprintf("mined block rejected: %s", rejectReason)) + } + + return rpcClient, teardown, nil +} + +func useDirOrCreateTemp(dataDir, tempName string) (string, error) { + if dataDir != "" { + return dataDir, nil + } + + return common.TempDir(tempName) +} + +func mineOnTips(client *rpc.Client) (appmessage.RejectReason, error) { + fakePublicKey := make([]byte, util.PublicKeySize) + addr, err := util.NewAddressPublicKey(fakePublicKey, activeConfig().NetParams().Prefix) + if err != nil { + return appmessage.RejectReasonNone, err + } + + template, err := client.GetBlockTemplate(addr.String(), "") + if err != nil { + return appmessage.RejectReasonNone, err + } + + domainBlock, err := appmessage.RPCBlockToDomainBlock(template.Block) + if err != nil { + return appmessage.RejectReasonNone, err + } + + if !activeConfig().NetParams().SkipProofOfWork { + mine.SolveBlock(domainBlock) + } + + return client.SubmitBlockAlsoIfNonDAA(domainBlock) +} diff --git a/stability-tests/netsync/run/run-fast.sh b/stability-tests/netsync/run/run-fast.sh new file mode 100755 index 0000000..3adf5fd --- /dev/null +++ b/stability-tests/netsync/run/run-fast.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -ex + +FAST_DAGS_DIR="../dags-fast" +mapfile -t DAGS < <( ls $FAST_DAGS_DIR) + +for dagArchive in "${DAGS[@]}" 
+do + JSON_FILE=$FAST_DAGS_DIR/$dagArchive + netsync --simnet --dag-file $JSON_FILE --profile=7000 + TEST_EXIT_CODE=$? + echo "$dagArchive processed" + if [ $TEST_EXIT_CODE -ne 0 ]; then + echo "netsync (fast) test: FAILED" + exit 1 + fi + rm -rf /tmp/STABILITY_TEMP_DIR_* +done + +JSON_FILE="../fast-pruning-ibd-test/dag-for-fast-pruning-ibd-test.json.gz" +netsync --devnet --dag-file $JSON_FILE --profile=7000 --override-dag-params-file=../fast-pruning-ibd-test/fast-pruning-ibd-test-params.json +TEST_EXIT_CODE=$? +echo "dag-for-fast-pruning-ibd-test.json processed" +if [ $TEST_EXIT_CODE -ne 0 ]; then + echo "netsync (fast) test: FAILED" + exit 1 +fi +rm -rf /tmp/STABILITY_TEMP_DIR_* + +echo "netsync (fast) test: PASSED" +exit 0 + diff --git a/stability-tests/netsync/run/run-slow.sh b/stability-tests/netsync/run/run-slow.sh new file mode 100755 index 0000000..b9cddee --- /dev/null +++ b/stability-tests/netsync/run/run-slow.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +SLOW_DAGS_DIR="../dags-slow" +mapfile -t DAGS < <( ls $SLOW_DAGS_DIR) + +for dagArchive in "${DAGS[@]}" +do + JSON_FILE=$SLOW_DAGS_DIR/$dagArchive + netsync --simnet --dag-file $JSON_FILE --profile=7000 + TEST_EXIT_CODE=$? + echo "$dagArchive processed" + if [ $TEST_EXIT_CODE -ne 0 ]; then + echo "netsync (slow) test: FAILED" + exit 1 + fi + rm -rf /tmp/STABILITY_TEMP_DIR_* +done + +echo "netsync (slow) test: PASSED" +exit 0 + diff --git a/stability-tests/netsync/run/run.sh b/stability-tests/netsync/run/run.sh new file mode 100755 index 0000000..6aa9cb9 --- /dev/null +++ b/stability-tests/netsync/run/run.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +FAST_DAGS_DIR="../dags-fast" +SLOW_DAGS_DIR="../dags-slow" +mapfile -t FAST_DAGS < <( ls $FAST_DAGS_DIR) +mapfile -t SLOW_DAGS < <( ls $SLOW_DAGS_DIR) + +DAGS=() + +for dagArchive in "${FAST_DAGS[@]}" +do + DAGS+=("$FAST_DAGS_DIR/$dagArchive") +done + +for dagArchive in "${SLOW_DAGS[@]}" +do + DAGS+=("$SLOW_DAGS_DIR/$dagArchive") +done + +for dagArchive in "${DAGS[@]}" +do + 
JSON_FILE=$dagArchive + netsync --simnet --dag-file $JSON_FILE --profile=7000 + TEST_EXIT_CODE=$? + echo "$dagArchive processed" + if [ $TEST_EXIT_CODE -ne 0 ]; then + echo "netsync test: FAILED" + exit 1 + fi +done + +echo "netsync test: PASSED" +exit 0 diff --git a/stability-tests/orphans/README.md b/stability-tests/orphans/README.md new file mode 100644 index 0000000..ed7e58b --- /dev/null +++ b/stability-tests/orphans/README.md @@ -0,0 +1,10 @@ +# Orphans + +This tool makes sure orphan resolution works and doesn't crash +spectred. + +## Running + +1. `go install` spectred and orphans. +2. `cd run` +3. `./run.sh` diff --git a/stability-tests/orphans/check_top_is_tip.go b/stability-tests/orphans/check_top_is_tip.go new file mode 100644 index 0000000..c82a623 --- /dev/null +++ b/stability-tests/orphans/check_top_is_tip.go @@ -0,0 +1,23 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/stability-tests/common/rpc" +) + +func checkTopBlockIsTip(rpcClient *rpc.Client, topBlock *externalapi.DomainBlock) error { + selectedTipHashResponse, err := rpcClient.GetSelectedTipHash() + if err != nil { + return err + } + + topBlockHash := consensushashing.BlockHash(topBlock) + if selectedTipHashResponse.SelectedTipHash != topBlockHash.String() { + return errors.Errorf("selectedTipHash is '%s' while expected to be topBlock's hash `%s`", + selectedTipHashResponse.SelectedTipHash, topBlockHash) + } + + return nil +} diff --git a/stability-tests/orphans/config.go b/stability-tests/orphans/config.go new file mode 100644 index 0000000..9179720 --- /dev/null +++ b/stability-tests/orphans/config.go @@ -0,0 +1,61 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/config" + 
"github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + + "github.com/jessevdk/go-flags" +) + +const ( + defaultLogFilename = "orphans.log" + defaultErrLogFilename = "orphans_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + rpc.Config + NodeP2PAddress string `long:"addr" short:"a" description:"node's P2P address"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + OrphanChainLength int `long:"num-orphans" short:"n" description:"Desired length of orphan chain"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + + _, err := parser.Parse() + + if err != nil { + if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp { + os.Exit(0) + } + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/orphans/connect_to_node.go b/stability-tests/orphans/connect_to_node.go new file mode 100644 index 0000000..ffc88d5 --- /dev/null +++ b/stability-tests/orphans/connect_to_node.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/standalone" +) + +func connectToNode() *standalone.Routes { + cfg := activeConfig() + + spectredConfig := config.DefaultConfig() + 
spectredConfig.NetworkFlags = cfg.NetworkFlags + + minimalNetAdapter, err := standalone.NewMinimalNetAdapter(spectredConfig) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating minimalNetAdapter: %+v", err) + os.Exit(1) + } + routes, err := minimalNetAdapter.Connect(cfg.NodeP2PAddress) + if err != nil { + fmt.Fprintf(os.Stderr, "error connecting to node: %+v", err) + os.Exit(1) + } + return routes +} diff --git a/stability-tests/orphans/log.go b/stability-tests/orphans/log.go new file mode 100644 index 0000000..be15156 --- /dev/null +++ b/stability-tests/orphans/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("ORPH") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/orphans/main.go b/stability-tests/orphans/main.go new file mode 100644 index 0000000..dbd69d6 --- /dev/null +++ b/stability-tests/orphans/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "fmt" + "os" + "time" + + "github.com/pkg/errors" + + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util/profiling" +) + +var timeout = 30 * time.Second + +func main() { + err := parseConfig() + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err) + os.Exit(1) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + blocks, topBlock, err := prepareBlocks() + if err != nil { + log.Errorf("Error preparing blocks: %+v", err) + backendLog.Close() + os.Exit(1) + } + + routes := connectToNode() + + rpcClient, err := rpc.ConnectToRPC(&cfg.Config, cfg.NetParams()) + if err != nil { + panic(errors.Wrap(err, "error connecting to JSON-RPC server")) + } + + 
defer rpcClient.Disconnect() + err = sendBlocks(routes, blocks, topBlock) + if err != nil { + backendLog.Close() + log.Errorf("Error sending blocks: %+v", err) + os.Exit(1) + } + + // Wait a second to let spectred process orphans + <-time.After(1 * time.Second) + + err = checkTopBlockIsTip(rpcClient, topBlock) + if err != nil { + log.Errorf("Error in checkTopBlockIsTip: %+v", err) + backendLog.Close() + os.Exit(1) + } +} diff --git a/stability-tests/orphans/prepare_blocks.go b/stability-tests/orphans/prepare_blocks.go new file mode 100644 index 0000000..8dd5b2a --- /dev/null +++ b/stability-tests/orphans/prepare_blocks.go @@ -0,0 +1,62 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/mine" +) + +const leveldbCacheSizeMiB = 256 + +func prepareBlocks() (blocks []*externalapi.DomainBlock, topBlock *externalapi.DomainBlock, err error) { + config := activeConfig() + testDatabaseDir, err := common.TempDir("minejson") + if err != nil { + return nil, nil, err + } + db, err := ldb.NewLevelDB(testDatabaseDir, leveldbCacheSizeMiB) + if err != nil { + return nil, nil, err + } + defer db.Close() + + testConsensus, tearDownFunc, err := consensus.NewFactory().NewTestConsensus(&consensus.Config{Params: *config.ActiveNetParams}, "prepareBlocks") + if err != nil { + return nil, nil, err + } + defer tearDownFunc(true) + + virtualSelectedParent, err := testConsensus.GetVirtualSelectedParent() + if err != nil { + return nil, nil, err + } + currentParentHash := virtualSelectedParent + + blocksCount := config.OrphanChainLength + 1 + blocks = make([]*externalapi.DomainBlock, 0, 
blocksCount) + + for i := 0; i < blocksCount; i++ { + block, _, err := testConsensus.BuildBlockWithParents( + []*externalapi.DomainHash{currentParentHash}, + &externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{}}, + []*externalapi.DomainTransaction{}) + if err != nil { + return nil, nil, errors.Wrap(err, "error in BuildBlockWithParents") + } + + mine.SolveBlock(block) + err = testConsensus.ValidateAndInsertBlock(block, true) + if err != nil { + return nil, nil, errors.Wrap(err, "error in ValidateAndInsertBlock") + } + + blocks = append(blocks, block) + currentParentHash = consensushashing.BlockHash(block) + } + + return blocks[:len(blocks)-1], blocks[len(blocks)-1], nil +} diff --git a/stability-tests/orphans/run/run.sh b/stability-tests/orphans/run/run.sh new file mode 100755 index 0000000..dfb38ed --- /dev/null +++ b/stability-tests/orphans/run/run.sh @@ -0,0 +1,25 @@ +#!/bin/bash +rm -rf /tmp/spectred-temp + +spectred --simnet --appdir=/tmp/spectred-temp --profile=6061 & +SPECTRED_PID=$! + +sleep 1 + +orphans --simnet -alocalhost:18511 -n20 --profile=7000 +TEST_EXIT_CODE=$? + +kill $SPECTRED_PID + +wait $SPECTRED_PID +SPECTRED_EXIT_CODE=$? 
+ +echo "Exit code: $TEST_EXIT_CODE" +echo "Spectred exit code: $SPECTRED_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ] && [ $SPECTRED_EXIT_CODE -eq 0 ]; then + echo "orphans test: PASSED" + exit 0 +fi +echo "orphans test: FAILED" +exit 1 diff --git a/stability-tests/orphans/send_blocks.go b/stability-tests/orphans/send_blocks.go new file mode 100644 index 0000000..47b24a4 --- /dev/null +++ b/stability-tests/orphans/send_blocks.go @@ -0,0 +1,79 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/infrastructure/network/netadapter/standalone" +) + +func sendBlocks(routes *standalone.Routes, blocks []*externalapi.DomainBlock, topBlock *externalapi.DomainBlock) error { + topBlockHash := consensushashing.BlockHash(topBlock) + log.Infof("Sending top block with hash %s", topBlockHash) + err := routes.OutgoingRoute.Enqueue(&appmessage.MsgInvRelayBlock{Hash: topBlockHash}) + if err != nil { + return err + } + + err = waitForRequestAndSend(routes, topBlock) + if err != nil { + return err + } + + for i := len(blocks) - 1; i >= 0; i-- { + block := blocks[i] + + orphanBlock := topBlock + if i+1 != len(blocks) { + orphanBlock = blocks[i+1] + } + log.Infof("Waiting for request for block locator for block number %d with hash %s", i, consensushashing.BlockHash(block)) + err = waitForRequestForBlockLocator(routes, orphanBlock) + if err != nil { + return err + } + + log.Infof("Waiting for request and sending block number %d with hash %s", i, consensushashing.BlockHash(block)) + err = waitForRequestAndSend(routes, block) + if err != nil { + return err + } + } + + return nil +} + +func waitForRequestForBlockLocator(routes *standalone.Routes, orphanBlock *externalapi.DomainBlock) error { + message, err := 
routes.WaitForMessageOfType(appmessage.CmdRequestBlockLocator, timeout) + if err != nil { + return err + } + requestBlockLocatorMessage := message.(*appmessage.MsgRequestBlockLocator) + + orphanBlockHash := consensushashing.BlockHash(orphanBlock) + if *requestBlockLocatorMessage.HighHash != *orphanBlockHash { + return errors.Errorf("expected blockLocator request high hash to be %s but got %s", + orphanBlockHash, requestBlockLocatorMessage.HighHash) + } + + locator := appmessage.NewMsgBlockLocator([]*externalapi.DomainHash{orphanBlockHash, activeConfig().ActiveNetParams.GenesisHash}) + return routes.OutgoingRoute.Enqueue(locator) +} + +func waitForRequestAndSend(routes *standalone.Routes, block *externalapi.DomainBlock) error { + message, err := routes.WaitForMessageOfType(appmessage.CmdRequestRelayBlocks, timeout) + if err != nil { + return err + } + + requestRelayBlockMessage := message.(*appmessage.MsgRequestRelayBlocks) + + blockHash := consensushashing.BlockHash(block) + if len(requestRelayBlockMessage.Hashes) != 1 || *requestRelayBlockMessage.Hashes[0] != *blockHash { + return errors.Errorf("expecting requested hashes to be [%s], but got %v", + blockHash, requestRelayBlockMessage.Hashes) + } + + return routes.OutgoingRoute.Enqueue(appmessage.DomainBlockToMsgBlock(block)) +} diff --git a/stability-tests/reorg/config.go b/stability-tests/reorg/config.go new file mode 100644 index 0000000..8097212 --- /dev/null +++ b/stability-tests/reorg/config.go @@ -0,0 +1,51 @@ +package main + +import ( + "os" + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + + "github.com/jessevdk/go-flags" +) + +const ( + defaultLogFilename = "reorg.log" + defaultErrLogFilename = "reorg_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) 
+) + +type configFlags struct { + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + DAGFile string `long:"dag-file" description:"Path to DAG JSON file"` +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + + _, err := parser.Parse() + + if err != nil { + if err, ok := err.(*flags.Error); ok && err.Type == flags.ErrHelp { + os.Exit(0) + } + return err + } + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/reorg/log.go b/stability-tests/reorg/log.go new file mode 100644 index 0000000..67d01f2 --- /dev/null +++ b/stability-tests/reorg/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("RORG") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/reorg/main.go b/stability-tests/reorg/main.go new file mode 100644 index 0000000..7e9e388 --- /dev/null +++ b/stability-tests/reorg/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + err := parseConfig() + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing config: %+v", err) + os.Exit(1) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + testReorg(cfg) +} diff --git a/stability-tests/reorg/reorg.go b/stability-tests/reorg/reorg.go new file mode 100644 index 0000000..d3e4366 --- /dev/null +++ b/stability-tests/reorg/reorg.go @@ -0,0 
+1,164 @@ +package main + +import ( + "compress/gzip" + "fmt" + "math/rand" + "os" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model" + + "github.com/spectre-project/spectred/domain/consensus" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/model/testapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/dagconfig" +) + +func testReorg(cfg *configFlags) { + consensusConfig := consensus.Config{Params: dagconfig.DevnetParams} + consensusConfig.SkipProofOfWork = true + consensusConfig.DisableDifficultyAdjustment = true + + factory := consensus.NewFactory() + tc, teardown, err := factory.NewTestConsensus(&consensusConfig, "ReorgHonest") + if err != nil { + panic(err) + } + defer teardown(false) + + f, err := os.Open(cfg.DAGFile) + if err != nil { + panic(err) + } + defer f.Close() + + gzipReader, err := gzip.NewReader(f) + if err != nil { + panic(err) + } + defer gzipReader.Close() + + _, err = tc.MineJSON(gzipReader, testapi.MineJSONBlockTypeUTXOValidBlock) + if err != nil { + panic(err) + } + + tcAttacker, teardownAttacker, err := factory.NewTestConsensus(&consensusConfig, "ReorgAttacker") + if err != nil { + panic(err) + } + defer teardownAttacker(false) + + virtualSelectedParent, err := tc.GetVirtualSelectedParent() + if err != nil { + panic(err) + } + + stagingArea := model.NewStagingArea() + virtualSelectedParentGHOSTDAGData, err := tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, virtualSelectedParent, false) + if err != nil { + panic(err) + } + + log.Infof("Selected tip blue score %d", virtualSelectedParentGHOSTDAGData.BlueScore()) + + sideChain := make([]*externalapi.DomainBlock, 0) + + for i := uint64(0); ; i++ { + tips, err := tcAttacker.Tips() + if err != nil { + panic(err) + } + + block, _, err := tcAttacker.BuildBlockWithParents(tips, nil, nil) + if err != nil { + 
panic(err) + } + + // We change the nonce of the first block so its hash won't be similar to any of the + // honest DAG blocks. As a result the rest of the side chain should have unique hashes + // as well. + if i == 0 { + mutableHeader := block.Header.ToMutable() + mutableHeader.SetNonce(uint64(rand.NewSource(84147).Int63())) + block.Header = mutableHeader.ToImmutable() + } + + err = tcAttacker.ValidateAndInsertBlock(block, true) + if err != nil { + panic(err) + } + + sideChain = append(sideChain, block) + + if i%100 == 0 { + log.Infof("Attacker side chain mined %d blocks", i) + } + + blockHash := consensushashing.BlockHash(block) + ghostdagData, err := tcAttacker.GHOSTDAGDataStore().Get(tcAttacker.DatabaseContext(), stagingArea, blockHash, false) + if err != nil { + panic(err) + } + + if virtualSelectedParentGHOSTDAGData.BlueWork().Cmp(ghostdagData.BlueWork()) == -1 { + break + } + } + + sideChainTipHash := consensushashing.BlockHash(sideChain[len(sideChain)-1]) + sideChainTipGHOSTDAGData, err := tcAttacker.GHOSTDAGDataStore().Get(tcAttacker.DatabaseContext(), stagingArea, sideChainTipHash, false) + if err != nil { + panic(err) + } + + log.Infof("Side chain tip (%s) blue score %d", sideChainTipHash, sideChainTipGHOSTDAGData.BlueScore()) + + doneChan := make(chan struct{}) + spawn("add-sidechain-to-honest", func() { + for i, block := range sideChain { + if i%100 == 0 { + log.Infof("Validated %d blocks from the attacker chain", i) + } + err := tc.ValidateAndInsertBlock(block, true) + if err != nil { + panic(err) + } + } + + doneChan <- struct{}{} + }) + + const timeout = 12 * time.Hour + select { + case <-doneChan: + case <-time.After(timeout): + fail("Adding the side chain took more than %s", timeout) + } + + sideChainTipGHOSTDAGData, err = tc.GHOSTDAGDataStore().Get(tc.DatabaseContext(), stagingArea, sideChainTipHash, false) + if err != nil { + panic(err) + } + + log.Infof("Side chain tip (%s) blue score %d", sideChainTipHash, 
sideChainTipGHOSTDAGData.BlueScore()) + + newVirtualSelectedParent, err := tc.GetVirtualSelectedParent() + if err != nil { + panic(err) + } + + if !newVirtualSelectedParent.Equal(sideChainTipHash) { + fail("No reorg happened") + } +} + +func fail(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + fmt.Fprintln(os.Stderr, msg) + log.Criticalf(msg) + backendLog.Close() + os.Exit(1) +} diff --git a/stability-tests/reorg/run/run-full-finality-window-reorg.sh b/stability-tests/reorg/run/run-full-finality-window-reorg.sh new file mode 100755 index 0000000..a72a468 --- /dev/null +++ b/stability-tests/reorg/run/run-full-finality-window-reorg.sh @@ -0,0 +1,12 @@ +reorg --dag-file ../../netsync/dags-slow/wide-dag-blocks--2^16-delay-factor--1-k--18.json.gz --profile=6061 + +TEST_EXIT_CODE=$? +echo "Exit code: $TEST_EXIT_CODE" + + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "reorg test: PASSED" + exit 0 +fi +echo "reorg test: FAILED" +exit 1 diff --git a/stability-tests/reorg/run/run.sh b/stability-tests/reorg/run/run.sh new file mode 100755 index 0000000..d6a6ec7 --- /dev/null +++ b/stability-tests/reorg/run/run.sh @@ -0,0 +1,12 @@ +reorg --dag-file ../../netsync/dags-fast/wide-dag-blocks--2^12-delay-factor--1-k--18.json.gz --profile=6061 + +TEST_EXIT_CODE=$? 
+echo "Exit code: $TEST_EXIT_CODE" + + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "reorg test: PASSED" + exit 0 +fi +echo "reorg test: FAILED" +exit 1 diff --git a/stability-tests/rpc-idle-clients/config.go b/stability-tests/rpc-idle-clients/config.go new file mode 100644 index 0000000..b4f7dfc --- /dev/null +++ b/stability-tests/rpc-idle-clients/config.go @@ -0,0 +1,59 @@ +package main + +import ( + "path/filepath" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" +) + +const ( + defaultLogFilename = "rpc_idle_clients.log" + defaultErrLogFilename = "rpc_idle_clients_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + rpc.Config + config.NetworkFlags + NumClients uint32 `long:"numclients" short:"n" description:"Number of RPC clients to open"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + + if err != nil { + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + err = rpc.ValidateRPCConfig(&cfg.Config) + if err != nil { + return err + } + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + return nil +} diff --git a/stability-tests/rpc-idle-clients/log.go b/stability-tests/rpc-idle-clients/log.go new file mode 100644 index 0000000..c4ba874 --- 
/dev/null +++ b/stability-tests/rpc-idle-clients/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("RPIC") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/rpc-idle-clients/main.go b/stability-tests/rpc-idle-clients/main.go new file mode 100644 index 0000000..e2567bc --- /dev/null +++ b/stability-tests/rpc-idle-clients/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + defer panics.HandlePanic(log, "rpc-idle-clients-main", nil) + err := parseConfig() + if err != nil { + panic(errors.Wrap(err, "error parsing configuration")) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + numRPCClients := cfg.NumClients + clients := make([]*rpc.Client, numRPCClients) + for i := uint32(0); i < numRPCClients; i++ { + rpcClient, err := rpc.ConnectToRPC(&cfg.Config, cfg.NetParams()) + if err != nil { + panic(errors.Wrap(err, "error connecting to RPC server")) + } + clients[i] = rpcClient + } + + const testDuration = 30 * time.Second + select { + case <-time.After(testDuration): + } + for _, client := range clients { + client.Close() + } +} diff --git a/stability-tests/rpc-idle-clients/run/run.sh b/stability-tests/rpc-idle-clients/run/run.sh new file mode 100755 index 0000000..5647279 --- /dev/null +++ b/stability-tests/rpc-idle-clients/run/run.sh @@ -0,0 +1,34 @@ +#!/bin/bash +rm -rf /tmp/spectred-temp + +NUM_CLIENTS=128 +spectred --devnet --appdir=/tmp/spectred-temp 
--profile=6061 --rpcmaxwebsockets=$NUM_CLIENTS & +SPECTRED_PID=$! +SPECTRED_KILLED=0 +function killSpectredIfNotKilled() { + if [ $SPECTRED_KILLED -eq 0 ]; then + kill $SPECTRED_PID + fi +} +trap "killSpectredIfNotKilled" EXIT + +sleep 1 + +rpc-idle-clients --devnet --profile=7000 -n=$NUM_CLIENTS +TEST_EXIT_CODE=$? + +kill $SPECTRED_PID + +wait $SPECTRED_PID +SPECTRED_EXIT_CODE=$? +SPECTRED_KILLED=1 + +echo "Exit code: $TEST_EXIT_CODE" +echo "Spectred exit code: $SPECTRED_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ] && [ $SPECTRED_EXIT_CODE -eq 0 ]; then + echo "rpc-idle-clients test: PASSED" + exit 0 +fi +echo "rpc-idle-clients test: FAILED" +exit 1 diff --git a/stability-tests/rpc-stability/README.md b/stability-tests/rpc-stability/README.md new file mode 100644 index 0000000..69ad9b1 --- /dev/null +++ b/stability-tests/rpc-stability/README.md @@ -0,0 +1,10 @@ +# RPC Stability Tester + +This tests JSON-RPC stability by sending the node commands and making +sure it does not crash. + +## Running + +1. `go install` spectred and rpc-stability. +2. `cd run` +3. 
`./run.sh` diff --git a/stability-tests/rpc-stability/config.go b/stability-tests/rpc-stability/config.go new file mode 100644 index 0000000..04c57c0 --- /dev/null +++ b/stability-tests/rpc-stability/config.go @@ -0,0 +1,61 @@ +package main + +import ( + "path/filepath" + + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + + "github.com/jessevdk/go-flags" +) + +const ( + defaultLogFilename = "json_stability.log" + defaultErrLogFilename = "json_stability_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + rpc.Config + config.NetworkFlags + CommandsFilePath string `long:"commands" short:"p" description:"Path to commands file"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + + if err != nil { + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + err = rpc.ValidateRPCConfig(&cfg.Config) + if err != nil { + return err + } + log.SetLevel(logger.LevelInfo) + common.InitBackend(backendLog, defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/rpc-stability/log.go b/stability-tests/rpc-stability/log.go new file mode 100644 index 0000000..b94e508 --- /dev/null +++ b/stability-tests/rpc-stability/log.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + 
"github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("JSTT") + spawn = panics.GoroutineWrapperFunc(log) +) diff --git a/stability-tests/rpc-stability/main.go b/stability-tests/rpc-stability/main.go new file mode 100644 index 0000000..00b347e --- /dev/null +++ b/stability-tests/rpc-stability/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient/grpcclient" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + +func main() { + defer panics.HandlePanic(log, "rpc-stability-main", nil) + err := parseConfig() + if err != nil { + panic(errors.Wrap(err, "error parsing configuration")) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + + cfg := activeConfig() + if cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + rpcAddress, err := cfg.NetParams().NormalizeRPCServerAddress(cfg.RPCServer) + if err != nil { + panic(errors.Wrap(err, "error parsing RPC server address")) + } + rpcClient, err := grpcclient.Connect(rpcAddress) + if err != nil { + panic(errors.Wrap(err, "error connecting to RPC server")) + } + defer rpcClient.Disconnect() + + commandsChan, err := readCommands() + if err != nil { + panic(errors.Wrapf(err, "error reading commands from file %s", cfg.CommandsFilePath)) + } + + err = sendCommands(rpcClient, commandsChan) + if err != nil { + panic(errors.Wrap(err, "error sending commands")) + } +} diff --git a/stability-tests/rpc-stability/read.go b/stability-tests/rpc-stability/read.go new file mode 100644 index 0000000..1446e58 --- /dev/null +++ b/stability-tests/rpc-stability/read.go @@ -0,0 +1,25 @@ +package main + +import ( + "bufio" + "os" +) + +func readCommands() (<-chan string, error) { + cfg := activeConfig() + f, err := 
os.Open(cfg.CommandsFilePath) + if err != nil { + return nil, err + } + scanner := bufio.NewScanner(f) + + commandsChan := make(chan string) + spawn("readCommands", func() { + for scanner.Scan() { + command := scanner.Text() + commandsChan <- command + } + close(commandsChan) + }) + return commandsChan, nil +} diff --git a/stability-tests/rpc-stability/run/commands.json b/stability-tests/rpc-stability/run/commands.json new file mode 100644 index 0000000..b822581 --- /dev/null +++ b/stability-tests/rpc-stability/run/commands.json @@ -0,0 +1,5 @@ +{"getBlockDagInfoRequest": {}} +{"getBlockRequest": {"hash": "0000691a26e1cd33ed9d0587d774181726f4e38eecd722a858d3baaa1fd19250"}} +{"getBlockRequest": {"hash": "666661a26e1cd33ed9d0587d774181726f4e38eecd722a858d3baaa1fd19250"}} +{"submitBlockRequest": {"block": {"header":{"version":1,"parents":[],"hashMerkleRoot":"0000000000000000000000000000000000000000000","acceptedIdMerkleRoot":"0000000000000000000000000000000000000000000","utxoCommitment": "0000000000000000000000000000000000000000000","timestamp":1593528309396,"bits":511705087,"nonce":282366},"transactions":[{"version":1,"inputs":[],"outputs":[],"lockTime":0,"subnetworkId":"100000000000000000000000000","gas":0,"payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}]}}} +{"submitTransactionRequest": {"transaction": {"version":1,"inputs":[],"outputs":[],"lockTime":0,"subnetworkId":"100000000000000000000000000","gas":0,"payload":"AAAAAAAAAAAXqRTaF0XptUm9C/oaVplxx366MM1aS4drYXNwYS1kZXZuZXQ="}}} diff --git a/stability-tests/rpc-stability/run/run.sh b/stability-tests/rpc-stability/run/run.sh new file mode 100755 index 0000000..2431a56 --- /dev/null +++ b/stability-tests/rpc-stability/run/run.sh @@ -0,0 +1,25 @@ +#!/bin/bash +rm -rf /tmp/spectred-temp + +spectred --devnet --appdir=/tmp/spectred-temp --profile=6061 --loglevel=debug & +SPECTRED_PID=$! + +sleep 1 + +rpc-stability --devnet -p commands.json --profile=7000 +TEST_EXIT_CODE=$? 
+ +kill $SPECTRED_PID + +wait $SPECTRED_PID +SPECTRED_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" +echo "Spectred exit code: $SPECTRED_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ] && [ $SPECTRED_EXIT_CODE -eq 0 ]; then + echo "rpc-stability test: PASSED" + exit 0 +fi +echo "rpc-stability test: FAILED" +exit 1 diff --git a/stability-tests/rpc-stability/send.go b/stability-tests/rpc-stability/send.go new file mode 100644 index 0000000..6942651 --- /dev/null +++ b/stability-tests/rpc-stability/send.go @@ -0,0 +1,20 @@ +package main + +import ( + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/network/rpcclient/grpcclient" +) + +func sendCommands(rpcClient *grpcclient.GRPCClient, commandsChan <-chan string) error { + for command := range commandsChan { + log.Infof("Sending command %s", command) + response, err := rpcClient.PostJSON(command) + if err != nil { + return errors.Wrap(err, "error sending message") + } + + log.Infof("-> Got response: %s", response) + } + + return nil +} diff --git a/stability-tests/run/run-fast.sh b/stability-tests/run/run-fast.sh new file mode 100755 index 0000000..49ccf2e --- /dev/null +++ b/stability-tests/run/run-fast.sh @@ -0,0 +1,57 @@ +#!/bin/bash +set -e + +BASEDIR=$(dirname "$0") +PROJECT_ROOT=$( cd "${BASEDIR}/.."; pwd) + +failedTests=() + +# echo "Running application-level-garbage" +# cd "${PROJECT_ROOT}/application-level-garbage/run" && ./run.sh || failedTests+=("application-level-garbage") +# echo "Done running application-level-garbage" + +echo "Running infra-level-garbage" +cd "${PROJECT_ROOT}/infra-level-garbage/run" && ./run.sh || failedTests+=("infra-level-garbage") +echo "Done running infra-level-garbage" + +echo "Running spectredsanity" +cd "${PROJECT_ROOT}/spectredsanity/run" && ./run.sh || failedTests+=("spectredsanity") +echo "Done running spectredsanity" + +echo "Running rpc-stability" +cd "${PROJECT_ROOT}/rpc-stability/run" && ./run.sh || failedTests+=("rpc-stability") +echo "Done 
running rpc-stability" + +echo "Running rpc-idle-clients" +cd "${PROJECT_ROOT}/rpc-idle-clients/run" && ./run.sh || failedTests+=("rpc-idle-clients") +echo "Done running rpc-idle-clients" + +echo "Running simple-sync" +cd "${PROJECT_ROOT}/simple-sync/run" && ./run.sh || failedTests+=("simple-sync") +echo "Done running simple-sync" + +echo "Running orphans" +cd "${PROJECT_ROOT}/orphans/run" && ./run.sh || failedTests+=("orphans") +echo "Done running orphans" + +echo "Running reorg" +cd "${PROJECT_ROOT}/reorg/run" && ./run.sh || failedTests+=("reorg") +echo "Done running reorg" + +echo "Running many-tips" +cd "${PROJECT_ROOT}/many-tips/run" && ./run.sh || failedTests+=("many-tips") +echo "Done running many-tips" + +echo "Running netsync - fast" +cd "${PROJECT_ROOT}/netsync/run" && ./run-fast.sh || failedTests+=("netsync") +echo "Done running netsync - fast" + + +EXIT_CODE=0 +for t in "${failedTests[@]}"; do + EXIT_CODE=1 + echo "FAILED: ${t}" +done + +echo "Exiting with: ${EXIT_CODE}" +exit $EXIT_CODE diff --git a/stability-tests/run/run-slow.sh b/stability-tests/run/run-slow.sh new file mode 100755 index 0000000..5c02621 --- /dev/null +++ b/stability-tests/run/run-slow.sh @@ -0,0 +1,64 @@ +#!/bin/bash +set -e + +BASEDIR=$(dirname "$0") +PROJECT_ROOT=$( cd "${BASEDIR}/.."; pwd) + +failedTests=() + +# echo "Running application-level-garbage" +# cd "${PROJECT_ROOT}/application-level-garbage/run" && ./run.sh || failedTests+=("application-level-garbage") +# echo "Done running application-level-garbage" + +echo "Running infra-level-garbage" +cd "${PROJECT_ROOT}/infra-level-garbage/run" && ./run.sh || failedTests+=("infra-level-garbage") +echo "Done running infra-level-garbage" + +echo "Running spectredsanity" +cd "${PROJECT_ROOT}/spectredsanity/run" && ./run.sh || failedTests+=("spectredsanity") +echo "Done running spectredsanity" + +echo "Running rpc-stability" +cd "${PROJECT_ROOT}/rpc-stability/run" && ./run.sh || failedTests+=("rpc-stability") +echo "Done running 
rpc-stability" + +echo "Running rpc-idle-clients" +cd "${PROJECT_ROOT}/rpc-idle-clients/run" && ./run.sh || failedTests+=("rpc-idle-clients") +echo "Done running rpc-idle-clients" + +echo "Running simple-sync" +cd "${PROJECT_ROOT}/simple-sync/run" && ./run.sh || failedTests+=("simple-sync") +echo "Done running simple-sync" + +echo "Running orphans" +cd "${PROJECT_ROOT}/orphans/run" && ./run.sh || failedTests+=("orphans") +echo "Done running orphans" + +echo "Running many-tips" +cd "${PROJECT_ROOT}/many-tips/run" && ./run.sh || failedTests+=("many-tips") +echo "Done running many-tips" + +echo "Running daa" +cd "${PROJECT_ROOT}/daa/run" && ./run.sh || failedTests+=("daa") +echo "Done running daa" + +echo "Running reorg" +cd "${PROJECT_ROOT}/reorg/run" && ./run-full-finality-window-reorg.sh || failedTests+=("reorg") +echo "Done running reorg" + +echo "Running mempool-limits" +cd "${PROJECT_ROOT}/mempool-limits/run" && ./run.sh || failedTests+=("mempool-limits") +echo "Done running mempool-limits" + +echo "Running netsync - slow" +cd ${PROJECT_ROOT}/netsync/run"" && ./run.sh || failedTests+=("netsync") +echo "Done running netsync - slow" + +EXIT_CODE=0 +for t in "${failedTests[@]}"; do + EXIT_CODE=1 + echo "FAILED: ${t}" +done + +echo "Exiting with: ${EXIT_CODE}" +exit $EXIT_CODE diff --git a/stability-tests/run/run.sh b/stability-tests/run/run.sh new file mode 100755 index 0000000..c2d523f --- /dev/null +++ b/stability-tests/run/run.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e +TEST_EXIT_CODE=1 +BASEDIR=$(dirname "$0") +if [[ $1 == "slow" ]]; + then + echo "Running slow stability tests" + "${BASEDIR}/run-slow.sh" + TEST_EXIT_CODE=$? + echo "Done running slow stability tests" + else + echo "Running fast stability tests" + "${BASEDIR}/run-fast.sh" + TEST_EXIT_CODE=$? 
+ echo "Done running fast stability tests" +fi + +echo "Exit code: $TEST_EXIT_CODE" +exit $TEST_EXIT_CODE \ No newline at end of file diff --git a/stability-tests/simple-sync/README.md b/stability-tests/simple-sync/README.md new file mode 100644 index 0000000..3cd5ebc --- /dev/null +++ b/stability-tests/simple-sync/README.md @@ -0,0 +1,10 @@ +# Simple Sync Stability Tester + +This tests that two nodes that are connected to each other stay +synced while one of them mines a chain. + +## Running + +1. `go install` spectred and simple-sync. +2. `cd run` +3. `./run.sh` diff --git a/stability-tests/simple-sync/cmd.go b/stability-tests/simple-sync/cmd.go new file mode 100644 index 0000000..705660d --- /dev/null +++ b/stability-tests/simple-sync/cmd.go @@ -0,0 +1,13 @@ +package main + +import ( + "os/exec" + "syscall" +) + +func killWithSigterm(cmd *exec.Cmd, commandName string) { + err := cmd.Process.Signal(syscall.SIGTERM) + if err != nil { + log.Criticalf("error sending SIGKILL to %s", commandName) + } +} diff --git a/stability-tests/simple-sync/config.go b/stability-tests/simple-sync/config.go new file mode 100644 index 0000000..379f808 --- /dev/null +++ b/stability-tests/simple-sync/config.go @@ -0,0 +1,51 @@ +package main + +import ( + "path/filepath" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/infrastructure/config" + "github.com/spectre-project/spectred/stability-tests/common" +) + +const ( + defaultLogFilename = "simplesync.log" + defaultErrLogFilename = "simplesync_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"` + NumberOfBlocks uint64 `short:"n" long:"numblocks" description:"Number of blocks to mine" required:"true"` + 
Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` + config.NetworkFlags +} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + if err != nil { + return err + } + + err = cfg.ResolveNetwork(parser) + if err != nil { + return err + } + + initLog(defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/simple-sync/log.go b/stability-tests/simple-sync/log.go new file mode 100644 index 0000000..5063fce --- /dev/null +++ b/stability-tests/simple-sync/log.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("SXSA") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, errLogFile string) { + level := logger.LevelDebug + if activeConfig().LogLevel != "" { + var ok bool + level, ok = logger.LevelFromString(activeConfig().LogLevel) + if !ok { + fmt.Fprintf(os.Stderr, "Log level %s doesn't exists", activeConfig().LogLevel) + os.Exit(1) + } + } + log.SetLevel(level) + common.InitBackend(backendLog, logFile, errLogFile) +} diff --git a/stability-tests/simple-sync/main.go b/stability-tests/simple-sync/main.go new file mode 100644 index 0000000..2ab4d8d --- /dev/null +++ b/stability-tests/simple-sync/main.go @@ -0,0 +1,75 @@ +package main + +import ( + "os" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/stability-tests/common/rpc" + "github.com/spectre-project/spectred/util/panics" + "github.com/spectre-project/spectred/util/profiling" +) + 
+func main() {
+	err := realMain()
+
+	if err != nil {
+		log.Criticalf("An error occurred: %+v", err)
+		backendLog.Close()
+		os.Exit(1)
+	}
+	backendLog.Close()
+}
+
+func realMain() error {
+	defer panics.HandlePanic(log, "simple-sync-main", nil)
+
+	err := parseConfig()
+	if err != nil {
+		return errors.Wrap(err, "error in parseConfig")
+	}
+	common.UseLogger(backendLog, log.Level())
+	cfg := activeConfig()
+	if cfg.Profile != "" {
+		profiling.Start(cfg.Profile, log)
+	}
+
+	shutdown := uint64(0)
+
+	teardown, err := startNodes()
+	if err != nil {
+		return errors.Wrap(err, "error in startNodes")
+	}
+	defer teardown()
+
+	syncerRPCClient, err := rpc.ConnectToRPC(&rpc.Config{
+		RPCServer: syncerRPCAddress,
+	}, activeConfig().NetParams())
+	if err != nil {
+		return errors.Wrap(err, "error connecting to RPC server")
+	}
+	defer syncerRPCClient.Disconnect()
+
+	syncedRPCClient, err := rpc.ConnectToRPC(&rpc.Config{
+		RPCServer: syncedRPCAddress,
+	}, activeConfig().NetParams())
+	if err != nil {
+		return errors.Wrap(err, "error connecting to RPC server")
+	}
+	defer syncedRPCClient.Disconnect()
+
+	err = syncedRPCClient.RegisterForBlockAddedNotifications()
+	if err != nil {
+		return errors.Wrap(err, "error registering for blockAdded notifications")
+	}
+
+	err = mineLoop(syncerRPCClient, syncedRPCClient)
+	if err != nil {
+		return errors.Wrap(err, "error in mineLoop")
+	}
+
+	atomic.StoreUint64(&shutdown, 1)
+
+	return nil
+}
diff --git a/stability-tests/simple-sync/mineloop.go b/stability-tests/simple-sync/mineloop.go
new file mode 100644
index 0000000..f340464
--- /dev/null
+++ b/stability-tests/simple-sync/mineloop.go
@@ -0,0 +1,144 @@
+package main
+
+import (
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/spectre-project/go-secp256k1"
+	"github.com/spectre-project/spectred/app/appmessage"
+	"github.com/spectre-project/spectred/stability-tests/common"
+	"github.com/spectre-project/spectred/stability-tests/common/rpc"
+	
"github.com/spectre-project/spectred/util" +) + +func mineLoop(syncerRPCClient, syncedRPCClient *rpc.Client) error { + miningAddr, err := generateAddress() + if err != nil { + return err + } + + syncerBlockCountBefore, err := syncerRPCClient.GetBlockCount() + if err != nil { + return err + } + syncedBlockCountBefore, err := syncedRPCClient.GetBlockCount() + if err != nil { + return err + } + log.Infof("Starting to mine") + totalTime := time.Duration(0) + for i := uint64(0); i < activeConfig().NumberOfBlocks; i++ { + log.Infof("Mining block %d...", i+1) + err = mineBlock(syncerRPCClient.Address(), miningAddr) + if err != nil { + // Ignore error and instead check that the block count changed correctly. + // TODO: Fix the race condition in spectreminer so it won't panic (proper shutdown handler) + log.Warnf("mineBlock returned an err: %s", err) + } + + start := time.Now() + const timeToPropagate = 60 * time.Second + select { + case <-syncedRPCClient.OnBlockAdded: + case <-time.After(timeToPropagate): + return errors.Errorf("block %d took more than %s to propagate", i+1, timeToPropagate) + } + totalTime += time.Since(start) + + syncerResult, err := syncerRPCClient.GetBlockDAGInfo() + if err != nil { + return err + } + + syncedResult, err := syncedRPCClient.GetBlockDAGInfo() + if err != nil { + return err + } + + if !areTipsAreEqual(syncedResult, syncerResult) { + return errors.Errorf("syncer node has tips %s but synced node has tips %s", syncerResult.TipHashes, syncedResult.TipHashes) + } + } + + const expectedAveragePropagationTime = time.Second + averagePropagationTime := totalTime / time.Duration(activeConfig().NumberOfBlocks) + if averagePropagationTime > expectedAveragePropagationTime { + return errors.Errorf("average block propagation time %s is higher than expected (%s)", averagePropagationTime, expectedAveragePropagationTime) + } + + log.Infof("Finished to mine") + + log.Infof("Getting syncer block count") + syncerBlockCountAfter, err := 
syncerRPCClient.GetBlockCount() + if err != nil { + return err + } + + log.Infof("Getting syncee block count") + syncedBlockCountAfter, err := syncedRPCClient.GetBlockCount() + if err != nil { + return err + } + if syncerBlockCountAfter.BlockCount-syncerBlockCountBefore.BlockCount != activeConfig().NumberOfBlocks { + return errors.Errorf("Expected to mine %d blocks, instead mined: %d", activeConfig().NumberOfBlocks, syncerBlockCountAfter.BlockCount-syncerBlockCountBefore.BlockCount) + } + if syncedBlockCountAfter.BlockCount-syncedBlockCountBefore.BlockCount != activeConfig().NumberOfBlocks { + return errors.Errorf("Expected syncer to have %d new blocks, instead have: %d", activeConfig().NumberOfBlocks, syncedBlockCountAfter.BlockCount-syncedBlockCountBefore.BlockCount) + } + + log.Infof("Finished the mine loop successfully") + return nil +} + +func generateAddress() (util.Address, error) { + privateKey, err := secp256k1.GenerateSchnorrKeyPair() + if err != nil { + return nil, err + } + + pubKey, err := privateKey.SchnorrPublicKey() + if err != nil { + return nil, err + } + + pubKeySerialized, err := pubKey.Serialize() + if err != nil { + return nil, err + } + + return util.NewAddressPublicKey(pubKeySerialized[:], activeConfig().ActiveNetParams.Prefix) +} + +func areTipsAreEqual(resultA, resultB *appmessage.GetBlockDAGInfoResponseMessage) bool { + if len(resultA.TipHashes) != len(resultB.TipHashes) { + return false + } + + tipsASet := make(map[string]struct{}) + for _, tip := range resultA.TipHashes { + tipsASet[tip] = struct{}{} + } + + for _, tip := range resultB.TipHashes { + if _, ok := tipsASet[tip]; !ok { + return false + } + } + + return true +} + +func mineBlock(syncerRPCAddress string, miningAddress util.Address) error { + spectreminerCmd, err := common.StartCmd("MINER", + "spectreminer", + common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()), + "-s", syncerRPCAddress, + "--mine-when-not-synced", + "--miningaddr", 
miningAddress.EncodeAddress(), + "--numblocks", "1", + ) + if err != nil { + return err + } + return errors.Wrapf(spectreminerCmd.Wait(), "error with command '%s'", spectreminerCmd) +} diff --git a/stability-tests/simple-sync/run/run.sh b/stability-tests/simple-sync/run/run.sh new file mode 100755 index 0000000..f8a4479 --- /dev/null +++ b/stability-tests/simple-sync/run/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -e +simple-sync --simnet -n=1000 --profile=7000 +TEST_EXIT_CODE=$? + +echo "Exit code: $TEST_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "simple-sync test: PASSED" + exit 0 +fi +echo "simple-sync test: FAILED" +exit 1 diff --git a/stability-tests/simple-sync/start-nodes.go b/stability-tests/simple-sync/start-nodes.go new file mode 100644 index 0000000..4d4a35b --- /dev/null +++ b/stability-tests/simple-sync/start-nodes.go @@ -0,0 +1,120 @@ +package main + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" +) + +const ( + syncerRPCAddress = "localhost:9000" + syncedRPCAddress = "localhost:9100" +) + +func startNodes() (teardown func(), err error) { + const ( + syncerListen = "localhost:9001" + syncedListen = "localhost:9101" + ) + + log.Infof("Starting nodes") + syncerDataDir, err := common.TempDir("spectred-datadir-syncer") + if err != nil { + panic(errors.Wrapf(err, "error in Tempdir")) + } + log.Infof("SYNCER datadir: %s", syncerDataDir) + + syncedDataDir, err := common.TempDir("spectred-datadir-synced") + if err != nil { + panic(errors.Wrapf(err, "error in Tempdir")) + } + log.Infof("SYNCED datadir: %s", syncedDataDir) + + syncerCmd, err := common.StartCmd("SPECTRED-SYNCER", + "spectred", + common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()), + "--appdir", syncerDataDir, + "--logdir", syncerDataDir, + "--rpclisten", syncerRPCAddress, + "--listen", syncerListen, + 
"--loglevel", "debug", + "--allow-submit-block-when-not-synced", + ) + if err != nil { + return nil, err + } + + syncedCmd, err := common.StartCmd("SPECTRED-SYNCED", + "spectred", + common.NetworkCliArgumentFromNetParams(activeConfig().NetParams()), + "--appdir", syncedDataDir, + "--logdir", syncedDataDir, + "--rpclisten", syncedRPCAddress, + "--listen", syncedListen, + "--connect", syncerListen, + "--loglevel", "debug", + ) + if err != nil { + return nil, err + } + + shutdown := uint64(0) + + processesStoppedWg := sync.WaitGroup{} + processesStoppedWg.Add(2) + spawn("startNodes-syncerCmd.Wait", func() { + err := syncerCmd.Wait() + if err != nil { + if atomic.LoadUint64(&shutdown) == 0 { + panics.Exit(log, fmt.Sprintf("syncerCmd closed unexpectedly: %s. See logs at: %s", err, syncerDataDir)) + } + if !strings.Contains(err.Error(), "signal: killed") { + panics.Exit(log, fmt.Sprintf("syncerCmd closed with an error: %s. See logs at: %s", err, syncerDataDir)) + } + } + processesStoppedWg.Done() + }) + + spawn("startNodes-syncedCmd.Wait", func() { + err = syncedCmd.Wait() + if err != nil { + if atomic.LoadUint64(&shutdown) == 0 { + panics.Exit(log, fmt.Sprintf("syncedCmd closed unexpectedly: %s. See logs at: %s", err, syncedDataDir)) + } + if !strings.Contains(err.Error(), "signal: killed") { + panics.Exit(log, fmt.Sprintf("syncedCmd closed with an error: %s. 
See logs at: %s", err, syncedDataDir))
+			}
+		}
+		processesStoppedWg.Done()
+	})
+
+	// We let the nodes initialize and connect to each other
+	log.Infof("Waiting for nodes to start...")
+	const initTime = 2 * time.Second
+	time.Sleep(initTime)
+
+	return func() {
+		atomic.StoreUint64(&shutdown, 1)
+		killWithSigterm(syncerCmd, "syncerCmd")
+		killWithSigterm(syncedCmd, "syncedCmd")
+
+		processesStoppedChan := make(chan struct{})
+		spawn("startNodes-processStoppedWg.Wait", func() {
+			processesStoppedWg.Wait()
+			processesStoppedChan <- struct{}{}
+		})
+
+		const timeout = 10 * time.Second
+		select {
+		case <-processesStoppedChan:
+		case <-time.After(timeout):
+			panics.Exit(log, fmt.Sprintf("Processes couldn't be closed after %s", timeout))
+		}
+	}, nil
+}
diff --git a/stability-tests/spectredsanity/README.md b/stability-tests/spectredsanity/README.md
new file mode 100644
index 0000000..c0df5ff
--- /dev/null
+++ b/stability-tests/spectredsanity/README.md
@@ -0,0 +1,13 @@
+# Spectred Sanity Tool
+
+This tries to run spectred with different sets of arguments for sanity.
+
+In order to get clean run for each command, the tool injects its own
+`--appdir` argument so it will be able to clean it between runs, so
+it's forbidden to use `--appdir` as part of the arguments set.
+
+## Running
+
+1. `go install` spectred and spectredsanity.
+2. `cd run`
+3. 
`./run.sh` diff --git a/stability-tests/spectredsanity/commandloop.go b/stability-tests/spectredsanity/commandloop.go new file mode 100644 index 0000000..236ba57 --- /dev/null +++ b/stability-tests/spectredsanity/commandloop.go @@ -0,0 +1,80 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" +) + +type commandFailure struct { + cmd *exec.Cmd + err error +} + +func (cf commandFailure) String() string { + return fmt.Sprintf("command `%s` failed: %s", cf.cmd, cf.err) +} + +func commandLoop(argsChan <-chan []string) ([]commandFailure, error) { + failures := make([]commandFailure, 0) + dataDirectoryPath, err := common.TempDir("spectredsanity-spectred-datadir") + if err != nil { + return nil, errors.Wrapf(err, "error creating temp dir") + } + defer os.RemoveAll(dataDirectoryPath) + + for args := range argsChan { + err := os.RemoveAll(dataDirectoryPath) + if err != nil { + return nil, err + } + + args, err = handleDataDirArg(args, dataDirectoryPath) + if err != nil { + return nil, err + } + + cmd := exec.Command("spectred", args...) 
+ cmd.Stdout = common.NewLogWriter(log, logger.LevelTrace, "SPECTRED-STDOUT") + cmd.Stderr = common.NewLogWriter(log, logger.LevelWarn, "SPECTRED-STDERR") + + log.Infof("Running `%s`", cmd) + errChan := make(chan error) + spawn("commandLoop-cmd.Run", func() { + errChan <- cmd.Run() + }) + + const timeout = time.Minute + select { + case err := <-errChan: + failure := commandFailure{ + cmd: cmd, + err: err, + } + log.Error(failure) + failures = append(failures, failure) + case <-time.After(timeout): + err := cmd.Process.Kill() + if err != nil { + return nil, errors.Wrapf(err, "error in Kill") + } + log.Infof("Successfully run `%s`", cmd) + } + } + return failures, nil +} + +func handleDataDirArg(args []string, dataDir string) ([]string, error) { + for _, arg := range args { + if strings.HasPrefix(arg, "--appdir") { + return nil, errors.New("invalid argument --appdir") + } + } + return append([]string{"--appdir", dataDir}, args...), nil +} diff --git a/stability-tests/spectredsanity/config.go b/stability-tests/spectredsanity/config.go new file mode 100644 index 0000000..c765b53 --- /dev/null +++ b/stability-tests/spectredsanity/config.go @@ -0,0 +1,44 @@ +package main + +import ( + "path/filepath" + + "github.com/jessevdk/go-flags" + "github.com/spectre-project/spectred/stability-tests/common" +) + +const ( + defaultLogFilename = "spectredsanity.log" + defaultErrLogFilename = "spectredsanity_err.log" +) + +var ( + // Default configuration options + defaultLogFile = filepath.Join(common.DefaultAppDir, defaultLogFilename) + defaultErrLogFile = filepath.Join(common.DefaultAppDir, defaultErrLogFilename) +) + +type configFlags struct { + CommandListFile string `long:"command-list-file" description:"Path to the command list file"` + LogLevel string `short:"d" long:"loglevel" description:"Set log level {trace, debug, info, warn, error, critical}"` + Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65536"` 
+} + +var cfg *configFlags + +func activeConfig() *configFlags { + return cfg +} + +func parseConfig() error { + cfg = &configFlags{} + parser := flags.NewParser(cfg, flags.PrintErrors|flags.HelpFlag) + _, err := parser.Parse() + if err != nil { + return err + } + + initLog(defaultLogFile, defaultErrLogFile) + + return nil +} diff --git a/stability-tests/spectredsanity/log.go b/stability-tests/spectredsanity/log.go new file mode 100644 index 0000000..7bd9afc --- /dev/null +++ b/stability-tests/spectredsanity/log.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/panics" +) + +var ( + backendLog = logger.NewBackend() + log = backendLog.Logger("SXSA") + spawn = panics.GoroutineWrapperFunc(log) +) + +func initLog(logFile, errLogFile string) { + level := logger.LevelInfo + if activeConfig().LogLevel != "" { + var ok bool + level, ok = logger.LevelFromString(activeConfig().LogLevel) + if !ok { + fmt.Fprintf(os.Stderr, "Log level %s doesn't exists", activeConfig().LogLevel) + os.Exit(1) + } + } + log.SetLevel(level) + common.InitBackend(backendLog, logFile, errLogFile) +} diff --git a/stability-tests/spectredsanity/main.go b/stability-tests/spectredsanity/main.go new file mode 100644 index 0000000..d5952c3 --- /dev/null +++ b/stability-tests/spectredsanity/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "fmt" + "os" + + "github.com/spectre-project/spectred/stability-tests/common" + "github.com/spectre-project/spectred/util/profiling" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/util/panics" +) + +func main() { + defer panics.HandlePanic(log, "spectredsanity-main", nil) + err := parseConfig() + if err != nil { + panic(errors.Wrap(err, "error in parseConfig")) + } + defer backendLog.Close() + common.UseLogger(backendLog, log.Level()) + + cfg := activeConfig() + if 
cfg.Profile != "" { + profiling.Start(cfg.Profile, log) + } + + argsChan := readArgs() + failures, err := commandLoop(argsChan) + if err != nil { + panic(errors.Wrap(err, "error in commandLoop")) + } + + if len(failures) > 0 { + fmt.Fprintf(os.Stderr, "FAILED:\n") + for _, failure := range failures { + fmt.Fprintln(os.Stderr, failure) + } + backendLog.Close() + os.Exit(1) + } + + log.Infof("All tests have passed") +} diff --git a/stability-tests/spectredsanity/read.go b/stability-tests/spectredsanity/read.go new file mode 100644 index 0000000..4c5924c --- /dev/null +++ b/stability-tests/spectredsanity/read.go @@ -0,0 +1,43 @@ +package main + +import ( + "bufio" + "io" + "os" + "strings" + + "github.com/pkg/errors" +) + +func readArgs() <-chan []string { + argsChan := make(chan []string) + spawn("readArgs", func() { + f, err := os.Open(cfg.CommandListFile) + if err != nil { + panic(errors.Wrapf(err, "error in Open")) + } + + r := bufio.NewReader(f) + for { + line, _, err := r.ReadLine() + + if err == io.EOF { + break + } + + if err != nil { + panic(errors.Wrapf(err, "error in ReadLine")) + } + + trimmedLine := strings.TrimSpace(string(line)) + if trimmedLine == "" || strings.HasPrefix(trimmedLine, "//") { + continue + } + + argsChan <- strings.Split(trimmedLine, " ") + } + + close(argsChan) + }) + return argsChan +} diff --git a/stability-tests/spectredsanity/run/commands-list b/stability-tests/spectredsanity/run/commands-list new file mode 100644 index 0000000..62b1cbc --- /dev/null +++ b/stability-tests/spectredsanity/run/commands-list @@ -0,0 +1 @@ +--devnet \ No newline at end of file diff --git a/stability-tests/spectredsanity/run/run.sh b/stability-tests/spectredsanity/run/run.sh new file mode 100755 index 0000000..d5af791 --- /dev/null +++ b/stability-tests/spectredsanity/run/run.sh @@ -0,0 +1,12 @@ +#!/bin/bash +spectredsanity --command-list-file ./commands-list --profile=7000 +TEST_EXIT_CODE=$? 
+ +echo "Exit code: $TEST_EXIT_CODE" + +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "spectredsanity test: PASSED" + exit 0 +fi +echo "spectredsanity test: FAILED" +exit 1 diff --git a/testing/integration/16_incoming_connections_test.go b/testing/integration/16_incoming_connections_test.go new file mode 100644 index 0000000..ac279b0 --- /dev/null +++ b/testing/integration/16_incoming_connections_test.go @@ -0,0 +1,69 @@ +package integration + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/spectre-project/spectred/app/appmessage" +) + +func Test16IncomingConnections(t *testing.T) { + // Much more than 16 hosts creates a risk of running out of available file descriptors for leveldb + const numBullies = 16 + harnessesParams := make([]*harnessParams, numBullies+1) + for i := 0; i < numBullies+1; i++ { + harnessesParams[i] = &harnessParams{ + p2pAddress: fmt.Sprintf("127.0.0.1:%d", 12345+i), + rpcAddress: fmt.Sprintf("127.0.0.1:%d", 22345+i), + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + } + } + + appHarnesses, teardown := setupHarnesses(t, harnessesParams) + defer teardown() + + victim, bullies := appHarnesses[0], appHarnesses[1:] + + for _, bully := range bullies { + connect(t, victim, bully) + } + + blockAddedWG := sync.WaitGroup{} + blockAddedWG.Add(numBullies) + for _, bully := range bullies { + blockAdded := false + onBlockAdded := func(_ *appmessage.BlockAddedNotificationMessage) { + if blockAdded { + t.Fatalf("Single bully reported block added twice") + } + blockAdded = true + blockAddedWG.Done() + } + + err := bully.rpcClient.RegisterForBlockAddedNotifications(onBlockAdded) + if err != nil { + t.Fatalf("Error from RegisterForBlockAddedNotifications: %+v", err) + } + } + + _ = mineNextBlock(t, victim) + + select { + case <-time.After(defaultTimeout): + t.Fatalf("Timeout waiting for block added notification from the bullies") + case <-ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }): + } +} + +// 
ReceiveFromChanWhenDone takes a blocking function and returns a channel that sends an empty struct when the function is done. +func ReceiveFromChanWhenDone(callback func()) <-chan struct{} { + ch := make(chan struct{}) + spawn("ReceiveFromChanWhenDone", func() { + callback() + close(ch) + }) + return ch +} diff --git a/testing/integration/address_exchange_test.go b/testing/integration/address_exchange_test.go new file mode 100644 index 0000000..6e79dc4 --- /dev/null +++ b/testing/integration/address_exchange_test.go @@ -0,0 +1,34 @@ +package integration + +import ( + "testing" + + "github.com/spectre-project/spectred/infrastructure/network/addressmanager" +) + +func TestAddressExchange(t *testing.T) { + appHarness1, appHarness2, appHarness3, teardown := standardSetup(t) + defer teardown() + + testAddress := "1.2.3.4:6789" + err := addressmanager.AddAddressByIP(appHarness1.app.AddressManager(), testAddress, nil) + if err != nil { + t.Fatalf("Error adding address to addressManager: %+v", err) + } + + connect(t, appHarness1, appHarness2) + connect(t, appHarness2, appHarness3) + + peerAddresses, err := appHarness3.rpcClient.GetPeerAddresses() + if err != nil { + t.Fatalf("Error getting peer addresses: %+v", err) + } + + for _, peerAddress := range peerAddresses.Addresses { + if peerAddress.Addr == testAddress { + return + } + } + + t.Errorf("Didn't find testAddress in list of addresses of appHarness3") +} diff --git a/testing/integration/basic_sync_test.go b/testing/integration/basic_sync_test.go new file mode 100644 index 0000000..72de847 --- /dev/null +++ b/testing/integration/basic_sync_test.go @@ -0,0 +1,65 @@ +package integration + +import ( + "testing" + "time" + + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + + "github.com/spectre-project/spectred/app/appmessage" +) + +func TestIntegrationBasicSync(t *testing.T) { + appHarness1, appHarness2, appHarness3, teardown := standardSetup(t) + defer teardown() + + // Connect nodes in 
chain: 1 <--> 2 <--> 3 + // So that node 3 doesn't directly get blocks from node 1 + connect(t, appHarness1, appHarness2) + connect(t, appHarness2, appHarness3) + + app2OnBlockAddedChan := make(chan *appmessage.RPCBlock) + setOnBlockAddedHandler(t, appHarness2, func(notification *appmessage.BlockAddedNotificationMessage) { + app2OnBlockAddedChan <- notification.Block + }) + + app3OnBlockAddedChan := make(chan *appmessage.RPCBlock) + setOnBlockAddedHandler(t, appHarness3, func(notification *appmessage.BlockAddedNotificationMessage) { + app3OnBlockAddedChan <- notification.Block + }) + + block := mineNextBlock(t, appHarness1) + + var rpcBlock *appmessage.RPCBlock + select { + case rpcBlock = <-app2OnBlockAddedChan: + case <-time.After(defaultTimeout): + t.Fatalf("Timeout waiting for block added notification on node directly connected to miner") + } + domainBlockFromRPC, err := appmessage.RPCBlockToDomainBlock(rpcBlock) + if err != nil { + t.Fatalf("Could not convert RPC block: %s", err) + } + rpcBlockHash := consensushashing.BlockHash(domainBlockFromRPC) + + blockHash := consensushashing.BlockHash(block) + if !rpcBlockHash.Equal(blockHash) { + t.Errorf("Expected block with hash '%s', but got '%s'", blockHash, rpcBlockHash) + } + + select { + case rpcBlock = <-app3OnBlockAddedChan: + case <-time.After(defaultTimeout): + t.Fatalf("Timeout waiting for block added notification on node indirectly connected to miner") + } + domainBlockFromRPC, err = appmessage.RPCBlockToDomainBlock(rpcBlock) + if err != nil { + t.Fatalf("Could not convert RPC block: %s", err) + } + rpcBlockHash = consensushashing.BlockHash(domainBlockFromRPC) + + blockHash = consensushashing.BlockHash(block) + if !rpcBlockHash.Equal(blockHash) { + t.Errorf("Expected block with hash '%s', but got '%s'", blockHash, rpcBlockHash) + } +} diff --git a/testing/integration/config_test.go b/testing/integration/config_test.go new file mode 100644 index 0000000..6b9973a --- /dev/null +++ 
b/testing/integration/config_test.go @@ -0,0 +1,72 @@ +package integration + +import ( + "io/ioutil" + "testing" + "time" + + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/infrastructure/config" +) + +const ( + p2pAddress1 = "127.0.0.1:54321" + p2pAddress2 = "127.0.0.1:54322" + p2pAddress3 = "127.0.0.1:54323" + p2pAddress4 = "127.0.0.1:54324" + p2pAddress5 = "127.0.0.1:54325" + + rpcAddress1 = "127.0.0.1:12345" + rpcAddress2 = "127.0.0.1:12346" + rpcAddress3 = "127.0.0.1:12347" + rpcAddress4 = "127.0.0.1:12348" + rpcAddress5 = "127.0.0.1:12349" + + miningAddress1 = "spectresim:qqqqnc0pxg7qw3qkc7l6sge8kfhsvvyt7mkw8uamtndqup27ftnd6pl8nkpjc" + miningAddress1PrivateKey = "0d81045b0deb2af36a25403c2154c87aa82d89dd337b575bae27ce7f5de53cee" + + miningAddress2 = "spectresim:qqqqnc0pxg7qw3qkc7l6sge8kfhsvvyt7mkw8uamtndqup27ftnd6pl8nkpjc" + miningAddress2PrivateKey = "0d81045b0deb2af36a25403c2154c87aa82d89dd337b575bae27ce7f5de53cee" + + miningAddress3 = "spectresim:qqq754f2gdcjcnykwuwwr60c82rh5u6mxxe7yqxljnrxz9fu0h95k5aan8sm2" + miningAddress3PrivateKey = "f6c8f31fd359cbb97007034780bc4021f6ad01c6bc10499b79849efd4cc7ca39" + + defaultTimeout = 30 * time.Second +) + +func setConfig(t *testing.T, harness *appHarness, protocolVersion uint32) { + harness.config = commonConfig() + harness.config.AppDir = randomDirectory(t) + harness.config.Listeners = []string{harness.p2pAddress} + harness.config.RPCListeners = []string{harness.rpcAddress} + harness.config.UTXOIndex = harness.utxoIndex + harness.config.AllowSubmitBlockWhenNotSynced = true + if protocolVersion != 0 { + harness.config.ProtocolVersion = protocolVersion + } + + if harness.overrideDAGParams != nil { + harness.config.ActiveNetParams = harness.overrideDAGParams + } +} + +func commonConfig() *config.Config { + commonConfig := config.DefaultConfig() + + *commonConfig.ActiveNetParams = dagconfig.SimnetParams // Copy so that we can make changes safely + 
commonConfig.ActiveNetParams.BlockCoinbaseMaturity = 10 + commonConfig.TargetOutboundPeers = 0 + commonConfig.DisableDNSSeed = true + commonConfig.Simnet = true + + return commonConfig +} + +func randomDirectory(t *testing.T) string { + dir, err := ioutil.TempDir("", "integration-test") + if err != nil { + t.Fatalf("Error creating temporary directory for test: %+v", err) + } + + return dir +} diff --git a/testing/integration/connect_test.go b/testing/integration/connect_test.go new file mode 100644 index 0000000..9d13cf1 --- /dev/null +++ b/testing/integration/connect_test.go @@ -0,0 +1,70 @@ +package integration + +import ( + "testing" + "time" +) + +func connect(t *testing.T, incoming, outgoing *appHarness) { + err := outgoing.rpcClient.AddPeer(incoming.p2pAddress, false) + if err != nil { + t.Fatalf("Error connecting the nodes") + } + + onConnectedChan := make(chan struct{}) + abortConnectionChan := make(chan struct{}) + defer close(abortConnectionChan) + + spawn("integration.connect-Wait for connection", func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for range ticker.C { + if isConnected(t, incoming, outgoing) { + close(onConnectedChan) + return + } + + select { + case <-abortConnectionChan: + return + default: + } + } + }) + + select { + case <-onConnectedChan: + case <-time.After(defaultTimeout): + t.Fatalf("Timed out waiting for the apps to connect") + } +} +func isConnected(t *testing.T, appHarness1, appHarness2 *appHarness) bool { + connectedPeerInfo1, err := appHarness1.rpcClient.GetConnectedPeerInfo() + if err != nil { + t.Fatalf("Error getting connected peer info for app1: %+v", err) + } + connectedPeerInfo2, err := appHarness2.rpcClient.GetConnectedPeerInfo() + if err != nil { + t.Fatalf("Error getting connected peer info for app2: %+v", err) + } + + var incomingConnected, outgoingConnected bool + app1ID, app2ID := appHarness1.app.P2PNodeID().String(), appHarness2.app.P2PNodeID().String() + + for _, connectedPeer 
:= range connectedPeerInfo1.Infos { + if connectedPeer.ID == app2ID { + incomingConnected = true + break + } + } + + for _, connectedPeer := range connectedPeerInfo2.Infos { + if connectedPeer.ID == app1ID { + outgoingConnected = true + break + } + } + + return incomingConnected && outgoingConnected +} diff --git a/testing/integration/ibd_test.go b/testing/integration/ibd_test.go new file mode 100644 index 0000000..f6effb6 --- /dev/null +++ b/testing/integration/ibd_test.go @@ -0,0 +1,322 @@ +package integration + +import ( + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/mining" + + "github.com/spectre-project/spectred/domain/dagconfig" + + "github.com/spectre-project/spectred/app/appmessage" +) + +func TestIBD(t *testing.T) { + const numBlocks = 100 + + syncer, syncee, _, teardown := standardSetup(t) + defer teardown() + + for i := 0; i < numBlocks; i++ { + mineNextBlock(t, syncer) + } + + blockAddedWG := sync.WaitGroup{} + blockAddedWG.Add(numBlocks) + receivedBlocks := 0 + disableOnBlockAddedHandler := false + setOnBlockAddedHandler(t, syncee, func(_ *appmessage.BlockAddedNotificationMessage) { + if disableOnBlockAddedHandler { + return + } + receivedBlocks++ + blockAddedWG.Done() + }) + + // We expect this to trigger IBD + connect(t, syncer, syncee) + + select { + case <-time.After(defaultTimeout): + t.Fatalf("Timeout waiting for IBD to finish. 
Received %d blocks out of %d", receivedBlocks, numBlocks) + case <-ReceiveFromChanWhenDone(func() { blockAddedWG.Wait() }): + } + + disableOnBlockAddedHandler = true + // Wait for syncee to exit IBD + time.Sleep(time.Second) + // This should trigger resolving the syncee virtual + mineNextBlock(t, syncer) + time.Sleep(time.Second) + + tip1Hash, err := syncer.rpcClient.GetSelectedTipHash() + if err != nil { + t.Fatalf("Error getting tip for syncer") + } + + tip2Hash, err := syncee.rpcClient.GetSelectedTipHash() + if err != nil { + t.Fatalf("Error getting tip for syncee") + } + + if tip1Hash.SelectedTipHash != tip2Hash.SelectedTipHash { + t.Errorf("Tips of syncer: '%s' and syncee '%s' are not equal", tip1Hash.SelectedTipHash, tip2Hash.SelectedTipHash) + } +} + +// TestIBDWithPruning checks the IBD from a node with +// already pruned blocks. +func TestIBDWithPruning(t *testing.T) { + testSync := func(syncer, syncee *appHarness) { + utxoSetOverriden := make(chan struct{}) + err := syncee.rpcClient.RegisterPruningPointUTXOSetNotifications(func() { + close(utxoSetOverriden) + }) + + if err != nil { + t.Fatalf("RegisterPruningPointUTXOSetNotifications: %+v", err) + } + + // We expect this to trigger IBD + connect(t, syncer, syncee) + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + start := time.Now() + for range ticker.C { + if time.Since(start) > 30*time.Second { + t.Fatalf("Timeout waiting for IBD to finish.") + } + + syncerInfo, err := syncer.rpcClient.GetBlockDAGInfo() + if err != nil { + t.Fatalf("Error getting tip for syncer") + } + synceeInfo, err := syncee.rpcClient.GetBlockDAGInfo() + if err != nil { + t.Fatalf("Error getting tip for syncee") + } + + if reflect.DeepEqual(syncerInfo.TipHashes, synceeInfo.TipHashes) { + break + } + } + + const timeout = 10 * time.Second + select { + case <-utxoSetOverriden: + case <-time.After(timeout): + t.Fatalf("expected pruning point UTXO set override notification, but it didn't get one after %s", timeout) + } 
+ + // Checking that the syncee can generate block templates before resolving the virtual + _, err = syncee.rpcClient.GetBlockTemplate(syncee.miningAddress, "") + if err != nil { + t.Fatalf("Error getting block template: %+v", err) + } + + // This should trigger resolving the syncee virtual + syncerTip := mineNextBlockWithMockTimestamps(t, syncer, rand.New(rand.NewSource(time.Now().UnixNano()))) + time.Sleep(time.Second) + + synceeSelectedTip, err := syncee.rpcClient.GetSelectedTipHash() + if err != nil { + t.Fatalf("Error getting tip for syncee") + } + + if synceeSelectedTip.SelectedTipHash != consensushashing.BlockHash(syncerTip).String() { + t.Fatalf("Unexpected selected tip: expected %s but got %s", consensushashing.BlockHash(syncerTip).String(), synceeSelectedTip.SelectedTipHash) + } + } + + const numBlocks = 100 + + overrideDAGParams := dagconfig.SimnetParams + + // Increase the target time per block so that we could mine + // blocks with timestamps that are spaced far enough apart + // to avoid failing the timestamp threshold validation of + // ibd-with-headers-proof + overrideDAGParams.TargetTimePerBlock = time.Minute + + // This is done to make a pruning depth of 6 blocks + overrideDAGParams.FinalityDuration = 2 * overrideDAGParams.TargetTimePerBlock + overrideDAGParams.K = 0 + overrideDAGParams.PruningProofM = 20 + + expectedPruningDepth := uint64(6) + if overrideDAGParams.PruningDepth() != expectedPruningDepth { + t.Fatalf("Unexpected pruning depth: expected %d but got %d", expectedPruningDepth, overrideDAGParams.PruningDepth()) + } + + harnesses, teardown := setupHarnesses(t, []*harnessParams{ + { + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + overrideDAGParams: &overrideDAGParams, + }, + { + p2pAddress: p2pAddress2, + rpcAddress: rpcAddress2, + miningAddress: miningAddress2, + miningAddressPrivateKey: miningAddress2PrivateKey, + overrideDAGParams: 
&overrideDAGParams, + utxoIndex: true, + }, + { + p2pAddress: p2pAddress3, + rpcAddress: rpcAddress3, + miningAddress: miningAddress3, + miningAddressPrivateKey: miningAddress3PrivateKey, + overrideDAGParams: &overrideDAGParams, + utxoIndex: true, + }, + }) + defer teardown() + + syncer, syncee1, syncee2 := harnesses[0], harnesses[1], harnesses[2] + + // Let syncee1 have two blocks that the syncer + // doesn't have to test a situation where + // the block locator will need more than one + // iteration to find the highest shared chain + // block. + const synceeOnlyBlocks = 2 + rd := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < synceeOnlyBlocks; i++ { + mineNextBlockWithMockTimestamps(t, syncee1, rd) + } + + for i := 0; i < numBlocks-1; i++ { + mineNextBlockWithMockTimestamps(t, syncer, rd) + } + + testSync(syncer, syncee1) + + // Test a situation where a node with pruned headers syncs another fresh node. + testSync(syncee1, syncee2) +} + +var currentMockTimestamp int64 = 0 + +// mineNextBlockWithMockTimestamps mines blocks with large timestamp differences +// between every two blocks. 
This is done to avoid the timestamp threshold validation +// of ibd-with-headers-proof +func mineNextBlockWithMockTimestamps(t *testing.T, harness *appHarness, rd *rand.Rand) *externalapi.DomainBlock { + blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress, "") + if err != nil { + t.Fatalf("Error getting block template: %+v", err) + } + + block, err := appmessage.RPCBlockToDomainBlock(blockTemplate.Block) + if err != nil { + t.Fatalf("Error converting block: %s", err) + } + + if currentMockTimestamp == 0 { + currentMockTimestamp = block.Header.TimeInMilliseconds() + } else { + currentMockTimestamp += 10_000 + } + mutableHeader := block.Header.ToMutable() + mutableHeader.SetTimeInMilliseconds(currentMockTimestamp) + block.Header = mutableHeader.ToImmutable() + + mining.SolveBlock(block, rd) + + _, err = harness.rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + t.Fatalf("Error submitting block: %s", err) + } + + return block +} + +func TestBoundedMergeDepth(t *testing.T) { + overrideDAGParams := dagconfig.SimnetParams + + overrideDAGParams.MergeDepth = 50 + + harnesses, teardown := setupHarnesses(t, []*harnessParams{ + { + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + overrideDAGParams: &overrideDAGParams, + }, + { + p2pAddress: p2pAddress2, + rpcAddress: rpcAddress2, + miningAddress: miningAddress2, + miningAddressPrivateKey: miningAddress2PrivateKey, + overrideDAGParams: &overrideDAGParams, + utxoIndex: true, + }, + { + p2pAddress: p2pAddress3, + rpcAddress: rpcAddress3, + miningAddress: miningAddress3, + miningAddressPrivateKey: miningAddress3PrivateKey, + overrideDAGParams: &overrideDAGParams, + utxoIndex: true, + }, + { + p2pAddress: p2pAddress4, + rpcAddress: rpcAddress4, + miningAddress: miningAddress3, + miningAddressPrivateKey: miningAddress3PrivateKey, + overrideDAGParams: &overrideDAGParams, + utxoIndex: true, + }, + }) + defer 
teardown() + + test := func(syncer, syncee *appHarness, depth uint64, shouldSync bool) { + const ibdTriggerRange = 32 + if depth <= ibdTriggerRange { + t.Fatalf("Depth is too small") + } + + for i := uint64(0); i < depth+ibdTriggerRange+1; i++ { + mineNextBlock(t, syncee) + } + + for i := uint64(0); i < ibdTriggerRange+1; i++ { + mineNextBlock(t, syncer) + } + + countBefore, err := syncee.rpcClient.GetBlockCount() + if err != nil { + t.Fatalf("GetBlockCount: %+v", err) + } + + connect(t, syncee, syncer) + + time.Sleep(5 * time.Second) + countAfter, err := syncee.rpcClient.GetBlockCount() + if err != nil { + t.Fatalf("GetBlockCount: %+v", err) + } + + if (countBefore.HeaderCount != countAfter.HeaderCount) != shouldSync { + t.Fatalf("countBefore.HeaderCount: %d, countAfter.HeaderCount: %d", countBefore.HeaderCount, countAfter.HeaderCount) + } + } + + t.Run("mergeDepth", func(t *testing.T) { + test(harnesses[0], harnesses[1], overrideDAGParams.MergeDepth, false) + }) + + t.Run("mergeDepth-1", func(t *testing.T) { + test(harnesses[2], harnesses[3], overrideDAGParams.MergeDepth-1, true) + }) +} diff --git a/testing/integration/integration.go b/testing/integration/integration.go new file mode 100644 index 0000000..6524145 --- /dev/null +++ b/testing/integration/integration.go @@ -0,0 +1,4 @@ +package integration + +// Because of a bug in Go coverage fails if you have packages with test files only. See https://github.com/golang/go/issues/27333 +// So this is a dummy non-test go file in the package. diff --git a/testing/integration/log_test.go b/testing/integration/log_test.go new file mode 100644 index 0000000..ba95323 --- /dev/null +++ b/testing/integration/log_test.go @@ -0,0 +1,14 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Copyright (c) 2017 The Decred developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package integration + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" + "github.com/spectre-project/spectred/util/panics" +) + +var log = logger.RegisterSubSystem("INTG") +var spawn = panics.GoroutineWrapperFunc(log) diff --git a/testing/integration/main_test.go b/testing/integration/main_test.go new file mode 100644 index 0000000..161fec9 --- /dev/null +++ b/testing/integration/main_test.go @@ -0,0 +1,15 @@ +package integration + +import ( + "os" + "testing" + + "github.com/spectre-project/spectred/infrastructure/logger" +) + +func TestMain(m *testing.M) { + logger.SetLogLevels(logger.LevelDebug) + logger.InitLogStdout(logger.LevelDebug) + + os.Exit(m.Run()) +} diff --git a/testing/integration/mining_test.go b/testing/integration/mining_test.go new file mode 100644 index 0000000..87f490f --- /dev/null +++ b/testing/integration/mining_test.go @@ -0,0 +1,33 @@ +package integration + +import ( + "math/rand" + "testing" + "time" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/mining" +) + +func mineNextBlock(t *testing.T, harness *appHarness) *externalapi.DomainBlock { + blockTemplate, err := harness.rpcClient.GetBlockTemplate(harness.miningAddress, "integration") + if err != nil { + t.Fatalf("Error getting block template: %+v", err) + } + + block, err := appmessage.RPCBlockToDomainBlock(blockTemplate.Block) + if err != nil { + t.Fatalf("Error converting block: %s", err) + } + + rd := rand.New(rand.NewSource(time.Now().UnixNano())) + mining.SolveBlock(block, rd) + + _, err = harness.rpcClient.SubmitBlockAlsoIfNonDAA(block) + if err != nil { + t.Fatalf("Error submitting block: %s", err) + } + + return block +} diff --git a/testing/integration/notifications_test.go b/testing/integration/notifications_test.go new file mode 100644 index 0000000..e528382 --- /dev/null +++ 
b/testing/integration/notifications_test.go @@ -0,0 +1,14 @@ +package integration + +import ( + "testing" + + "github.com/spectre-project/spectred/app/appmessage" +) + +func setOnBlockAddedHandler(t *testing.T, harness *appHarness, handler func(notification *appmessage.BlockAddedNotificationMessage)) { + err := harness.rpcClient.RegisterForBlockAddedNotifications(handler) + if err != nil { + t.Fatalf("Error from RegisterForBlockAddedNotifications: %s", err) + } +} diff --git a/testing/integration/rpc_test.go b/testing/integration/rpc_test.go new file mode 100644 index 0000000..550bb8b --- /dev/null +++ b/testing/integration/rpc_test.go @@ -0,0 +1,116 @@ +package integration + +import ( + "runtime" + "testing" + "time" + + "github.com/spectre-project/spectred/infrastructure/config" + + "github.com/spectre-project/spectred/infrastructure/network/rpcclient" +) + +const rpcTimeout = 10 * time.Second + +type testRPCClient struct { + *rpcclient.RPCClient +} + +func newTestRPCClient(rpcAddress string) (*testRPCClient, error) { + rpcClient, err := rpcclient.NewRPCClient(rpcAddress) + if err != nil { + return nil, err + } + rpcClient.SetTimeout(rpcTimeout) + + return &testRPCClient{ + RPCClient: rpcClient, + }, nil +} + +func connectAndClose(rpcAddress string) error { + client, err := rpcclient.NewRPCClient(rpcAddress) + if err != nil { + return err + } + defer client.Close() + return nil +} + +func TestRPCClientGoroutineLeak(t *testing.T) { + _, teardown := setupHarness(t, &harnessParams{ + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + }) + defer teardown() + numGoroutinesBefore := runtime.NumGoroutine() + for i := 1; i < 100; i++ { + err := connectAndClose(rpcAddress1) + if err != nil { + t.Fatalf("Failed to set up an RPC client: %s", err) + } + time.Sleep(10 * time.Millisecond) + if runtime.NumGoroutine() > numGoroutinesBefore+10 { + t.Fatalf("Number of goroutines is increasing 
for each RPC client open (%d -> %d), which indicates a memory leak", + numGoroutinesBefore, runtime.NumGoroutine()) + } + } +} + +func TestRPCMaxInboundConnections(t *testing.T) { + harness, teardown := setupHarness(t, &harnessParams{ + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + }) + defer teardown() + + // Close the default RPC client so that it won't interfere with the test + err := harness.rpcClient.Close() + if err != nil { + t.Fatalf("Failed to close the default harness RPCClient: %s", err) + } + + // Connect `RPCMaxInboundConnections` clients. We expect this to succeed immediately + rpcClients := []*testRPCClient{} + doneChan := make(chan error) + go func() { + for i := 0; i < config.DefaultMaxRPCClients; i++ { + rpcClient, err := newTestRPCClient(harness.rpcAddress) + if err != nil { + doneChan <- err + } + rpcClients = append(rpcClients, rpcClient) + } + doneChan <- nil + }() + select { + case err = <-doneChan: + if err != nil { + t.Fatalf("newTestRPCClient: %s", err) + } + case <-time.After(time.Second * 5): + t.Fatalf("Timeout for connecting %d RPC connections elapsed", config.DefaultMaxRPCClients) + } + + // Try to connect another client. 
We expect this to fail + // We set a timeout to account for reconnection mechanisms + go func() { + rpcClient, err := newTestRPCClient(harness.rpcAddress) + if err != nil { + doneChan <- err + } + rpcClients = append(rpcClients, rpcClient) + doneChan <- nil + }() + select { + case err = <-doneChan: + if err == nil { + t.Fatalf("newTestRPCClient unexpectedly succeeded") + } + case <-time.After(time.Second * 15): + } +} diff --git a/testing/integration/selected_parent_chain_test.go b/testing/integration/selected_parent_chain_test.go new file mode 100644 index 0000000..ffa7217 --- /dev/null +++ b/testing/integration/selected_parent_chain_test.go @@ -0,0 +1,106 @@ +package integration + +import ( + "testing" + + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" +) + +func TestVirtualSelectedParentChain(t *testing.T) { + // Setup a couple of spectred instances + spectred1, spectred2, _, teardown := standardSetup(t) + defer teardown() + + // Register to virtual selected parent chain changes + onVirtualSelectedParentChainChangedChan := make(chan *appmessage.VirtualSelectedParentChainChangedNotificationMessage) + err := spectred1.rpcClient.RegisterForVirtualSelectedParentChainChangedNotifications(true, + func(notification *appmessage.VirtualSelectedParentChainChangedNotificationMessage) { + onVirtualSelectedParentChainChangedChan <- notification + }) + if err != nil { + t.Fatalf("Failed to register for virtual selected parent chain change notifications: %s", err) + } + + // In spectred1, mine a chain over the genesis and make sure + // each chain changed notifications contains only one entry + // in `added` and nothing in `removed` + chain1TipHash := consensushashing.BlockHash(spectred1.config.NetParams().GenesisBlock) + chain1TipHashString := chain1TipHash.String() + const blockAmountToMine = 10 + for i := 0; i < 
blockAmountToMine; i++ { + minedBlock := mineNextBlock(t, spectred1) + notification := <-onVirtualSelectedParentChainChangedChan + if len(notification.RemovedChainBlockHashes) > 0 { + t.Fatalf("RemovedChainBlockHashes is unexpectedly not empty") + } + if len(notification.AddedChainBlockHashes) != 1 { + t.Fatalf("Unexpected length of AddedChainBlockHashes. Want: %d, got: %d", + 1, len(notification.AddedChainBlockHashes)) + } + + minedBlockHash := consensushashing.BlockHash(minedBlock) + minedBlockHashString := minedBlockHash.String() + if minedBlockHashString != notification.AddedChainBlockHashes[0] { + t.Fatalf("Unexpected block hash in AddedChainBlockHashes. Want: %s, got: %s", + minedBlockHashString, notification.AddedChainBlockHashes[0]) + } + chain1TipHashString = minedBlockHashString + } + + // In spectred2, mine a different chain of `blockAmountToMine` + 1 + // blocks over the genesis + var chain2Tip *externalapi.DomainBlock + for i := 0; i < blockAmountToMine+1; i++ { + chain2Tip = mineNextBlock(t, spectred2) + } + + // Connect the two spectreds. This should trigger sync + // between the two nodes + connect(t, spectred1, spectred2) + + chain2TipHash := consensushashing.BlockHash(chain2Tip) + chain2TipHashString := chain2TipHash.String() + + // For the first `blockAmountToMine - 1` blocks we don't expect + // the chain to change at all, thus there will be no notifications + + // Either the next block or the one after it will cause a reorg + reorgNotification := <-onVirtualSelectedParentChainChangedChan + + // Make sure that the reorg notification contains exactly + // `blockAmountToMine` blocks in its `removed` + if len(reorgNotification.RemovedChainBlockHashes) != blockAmountToMine { + t.Fatalf("Unexpected length of reorgNotification.RemovedChainBlockHashes. 
Want: %d, got: %d", + blockAmountToMine, len(reorgNotification.RemovedChainBlockHashes)) + } + + // Get the virtual selected parent chain from the tip of + // the first chain + virtualSelectedParentChainFromChain1Tip, err := spectred1.rpcClient.GetVirtualSelectedParentChainFromBlock( + chain1TipHashString, true) + if err != nil { + t.Fatalf("GetVirtualSelectedParentChainFromBlock failed: %s", err) + } + + // Make sure that `blockAmountToMine` blocks were removed + // and `blockAmountToMine + 1` blocks were added + if len(virtualSelectedParentChainFromChain1Tip.RemovedChainBlockHashes) != blockAmountToMine { + t.Fatalf("Unexpected length of virtualSelectedParentChainFromChain1Tip.RemovedChainBlockHashes. Want: %d, got: %d", + blockAmountToMine, len(virtualSelectedParentChainFromChain1Tip.RemovedChainBlockHashes)) + } + if len(virtualSelectedParentChainFromChain1Tip.AddedChainBlockHashes) != blockAmountToMine+1 { + t.Fatalf("Unexpected length of virtualSelectedParentChainFromChain1Tip.AddedChainBlockHashes. Want: %d, got: %d", + blockAmountToMine+1, len(virtualSelectedParentChainFromChain1Tip.AddedChainBlockHashes)) + } + + // Make sure that the last block in `added` is the tip + // of chain2 + lastAddedChainBlock := virtualSelectedParentChainFromChain1Tip.AddedChainBlockHashes[len(virtualSelectedParentChainFromChain1Tip.AddedChainBlockHashes)-1] + if lastAddedChainBlock != chain2TipHashString { + t.Fatalf("Unexpected last added chain block. 
Want: %s, got: %s", + chain2TipHashString, lastAddedChainBlock) + } +} diff --git a/testing/integration/setup_test.go b/testing/integration/setup_test.go new file mode 100644 index 0000000..6c343ff --- /dev/null +++ b/testing/integration/setup_test.go @@ -0,0 +1,140 @@ +package integration + +import ( + "path/filepath" + "testing" + + "github.com/spectre-project/spectred/domain/dagconfig" + + "github.com/spectre-project/spectred/infrastructure/db/database/ldb" + + "github.com/spectre-project/spectred/infrastructure/db/database" + + "github.com/spectre-project/spectred/app" + "github.com/spectre-project/spectred/infrastructure/config" +) + +type appHarness struct { + app *app.ComponentManager + rpcClient *testRPCClient + p2pAddress string + rpcAddress string + miningAddress string + miningAddressPrivateKey string + config *config.Config + database database.Database + utxoIndex bool + overrideDAGParams *dagconfig.Params +} + +type harnessParams struct { + p2pAddress string + rpcAddress string + miningAddress string + miningAddressPrivateKey string + utxoIndex bool + overrideDAGParams *dagconfig.Params + protocolVersion uint32 +} + +// setupHarness creates a single appHarness with given parameters +func setupHarness(t *testing.T, params *harnessParams) (harness *appHarness, teardownFunc func()) { + harness = &appHarness{ + p2pAddress: params.p2pAddress, + rpcAddress: params.rpcAddress, + miningAddress: params.miningAddress, + miningAddressPrivateKey: params.miningAddressPrivateKey, + utxoIndex: params.utxoIndex, + overrideDAGParams: params.overrideDAGParams, + } + + setConfig(t, harness, params.protocolVersion) + setDatabaseContext(t, harness) + setApp(t, harness) + harness.app.Start() + setRPCClient(t, harness) + + return harness, func() { + teardownHarness(t, harness) + } +} + +// setupHarnesses creates multiple appHarnesses, according to number of parameters passed +func setupHarnesses(t *testing.T, harnessesParams []*harnessParams) (harnesses []*appHarness, 
teardownFunc func()) { + var teardowns []func() + for _, params := range harnessesParams { + harness, teardownFunc := setupHarness(t, params) + harnesses = append(harnesses, harness) + teardowns = append(teardowns, teardownFunc) + } + + return harnesses, func() { + for _, teardownFunc := range teardowns { + teardownFunc() + } + } +} + +// standardSetup creates a standard setup of 3 appHarnesses that should work for most tests +func standardSetup(t *testing.T) (appHarness1, appHarness2, appHarness3 *appHarness, teardownFunc func()) { + harnesses, teardown := setupHarnesses(t, []*harnessParams{ + { + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + }, + { + p2pAddress: p2pAddress2, + rpcAddress: rpcAddress2, + miningAddress: miningAddress2, + miningAddressPrivateKey: miningAddress2PrivateKey, + }, { + p2pAddress: p2pAddress3, + rpcAddress: rpcAddress3, + miningAddress: miningAddress3, + miningAddressPrivateKey: miningAddress3PrivateKey, + }, + }) + + return harnesses[0], harnesses[1], harnesses[2], teardown +} + +func setRPCClient(t *testing.T, harness *appHarness) { + var err error + harness.rpcClient, err = newTestRPCClient(harness.rpcAddress) + if err != nil { + t.Fatalf("Error getting RPC client %+v", err) + } +} + +func teardownHarness(t *testing.T, harness *appHarness) { + harness.rpcClient.Close() + harness.app.Stop() + + err := harness.database.Close() + if err != nil { + t.Errorf("Error closing database context: %+v", err) + } +} + +func setApp(t *testing.T, harness *appHarness) { + var err error + harness.app, err = app.NewComponentManager(harness.config, harness.database, make(chan struct{})) + if err != nil { + t.Fatalf("Error creating app: %+v", err) + } +} + +func setDatabaseContext(t *testing.T, harness *appHarness) { + var err error + harness.database, err = openDB(harness.config) + if err != nil { + t.Fatalf("Error openning database: %+v", err) + } +} + +func 
openDB(cfg *config.Config) (database.Database, error) { + dbPath := filepath.Join(cfg.AppDir, "db") + return ldb.NewLevelDB(dbPath, 8) +} diff --git a/testing/integration/tx_relay_test.go b/testing/integration/tx_relay_test.go new file mode 100644 index 0000000..789110c --- /dev/null +++ b/testing/integration/tx_relay_test.go @@ -0,0 +1,177 @@ +package integration + +import ( + "encoding/hex" + "strings" + "testing" + "time" + + "github.com/spectre-project/spectred/app/protocol/flowcontext" + + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/util" +) + +func TestTxRelay(t *testing.T) { + payer, mediator, payee, teardown := standardSetup(t) + defer teardown() + + // Connect nodes in chain: payer <--> mediator <--> payee + // So that payee doesn't directly get transactions from payer + connect(t, payer, mediator) + connect(t, mediator, payee) + + payeeBlockAddedChan := make(chan *appmessage.RPCBlockHeader) + setOnBlockAddedHandler(t, payee, func(notification *appmessage.BlockAddedNotificationMessage) { + payeeBlockAddedChan <- notification.Block.Header + }) + // skip the first block because it's paying to genesis script + mineNextBlock(t, payer) + waitForPayeeToReceiveBlock(t, payeeBlockAddedChan) + // use the second block to get money to pay with + secondBlock := mineNextBlock(t, payer) + waitForPayeeToReceiveBlock(t, payeeBlockAddedChan) + + // Mine BlockCoinbaseMaturity more blocks for our money to mature + for i := uint64(0); i < 
payer.config.ActiveNetParams.BlockCoinbaseMaturity; i++ { + mineNextBlock(t, payer) + waitForPayeeToReceiveBlock(t, payeeBlockAddedChan) + } + + // Sleep for `TransactionIDPropagationInterval` to make sure that our transaction will + // be propagated + time.Sleep(flowcontext.TransactionIDPropagationInterval) + + msgTx := generateTx(t, secondBlock.Transactions[transactionhelper.CoinbaseTransactionIndex], payer, payee) + domainTransaction := appmessage.MsgTxToDomainTransaction(msgTx) + rpcTransaction := appmessage.DomainTransactionToRPCTransaction(domainTransaction) + response, err := payer.rpcClient.SubmitTransaction(rpcTransaction, false) + if err != nil { + t.Fatalf("Error submitting transaction: %+v", err) + } + txID := response.TransactionID + + txAddedToMempoolChan := make(chan struct{}) + + mempoolAddressQuery := []string{payee.miningAddress, payer.miningAddress} + + spawn("TestTxRelay-WaitForTransactionPropagation", func() { + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + + for range ticker.C { + + getMempoolEntryResponse, err := payee.rpcClient.GetMempoolEntry(txID, true, false) + if err != nil { + if strings.Contains(err.Error(), "not found") { + continue + } + + t.Fatalf("Error getting mempool entry: %+v", err) + } + mempoolEntry := getMempoolEntryResponse.Entry + if mempoolEntry.IsOrphan { + t.Fatalf("transaction %s is an orphan, although it shouldn't be", mempoolEntry.Transaction.VerboseData.TransactionID) + } + + getMempoolEntriesByAddressesResponse, err := payee.rpcClient.GetMempoolEntriesByAddresses(mempoolAddressQuery, true, false) + if err != nil { + t.Fatalf("Error getting mempool entry: %+v", err) + } + for _, mempoolEntryByAddress := range getMempoolEntriesByAddressesResponse.Entries { + if payee.miningAddress == mempoolEntryByAddress.Address { + if len(mempoolEntryByAddress.Sending) > 1 { + t.Fatal("Error payee is sending") + } + if len(mempoolEntryByAddress.Receiving) < 1 { + t.Fatal("Error payee is not reciving") + } 
+ } + if payer.miningAddress == mempoolEntryByAddress.Address { + if len(mempoolEntryByAddress.Sending) < 1 { + t.Fatal("Error payer is not sending") + } + if len(mempoolEntryByAddress.Receiving) > 1 { + t.Fatal("Error payer is reciving") + } + } + for _, mempoolEntry := range mempoolEntryByAddress.Receiving { + if mempoolEntry.IsOrphan { + t.Fatalf("transaction %s is an orphan, although it shouldn't be", mempoolEntry.Transaction.VerboseData.TransactionID) + } + } + for _, mempoolEntry := range mempoolEntryByAddress.Sending { + if mempoolEntry.IsOrphan { + t.Fatalf("transaction %s is an orphan, although it shouldn't be", mempoolEntry.Transaction.VerboseData.TransactionID) + } + } + } + + close(txAddedToMempoolChan) + return + } + }) + + select { + case <-txAddedToMempoolChan: + case <-time.After(defaultTimeout): + t.Fatalf("Timeout waiting for transaction to be accepted into mempool") + } +} + +func waitForPayeeToReceiveBlock(t *testing.T, payeeBlockAddedChan chan *appmessage.RPCBlockHeader) { + select { + case <-payeeBlockAddedChan: + case <-time.After(defaultTimeout): + t.Fatalf("Timeout waiting for block added") + } +} + +func generateTx(t *testing.T, firstBlockCoinbase *externalapi.DomainTransaction, payer, payee *appHarness) *appmessage.MsgTx { + txIns := make([]*appmessage.TxIn, 1) + txIns[0] = appmessage.NewTxIn(appmessage.NewOutpoint(consensushashing.TransactionID(firstBlockCoinbase), 0), []byte{}, 0, 1) + + payeeAddress, err := util.DecodeAddress(payee.miningAddress, util.Bech32PrefixSpectreSim) + if err != nil { + t.Fatalf("Error decoding payeeAddress: %+v", err) + } + toScript, err := txscript.PayToAddrScript(payeeAddress) + if err != nil { + t.Fatalf("Error generating script: %+v", err) + } + + txOuts := []*appmessage.TxOut{appmessage.NewTxOut(firstBlockCoinbase.Outputs[0].Value-1000, toScript)} + + msgTx := appmessage.NewNativeMsgTx(constants.MaxTransactionVersion, txIns, txOuts) + + privateKeyBytes, err := 
hex.DecodeString(payer.miningAddressPrivateKey) + if err != nil { + t.Fatalf("Error decoding private key: %+v", err) + } + privateKey, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes) + if err != nil { + t.Fatalf("Error deserializing private key: %+v", err) + } + + fromScript := firstBlockCoinbase.Outputs[0].ScriptPublicKey + fromAmount := firstBlockCoinbase.Outputs[0].Value + + tx := appmessage.MsgTxToDomainTransaction(msgTx) + tx.Inputs[0].UTXOEntry = utxo.NewUTXOEntry(fromAmount, fromScript, false, 500) + signatureScript, err := txscript.SignatureScript(tx, 0, consensushashing.SigHashAll, privateKey, + &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("Error signing transaction: %+v", err) + } + msgTx.TxIn[0].SignatureScript = signatureScript + + return msgTx +} diff --git a/testing/integration/utxo_index_test.go b/testing/integration/utxo_index_test.go new file mode 100644 index 0000000..161563e --- /dev/null +++ b/testing/integration/utxo_index_test.go @@ -0,0 +1,228 @@ +package integration + +import ( + "encoding/hex" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/utxo" + + "github.com/spectre-project/go-secp256k1" + "github.com/spectre-project/spectred/app/appmessage" + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/consensushashing" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionid" + "github.com/spectre-project/spectred/domain/consensus/utils/txscript" + "github.com/spectre-project/spectred/util" +) + +func TestUTXOIndex(t *testing.T) { + // Setup a single spectred instance + harnessParams := &harnessParams{ + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + utxoIndex: true, + } + spectred, teardown := 
setupHarness(t, harnessParams) + defer teardown() + + // skip the first block because it's paying to genesis script, + // which contains no outputs + mineNextBlock(t, spectred) + + // Register for UTXO changes + const blockAmountToMine = 100 + onUTXOsChangedChan := make(chan *appmessage.UTXOsChangedNotificationMessage, blockAmountToMine) + err := spectred.rpcClient.RegisterForUTXOsChangedNotifications([]string{miningAddress1}, func( + notification *appmessage.UTXOsChangedNotificationMessage) { + + onUTXOsChangedChan <- notification + }) + if err != nil { + t.Fatalf("Failed to register for UTXO change notifications: %s", err) + } + + // Mine some blocks + for i := 0; i < blockAmountToMine; i++ { + mineNextBlock(t, spectred) + } + + // check if rewards correspond to circulating supply. + getCoinSupplyResponse, err := spectred.rpcClient.GetCoinSupply() + if err != nil { + t.Fatalf("Error Retriving Coin supply: %s", err) + } + + rewardsMinedSompi := uint64(blockAmountToMine * constants.SompiPerSpectre * 15) + getBlockCountResponse, err := spectred.rpcClient.GetBlockCount() + if err != nil { + t.Fatalf("Error Retriving BlockCount: %s", err) + } + rewardsMinedViaBlockCountSompi := uint64( + (getBlockCountResponse.BlockCount - 2) * constants.SompiPerSpectre * 15, // -2 because of genesis and virtual. 
+ ) + + if getCoinSupplyResponse.CirculatingSompi != rewardsMinedSompi { + t.Fatalf("Error: Circulating supply Mismatch - Circulating Sompi: %d Sompi Mined: %d", getCoinSupplyResponse.CirculatingSompi, rewardsMinedSompi) + } else if getCoinSupplyResponse.CirculatingSompi != rewardsMinedViaBlockCountSompi { + t.Fatalf("Error: Circulating supply Mismatch - Circulating Sompi: %d Sompi Mined via Block count: %d", getCoinSupplyResponse.CirculatingSompi, rewardsMinedViaBlockCountSompi) + } + + // Collect the UTXO and make sure there's nothing in Removed + // Note that we expect blockAmountToMine-1 messages because + // the last block won't be accepted until the next block is + // mined + var notificationEntries []*appmessage.UTXOsByAddressesEntry + for i := 0; i < blockAmountToMine; i++ { + notification := <-onUTXOsChangedChan + if len(notification.Removed) > 0 { + t.Fatalf("Unexpectedly received that a UTXO has been removed") + } + for _, added := range notification.Added { + notificationEntries = append(notificationEntries, added) + } + } + + // Submit a few transactions that spends some UTXOs + const transactionAmountToSpend = 5 + for i := 0; i < transactionAmountToSpend; i++ { + rpcTransaction := buildTransactionForUTXOIndexTest(t, notificationEntries[i]) + _, err = spectred.rpcClient.SubmitTransaction(rpcTransaction, false) + if err != nil { + t.Fatalf("Error submitting transaction: %s", err) + } + } + + // Mine a block to include the above transactions + mineNextBlock(t, spectred) + + // Make sure this block removed the UTXOs we spent + notification := <-onUTXOsChangedChan + if len(notification.Removed) != transactionAmountToSpend { + t.Fatalf("Unexpected amount of removed UTXOs. 
Want: %d, got: %d", + transactionAmountToSpend, len(notification.Removed)) + } + for i := 0; i < transactionAmountToSpend; i++ { + entry := notificationEntries[i] + + found := false + for _, removed := range notification.Removed { + if *removed.Outpoint == *entry.Outpoint { + found = true + break + } + } + if !found { + t.Fatalf("Missing entry amongst removed UTXOs: %s:%d", + entry.Outpoint.TransactionID, entry.Outpoint.Index) + } + } + for _, added := range notification.Added { + notificationEntries = append(notificationEntries, added) + } + + // Remove the UTXOs we spent from `notificationEntries` + notificationEntries = notificationEntries[transactionAmountToSpend:] + + // Get all the UTXOs and make sure the response is equivalent + // to the data collected via notifications + utxosByAddressesResponse, err := spectred.rpcClient.GetUTXOsByAddresses([]string{miningAddress1}) + if err != nil { + t.Fatalf("Failed to get UTXOs: %s", err) + } + if len(notificationEntries) != len(utxosByAddressesResponse.Entries) { + t.Fatalf("Unexpected amount of UTXOs. Want: %d, got: %d", + len(notificationEntries), len(utxosByAddressesResponse.Entries)) + } + for _, notificationEntry := range notificationEntries { + var foundResponseEntry *appmessage.UTXOsByAddressesEntry + for _, responseEntry := range utxosByAddressesResponse.Entries { + if *notificationEntry.Outpoint == *responseEntry.Outpoint { + foundResponseEntry = responseEntry + break + } + } + if foundResponseEntry == nil { + t.Fatalf("Missing entry in UTXOs response: %s:%d", + notificationEntry.Outpoint.TransactionID, notificationEntry.Outpoint.Index) + } + if notificationEntry.UTXOEntry.Amount != foundResponseEntry.UTXOEntry.Amount { + t.Fatalf("Unexpected UTXOEntry for outpoint %s:%d. 
Want: %+v, got: %+v", + notificationEntry.Outpoint.TransactionID, notificationEntry.Outpoint.Index, + notificationEntry.UTXOEntry, foundResponseEntry.UTXOEntry) + } + if notificationEntry.UTXOEntry.BlockDAAScore != foundResponseEntry.UTXOEntry.BlockDAAScore { + t.Fatalf("Unexpected UTXOEntry for outpoint %s:%d. Want: %+v, got: %+v", + notificationEntry.Outpoint.TransactionID, notificationEntry.Outpoint.Index, + notificationEntry.UTXOEntry, foundResponseEntry.UTXOEntry) + } + if notificationEntry.UTXOEntry.IsCoinbase != foundResponseEntry.UTXOEntry.IsCoinbase { + t.Fatalf("Unexpected UTXOEntry for outpoint %s:%d. Want: %+v, got: %+v", + notificationEntry.Outpoint.TransactionID, notificationEntry.Outpoint.Index, + notificationEntry.UTXOEntry, foundResponseEntry.UTXOEntry) + } + if *notificationEntry.UTXOEntry.ScriptPublicKey != *foundResponseEntry.UTXOEntry.ScriptPublicKey { + t.Fatalf("Unexpected UTXOEntry for outpoint %s:%d. Want: %+v, got: %+v", + notificationEntry.Outpoint.TransactionID, notificationEntry.Outpoint.Index, + notificationEntry.UTXOEntry, foundResponseEntry.UTXOEntry) + } + } +} + +func buildTransactionForUTXOIndexTest(t *testing.T, entry *appmessage.UTXOsByAddressesEntry) *appmessage.RPCTransaction { + transactionIDBytes, err := hex.DecodeString(entry.Outpoint.TransactionID) + if err != nil { + t.Fatalf("Error decoding transaction ID: %s", err) + } + transactionID, err := transactionid.FromBytes(transactionIDBytes) + if err != nil { + t.Fatalf("Error decoding transaction ID: %s", err) + } + + txIns := make([]*appmessage.TxIn, 1) + txIns[0] = appmessage.NewTxIn(appmessage.NewOutpoint(transactionID, entry.Outpoint.Index), []byte{}, 0, 1) + + payeeAddress, err := util.DecodeAddress(miningAddress1, util.Bech32PrefixSpectreSim) + if err != nil { + t.Fatalf("Error decoding payeeAddress: %+v", err) + } + toScript, err := txscript.PayToAddrScript(payeeAddress) + if err != nil { + t.Fatalf("Error generating script: %+v", err) + } + + txOuts := 
[]*appmessage.TxOut{appmessage.NewTxOut(entry.UTXOEntry.Amount-1000, toScript)} + + fromScriptCode, err := hex.DecodeString(entry.UTXOEntry.ScriptPublicKey.Script) + if err != nil { + t.Fatalf("Error decoding script public key: %s", err) + } + fromScript := &externalapi.ScriptPublicKey{Script: fromScriptCode, Version: 0} + fromAmount := entry.UTXOEntry.Amount + + msgTx := appmessage.NewNativeMsgTx(constants.MaxTransactionVersion, txIns, txOuts) + + privateKeyBytes, err := hex.DecodeString(miningAddress1PrivateKey) + if err != nil { + t.Fatalf("Error decoding private key: %+v", err) + } + privateKey, err := secp256k1.DeserializeSchnorrPrivateKeyFromSlice(privateKeyBytes) + if err != nil { + t.Fatalf("Error deserializing private key: %+v", err) + } + + tx := appmessage.MsgTxToDomainTransaction(msgTx) + tx.Inputs[0].UTXOEntry = utxo.NewUTXOEntry(fromAmount, fromScript, false, 500) + + signatureScript, err := txscript.SignatureScript(tx, 0, consensushashing.SigHashAll, privateKey, + &consensushashing.SighashReusedValues{}) + if err != nil { + t.Fatalf("Error signing transaction: %+v", err) + } + msgTx.TxIn[0].SignatureScript = signatureScript + + domainTransaction := appmessage.MsgTxToDomainTransaction(msgTx) + return appmessage.DomainTransactionToRPCTransaction(domainTransaction) +} diff --git a/testing/integration/virtual_selected_parent_blue_score_test.go b/testing/integration/virtual_selected_parent_blue_score_test.go new file mode 100644 index 0000000..7a5a15e --- /dev/null +++ b/testing/integration/virtual_selected_parent_blue_score_test.go @@ -0,0 +1,78 @@ +package integration + +import ( + "testing" + + "github.com/spectre-project/spectred/app/appmessage" +) + +func TestVirtualSelectedParentBlueScoreAndVirtualDAAScore(t *testing.T) { + // Setup a single spectred instance + harnessParams := &harnessParams{ + p2pAddress: p2pAddress1, + rpcAddress: rpcAddress1, + miningAddress: miningAddress1, + miningAddressPrivateKey: miningAddress1PrivateKey, + utxoIndex: true, 
+ } + spectred, teardown := setupHarness(t, harnessParams) + defer teardown() + + // Make sure that the initial selected parent blue score is 0 + response, err := spectred.rpcClient.GetVirtualSelectedParentBlueScore() + if err != nil { + t.Fatalf("Error getting virtual selected parent blue score: %s", err) + } + if response.BlueScore != 0 { + t.Fatalf("Unexpected virtual selected parent blue score. Want: %d, got: %d", + 0, response.BlueScore) + } + + // Register to virtual selected parent blue score changes + onVirtualSelectedParentBlueScoreChangedChan := make(chan *appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage) + err = spectred.rpcClient.RegisterForVirtualSelectedParentBlueScoreChangedNotifications( + func(notification *appmessage.VirtualSelectedParentBlueScoreChangedNotificationMessage) { + onVirtualSelectedParentBlueScoreChangedChan <- notification + }) + if err != nil { + t.Fatalf("Failed to register for virtual selected parent "+ + "blue score change notifications: %s", err) + } + + // Register to virtual DAA score changes + onVirtualDaaScoreChangedChan := make(chan *appmessage.VirtualDaaScoreChangedNotificationMessage) + err = spectred.rpcClient.RegisterForVirtualDaaScoreChangedNotifications( + func(notification *appmessage.VirtualDaaScoreChangedNotificationMessage) { + onVirtualDaaScoreChangedChan <- notification + }) + if err != nil { + t.Fatalf("Failed to register for virtual DAA score change notifications: %s", err) + } + + // Mine some blocks and make sure that the notifications + // report correct values + const blockAmountToMine = 100 + for i := 0; i < blockAmountToMine; i++ { + mineNextBlock(t, spectred) + blueScoreChangedNotification := <-onVirtualSelectedParentBlueScoreChangedChan + if blueScoreChangedNotification.VirtualSelectedParentBlueScore != 1+uint64(i) { + t.Fatalf("Unexpected virtual selected parent blue score. 
Want: %d, got: %d", + 1+uint64(i), blueScoreChangedNotification.VirtualSelectedParentBlueScore) + } + daaScoreChangedNotification := <-onVirtualDaaScoreChangedChan + if daaScoreChangedNotification.VirtualDaaScore > 1+uint64(i) { + t.Fatalf("Unexpected virtual DAA score. Want: %d, got: %d", + 1+uint64(i), daaScoreChangedNotification.VirtualDaaScore) + } + } + + // Make sure that the blue score after all that mining is as expected + response, err = spectred.rpcClient.GetVirtualSelectedParentBlueScore() + if err != nil { + t.Fatalf("Error getting virtual selected parent blue score: %s", err) + } + if response.BlueScore != blockAmountToMine { + t.Fatalf("Unexpected virtual selected parent blue score. Want: %d, got: %d", + blockAmountToMine, response.BlueScore) + } +} diff --git a/util/README.md b/util/README.md new file mode 100644 index 0000000..7cafa43 --- /dev/null +++ b/util/README.md @@ -0,0 +1,7 @@ +# util + +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) +[![GoDoc](http://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/spectre-project/util) + +Package util provides spectre-specific convenience functions and +types. diff --git a/util/address.go b/util/address.go new file mode 100644 index 0000000..eede069 --- /dev/null +++ b/util/address.go @@ -0,0 +1,351 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package util + +import ( + "github.com/pkg/errors" + "golang.org/x/crypto/blake2b" + + "github.com/spectre-project/spectred/util/bech32" +) + +var ( + // ErrUnknownAddressType describes an error where an address can not + // decoded as a specific address type due to the string encoding + // begining with an identifier byte unknown to any standard or + // registered (via dagconfig.Register) network. 
+ ErrUnknownAddressType = errors.New("unknown address type") +) + +const ( + // PubKey addresses always have the version byte set to 0. + pubKeyAddrID = 0x00 + + // PubKey addresses always have the version byte set to 1. + pubKeyECDSAAddrID = 0x01 + + // ScriptHash addresses always have the version byte set to 8. + scriptHashAddrID = 0x08 +) + +// Bech32Prefix is the human-readable prefix for a Bech32 address. +type Bech32Prefix int + +// Constants that define Bech32 address prefixes. Every network is assigned +// a unique prefix. +const ( + // Unknown/Erroneous prefix + Bech32PrefixUnknown Bech32Prefix = iota + + // Prefix for the main network. + Bech32PrefixSpectre + + // Prefix for the dev network. + Bech32PrefixSpectreDev + + // Prefix for the test network. + Bech32PrefixSpectreTest + + // Prefix for the simulation network. + Bech32PrefixSpectreSim +) + +// Map from strings to Bech32 address prefix constants for parsing purposes. +var stringsToBech32Prefixes = map[string]Bech32Prefix{ + "spectre": Bech32PrefixSpectre, + "spectredev": Bech32PrefixSpectreDev, + "spectretest": Bech32PrefixSpectreTest, + "spectresim": Bech32PrefixSpectreSim, +} + +// ParsePrefix attempts to parse a Bech32 address prefix. +func ParsePrefix(prefixString string) (Bech32Prefix, error) { + prefix, ok := stringsToBech32Prefixes[prefixString] + if !ok { + return Bech32PrefixUnknown, errors.Errorf("could not parse prefix %s", prefixString) + } + + return prefix, nil +} + +// Converts from Bech32 address prefixes to their string values +func (prefix Bech32Prefix) String() string { + for key, value := range stringsToBech32Prefixes { + if prefix == value { + return key + } + } + + return "" +} + +// encodeAddress returns a human-readable payment address given a network prefix +// and a payload which encodes the spectre network and address type. It is used +// in both pay-to-pubkey (P2PK) and pay-to-script-hash (P2SH) address +// encoding. 
+func encodeAddress(prefix Bech32Prefix, payload []byte, version byte) string { + return bech32.Encode(prefix.String(), payload, version) +} + +// Address is an interface type for any type of destination a transaction +// output may spend to. This includes pay-to-pubkey (P2PK) +// and pay-to-script-hash (P2SH). Address is designed to be generic +// enough that other kinds of addresses may be added in the future without +// changing the decoding and encoding API. +type Address interface { + // String returns the string encoding of the transaction output + // destination. + // + // Please note that String differs subtly from EncodeAddress: String + // will return the value as a string without any conversion, while + // EncodeAddress may convert destination types (for example, + // converting pubkeys to P2PK addresses) before encoding as a + // payment address string. + String() string + + // EncodeAddress returns the string encoding of the payment address + // associated with the Address value. See the comment on String + // for how this method differs from String. + EncodeAddress() string + + // ScriptAddress returns the raw bytes of the address to be used + // when inserting the address into a txout's script. + ScriptAddress() []byte + + // Prefix returns the prefix for this address + Prefix() Bech32Prefix + + // IsForPrefix returns whether or not the address is associated with the + // passed spectre network. + IsForPrefix(prefix Bech32Prefix) bool +} + +// DecodeAddress decodes the string encoding of an address and returns +// the Address if addr is a valid encoding for a known address type. 
+// +// If any expectedPrefix except Bech32PrefixUnknown is passed, it is compared to the +// prefix extracted from the address, and if the two do not match - an error is returned +func DecodeAddress(addr string, expectedPrefix Bech32Prefix) (Address, error) { + prefixString, decoded, version, err := bech32.Decode(addr) + if err != nil { + return nil, errors.Errorf("decoded address is of unknown format: %s", err) + } + + prefix, err := ParsePrefix(prefixString) + if err != nil { + return nil, errors.Errorf("decoded address's prefix could not be parsed: %s", err) + } + if expectedPrefix != Bech32PrefixUnknown && expectedPrefix != prefix { + return nil, errors.Errorf("decoded address is of wrong network. Expected %s but got %s", expectedPrefix, + prefix) + } + + switch version { + case pubKeyAddrID: + return newAddressPubKey(prefix, decoded) + case pubKeyECDSAAddrID: + return newAddressPubKeyECDSA(prefix, decoded) + case scriptHashAddrID: + return newAddressScriptHashFromHash(prefix, decoded) + default: + return nil, ErrUnknownAddressType + } +} + +// PublicKeySize is the public key size for a schnorr public key +const PublicKeySize = 32 + +// AddressPublicKey is an Address for a pay-to-pubkey (P2PK) +// transaction. +type AddressPublicKey struct { + prefix Bech32Prefix + publicKey [PublicKeySize]byte +} + +// NewAddressPublicKey returns a new AddressPublicKey. publicKey must be 32 +// bytes. +func NewAddressPublicKey(publicKey []byte, prefix Bech32Prefix) (*AddressPublicKey, error) { + return newAddressPubKey(prefix, publicKey) +} + +// newAddressPubKey is the internal API to create a pubkey address +// with a known leading identifier byte for a network, rather than looking +// it up through its parameters. This is useful when creating a new address +// structure from a string encoding where the identifier byte is already +// known. +func newAddressPubKey(prefix Bech32Prefix, publicKey []byte) (*AddressPublicKey, error) { + // Check for a valid pubkey length. 
+ if len(publicKey) != PublicKeySize { + return nil, errors.Errorf("publicKey must be %d bytes", PublicKeySize) + } + + addr := &AddressPublicKey{prefix: prefix} + copy(addr.publicKey[:], publicKey) + return addr, nil +} + +// EncodeAddress returns the string encoding of a pay-to-pubkey +// address. Part of the Address interface. +func (a *AddressPublicKey) EncodeAddress() string { + return encodeAddress(a.prefix, a.publicKey[:], pubKeyAddrID) +} + +// ScriptAddress returns the bytes to be included in a txout script to pay +// to a pubkey. Part of the Address interface. +func (a *AddressPublicKey) ScriptAddress() []byte { + return a.publicKey[:] +} + +// IsForPrefix returns whether or not the pay-to-pubkey address is associated +// with the passed spectre network. +func (a *AddressPublicKey) IsForPrefix(prefix Bech32Prefix) bool { + return a.prefix == prefix +} + +// Prefix returns the prefix for this address +func (a *AddressPublicKey) Prefix() Bech32Prefix { + return a.prefix +} + +// String returns a human-readable string for the pay-to-pubkey address. +// This is equivalent to calling EncodeAddress, but is provided so the type can +// be used as a fmt.Stringer. +func (a *AddressPublicKey) String() string { + return a.EncodeAddress() +} + +// PublicKeySizeECDSA is the public key size for an ECDSA public key +const PublicKeySizeECDSA = 33 + +// AddressPublicKeyECDSA is an Address for a pay-to-pubkey (P2PK) +// ECDSA transaction. +type AddressPublicKeyECDSA struct { + prefix Bech32Prefix + publicKey [PublicKeySizeECDSA]byte +} + +// NewAddressPublicKeyECDSA returns a new AddressPublicKeyECDSA. publicKey must be 33 +// bytes. 
+func NewAddressPublicKeyECDSA(publicKey []byte, prefix Bech32Prefix) (*AddressPublicKeyECDSA, error) { + return newAddressPubKeyECDSA(prefix, publicKey) +} + +// newAddressPubKeyECDSA is the internal API to create an ECDSA pubkey address +// with a known leading identifier byte for a network, rather than looking +// it up through its parameters. This is useful when creating a new address +// structure from a string encoding where the identifier byte is already +// known. +func newAddressPubKeyECDSA(prefix Bech32Prefix, publicKey []byte) (*AddressPublicKeyECDSA, error) { + // Check for a valid pubkey length. + if len(publicKey) != PublicKeySizeECDSA { + return nil, errors.Errorf("publicKey must be %d bytes", PublicKeySizeECDSA) + } + + addr := &AddressPublicKeyECDSA{prefix: prefix} + copy(addr.publicKey[:], publicKey) + return addr, nil +} + +// EncodeAddress returns the string encoding of a pay-to-pubkey +// address. Part of the Address interface. +func (a *AddressPublicKeyECDSA) EncodeAddress() string { + return encodeAddress(a.prefix, a.publicKey[:], pubKeyECDSAAddrID) +} + +// ScriptAddress returns the bytes to be included in a txout script to pay +// to a pubkey. Part of the Address interface. +func (a *AddressPublicKeyECDSA) ScriptAddress() []byte { + return a.publicKey[:] +} + +// IsForPrefix returns whether or not the pay-to-pubkey address is associated +// with the passed spectre network. +func (a *AddressPublicKeyECDSA) IsForPrefix(prefix Bech32Prefix) bool { + return a.prefix == prefix +} + +// Prefix returns the prefix for this address +func (a *AddressPublicKeyECDSA) Prefix() Bech32Prefix { + return a.prefix +} + +// String returns a human-readable string for the pay-to-pubkey address. +// This is equivalent to calling EncodeAddress, but is provided so the type can +// be used as a fmt.Stringer. 
+func (a *AddressPublicKeyECDSA) String() string { + return a.EncodeAddress() +} + +// AddressScriptHash is an Address for a pay-to-script-publicKey (P2SH) +// transaction. +type AddressScriptHash struct { + prefix Bech32Prefix + hash [blake2b.Size256]byte +} + +// NewAddressScriptHash returns a new AddressScriptHash. +func NewAddressScriptHash(serializedScript []byte, prefix Bech32Prefix) (*AddressScriptHash, error) { + scriptHash := HashBlake2b(serializedScript) + return newAddressScriptHashFromHash(prefix, scriptHash) +} + +// NewAddressScriptHashFromHash returns a new AddressScriptHash. scriptHash +// must be 20 bytes. +func NewAddressScriptHashFromHash(scriptHash []byte, prefix Bech32Prefix) (*AddressScriptHash, error) { + return newAddressScriptHashFromHash(prefix, scriptHash) +} + +// newAddressScriptHashFromHash is the internal API to create a script hash +// address with a known leading identifier byte for a network, rather than +// looking it up through its parameters. This is useful when creating a new +// address structure from a string encoding where the identifer byte is already +// known. +func newAddressScriptHashFromHash(prefix Bech32Prefix, scriptHash []byte) (*AddressScriptHash, error) { + // Check for a valid script hash length. + if len(scriptHash) != blake2b.Size256 { + return nil, errors.Errorf("scriptHash must be %d bytes", blake2b.Size256) + } + + addr := &AddressScriptHash{prefix: prefix} + copy(addr.hash[:], scriptHash) + return addr, nil +} + +// EncodeAddress returns the string encoding of a pay-to-script-hash +// address. Part of the Address interface. +func (a *AddressScriptHash) EncodeAddress() string { + return encodeAddress(a.prefix, a.hash[:], scriptHashAddrID) +} + +// ScriptAddress returns the bytes to be included in a txout script to pay +// to a script hash. Part of the Address interface. 
+func (a *AddressScriptHash) ScriptAddress() []byte { + return a.hash[:] +} + +// IsForPrefix returns whether or not the pay-to-script-hash address is associated +// with the passed spectre network. +func (a *AddressScriptHash) IsForPrefix(prefix Bech32Prefix) bool { + return a.prefix == prefix +} + +// Prefix returns the prefix for this address +func (a *AddressScriptHash) Prefix() Bech32Prefix { + return a.prefix +} + +// String returns a human-readable string for the pay-to-script-hash address. +// This is equivalent to calling EncodeAddress, but is provided so the type can +// be used as a fmt.Stringer. +func (a *AddressScriptHash) String() string { + return a.EncodeAddress() +} + +// HashBlake2b returns the underlying array of the script hash. This can be useful +// when an array is more appropiate than a slice (for example, when used as map +// keys). +func (a *AddressScriptHash) HashBlake2b() *[blake2b.Size256]byte { + return &a.hash +} diff --git a/util/address_test.go b/util/address_test.go new file mode 100644 index 0000000..50b69b6 --- /dev/null +++ b/util/address_test.go @@ -0,0 +1,459 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package util_test + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "testing" + + "golang.org/x/crypto/blake2b" + + "github.com/spectre-project/spectred/util" +) + +func TestAddresses(t *testing.T) { + tests := []struct { + name string + addr string + encoded string + valid bool + result util.Address + f func() (util.Address, error) + passedPrefix util.Bech32Prefix + expectedPrefix util.Bech32Prefix + }{ + // Positive P2PK tests. 
+ { + name: "mainnet p2pk", + addr: "spectre:qr35ennsep3hxfe7lnz5ee7j5jgmkjswsn35ennsep3hxfe7ln35cmeu3evl2", + encoded: "spectre:qr35ennsep3hxfe7lnz5ee7j5jgmkjswsn35ennsep3hxfe7ln35cmeu3evl2", + valid: true, + result: util.TstAddressPubKey( + util.Bech32PrefixSpectre, + [util.PublicKeySize]byte{ + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xc5, 0x4c, 0xe7, 0xd2, 0xa4, 0x91, 0xbb, 0x4a, 0x0e, 0x84, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, + }), + f: func() (util.Address, error) { + publicKey := []byte{ + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xc5, 0x4c, 0xe7, 0xd2, 0xa4, 0x91, 0xbb, 0x4a, 0x0e, 0x84, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c} + return util.NewAddressPublicKey(publicKey, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixUnknown, + expectedPrefix: util.Bech32PrefixSpectre, + }, + { + name: "mainnet p2pk 2", + addr: "spectre:qq80qvqs0lfxuzmt7sz3909ze6camq9d4t35ennsep3hxfe7ln35c6un5lvve", + encoded: "spectre:qq80qvqs0lfxuzmt7sz3909ze6camq9d4t35ennsep3hxfe7ln35c6un5lvve", + valid: true, + result: util.TstAddressPubKey( + util.Bech32PrefixSpectre, + [util.PublicKeySize]byte{ + 0x0e, 0xf0, 0x30, 0x10, 0x7f, 0xd2, 0x6e, 0x0b, 0x6b, 0xf4, + 0x05, 0x12, 0xbc, 0xa2, 0xce, 0xb1, 0xdd, 0x80, 0xad, 0xaa, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, + }), + f: func() (util.Address, error) { + publicKey := []byte{ + 0x0e, 0xf0, 0x30, 0x10, 0x7f, 0xd2, 0x6e, 0x0b, 0x6b, 0xf4, + 0x05, 0x12, 0xbc, 0xa2, 0xce, 0xb1, 0xdd, 0x80, 0xad, 0xaa, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, + } + return util.NewAddressPublicKey(publicKey, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixSpectre, + expectedPrefix: util.Bech32PrefixSpectre, + }, + { + name: "testnet p2pk", + addr: "spectretest:qputx94qseratdmjs0j395mq8u03er0x3l35ennsep3hxfe7ln35cjlr90qqm", + encoded: 
"spectretest:qputx94qseratdmjs0j395mq8u03er0x3l35ennsep3hxfe7ln35cjlr90qqm", + valid: true, + result: util.TstAddressPubKey( + util.Bech32PrefixSpectreTest, + [util.PublicKeySize]byte{ + 0x78, 0xb3, 0x16, 0xa0, 0x86, 0x47, 0xd5, 0xb7, 0x72, 0x83, + 0xe5, 0x12, 0xd3, 0x60, 0x3f, 0x1f, 0x1c, 0x8d, 0xe6, 0x8f, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, + }), + f: func() (util.Address, error) { + publicKey := []byte{ + 0x78, 0xb3, 0x16, 0xa0, 0x86, 0x47, 0xd5, 0xb7, 0x72, 0x83, + 0xe5, 0x12, 0xd3, 0x60, 0x3f, 0x1f, 0x1c, 0x8d, 0xe6, 0x8f, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, + } + return util.NewAddressPublicKey(publicKey, util.Bech32PrefixSpectreTest) + }, + passedPrefix: util.Bech32PrefixSpectreTest, + expectedPrefix: util.Bech32PrefixSpectreTest, + }, + + // ECDSA P2PK tests. + { + name: "mainnet ecdsa p2pk", + addr: "spectre:q835ennsep3hxfe7lnz5ee7j5jgmkjswsn35ennsep3hxfe7ln35e2segr9lxpf", + encoded: "spectre:q835ennsep3hxfe7lnz5ee7j5jgmkjswsn35ennsep3hxfe7ln35e2segr9lxpf", + valid: true, + result: util.TstAddressPubKeyECDSA( + util.Bech32PrefixSpectre, + [util.PublicKeySizeECDSA]byte{ + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xc5, 0x4c, 0xe7, 0xd2, 0xa4, 0x91, 0xbb, 0x4a, 0x0e, 0x84, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, 0xaa, + }), + f: func() (util.Address, error) { + publicKey := []byte{ + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xc5, 0x4c, 0xe7, 0xd2, 0xa4, 0x91, 0xbb, 0x4a, 0x0e, 0x84, + 0xe3, 0x4c, 0xce, 0x70, 0xc8, 0x63, 0x73, 0x27, 0x3e, 0xfc, + 0xe3, 0x4c, 0xaa} + return util.NewAddressPublicKeyECDSA(publicKey, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixUnknown, + expectedPrefix: util.Bech32PrefixSpectre, + }, + + // Negative P2PK tests. 
+ { + name: "p2pk wrong public key length", + addr: "", + valid: false, + f: func() (util.Address, error) { + publicKey := []byte{ + 0x00, 0x0e, 0xf0, 0x30, 0x10, 0x7f, 0xd2, 0x6e, 0x0b, 0x6b, + 0xf4, 0x05, 0x12, 0xbc, 0xa2, 0xce, 0xb1, 0xdd, 0x80, 0xad, + 0xaa} + return util.NewAddressPublicKey(publicKey, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixSpectre, + expectedPrefix: util.Bech32PrefixSpectre, + }, + { + name: "p2pk bad checksum", + addr: "spectre:qr35ennsep3hxfe7lnz5ee7j5jgmkjswss74as46gx", + valid: false, + passedPrefix: util.Bech32PrefixSpectre, + expectedPrefix: util.Bech32PrefixSpectre, + }, + + // Positive P2SH tests. + { + name: "mainnet p2sh", + addr: "spectre:prq20q4qd9ulr044cauyy9wtpeupqpjv67pn2vyc6acly7xqkrjdzdz5l0c8t", + encoded: "spectre:prq20q4qd9ulr044cauyy9wtpeupqpjv67pn2vyc6acly7xqkrjdzdz5l0c8t", + valid: true, + result: util.TstAddressScriptHash( + util.Bech32PrefixSpectre, + [blake2b.Size256]byte{ + 0xc0, 0xa7, 0x82, 0xa0, 0x69, 0x79, 0xf1, 0xbe, + 0xb5, 0xc7, 0x78, 0x42, 0x15, 0xcb, 0x0e, 0x78, + 0x10, 0x06, 0x4c, 0xd7, 0x83, 0x35, 0x30, 0x98, + 0xd7, 0x71, 0xf2, 0x78, 0xc0, 0xb0, 0xe4, 0xd1, + }), + f: func() (util.Address, error) { + script := []byte{ + 0x52, 0x41, 0x04, 0x91, 0xbb, 0xa2, 0x51, 0x09, 0x12, 0xa5, + 0xbd, 0x37, 0xda, 0x1f, 0xb5, 0xb1, 0x67, 0x30, 0x10, 0xe4, + 0x3d, 0x2c, 0x6d, 0x81, 0x2c, 0x51, 0x4e, 0x91, 0xbf, 0xa9, + 0xf2, 0xeb, 0x12, 0x9e, 0x1c, 0x18, 0x33, 0x29, 0xdb, 0x55, + 0xbd, 0x86, 0x8e, 0x20, 0x9a, 0xac, 0x2f, 0xbc, 0x02, 0xcb, + 0x33, 0xd9, 0x8f, 0xe7, 0x4b, 0xf2, 0x3f, 0x0c, 0x23, 0x5d, + 0x61, 0x26, 0xb1, 0xd8, 0x33, 0x4f, 0x86, 0x41, 0x04, 0x86, + 0x5c, 0x40, 0x29, 0x3a, 0x68, 0x0c, 0xb9, 0xc0, 0x20, 0xe7, + 0xb1, 0xe1, 0x06, 0xd8, 0xc1, 0x91, 0x6d, 0x3c, 0xef, 0x99, + 0xaa, 0x43, 0x1a, 0x56, 0xd2, 0x53, 0xe6, 0x92, 0x56, 0xda, + 0xc0, 0x9e, 0xf1, 0x22, 0xb1, 0xa9, 0x86, 0x81, 0x8a, 0x7c, + 0xb6, 0x24, 0x53, 0x2f, 0x06, 0x2c, 0x1d, 0x1f, 0x87, 0x22, + 0x08, 0x48, 0x61, 0xc5, 0xc3, 
0x29, 0x1c, 0xcf, 0xfe, 0xf4, + 0xec, 0x68, 0x74, 0x41, 0x04, 0x8d, 0x24, 0x55, 0xd2, 0x40, + 0x3e, 0x08, 0x70, 0x8f, 0xc1, 0xf5, 0x56, 0x00, 0x2f, 0x1b, + 0x6c, 0xd8, 0x3f, 0x99, 0x2d, 0x08, 0x50, 0x97, 0xf9, 0x97, + 0x4a, 0xb0, 0x8a, 0x28, 0x83, 0x8f, 0x07, 0x89, 0x6f, 0xba, + 0xb0, 0x8f, 0x39, 0x49, 0x5e, 0x15, 0xfa, 0x6f, 0xad, 0x6e, + 0xdb, 0xfb, 0x1e, 0x75, 0x4e, 0x35, 0xfa, 0x1c, 0x78, 0x44, + 0xc4, 0x1f, 0x32, 0x2a, 0x18, 0x63, 0xd4, 0x62, 0x13, 0x53, + 0xae} + return util.NewAddressScriptHash(script, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixSpectre, + expectedPrefix: util.Bech32PrefixSpectre, + }, + { + name: "mainnet p2sh 2", + addr: "spectre:pr5vxqxg0xrwl2zvxlq9rxffqx00sm44kn5vxqxg0xrwl2zvxl5vxjzlvqth5", + encoded: "spectre:pr5vxqxg0xrwl2zvxlq9rxffqx00sm44kn5vxqxg0xrwl2zvxl5vxjzlvqth5", + valid: true, + result: util.TstAddressScriptHash( + util.Bech32PrefixSpectre, + [blake2b.Size256]byte{ + 0xe8, 0xc3, 0x00, 0xc8, 0x79, 0x86, 0xef, 0xa8, 0x4c, 0x37, + 0xc0, 0x51, 0x99, 0x29, 0x01, 0x9e, 0xf8, 0x6e, 0xb5, 0xb4, + 0xe8, 0xc3, 0x00, 0xc8, 0x79, 0x86, 0xef, 0xa8, 0x4c, 0x37, + 0xe8, 0xc3, + }), + f: func() (util.Address, error) { + hash := []byte{ + 0xe8, 0xc3, 0x00, 0xc8, 0x79, 0x86, 0xef, 0xa8, 0x4c, 0x37, + 0xc0, 0x51, 0x99, 0x29, 0x01, 0x9e, 0xf8, 0x6e, 0xb5, 0xb4, + 0xe8, 0xc3, 0x00, 0xc8, 0x79, 0x86, 0xef, 0xa8, 0x4c, 0x37, + 0xe8, 0xc3, + } + return util.NewAddressScriptHashFromHash(hash, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixSpectre, + expectedPrefix: util.Bech32PrefixSpectre, + }, + { + name: "testnet p2sh", + addr: "spectretest:przhjdpv93xfygpqtckdc2zkzuzqeyj2pt5vxqxg0xrwl2zvxl5vx4tm0lqsu", + encoded: "spectretest:przhjdpv93xfygpqtckdc2zkzuzqeyj2pt5vxqxg0xrwl2zvxl5vx4tm0lqsu", + valid: true, + result: util.TstAddressScriptHash( + util.Bech32PrefixSpectreTest, + [blake2b.Size256]byte{ + 0xc5, 0x79, 0x34, 0x2c, 0x2c, 0x4c, 0x92, 0x20, 0x20, 0x5e, + 0x2c, 0xdc, 0x28, 0x56, 0x17, 0x04, 0x0c, 0x92, 0x4a, 
0x0a, + 0xe8, 0xc3, 0x00, 0xc8, 0x79, 0x86, 0xef, 0xa8, 0x4c, 0x37, + 0xe8, 0xc3, + }), + f: func() (util.Address, error) { + hash := []byte{ + 0xc5, 0x79, 0x34, 0x2c, 0x2c, 0x4c, 0x92, 0x20, 0x20, 0x5e, + 0x2c, 0xdc, 0x28, 0x56, 0x17, 0x04, 0x0c, 0x92, 0x4a, 0x0a, + 0xe8, 0xc3, 0x00, 0xc8, 0x79, 0x86, 0xef, 0xa8, 0x4c, 0x37, + 0xe8, 0xc3, + } + return util.NewAddressScriptHashFromHash(hash, util.Bech32PrefixSpectreTest) + }, + passedPrefix: util.Bech32PrefixSpectreTest, + expectedPrefix: util.Bech32PrefixSpectreTest, + }, + + // Negative P2SH tests. + { + name: "p2sh wrong hash length", + addr: "", + valid: false, + f: func() (util.Address, error) { + hash := []byte{ + 0x00, 0xf8, 0x15, 0xb0, 0x36, 0xd9, 0xbb, 0xbc, 0xe5, 0xe9, + 0xf2, 0xa0, 0x0a, 0xbd, 0x1b, 0xf3, 0xdc, 0x91, 0xe9, 0x55, + 0x10} + return util.NewAddressScriptHashFromHash(hash, util.Bech32PrefixSpectre) + }, + passedPrefix: util.Bech32PrefixSpectre, + expectedPrefix: util.Bech32PrefixSpectre, + }, + } + + for _, test := range tests { + // Decode addr and compare error against valid. + decoded, err := util.DecodeAddress(test.addr, test.passedPrefix) + if (err == nil) != test.valid { + t.Errorf("%v: decoding test failed: %v", test.name, err) + return + } + + if err == nil { + // Ensure the stringer returns the same address as the + // original. + if decodedStringer, ok := decoded.(fmt.Stringer); ok { + addr := test.addr + + if addr != decodedStringer.String() { + t.Errorf("%v: String on decoded value does not match expected value: %v != %v", + test.name, test.addr, decodedStringer.String()) + return + } + } + + // Encode again and compare against the original. + encoded := decoded.EncodeAddress() + if test.encoded != encoded { + t.Errorf("%v: decoding and encoding produced different addressess: %v != %v", + test.name, test.encoded, encoded) + return + } + + // Perform type-specific calculations. 
+ var saddr []byte + switch decoded.(type) { + case *util.AddressPublicKey: + saddr = util.TstAddressSAddrP2PK(encoded) + + case *util.AddressPublicKeyECDSA: + saddr = util.TstAddressSAddrP2PKECDSA(encoded) + + case *util.AddressScriptHash: + saddr = util.TstAddressSAddrP2SH(encoded) + } + + // Check script address, as well as the HashBlake2b method for P2SH addresses. + if !bytes.Equal(saddr, decoded.ScriptAddress()) { + t.Errorf("%v: script addresses do not match:\n%x != \n%x", + test.name, saddr, decoded.ScriptAddress()) + return + } + switch a := decoded.(type) { + case *util.AddressPublicKey: + if h := a.ScriptAddress()[:]; !bytes.Equal(saddr, h) { + t.Errorf("%v: hashes do not match:\n%x != \n%x", + test.name, saddr, h) + return + } + + case *util.AddressScriptHash: + if h := a.HashBlake2b()[:]; !bytes.Equal(saddr, h) { + t.Errorf("%v: hashes do not match:\n%x != \n%x", + test.name, saddr, h) + return + } + } + + // Ensure the address is for the expected network. + if !decoded.IsForPrefix(test.expectedPrefix) { + t.Errorf("%v: calculated network does not match expected", + test.name) + return + } + } + + if !test.valid { + // If address is invalid, but a creation function exists, + // verify that it returns a nil addr and non-nil error. + if test.f != nil { + _, err := test.f() + if err == nil { + t.Errorf("%v: address is invalid but creating new address succeeded", + test.name) + return + } + } + continue + } + + // Valid test, compare address created with f against expected result. 
+ addr, err := test.f() + if err != nil { + t.Errorf("%v: address is valid but creating new address failed with error %v", + test.name, err) + return + } + + if !reflect.DeepEqual(addr, test.result) { + t.Errorf("%v: created address does not match expected result", + test.name) + return + } + + if !reflect.DeepEqual(addr, decoded) { + t.Errorf("%v: created address does not match the decoded address", + test.name) + return + } + } +} + +func TestDecodeAddressErrorConditions(t *testing.T) { + tests := []struct { + address string + prefix util.Bech32Prefix + errorMessage string + }{ + { + "bitcoincash:qpzry9x8gf2tvdw0s3jn54khce6mua7lcw20ayyn", + util.Bech32PrefixUnknown, + "decoded address's prefix could not be parsed", + }, + { + "spectresim:raskzctpv9skzctpv9skzctpv9skzctpvy37ct7zafpv9skzctpvyz6wm06l9", + util.Bech32PrefixSpectreSim, + "unknown address type", + }, + { + "spectresim:raskzcgcjkvxs7m", + util.Bech32PrefixSpectreSim, + "unknown address type", + }, + { + "spectretest:qqq65mvpxcmajeq44n2n8vfn6u9f8l4zsys5p0c0t8", + util.Bech32PrefixSpectre, + "decoded address is of wrong network", + }, + } + + for _, test := range tests { + _, err := util.DecodeAddress(test.address, test.prefix) + if err == nil { + t.Errorf("decodeAddress unexpectedly succeeded") + } else if !strings.Contains(err.Error(), test.errorMessage) { + t.Errorf("received mismatched error. 
Expected '%s' but got '%s'", + test.errorMessage, err) + } + } +} + +func TestParsePrefix(t *testing.T) { + tests := []struct { + prefixStr string + expectedPrefix util.Bech32Prefix + expectedError bool + }{ + {"spectre", util.Bech32PrefixSpectre, false}, + {"spectretest", util.Bech32PrefixSpectreTest, false}, + {"spectresim", util.Bech32PrefixSpectreSim, false}, + {"blabla", util.Bech32PrefixUnknown, true}, + {"unknown", util.Bech32PrefixUnknown, true}, + {"", util.Bech32PrefixUnknown, true}, + } + + for _, test := range tests { + result, err := util.ParsePrefix(test.prefixStr) + if (err != nil) != test.expectedError { + t.Errorf("TestParsePrefix: %s: expected error status: %t, but got %t", + test.prefixStr, test.expectedError, err != nil) + } + + if result != test.expectedPrefix { + t.Errorf("TestParsePrefix: %s: expected prefix: %d, but got %d", + test.prefixStr, test.expectedPrefix, result) + } + } +} + +func TestPrefixToString(t *testing.T) { + tests := []struct { + prefix util.Bech32Prefix + expectedPrefixStr string + }{ + {util.Bech32PrefixSpectre, "spectre"}, + {util.Bech32PrefixSpectreTest, "spectretest"}, + {util.Bech32PrefixSpectreSim, "spectresim"}, + {util.Bech32PrefixUnknown, ""}, + } + + for _, test := range tests { + result := test.prefix.String() + + if result != test.expectedPrefixStr { + t.Errorf("TestPrefixToString: %s: expected string: %s, but got %s", + test.prefix, test.expectedPrefixStr, result) + } + } +} diff --git a/util/amount.go b/util/amount.go new file mode 100644 index 0000000..3e80761 --- /dev/null +++ b/util/amount.go @@ -0,0 +1,125 @@ +// Copyright (c) 2013, 2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package util + +import ( + "math" + "strconv" + + "github.com/pkg/errors" + "github.com/spectre-project/spectred/domain/consensus/utils/constants" +) + +// AmountUnit describes a method of converting an Amount to something +// other than the base unit of a spectre. The value of the AmountUnit +// is the exponent component of the decadic multiple to convert from +// an amount in spectre to an amount counted in units. +type AmountUnit int + +// These constants define various units used when describing a spectre +// monetary amount. +const ( + AmountMegaSPR AmountUnit = 6 + AmountKiloSPR AmountUnit = 3 + AmountSPR AmountUnit = 0 + AmountMilliSPR AmountUnit = -3 + AmountMicroSPR AmountUnit = -6 + AmountSompi AmountUnit = -8 +) + +// String returns the unit as a string. For recognized units, the SI +// prefix is used, or "Sompi" for the base unit. For all unrecognized +// units, "1eN SPR" is returned, where N is the AmountUnit. +func (u AmountUnit) String() string { + switch u { + case AmountMegaSPR: + return "MSPR" + case AmountKiloSPR: + return "kSPR" + case AmountSPR: + return "SPR" + case AmountMilliSPR: + return "mSPR" + case AmountMicroSPR: + return "μSPR" + case AmountSompi: + return "Sompi" + default: + return "1e" + strconv.FormatInt(int64(u), 10) + " SPR" + } +} + +// Amount represents the base spectre monetary unit (colloquially referred +// to as a `Sompi'). A single Amount is equal to 1e-8 of a spectre. +type Amount uint64 + +// round converts a floating point number, which may or may not be representable +// as an integer, to the Amount integer type by rounding to the nearest integer. +// This is performed by adding or subtracting 0.5 depending on the sign, and +// relying on integer truncation to round the value to the nearest Amount. +func round(f float64) Amount { + if f < 0 { + return Amount(f - 0.5) + } + return Amount(f + 0.5) +} + +// NewAmount creates an Amount from a floating point value representing +// some value in spectre. 
NewAmount errors if f is NaN or +-Infinity, but +// does not check that the amount is within the total amount of spectre +// producible as f may not refer to an amount at a single moment in time. +// +// NewAmount is for specifically for converting SPR to Sompi. +// For creating a new Amount with an int64 value which denotes a quantity of Sompi, +// do a simple type conversion from type int64 to Amount. +// TODO: Refactor NewAmount. When amounts are more than 1e9 SPR, the precision +// can be higher than one sompi (1e9 and 1e9+1e-8 will result as the same number) +func NewAmount(f float64) (Amount, error) { + // The amount is only considered invalid if it cannot be represented + // as an integer type. This may happen if f is NaN or +-Infinity. + switch { + case math.IsNaN(f): + fallthrough + case math.IsInf(f, 1): + fallthrough + case math.IsInf(f, -1): + return 0, errors.New("invalid spectre amount") + } + + return round(f * constants.SompiPerSpectre), nil +} + +// ToUnit converts a monetary amount counted in spectre base units to a +// floating point value representing an amount of spectre. +func (a Amount) ToUnit(u AmountUnit) float64 { + return float64(a) / math.Pow10(int(u+8)) +} + +// ToSPR is the equivalent of calling ToUnit with AmountSPR. +func (a Amount) ToSPR() float64 { + return a.ToUnit(AmountSPR) +} + +// Format formats a monetary amount counted in spectre base units as a +// string for a given unit. The conversion will succeed for any unit, +// however, known units will be formated with an appended label describing +// the units with SI notation, or "Sompi" for the base unit. +func (a Amount) Format(u AmountUnit) string { + units := " " + u.String() + return strconv.FormatFloat(a.ToUnit(u), 'f', -int(u+8), 64) + units +} + +// String is the equivalent of calling Format with AmountSPR. +func (a Amount) String() string { + return a.Format(AmountSPR) +} + +// MulF64 multiplies an Amount by a floating point value. 
While this is not +// an operation that must typically be done by a full node or wallet, it is +// useful for services that build on top of spectre (for example, calculating +// a fee by multiplying by a percentage). +func (a Amount) MulF64(f float64) Amount { + return round(float64(a) * f) +} diff --git a/util/amount_test.go b/util/amount_test.go new file mode 100644 index 0000000..b182376 --- /dev/null +++ b/util/amount_test.go @@ -0,0 +1,257 @@ +// Copyright (c) 2013, 2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package util_test + +import ( + "math" + "testing" + + "github.com/spectre-project/spectred/domain/consensus/utils/constants" + + . "github.com/spectre-project/spectred/util" +) + +func TestAmountCreation(t *testing.T) { + tests := []struct { + name string + amount float64 + valid bool + expected Amount + }{ + // Positive tests. + { + name: "zero", + amount: 0, + valid: true, + expected: 0, + }, + { + name: "max producible", + amount: 1161e6, + valid: true, + expected: Amount(constants.MaxSompi), + }, + { + name: "one hundred", + amount: 100, + valid: true, + expected: 100 * constants.SompiPerSpectre, + }, + { + name: "fraction", + amount: 0.01234567, + valid: true, + expected: 1234567, + }, + { + name: "rounding up", + amount: 54.999999999999943157, + valid: true, + expected: 55 * constants.SompiPerSpectre, + }, + { + name: "rounding down", + amount: 55.000000000000056843, + valid: true, + expected: 55 * constants.SompiPerSpectre, + }, + + // Negative tests. 
+ { + name: "not-a-number", + amount: math.NaN(), + valid: false, + }, + { + name: "-infinity", + amount: math.Inf(-1), + valid: false, + }, + { + name: "+infinity", + amount: math.Inf(1), + valid: false, + }, + } + + for _, test := range tests { + a, err := NewAmount(test.amount) + switch { + case test.valid && err != nil: + t.Errorf("%v: Positive test Amount creation failed with: %v", test.name, err) + continue + case !test.valid && err == nil: + t.Errorf("%v: Negative test Amount creation succeeded (value %v) when should fail", test.name, a) + continue + } + + if a != test.expected { + t.Errorf("%v: Created amount %v does not match expected %v", test.name, a, test.expected) + continue + } + } +} + +func TestAmountUnitConversions(t *testing.T) { + tests := []struct { + name string + amount Amount + unit AmountUnit + converted float64 + s string + }{ + { + name: "MSPR", + amount: Amount(constants.MaxSompi), + unit: AmountMegaSPR, + converted: 1161, + s: "1161 MSPR", + }, + { + name: "kSPR", + amount: 44433322211100, + unit: AmountKiloSPR, + converted: 444.33322211100, + s: "444.333222111 kSPR", + }, + { + name: "SPR", + amount: 44433322211100, + unit: AmountSPR, + converted: 444333.22211100, + s: "444333.222111 SPR", + }, + { + name: "mSPR", + amount: 44433322211100, + unit: AmountMilliSPR, + converted: 444333222.11100, + s: "444333222.111 mSPR", + }, + { + + name: "μSPR", + amount: 44433322211100, + unit: AmountMicroSPR, + converted: 444333222111.00, + s: "444333222111 μSPR", + }, + { + + name: "sompi", + amount: 44433322211100, + unit: AmountSompi, + converted: 44433322211100, + s: "44433322211100 Sompi", + }, + { + + name: "non-standard unit", + amount: 44433322211100, + unit: AmountUnit(-1), + converted: 4443332.2211100, + s: "4443332.22111 1e-1 SPR", + }, + } + + for _, test := range tests { + f := test.amount.ToUnit(test.unit) + if f != test.converted { + t.Errorf("%v: converted value %v does not match expected %v", test.name, f, test.converted) + continue + 
} + + s := test.amount.Format(test.unit) + if s != test.s { + t.Errorf("%v: format '%v' does not match expected '%v'", test.name, s, test.s) + continue + } + + // Verify that Amount.ToSPR works as advertised. + f1 := test.amount.ToUnit(AmountSPR) + f2 := test.amount.ToSPR() + if f1 != f2 { + t.Errorf("%v: ToSPR does not match ToUnit(AmountSPR): %v != %v", test.name, f1, f2) + } + + // Verify that Amount.String works as advertised. + s1 := test.amount.Format(AmountSPR) + s2 := test.amount.String() + if s1 != s2 { + t.Errorf("%v: String does not match Format(AmountSPR): %v != %v", test.name, s1, s2) + } + } +} + +func TestAmountMulF64(t *testing.T) { + tests := []struct { + name string + amt Amount + mul float64 + res Amount + }{ + { + name: "Multiply 0.1 SPR by 2", + amt: 100e5, // 0.1 SPR + mul: 2, + res: 200e5, // 0.2 SPR + }, + { + name: "Multiply 0.2 SPR by 0.02", + amt: 200e5, // 0.2 SPR + mul: 1.02, + res: 204e5, // 0.204 SPR + }, + { + name: "Round down", + amt: 49, // 49 Sompi + mul: 0.01, + res: 0, + }, + { + name: "Round up", + amt: 50, // 50 Sompi + mul: 0.01, + res: 1, // 1 Sompi + }, + { + name: "Multiply by 0.", + amt: 1e8, // 1 SPR + mul: 0, + res: 0, // 0 SPR + }, + { + name: "Multiply 1 by 0.5.", + amt: 1, // 1 Sompi + mul: 0.5, + res: 1, // 1 Sompi + }, + { + name: "Multiply 100 by 66%.", + amt: 100, // 100 Sompi + mul: 0.66, + res: 66, // 66 Sompi + }, + { + name: "Multiply 100 by 66.6%.", + amt: 100, // 100 Sompi + mul: 0.666, + res: 67, // 67 Sompi + }, + { + name: "Multiply 100 by 2/3.", + amt: 100, // 100 Sompi + mul: 2.0 / 3, + res: 67, // 67 Sompi + }, + } + + for _, test := range tests { + a := test.amt.MulF64(test.mul) + if a != test.res { + t.Errorf("%v: expected %v got %v", test.name, test.res, a) + } + } +} diff --git a/util/appdata.go b/util/appdata.go new file mode 100644 index 0000000..19df502 --- /dev/null +++ b/util/appdata.go @@ -0,0 +1,106 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is 
governed by an ISC +// license that can be found in the LICENSE file. + +package util + +import ( + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +// appDir returns an operating system specific directory to be used for +// storing application data for an application. See AppDir for more +// details. This unexported version takes an operating system argument +// primarily to enable the testing package to properly test the function by +// forcing an operating system that is not the currently one. +func appDir(goos, appName string, roaming bool) string { + if appName == "" || appName == "." { + return "." + } + + // The caller really shouldn't prepend the appName with a period, but + // if they do, handle it gracefully by trimming it. + appName = strings.TrimPrefix(appName, ".") + appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:] + appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:] + + // Get the OS specific home directory via the Go standard lib. + var homeDir string + usr, err := user.Current() + if err == nil { + homeDir = usr.HomeDir + } + + // Fall back to standard HOME environment variable that works + // for most POSIX OSes if the directory from the Go standard + // lib failed. + if err != nil || homeDir == "" { + homeDir = os.Getenv("HOME") + } + + switch goos { + // Attempt to use the LOCALAPPDATA or APPDATA environment variable on + // Windows. + case "windows": + // Windows XP and before didn't have a LOCALAPPDATA, so fallback + // to regular APPDATA when LOCALAPPDATA is not set. 
+ appData := os.Getenv("LOCALAPPDATA") + if roaming || appData == "" { + appData = os.Getenv("APPDATA") + } + + if appData != "" { + return filepath.Join(appData, appNameUpper) + } + + case "darwin": + if homeDir != "" { + return filepath.Join(homeDir, "Library", + "Application Support", appNameUpper) + } + + case "plan9": + if homeDir != "" { + return filepath.Join(homeDir, appNameLower) + } + + default: + if homeDir != "" { + return filepath.Join(homeDir, "."+appNameLower) + } + } + + // Fall back to the current directory if all else fails. + return "." +} + +// AppDir returns an operating system specific directory to be used for +// storing application data for an application. +// +// The appName parameter is the name of the application the data directory is +// being requested for. This function will prepend a period to the appName for +// POSIX style operating systems since that is standard practice. An empty +// appName or one with a single dot is treated as requesting the current +// directory so only "." will be returned. Further, the first character +// of appName will be made lowercase for POSIX style operating systems and +// uppercase for Mac and Windows since that is standard practice. +// +// The roaming parameter only applies to Windows where it specifies the roaming +// application data profile (%APPDATA%) should be used instead of the local one +// (%LOCALAPPDATA%) that is used by default. 
+// +// Example results: +// +// dir := AppDir("myapp", false) +// POSIX (Linux/BSD): ~/.myapp +// Mac OS: $HOME/Library/Application Support/Myapp +// Windows: %LOCALAPPDATA%\Myapp +// Plan 9: $home/myapp +func AppDir(appName string, roaming bool) string { + return appDir(runtime.GOOS, appName, roaming) +} diff --git a/util/appdata_test.go b/util/appdata_test.go new file mode 100644 index 0000000..87eae5a --- /dev/null +++ b/util/appdata_test.go @@ -0,0 +1,133 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package util_test + +import ( + "os" + "os/user" + "path/filepath" + "runtime" + "testing" + "unicode" + + "github.com/spectre-project/spectred/util" +) + +// TestAppDataDir tests the API for AppDir to ensure it gives expected +// results for various operating systems. +func TestAppDataDir(t *testing.T) { + // App name plus upper and lowercase variants. + appName := "myapp" + appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:] + appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:] + + // When we're on Windows, set the expected local and roaming directories + // per the environment vars. When we aren't on Windows, the function + // should return the current directory when forced to provide the + // Windows path since the environment variables won't exist. + winLocal := "." + winRoaming := "." + if runtime.GOOS == "windows" { + localAppData := os.Getenv("LOCALAPPDATA") + roamingAppData := os.Getenv("APPDATA") + if localAppData == "" { + localAppData = roamingAppData + } + winLocal = filepath.Join(localAppData, appNameUpper) + winRoaming = filepath.Join(roamingAppData, appNameUpper) + } + + // Get the home directory to use for testing expected results. 
+ var homeDir string + usr, err := user.Current() + if err != nil { + t.Errorf("user.Current: %v", err) + return + } + homeDir = usr.HomeDir + + // Mac app data directory. + macAppData := filepath.Join(homeDir, "Library", "Application Support") + + tests := []struct { + goos string + appName string + roaming bool + want string + }{ + // Various combinations of application name casing, leading + // period, operating system, and roaming flags. + {"windows", appNameLower, false, winLocal}, + {"windows", appNameUpper, false, winLocal}, + {"windows", "." + appNameLower, false, winLocal}, + {"windows", "." + appNameUpper, false, winLocal}, + {"windows", appNameLower, true, winRoaming}, + {"windows", appNameUpper, true, winRoaming}, + {"windows", "." + appNameLower, true, winRoaming}, + {"windows", "." + appNameUpper, true, winRoaming}, + {"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)}, + {"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)}, + {"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)}, + {"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)}, + {"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"freebsd", "." 
+ appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)}, + {"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)}, + {"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)}, + {"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)}, + {"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + {"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)}, + {"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)}, + + // No application name provided, so expect current directory. + {"windows", "", false, "."}, + {"windows", "", true, "."}, + {"linux", "", false, "."}, + {"darwin", "", false, "."}, + {"openbsd", "", false, "."}, + {"freebsd", "", false, "."}, + {"netbsd", "", false, "."}, + {"plan9", "", false, "."}, + {"unrecognized", "", false, "."}, + + // Single dot provided for application name, so expect current + // directory. 
+ {"windows", ".", false, "."}, + {"windows", ".", true, "."}, + {"linux", ".", false, "."}, + {"darwin", ".", false, "."}, + {"openbsd", ".", false, "."}, + {"freebsd", ".", false, "."}, + {"netbsd", ".", false, "."}, + {"plan9", ".", false, "."}, + {"unrecognized", ".", false, "."}, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + ret := util.TstAppDataDir(test.goos, test.appName, test.roaming) + if ret != test.want { + t.Errorf("appDir #%d (%s) does not match - "+ + "expected got %s, want %s", i, test.goos, ret, + test.want) + continue + } + } +} diff --git a/util/bech32/README.md b/util/bech32/README.md new file mode 100644 index 0000000..8237074 --- /dev/null +++ b/util/bech32/README.md @@ -0,0 +1,13 @@ +# bech32 + +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](https://choosealicense.com/licenses/isc/) +[![GoDoc](https://godoc.org/github.com/spectre-project/spectred/util/bech32?status.png)](http://godoc.org/github.com/spectre-project/spectred/util/bech32) + +Package bech32 provides a Go implementation of the bech32 format. + +## Examples + +* [Bech32 decode Example](http://godoc.org/github.com/spectre-project/spectred/util/bech32#example-Bech32Decode) + Demonstrates how to decode a bech32 encoded string. +* [Bech32 encode Example](http://godoc.org/github.com/spectre-project/spectred/util/bech32#example-BechEncode) + Demonstrates how to encode data into a bech32 string. diff --git a/util/bech32/bech32.go b/util/bech32/bech32.go new file mode 100644 index 0000000..d3d1f77 --- /dev/null +++ b/util/bech32/bech32.go @@ -0,0 +1,295 @@ +// Copyright (c) 2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package bech32 + +import ( + "fmt" + "github.com/pkg/errors" + "strings" +) + +const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l" +const checksumLength = 8 + +// For use in convertBits. 
Represents a number of bits to convert to or from and whether +// to add padding. +type conversionType struct { + fromBits uint8 + toBits uint8 + pad bool +} + +// Conversion types to use in convertBits. +var fiveToEightBits = conversionType{fromBits: 5, toBits: 8, pad: false} +var eightToFiveBits = conversionType{fromBits: 8, toBits: 5, pad: true} + +var generator = []int{0x98f2bc8e61, 0x79b76d99e2, 0xf33e5fb3c4, 0xae2eabe2a8, 0x1e4f43e470} + +// Encode prepends the version byte, converts to uint5, and encodes to Bech32. +func Encode(prefix string, payload []byte, version byte) string { + data := make([]byte, len(payload)+1) + data[0] = version + copy(data[1:], payload) + + converted := convertBits(data, eightToFiveBits) + + return encode(prefix, converted) +} + +// Decode decodes a string that was encoded with Encode. +func Decode(encoded string) (string, []byte, byte, error) { + prefix, decoded, err := decode(encoded) + if err != nil { + return "", nil, 0, err + } + + converted := convertBits(decoded, fiveToEightBits) + version := converted[0] + payload := converted[1:] + + return prefix, payload, version, nil +} + +// Decode decodes a Bech32 encoded string, returning the prefix +// and the data part excluding the checksum. +func decode(encoded string) (string, []byte, error) { + // The minimum allowed length for a Bech32 string is 10 characters, + // since it needs a non-empty prefix, a separator, and an 8 character + // checksum. + if len(encoded) < checksumLength+2 { + return "", nil, errors.Errorf("invalid bech32 string length %d", + len(encoded)) + } + // Only ASCII characters between 33 and 126 are allowed. + for i := 0; i < len(encoded); i++ { + if encoded[i] < 33 || encoded[i] > 126 { + return "", nil, errors.Errorf("invalid character in "+ + "string: '%c'", encoded[i]) + } + } + + // The characters must be either all lowercase or all uppercase. 
+ lower := strings.ToLower(encoded) + upper := strings.ToUpper(encoded) + if encoded != lower && encoded != upper { + return "", nil, errors.Errorf("string not all lowercase or all " + + "uppercase") + } + + // We'll work with the lowercase string from now on. + encoded = lower + + // The string is invalid if the last ':' is non-existent, it is the + // first character of the string (no human-readable part) or one of the + // last 8 characters of the string (since checksum cannot contain ':'), + // or if the string is more than 90 characters in total. + colonIndex := strings.LastIndexByte(encoded, ':') + if colonIndex < 1 || colonIndex+checksumLength+1 > len(encoded) { + return "", nil, errors.Errorf("invalid index of ':'") + } + + // The prefix part is everything before the last ':'. + prefix := encoded[:colonIndex] + data := encoded[colonIndex+1:] + + // Each character corresponds to the byte with value of the index in + // 'charset'. + decoded, err := decodeFromBase32(data) + if err != nil { + return "", nil, errors.Errorf("failed converting data to bytes: "+ + "%s", err) + } + + if !verifyChecksum(prefix, decoded) { + checksum := encoded[len(encoded)-checksumLength:] + expected := encodeToBase32(calculateChecksum(prefix, + decoded[:len(decoded)-checksumLength])) + + return "", nil, errors.Errorf("checksum failed. Expected %s, got %s", + expected, checksum) + } + + // We exclude the last 8 bytes, which is the checksum. + return prefix, decoded[:len(decoded)-checksumLength], nil +} + +// Encode encodes a byte slice into a bech32 string with the +// prefix. Note that the bytes must each encode 5 bits (base32). +func encode(prefix string, data []byte) string { + // Calculate the checksum of the data and append it at the end. + checksum := calculateChecksum(prefix, data) + combined := append(data, checksum...) + + // The resulting bech32 string is the concatenation of the prefix, the + // separator ':', data and checksum. 
Everything after the separator is + // represented using the specified charset. + base32String := encodeToBase32(combined) + + return fmt.Sprintf("%s:%s", prefix, base32String) +} + +// decodeFromBase32 converts each character in the string 'chars' to the value of the +// index of the correspoding character in 'charset'. +func decodeFromBase32(base32String string) ([]byte, error) { + decoded := make([]byte, 0, len(base32String)) + for i := 0; i < len(base32String); i++ { + index := strings.IndexByte(charset, base32String[i]) + if index < 0 { + return nil, errors.Errorf("invalid character not part of "+ + "charset: %c", base32String[i]) + } + decoded = append(decoded, byte(index)) + } + return decoded, nil +} + +// Converts the byte slice 'data' to a string where each byte in 'data' +// encodes the index of a character in 'charset'. +// IMPORTANT: this function expects the data to be in uint5 format. +// CAUTION: for legacy reasons, in case of an error this function returns +// an empty string instead of an error. +func encodeToBase32(data []byte) string { + result := make([]byte, 0, len(data)) + for _, b := range data { + if int(b) >= len(charset) { + return "" + } + result = append(result, charset[b]) + } + return string(result) +} + +// convertBits converts a byte slice where each byte is encoding fromBits bits, +// to a byte slice where each byte is encoding toBits bits. +func convertBits(data []byte, conversionType conversionType) []byte { + // The final bytes, each byte encoding toBits bits. + var regrouped []byte + + // Keep track of the next byte we create and how many bits we have + // added to it out of the toBits goal. + nextByte := byte(0) + filledBits := uint8(0) + + for _, b := range data { + // Discard unused bits. + b = b << (8 - conversionType.fromBits) + + // How many bits remaining to extract from the input data. + remainingFromBits := conversionType.fromBits + for remainingFromBits > 0 { + // How many bits remaining to be added to the next byte. 
+ remainingToBits := conversionType.toBits - filledBits + + // The number of bytes to next extract is the minimum of + // remainingFromBits and remainingToBits. + toExtract := remainingFromBits + if remainingToBits < toExtract { + toExtract = remainingToBits + } + + // Add the next bits to nextByte, shifting the already + // added bits to the left. + nextByte = (nextByte << toExtract) | (b >> (8 - toExtract)) + + // Discard the bits we just extracted and get ready for + // next iteration. + b = b << toExtract + remainingFromBits -= toExtract + filledBits += toExtract + + // If the nextByte is completely filled, we add it to + // our regrouped bytes and start on the next byte. + if filledBits == conversionType.toBits { + regrouped = append(regrouped, nextByte) + filledBits = 0 + nextByte = 0 + } + } + } + + // We pad any unfinished group if specified. + if conversionType.pad && filledBits > 0 { + nextByte = nextByte << (conversionType.toBits - filledBits) + regrouped = append(regrouped, nextByte) + filledBits = 0 + nextByte = 0 + } + + return regrouped +} + +// The checksum is a 40 bits BCH codes defined over GF(2^5). +// It ensures the detection of up to 6 errors in the address and 8 in a row. +// Combined with the length check, this provides very strong guarantee against errors. +// For more details please refer to the Bech32 Address Serialization section +// of the spec. +func calculateChecksum(prefix string, payload []byte) []byte { + prefixLower5Bits := prefixToUint5Array(prefix) + payloadInts := ints(payload) + templateZeroes := []int{0, 0, 0, 0, 0, 0, 0, 0} + + // prefixLower5Bits + 0 + payloadInts + templateZeroes + concat := append(prefixLower5Bits, 0) + concat = append(concat, payloadInts...) + concat = append(concat, templateZeroes...) 
+ + polyModResult := polyMod(concat) + var res []byte + for i := 0; i < checksumLength; i++ { + res = append(res, byte((polyModResult>>uint(5*(checksumLength-1-i)))&31)) + } + + return res +} + +// For more details please refer to the Bech32 Address Serialization section +// of the spec. +func verifyChecksum(prefix string, payload []byte) bool { + prefixLower5Bits := prefixToUint5Array(prefix) + payloadInts := ints(payload) + + // prefixLower5Bits + 0 + payloadInts + dataToVerify := append(prefixLower5Bits, 0) + dataToVerify = append(dataToVerify, payloadInts...) + + return polyMod(dataToVerify) == 0 +} + +func prefixToUint5Array(prefix string) []int { + prefixLower5Bits := make([]int, len(prefix)) + for i := 0; i < len(prefix); i++ { + char := prefix[i] + charLower5Bits := int(char & 31) + prefixLower5Bits[i] = charLower5Bits + } + + return prefixLower5Bits +} + +func ints(payload []byte) []int { + payloadInts := make([]int, len(payload)) + for i, b := range payload { + payloadInts[i] = int(b) + } + + return payloadInts +} + +// For more details please refer to the Bech32 Address Serialization section +// of the spec. +func polyMod(values []int) int { + checksum := 1 + for _, value := range values { + topBits := checksum >> 35 + checksum = ((checksum & 0x07ffffffff) << 5) ^ value + for i := 0; i < len(generator); i++ { + if ((topBits >> uint(i)) & 1) == 1 { + checksum ^= generator[i] + } + } + } + + return checksum ^ 1 +} diff --git a/util/bech32/bech32_test.go b/util/bech32/bech32_test.go new file mode 100644 index 0000000..ad023ab --- /dev/null +++ b/util/bech32/bech32_test.go @@ -0,0 +1,61 @@ +// Copyright (c) 2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package bech32_test + +import ( + "testing" + + "github.com/spectre-project/spectred/util/bech32" +) + +var checkEncodingStringTests = []struct { + prefix string + version byte + in string + out string +}{ + {"a", 0, "", "a:qqeq69uvrh"}, + {"a", 8, "", "a:pq99546ray"}, + {"a", 120, "", "a:0qf6jrhtdq"}, + {"b", 8, " ", "b:pqsqzsjd64fv"}, + {"b", 8, "-", "b:pqksmhczf8ud"}, + {"b", 8, "0", "b:pqcq53eqrk0e"}, + {"b", 8, "1", "b:pqcshg75y0vf"}, + {"b", 8, "-1", "b:pqknzl4e9y0zy"}, + {"b", 8, "11", "b:pqcnzt888ytdg"}, + {"b", 8, "abc", "b:ppskycc8txxxn2w"}, + {"b", 8, "1234598760", "b:pqcnyve5x5unsdekxqeusxeyu2"}, + {"b", 8, "abcdefghijklmnopqrstuvwxyz", "b:ppskycmyv4nxw6rfdf4kcmtwdac8zunnw36hvamc09aqtpppz8lk"}, + {"b", 8, "000000000000000000000000000000000000000000", "b:pqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrqvpsxqcrq7ag684l3"}, +} + +func TestBech32(t *testing.T) { + for x, test := range checkEncodingStringTests { + // test encoding + encoded := bech32.Encode(test.prefix, []byte(test.in), test.version) + if encoded != test.out { + t.Errorf("Encode test #%d failed: got %s, want: %s", x, encoded, test.out) + } + + // test decoding + prefix, decoded, version, err := bech32.Decode(test.out) + if err != nil { + t.Errorf("Decode test #%d failed with err: %v", x, err) + } else if prefix != test.prefix { + t.Errorf("Decode test #%d failed: got prefix: %s want: %s", x, prefix, test.prefix) + } else if version != test.version { + t.Errorf("Decode test #%d failed: got version: %d want: %d", x, version, test.version) + } else if string(decoded) != test.in { + t.Errorf("Decode test #%d failed: got: %s want: %s", x, decoded, test.in) + } + } +} + +func TestDecodeError(t *testing.T) { + _, _, _, err := bech32.Decode("™") + if err == nil { + t.Errorf("decode unexpectedly succeeded") + } +} diff --git a/util/bech32/doc.go b/util/bech32/doc.go new file mode 100644 index 0000000..ea994ca --- /dev/null +++ b/util/bech32/doc.go @@ -0,0 +1,8 @@ +/* +Package bech32 
provides a Go implementation of the bech32 format. + +Bech32 strings consist of a prefix, followed by the separator :, +then a checksummed data part encoded using the 32 characters +"qpzry9x8gf2tvdw0s3jn54khce6mua7l". +*/ +package bech32 diff --git a/util/bech32/example_test.go b/util/bech32/example_test.go new file mode 100644 index 0000000..35cfb3d --- /dev/null +++ b/util/bech32/example_test.go @@ -0,0 +1,43 @@ +// Copyright (c) 2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package bech32_test + +import ( + "encoding/hex" + "fmt" + + "github.com/spectre-project/spectred/util/bech32" +) + +// This example demonstrates how to decode a bech32 encoded string. +func ExampleDecode() { + encoded := "customprefix!:::::q:ppzxzarpyp6x7grzv5sx2mnrdajx2epqd9h8gmeqgfjkx6pnxgc3swlew4" + prefix, decoded, version, err := bech32.Decode(encoded) + if err != nil { + fmt.Println("Error:", err) + } + + // Show the decoded data. + fmt.Println("Decoded prefix:", prefix) + fmt.Println("Decoded version:", version) + fmt.Println("Decoded Data:", hex.EncodeToString(decoded)) + + // Output: + // Decoded prefix: customprefix!:::::q + // Decoded version: 8 + // Decoded Data: 4461746120746f20626520656e636f64656420696e746f20426563683332 +} + +// This example demonstrates how to encode data into a bech32 string. +func ExampleEncode() { + data := []byte("Data to be encoded into Bech32") + encoded := bech32.Encode("customprefix!:::::q", data, 8) + + // Show the encoded data. 
+ fmt.Println("Encoded Data:", encoded) + + // Output: + // Encoded Data: customprefix!:::::q:ppzxzarpyp6x7grzv5sx2mnrdajx2epqd9h8gmeqgfjkx6pnxgc3swlew4 +} diff --git a/util/bech32/internal_test.go b/util/bech32/internal_test.go new file mode 100644 index 0000000..898ed09 --- /dev/null +++ b/util/bech32/internal_test.go @@ -0,0 +1,76 @@ +package bech32 + +import ( + "strings" + "testing" +) + +func TestBech32(t *testing.T) { + tests := []struct { + str string + valid bool + }{ + {"prefix:x64nx6hz", true}, + {"p:gpf8m4h7", true}, + {"bitcoincash:qpzry9x8gf2tvdw0s3jn54khce6mua7lcw20ayyn", true}, + {"bchtest:testnetaddress4d6njnut", true}, + {"bchreg:555555555555555555555555555555555555555555555udxmlmrz", true}, + {"A:3X3DXU9W", true}, + {"an83characterlonghumanreadablepartthatcontainscharctr:andtheexcludedcharactersbio:pk68j20a", true}, + {"abcdef:qpzry9x8gf2tvdw0s3jn54khce6mua7:nw2t26kg", true}, + {"::qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq40ku0e3z", true}, + {"split:checkupstagehandshakeupstreamerranterredcaperred3za27wc5", true}, + {"aaa:bbb", false}, // too short + {"split:checkupstagehandshakeupstreamerranterredCaperred3za27wc5", false}, // mixed uppercase and lowercase + {"split:checkupstagehandshakeupstreamerranterredcaperred3za28wc5", false}, // invalid checksum + {"s lit:checkupstagehandshakeupstreamerranterredcaperred3za27wc5", false}, // invalid character (space) in prefix + {"spl" + string(rune(127)) + "t:checkupstagehandshakeupstreamerranterredcaperred3za27wc5", false}, // invalid character (DEL) in prefix + {"split:cheosgds2s3c", false}, // invalid character (o) in data part + {"split:te5peu7", false}, // too short data part + {":checkupstagehandshakeupstreamerranterredcaperred3za27wc5", false}, // empty prefix + {"::qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq40ku0e3z", false}, // too long + {"bitcoincash:qr6m7j9njldwwzlg9v7v53unlr4jkmx6eylep8ekg2", true}, + 
{"bchtest:pr6m7j9njldwwzlg9v7v53unlr4jkmx6eyvwc0uz5t", true}, + {"prefix:0r6m7j9njldwwzlg9v7v53unlr4jkmx6ey3qnjwsrf", true}, + } + + for _, test := range tests { + str := test.str + prefix, decoded, err := decode(str) + if !test.valid { + // Invalid string decoding should result in error. + if err == nil { + t.Errorf("expected decoding to fail for "+ + "invalid string %v", test.str) + } + continue + } + + // Valid string decoding should result in no error. + if err != nil { + t.Errorf("expected string to be valid bech32: %v", err) + } + + // Check that it encodes to the same string + encoded := encode(prefix, decoded) + if encoded != strings.ToLower(str) { + t.Errorf("expected data to encode to %v, but got %v", + str, encoded) + } + + // Flip a bit in the string an make sure it is caught. + pos := strings.LastIndexAny(str, "1") + flipped := str[:pos+1] + string((str[pos+1] ^ 1)) + str[pos+2:] + _, _, err = decode(flipped) + if err == nil { + t.Error("expected decoding to fail") + } + } +} + +func TestEncodeToBech32NotUInt5(t *testing.T) { + encoded := encodeToBase32([]byte("™")) + if encoded != "" { + t.Errorf("encodeToBase32 unexpectedly succeeded") + } +} diff --git a/util/binaryserializer/binaryserializer.go b/util/binaryserializer/binaryserializer.go new file mode 100644 index 0000000..7f9040f --- /dev/null +++ b/util/binaryserializer/binaryserializer.go @@ -0,0 +1,146 @@ +package binaryserializer + +import ( + "encoding/binary" + "io" + + "github.com/pkg/errors" +) + +// maxItems is the number of buffers to keep in the free +// list to use for binary serialization and deserialization. +const maxItems = 1024 + +// Borrow returns a byte slice from the free list with a length of 8. A new +// buffer is allocated if there are not any available on the free list. +func Borrow() []byte { + var buf []byte + select { + case buf = <-binaryFreeList: + default: + buf = make([]byte, 8) + } + return buf[:8] +} + +// Return puts the provided byte slice back on the free list. 
The buffer MUST
+// have been obtained via the Borrow function and therefore have a cap of 8.
+func Return(buf []byte) {
+	select {
+	case binaryFreeList <- buf:
+	default:
+		// Let it go to the garbage collector.
+	}
+}
+
+// Uint8 reads a single byte from the provided reader using a buffer from the
+// free list and returns it as a uint8.
+func Uint8(r io.Reader) (uint8, error) {
+	buf := Borrow()[:1]
+	if _, err := io.ReadFull(r, buf); err != nil {
+		Return(buf)
+		return 0, errors.WithStack(err)
+	}
+	rv := buf[0]
+	Return(buf)
+	return rv, nil
+}
+
+// Uint16 reads two bytes from the provided reader using a buffer from the
+// free list, converts it to a number using little-endian byte order, and
+// returns the resulting uint16.
+func Uint16(r io.Reader) (uint16, error) {
+	buf := Borrow()[:2]
+	if _, err := io.ReadFull(r, buf); err != nil {
+		Return(buf)
+		return 0, errors.WithStack(err)
+	}
+	rv := binary.LittleEndian.Uint16(buf)
+	Return(buf)
+	return rv, nil
+}
+
+// Uint32 reads four bytes from the provided reader using a buffer from the
+// free list, converts it to a number using little-endian byte order, and
+// returns the resulting uint32.
+func Uint32(r io.Reader) (uint32, error) {
+	buf := Borrow()[:4]
+	if _, err := io.ReadFull(r, buf); err != nil {
+		Return(buf)
+		return 0, errors.WithStack(err)
+	}
+	rv := binary.LittleEndian.Uint32(buf)
+	Return(buf)
+	return rv, nil
+}
+
+// Uint64 reads eight bytes from the provided reader using a buffer from the
+// free list, converts it to a number using little-endian byte order, and
+// returns the resulting uint64.
+func Uint64(r io.Reader) (uint64, error) {
+	buf := Borrow()[:8]
+	if _, err := io.ReadFull(r, buf); err != nil {
+		Return(buf)
+		return 0, errors.WithStack(err)
+	}
+	rv := binary.LittleEndian.Uint64(buf)
+	Return(buf)
+	return rv, nil
+}
+
+// PutUint8 copies the provided uint8 into a buffer from the free list and
+// writes the resulting byte to the given writer.
+func PutUint8(w io.Writer, val uint8) error {
+	buf := Borrow()[:1]
+	buf[0] = val
+	_, err := w.Write(buf)
+	Return(buf)
+	return errors.WithStack(err)
+}
+
+// PutUint16 serializes the provided uint16 in little-endian byte order into a
+// buffer from the free list and writes the resulting two bytes to the given
+// writer.
+func PutUint16(w io.Writer, val uint16) error {
+	buf := Borrow()[:2]
+	binary.LittleEndian.PutUint16(buf, val)
+	_, err := w.Write(buf)
+	Return(buf)
+	return errors.WithStack(err)
+}
+
+// PutUint32 serializes the provided uint32 in little-endian byte order into a
+// small stack buffer and writes the resulting four bytes to the given
+// writer.
+func PutUint32(w io.Writer, val uint32) error {
+	var buf [4]byte
+	binary.LittleEndian.PutUint32(buf[:], val)
+	_, err := w.Write(buf[:])
+	return errors.WithStack(err)
+}
+
+// PutUint64 serializes the provided uint64 in little-endian byte order into a
+// small stack buffer and writes the resulting eight bytes to the given
+// writer.
+func PutUint64(w io.Writer, val uint64) error {
+	var buf [8]byte
+	binary.LittleEndian.PutUint64(buf[:], val)
+	_, err := w.Write(buf[:])
+	return errors.WithStack(err)
+}
+
+// binaryFreeList provides a free list of buffers to use for serializing and
+// deserializing primitive integer values to and from io.Readers and io.Writers.
+//
+// It defines a concurrent safe free list of byte slices (up to the
+// maximum number defined by the maxItems constant) that have a
+// cap of 8 (thus it supports up to a uint64). It is used to provide temporary
+// buffers for serializing and deserializing primitive numbers to and from their
+// binary encoding in order to greatly reduce the number of allocations
+// required.
+// +// For convenience, functions are provided for each of the primitive unsigned +// integers that automatically obtain a buffer from the free list, perform the +// necessary binary conversion, read from or write to the given io.Reader or +// io.Writer, and return the buffer to the free list. +var binaryFreeList = make(chan []byte, maxItems) diff --git a/util/binaryserializer/binaryserializer_test.go b/util/binaryserializer/binaryserializer_test.go new file mode 100644 index 0000000..5b2a645 --- /dev/null +++ b/util/binaryserializer/binaryserializer_test.go @@ -0,0 +1,58 @@ +package binaryserializer + +import ( + "reflect" + "testing" + "unsafe" +) + +func TestBinaryFreeList(t *testing.T) { + + expectedCapacity := 8 + expectedLength := 8 + + first := Borrow() + if cap(first) != expectedCapacity { + t.Errorf("MsgTx.TestBinaryFreeList: Expected capacity for first %d, but got %d", + expectedCapacity, cap(first)) + } + if len(first) != expectedLength { + t.Errorf("MsgTx.TestBinaryFreeList: Expected length for first %d, but got %d", + expectedLength, len(first)) + } + Return(first) + + // Borrow again, and check that the underlying array is re-used for second + second := Borrow() + if cap(second) != expectedCapacity { + t.Errorf("TestBinaryFreeList: Expected capacity for second %d, but got %d", + expectedCapacity, cap(second)) + } + if len(second) != expectedLength { + t.Errorf("TestBinaryFreeList: Expected length for second %d, but got %d", + expectedLength, len(second)) + } + + firstArrayAddress := underlyingArrayAddress(first) + secondArrayAddress := underlyingArrayAddress(second) + + if firstArrayAddress != secondArrayAddress { + t.Errorf("First underlying array is at address %d and second at address %d, "+ + "which means memory was not re-used", firstArrayAddress, secondArrayAddress) + } + + Return(second) + + // test there's no crash when channel is full because borrowed too much + buffers := make([][]byte, maxItems+1) + for i := 0; i < maxItems+1; i++ { + 
buffers[i] = Borrow() + } + for i := 0; i < maxItems+1; i++ { + Return(buffers[i]) + } +} + +func underlyingArrayAddress(buf []byte) uint64 { + return uint64((*reflect.SliceHeader)(unsafe.Pointer(&buf)).Data) +} diff --git a/util/blake2b.go b/util/blake2b.go new file mode 100644 index 0000000..fbe54f1 --- /dev/null +++ b/util/blake2b.go @@ -0,0 +1,15 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package util + +import ( + "golang.org/x/crypto/blake2b" +) + +// HashBlake2b calculates the hash blake2b(b). +func HashBlake2b(buf []byte) []byte { + hashedBuf := blake2b.Sum256(buf) + return hashedBuf[:] +} diff --git a/util/difficulty/difficulty.go b/util/difficulty/difficulty.go new file mode 100644 index 0000000..1ac87ca --- /dev/null +++ b/util/difficulty/difficulty.go @@ -0,0 +1,181 @@ +package difficulty + +import ( + "math/big" + "time" +) + +var ( + // bigOne is 1 represented as a big.Int. It is defined here to avoid + // the overhead of creating it multiple times. + bigOne = big.NewInt(1) + + // oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid + // the overhead of creating it multiple times. + oneLsh256 = new(big.Int).Lsh(bigOne, 256) +) + +// CompactToBig converts a compact representation of a whole number N to an +// unsigned 32-bit number. The representation is similar to IEEE754 floating +// point numbers. +// +// Like IEEE754 floating point, there are three basic components: the sign, +// the exponent, and the mantissa. 
They are broken out as follows: +// +// - the most significant 8 bits represent the unsigned base 256 exponent +// +// - bit 23 (the 24th bit) represents the sign bit +// +// - the least significant 23 bits represent the mantissa +// +// ------------------------------------------------- +// | Exponent | Sign | Mantissa | +// ------------------------------------------------- +// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] | +// ------------------------------------------------- +// +// The formula to calculate N is: +// +// N = (-1^sign) * mantissa * 256^(exponent-3) +func CompactToBig(compact uint32) *big.Int { + destination := big.NewInt(0) + CompactToBigWithDestination(compact, destination) + return destination +} + +// CompactToBigWithDestination is a version of CompactToBig that +// takes a destination parameter. This is useful for saving memory, +// as then the destination big.Int can be reused. +// See CompactToBig for further details. +func CompactToBigWithDestination(compact uint32, destination *big.Int) { + // Extract the mantissa, sign bit, and exponent. + mantissa := compact & 0x007fffff + isNegative := compact&0x00800000 != 0 + exponent := uint(compact >> 24) + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes to represent the full 256-bit number. So, + // treat the exponent as the number of bytes and shift the mantissa + // right or left accordingly. This is equivalent to: + // N = mantissa * 256^(exponent-3) + if exponent <= 3 { + mantissa >>= 8 * (3 - exponent) + destination.SetInt64(int64(mantissa)) + } else { + destination.SetInt64(int64(mantissa)) + destination.Lsh(destination, 8*(exponent-3)) + } + + // Make it negative if the sign bit is set. + if isNegative { + destination.Neg(destination) + } +} + +// BigToCompact converts a whole number N to a compact representation using +// an unsigned 32-bit number. 
The compact representation only provides 23 bits +// of precision, so values larger than (2^23 - 1) only encode the most +// significant digits of the number. See CompactToBig for details. +func BigToCompact(n *big.Int) uint32 { + // No need to do any work if it's zero. + if n.Sign() == 0 { + return 0 + } + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes. So, shift the number right or left + // accordingly. This is equivalent to: + // mantissa = mantissa / 256^(exponent-3) + var mantissa uint32 + exponent := uint(len(n.Bytes())) + if exponent <= 3 { + mantissa = uint32(n.Bits()[0]) + mantissa <<= 8 * (3 - exponent) + } else { + // Use a copy to avoid modifying the caller's original number. + tn := new(big.Int).Set(n) + mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0]) + } + + // When the mantissa already has the sign bit set, the number is too + // large to fit into the available 23-bits, so divide the number by 256 + // and increment the exponent accordingly. + if mantissa&0x00800000 != 0 { + mantissa >>= 8 + exponent++ + } + + // Pack the exponent, sign bit, and mantissa into an unsigned 32-bit + // int and return it. + compact := uint32(exponent<<24) | mantissa + if n.Sign() < 0 { + compact |= 0x00800000 + } + return compact +} + +// CalcWork calculates a work value from difficulty bits. Spectre increases +// the difficulty for generating a block by decreasing the value which the +// generated hash must be less than. This difficulty target is stored in each +// block header using a compact representation as described in the documentation +// for CompactToBig. Since a lower target difficulty value equates to higher +// actual difficulty, the work value which will be accumulated must be the +// inverse of the difficulty. Also, in order to avoid potential division by +// zero and really small floating point numbers, the result adds 1 to the +// denominator and multiplies the numerator by 2^256. 
+func CalcWork(bits uint32) *big.Int { + // Return a work value of zero if the passed difficulty bits represent + // a negative number. Note this should not happen in practice with valid + // blocks, but an invalid block could trigger it. + difficultyNum := CompactToBig(bits) + if difficultyNum.Sign() <= 0 { + return big.NewInt(0) + } + + // (1 << 256) / (difficultyNum + 1) + denominator := new(big.Int).Add(difficultyNum, bigOne) + return new(big.Int).Div(oneLsh256, denominator) +} + +func getHashrate(target *big.Int, TargetTimePerBlock time.Duration) *big.Int { + // From: https://bitcoin.stackexchange.com/a/5557/40800 + // difficulty = hashrate / (2^256 / max_target / seconds_per_block) + // hashrate = difficulty * (2^256 / max_target / seconds_per_block) + // difficulty = max_target / target + // hashrate = (max_target / target) * (2^256 / max_target / seconds_per_block) + // hashrate = 2^256 / (target * seconds_per_block) + + tmp := new(big.Int) + divisor := new(big.Int).Set(target) + divisor.Mul(divisor, tmp.SetInt64(TargetTimePerBlock.Milliseconds())) + divisor.Div(divisor, tmp.SetInt64(int64(time.Second/time.Millisecond))) // Scale it up to seconds. + divisor.Div(oneLsh256, divisor) + return divisor +} + +// GetHashrateString returns the expected hashrate of the network on a certain difficulty target. +func GetHashrateString(target *big.Int, TargetTimePerBlock time.Duration) string { + hashrate := getHashrate(target, TargetTimePerBlock) + in := hashrate.Text(10) + var postfix string + switch { + case len(in) <= 3: + return in + " H/s" + case len(in) <= 6: + postfix = " KH/s" + case len(in) <= 9: + postfix = " MH/s" + case len(in) <= 12: + postfix = " GH/s" + case len(in) <= 15: + postfix = " TH/s" + case len(in) <= 18: + postfix = " PH/s" + case len(in) <= 21: + postfix = " EH/s" + default: + return in + " H/s" + } + highPrecision := len(in) - ((len(in)-1)/3)*3 + return in[:highPrecision] + "." 
+ in[highPrecision:highPrecision+2] + postfix +} diff --git a/util/difficulty/difficulty_test.go b/util/difficulty/difficulty_test.go new file mode 100644 index 0000000..06f10dc --- /dev/null +++ b/util/difficulty/difficulty_test.go @@ -0,0 +1,115 @@ +package difficulty_test + +import ( + "fmt" + "math" + "math/big" + "testing" + + "github.com/spectre-project/spectred/domain/consensus" + + "github.com/spectre-project/spectred/domain/consensus/utils/testutils" + "github.com/spectre-project/spectred/domain/dagconfig" + "github.com/spectre-project/spectred/util/difficulty" +) + +func TestGetHashrateString(t *testing.T) { + var results = map[string]string{ + dagconfig.MainnetParams.Name: "143.10 KH/s", + dagconfig.TestnetParams.Name: "131.15 KH/s", + dagconfig.DevnetParams.Name: "830 H/s", + dagconfig.SimnetParams.Name: "2.47 KH/s", + } + testutils.ForAllNets(t, false, func(t *testing.T, consensusConfig *consensus.Config) { + targetGenesis := difficulty.CompactToBig(consensusConfig.GenesisBlock.Header.Bits()) + hashrate := difficulty.GetHashrateString(targetGenesis, consensusConfig.TargetTimePerBlock) + if hashrate != results[consensusConfig.Name] { + t.Errorf("Expected %s, found %s", results[consensusConfig.Name], hashrate) + } + }) +} + +// TestBigToCompact ensures BigToCompact converts big integers to the expected +// compact representation. 
+func TestBigToCompact(t *testing.T) { + tests := []struct { + in string + out uint32 + }{ + {"0000000000000000000000000000000000000000000000000000000000000000", 0}, + {"-1", 25231360}, + {"9223372036854775807", 142606335}, + {"922337203685477580712312312123487", 237861256}, + {"128", 0x02008000}, + } + + for x, test := range tests { + n := new(big.Int) + n.SetString(test.in, 10) + r := difficulty.BigToCompact(n) + if r != test.out { + t.Errorf("TestBigToCompact test #%d failed: got %d want %d\n", + x, r, test.out) + return + } + } +} + +// TestCompactToBig ensures CompactToBig converts numbers using the compact +// representation to the expected big integers. +func TestCompactToBig(t *testing.T) { + tests := []struct { + before uint32 + intHex string + after uint32 + }{ + {math.MaxUint32, "-7fffff000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000", math.MaxUint32}, + {0x00000000, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x0989680, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x87fffff, "0000000000000000000000000000000000000000000000007fffff0000000000", 0x87fffff}, + {0x1810000, "-000000000000000000000000000000000000000000000000000000000000001", 0x1810000}, + {0xe2d7988, "0000000000000000000000000000000000002d79880000000000000000000000", 0xe2d7988}, + {0x00123456, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x01003456, 
"0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x02000056, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x03000000, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x04000000, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x00923456, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x01803456, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x02800056, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x03800000, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x04800000, "0000000000000000000000000000000000000000000000000000000000000000", 0x00000000}, + {0x01123456, "0000000000000000000000000000000000000000000000000000000000000012", 0x01120000}, + {0x02008000, "0000000000000000000000000000000000000000000000000000000000000080", 0x02008000}, + {0x01fedcba, "-00000000000000000000000000000000000000000000000000000000000007e", 0x01fe0000}, + {0x02123456, "0000000000000000000000000000000000000000000000000000000000001234", 0x02123400}, + {0x03123456, "0000000000000000000000000000000000000000000000000000000000123456", 0x03123456}, + {0x04123456, "0000000000000000000000000000000000000000000000000000000012345600", 0x04123456}, + {0x04923456, "-000000000000000000000000000000000000000000000000000000012345600", 0x04923456}, + {0x05009234, "0000000000000000000000000000000000000000000000000000000092340000", 0x05009234}, + {0x20123456, "1234560000000000000000000000000000000000000000000000000000000000", 0x20123456}, + {0xff123456, "123456000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + 
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", 0xff123456}, + } + + for i, test := range tests { + n := difficulty.CompactToBig(test.before) + convertBack := difficulty.BigToCompact(n) + got := fmt.Sprintf("%064x", n) + if got != test.intHex { + t.Errorf("TestCompactToBig test #%d failed: got %s want %s, input: 0x%08x", + i, got, test.intHex, test.before) + } + if convertBack != test.after { + t.Errorf("TestCompactToBig test #%d failed: got: 0x%08x want 0x%08x input: 0x%08x", i, convertBack, test.after, test.before) + } + } +} diff --git a/util/doc.go b/util/doc.go new file mode 100644 index 0000000..cef2be5 --- /dev/null +++ b/util/doc.go @@ -0,0 +1,37 @@ +/* +Package util provides spectre-specific convenience functions and types. + +# Block Overview + +A Block defines a spectre block that provides easier and more efficient +manipulation of raw blocks. It also memoizes hashes for the +block and its transactions on their first access so subsequent accesses don't +have to repeat the relatively expensive hashing operations. + +# Tx Overview + +A Tx defines a spectre transaction that provides more efficient manipulation of +raw transactions. It memoizes the hash for the transaction on its +first access so subsequent accesses don't have to repeat the relatively +expensive hashing operations. + +# Address Overview + +The Address interface provides an abstraction for a spectre address. While the +most common type is a pay-to-pubkey, spectre already supports others and +may well support more in the future. This package currently provides +implementations for the pay-to-pubkey, and pay-to-script-hash address +types. 
+ +To decode/encode an address: + + addrString := "spectre:qqj9fg59mptxkr9j0y53j5mwurcmda5mtza9n6v9pm9uj8h0wgk6uma5pvumr" + defaultPrefix := util.Bech32PrefixSpectre + addr, err := util.DecodeAddress(addrString, defaultPrefix) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(addr.EncodeAddress()) +*/ +package util diff --git a/util/example_test.go b/util/example_test.go new file mode 100644 index 0000000..8d07c29 --- /dev/null +++ b/util/example_test.go @@ -0,0 +1,112 @@ +package util_test + +import ( + "fmt" + "math" + "math/big" + + "github.com/spectre-project/spectred/util/difficulty" + + "github.com/spectre-project/spectred/util" +) + +func ExampleAmount() { + + a := util.Amount(0) + fmt.Println("Zero Sompi:", a) + + a = util.Amount(1e8) + fmt.Println("100,000,000 Sompi:", a) + + a = util.Amount(1e5) + fmt.Println("100,000 Sompi:", a) + // Output: + // Zero Sompi: 0 SPR + // 100,000,000 Sompi: 1 SPR + // 100,000 Sompi: 0.001 SPR +} + +func ExampleNewAmount() { + amountOne, err := util.NewAmount(1) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(amountOne) //Output 1 + + amountFraction, err := util.NewAmount(0.01234567) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(amountFraction) //Output 2 + + amountZero, err := util.NewAmount(0) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(amountZero) //Output 3 + + amountNaN, err := util.NewAmount(math.NaN()) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(amountNaN) //Output 4 + + // Output: 1 SPR + // 0.01234567 SPR + // 0 SPR + // invalid spectre amount +} + +func ExampleAmount_unitConversions() { + amount := util.Amount(44433322211100) + + fmt.Println("Sompi to kSPR:", amount.Format(util.AmountKiloSPR)) + fmt.Println("Sompi to SPR:", amount) + fmt.Println("Sompi to MilliSPR:", amount.Format(util.AmountMilliSPR)) + fmt.Println("Sompi to MicroSPR:", amount.Format(util.AmountMicroSPR)) + fmt.Println("Sompi to Sompi:", 
amount.Format(util.AmountSompi)) + + // Output: + // Sompi to kSPR: 444.333222111 kSPR + // Sompi to SPR: 444333.222111 SPR + // Sompi to MilliSPR: 444333222.111 mSPR + // Sompi to MicroSPR: 444333222111 μSPR + // Sompi to Sompi: 44433322211100 Sompi +} + +// This example demonstrates how to convert the compact "bits" in a block header +// which represent the target difficulty to a big integer and display it using +// the typical hex notation. +func ExampleCompactToBig() { + bits := uint32(419465580) + targetDifficulty := difficulty.CompactToBig(bits) + + // Display it in hex. + fmt.Printf("%064x\n", targetDifficulty.Bytes()) + + // Output: + // 0000000000000000896c00000000000000000000000000000000000000000000 +} + +// This example demonstrates how to convert a target difficulty into the compact +// "bits" in a block header which represent that target difficulty . +func ExampleBigToCompact() { + // Convert the target difficulty from block 300000 in the bitcoin + // main chain to compact form. + t := "0000000000000000896c00000000000000000000000000000000000000000000" + targetDifficulty, success := new(big.Int).SetString(t, 16) + if !success { + fmt.Println("invalid target difficulty") + return + } + bits := difficulty.BigToCompact(targetDifficulty) + + fmt.Println(bits) + + // Output: + // 419465580 +} diff --git a/util/internal_test.go b/util/internal_test.go new file mode 100644 index 0000000..2d5576c --- /dev/null +++ b/util/internal_test.go @@ -0,0 +1,68 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +This test file is part of the util package rather than than the +util_test package so it can bridge access to the internals to properly test +cases which are either not possible or can't reliably be tested via the public +interface. The functions are only exported while the tests are being run. 
+*/
+
+package util
+
+import (
+	"github.com/spectre-project/spectred/util/bech32"
+	"golang.org/x/crypto/blake2b"
+)
+
+// TstAppDataDir makes the internal appDir function available to the test
+// package.
+func TstAppDataDir(goos, appName string, roaming bool) string {
+	return appDir(goos, appName, roaming)
+}
+
+func TstAddressPubKey(prefix Bech32Prefix, hash [PublicKeySize]byte) *AddressPublicKey {
+	return &AddressPublicKey{
+		prefix:    prefix,
+		publicKey: hash,
+	}
+}
+
+func TstAddressPubKeyECDSA(prefix Bech32Prefix, hash [PublicKeySizeECDSA]byte) *AddressPublicKeyECDSA {
+	return &AddressPublicKeyECDSA{
+		prefix:    prefix,
+		publicKey: hash,
+	}
+}
+
+// TstAddressScriptHash makes an AddressScriptHash, setting the
+// unexported fields with the parameters hash and prefix.
+func TstAddressScriptHash(prefix Bech32Prefix, hash [blake2b.Size256]byte) *AddressScriptHash {
+
+	return &AddressScriptHash{
+		prefix: prefix,
+		hash:   hash,
+	}
+}
+
+// TstAddressSAddrP2PK returns the expected script address bytes for
+// P2PK spectre addresses.
+func TstAddressSAddrP2PK(addr string) []byte {
+	_, decoded, _, _ := bech32.Decode(addr)
+	return decoded[:PublicKeySize]
+}
+
+// TstAddressSAddrP2PKECDSA returns the expected script address bytes for
+// ECDSA P2PK spectre addresses.
+func TstAddressSAddrP2PKECDSA(addr string) []byte {
+	_, decoded, _, _ := bech32.Decode(addr)
+	return decoded[:PublicKeySizeECDSA]
+}
+
+// TstAddressSAddrP2SH returns the expected script address bytes for
+// P2SH spectre addresses.
+func TstAddressSAddrP2SH(addr string) []byte { + _, decoded, _, _ := bech32.Decode(addr) + return decoded[:blake2b.Size256] +} diff --git a/util/locks/log.go b/util/locks/log.go new file mode 100644 index 0000000..3884303 --- /dev/null +++ b/util/locks/log.go @@ -0,0 +1,7 @@ +package locks + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var log = logger.RegisterSubSystem("UTIL") diff --git a/util/math/min.go b/util/math/min.go new file mode 100644 index 0000000..edbeac1 --- /dev/null +++ b/util/math/min.go @@ -0,0 +1,25 @@ +package math + +// MinInt returns the smaller of x or y. +func MinInt(x, y int) int { + if x < y { + return x + } + return y +} + +// MaxInt64 returns the bigger of x or y. +func MaxInt64(x, y int64) int64 { + if x > y { + return x + } + return y +} + +// MinUint32 returns the smaller of x or y. +func MinUint32(x, y uint32) uint32 { + if x < y { + return x + } + return y +} diff --git a/util/math/min_test.go b/util/math/min_test.go new file mode 100644 index 0000000..1765b8c --- /dev/null +++ b/util/math/min_test.go @@ -0,0 +1,65 @@ +package math_test + +import ( + "math" + "testing" + + utilMath "github.com/spectre-project/spectred/util/math" +) + +const ( + MaxInt = int(^uint(0) >> 1) + MinInt = -MaxInt - 1 +) + +func TestMinInt(t *testing.T) { + tests := []struct { + inputs [2]int + expected int + }{ + {[2]int{MaxInt, 0}, 0}, + {[2]int{1, 2}, 1}, + {[2]int{MaxInt, MaxInt}, MaxInt}, + {[2]int{MaxInt, MaxInt - 1}, MaxInt - 1}, + {[2]int{MaxInt, MinInt}, MinInt}, + {[2]int{MinInt, 0}, MinInt}, + {[2]int{MinInt, MinInt}, MinInt}, + {[2]int{0, MinInt + 1}, MinInt + 1}, + {[2]int{0, MinInt}, MinInt}, + } + + for i, test := range tests { + result := utilMath.MinInt(test.inputs[0], test.inputs[1]) + if result != test.expected { + t.Fatalf("%d: Expected %d, instead found: %d", i, test.expected, result) + } + reverseResult := utilMath.MinInt(test.inputs[1], test.inputs[0]) + if result != reverseResult { + t.Fatalf("%d: 
Expected result and reverseResult to be the same, instead: %d!=%d", i, result, reverseResult) + } + } +} + +func TestMinUint32(t *testing.T) { + tests := []struct { + inputs [2]uint32 + expected uint32 + }{ + {[2]uint32{math.MaxUint32, 0}, 0}, + {[2]uint32{1, 2}, 1}, + {[2]uint32{math.MaxUint32, math.MaxUint32}, math.MaxUint32}, + {[2]uint32{math.MaxUint32, math.MaxUint32 - 1}, math.MaxUint32 - 1}, + } + + for _, test := range tests { + result := utilMath.MinUint32(test.inputs[0], test.inputs[1]) + if result != test.expected { + t.Fatalf("Expected %d, instead found: %d", test.expected, result) + + } + reverseResult := utilMath.MinUint32(test.inputs[1], test.inputs[0]) + if result != reverseResult { + t.Fatalf("Expected result and reverseResult to be the same, instead: %d!=%d", result, reverseResult) + } + } +} diff --git a/util/mstime/mstime.go b/util/mstime/mstime.go new file mode 100644 index 0000000..2da74aa --- /dev/null +++ b/util/mstime/mstime.go @@ -0,0 +1,122 @@ +package mstime + +import ( + "github.com/pkg/errors" + "time" +) + +const ( + nanosecondsInMillisecond = int64(time.Millisecond / time.Nanosecond) + millisecondsInSecond = int64(time.Second / time.Millisecond) +) + +// Time is a wrapper for time.Time that guarantees all of its methods will return a millisecond precisioned times. +type Time struct { + time time.Time +} + +// UnixMilliseconds returns t as a Unix time, the number of milliseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixMilliseconds() int64 { + return t.time.UnixNano() / nanosecondsInMillisecond +} + +// UnixSeconds returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixSeconds() int64 { + return t.time.Unix() +} + +// String returns the time formatted using the format string +// +// "2006-01-02 15:04:05.999999999 -0700 MST" +func (t Time) String() string { + return t.time.String() +} + +// Clock returns the hour, minute, and second within the day specified by t. 
+func (t Time) Clock() (hour, min, sec int) { + return t.time.Clock() +} + +// Millisecond returns the millisecond offset within the second specified by t, +// in the range [0, 999]. +func (t Time) Millisecond() int { + return t.time.Nanosecond() / int(nanosecondsInMillisecond) +} + +// Date returns the year, month, and day in which t occurs. +func (t Time) Date() (year int, month time.Month, day int) { + return t.time.Date() +} + +// After reports whether the time instant t is after u. +func (t Time) After(u Time) bool { + return t.time.After(u.time) +} + +// Before reports whether the time instant t is before u. +func (t Time) Before(u Time) bool { + return t.time.Before(u.time) +} + +// Add returns the time t+d. +// It panics if d has a precision greater than one millisecond (the duration has a non zero microseconds part). +func (t Time) Add(d time.Duration) Time { + validateDurationPrecision(d) + return newMSTime(t.time.Add(d)) +} + +// Sub returns the duration t-u. If the result exceeds the maximum (or minimum) +// value that can be stored in a Duration, the maximum (or minimum) duration +// will be returned. +// To compute t-d for a duration d, use t.Add(-d). +func (t Time) Sub(u Time) time.Duration { + return t.time.Sub(u.time) +} + +// IsZero reports whether t represents the zero time instant, +// January 1, year 1, 00:00:00 UTC. +func (t Time) IsZero() bool { + return t.time.IsZero() +} + +// ToNativeTime converts t to time.Time +func (t Time) ToNativeTime() time.Time { + return t.time +} + +// Now returns the current local time, with precision of one millisecond. +func Now() Time { + return ToMSTime(time.Now()) +} + +// UnixMilliseconds returns the local Time corresponding to the given Unix time, +// ms milliseconds since January 1, 1970 UTC. 
+func UnixMilliseconds(ms int64) Time { + seconds := ms / millisecondsInSecond + nanoseconds := (ms - seconds*millisecondsInSecond) * nanosecondsInMillisecond + return newMSTime(time.Unix(ms/millisecondsInSecond, nanoseconds)) +} + +// Since returns the time elapsed since t. +// It is shorthand for Now().Sub(t). +func Since(t Time) time.Duration { + return Now().Sub(t) +} + +// ToMSTime converts t to Time. +// See Time for details. +func ToMSTime(t time.Time) Time { + return newMSTime(t.Round(time.Millisecond)) +} + +func newMSTime(t time.Time) Time { + return Time{time: t} +} + +func validateDurationPrecision(d time.Duration) { + if d.Nanoseconds()%nanosecondsInMillisecond != 0 { + panic(errors.Errorf("duration %s has lower precision than millisecond", d)) + } +} diff --git a/util/mstime/mstime_test.go b/util/mstime/mstime_test.go new file mode 100644 index 0000000..cfd5822 --- /dev/null +++ b/util/mstime/mstime_test.go @@ -0,0 +1,74 @@ +package mstime + +import ( + "testing" + "time" +) + +func TestToMSTime(t *testing.T) { + nativeTime1 := time.Unix(100, 5e6+800) + if wantNano, gotNano := int64(100e9+5e6), ToMSTime(nativeTime1).time.UnixNano(); gotNano != wantNano { + t.Fatalf("expected UnixNano %d but got %d", wantNano, gotNano) + } + + nativeTime2 := time.Unix(500, 8e6) + if wantNano, gotNano := int64(500e9+8e6), ToMSTime(nativeTime2).time.UnixNano(); gotNano != wantNano { + t.Fatalf("expected UnixNano %d but got %d", wantNano, gotNano) + } +} + +func TestNow(t *testing.T) { + if Now().time.UnixNano()%1e6 != 0 { + t.Fatalf("Now() has higher precision than one millisecond") + } +} + +func TestAdd(t *testing.T) { + tests := []struct { + unixMilli int64 + duration time.Duration + expectsPanics bool + expectedUnixMilli int64 + }{ + { + unixMilli: 100, + duration: time.Nanosecond, + expectsPanics: true, + }, + { + unixMilli: 100, + duration: time.Second + time.Nanosecond, + expectsPanics: true, + expectedUnixMilli: 1100, + }, + { + unixMilli: 100, + duration: 
time.Second, + expectsPanics: false, + expectedUnixMilli: 1100, + }, + } + for i, test := range tests { + func() { + defer func() { + r := recover() + if test.expectsPanics && r == nil { + t.Fatalf("test #%d didn't panic when it was expected to", i) + } + if !test.expectsPanics && r != nil { + t.Fatalf("test #%d panicked when it was not expected to", i) + } + }() + mtime := UnixMilliseconds(100).Add(test.duration) + if mtime.UnixMilliseconds() != test.expectedUnixMilli { + t.Fatalf("test #%d expected UnixMilliseconds to be %d but got %d", i, test.expectedUnixMilli, mtime.UnixMilliseconds()) + } + }() + } + defer func() { + if r := recover(); r == nil { + t.Fatalf("Add didn't panic when ") + } + }() + UnixMilliseconds(100).Add(time.Nanosecond) +} diff --git a/util/network/network.go b/util/network/network.go new file mode 100644 index 0000000..bc7c61d --- /dev/null +++ b/util/network/network.go @@ -0,0 +1,52 @@ +package network + +import ( + "net" +) + +// NormalizeAddresses returns a new slice with all the passed peer addresses +// normalized with the given default port, and all duplicates removed. +func NormalizeAddresses(addrs []string, defaultPort string) ([]string, error) { + for i, addr := range addrs { + var err error + addrs[i], err = NormalizeAddress(addr, defaultPort) + if err != nil { + return nil, err + } + } + + return removeDuplicateAddresses(addrs), nil +} + +// NormalizeAddress returns addr with the passed default port appended if +// there is not already a port specified. +func NormalizeAddress(addr, defaultPort string) (string, error) { + _, _, err := net.SplitHostPort(addr) + // net.SplitHostPort returns an error if the given host is missing a + // port, but theoretically it can return an error for other reasons, + // and this is why we check addrWithPort for validity. 
+ if err != nil { + addrWithPort := net.JoinHostPort(addr, defaultPort) + _, _, err := net.SplitHostPort(addrWithPort) + if err != nil { + return "", err + } + + return addrWithPort, nil + } + return addr, nil +} + +// removeDuplicateAddresses returns a new slice with all duplicate entries in +// addrs removed. +func removeDuplicateAddresses(addrs []string) []string { + result := make([]string, 0, len(addrs)) + seen := map[string]struct{}{} + for _, val := range addrs { + if _, ok := seen[val]; !ok { + result = append(result, val) + seen[val] = struct{}{} + } + } + return result +} diff --git a/util/network/parse_listeners.go b/util/network/parse_listeners.go new file mode 100644 index 0000000..ab798c3 --- /dev/null +++ b/util/network/parse_listeners.go @@ -0,0 +1,82 @@ +package network + +import ( + "net" + "runtime" + "strings" + + "github.com/pkg/errors" +) + +// simpleAddr implements the net.Addr interface with two struct fields +type simpleAddr struct { + net, addr string +} + +// String returns the address. +// +// This is part of the net.Addr interface. +func (a simpleAddr) String() string { + return a.addr +} + +// Network returns the network. +// +// This is part of the net.Addr interface. +func (a simpleAddr) Network() string { + return a.net +} + +// Ensure simpleAddr implements the net.Addr interface. +var _ net.Addr = simpleAddr{} + +// ParseListeners determines whether each listen address is IPv4 and IPv6 and +// returns a slice of appropriate net.Addrs to listen on with TCP. It also +// properly detects addresses which apply to "all interfaces" and adds the +// address as both IPv4 and IPv6. +func ParseListeners(addrs []string) ([]net.Addr, error) { + netAddrs := make([]net.Addr, 0, len(addrs)*2) + for _, addr := range addrs { + host, _, err := net.SplitHostPort(addr) + if err != nil { + // Shouldn't happen due to already being normalized. + return nil, err + } + + // Empty host or host of * on plan9 is both IPv4 and IPv6. 
+ if host == "" || (host == "*" && runtime.GOOS == "plan9") { + netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr}) + netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr}) + continue + } + + // Strip IPv6 zone id if present since net.ParseIP does not + // handle it. + zoneIndex := strings.LastIndex(host, "%") + if zoneIndex > 0 { + host = host[:zoneIndex] + } + + // Parse the IP. + ip := net.ParseIP(host) + if ip == nil { + hostAddrs, err := net.LookupHost(host) + if err != nil { + return nil, err + } + ip = net.ParseIP(hostAddrs[0]) + if ip == nil { + return nil, errors.Errorf("Cannot resolve IP address for host '%s'", host) + } + } + + // To4 returns nil when the IP is not an IPv4 address, so use + // this determine the address type. + if ip.To4() == nil { + netAddrs = append(netAddrs, simpleAddr{net: "tcp6", addr: addr}) + } else { + netAddrs = append(netAddrs, simpleAddr{net: "tcp4", addr: addr}) + } + } + return netAddrs, nil +} diff --git a/util/panics/log.go b/util/panics/log.go new file mode 100644 index 0000000..85b3e59 --- /dev/null +++ b/util/panics/log.go @@ -0,0 +1,7 @@ +package panics + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var utilLog = logger.RegisterSubSystem("UTIL") diff --git a/util/panics/panics.go b/util/panics/panics.go new file mode 100644 index 0000000..58c73d0 --- /dev/null +++ b/util/panics/panics.go @@ -0,0 +1,85 @@ +package panics + +import ( + "fmt" + "os" + "runtime/debug" + "sync/atomic" + "time" + + "github.com/spectre-project/spectred/infrastructure/logger" +) + +const exitHandlerTimeout = 5 * time.Second + +// HandlePanic recovers panics and then initiates a clean shutdown. 
+func HandlePanic(log *logger.Logger, goroutineName string, goroutineStackTrace []byte) { + err := recover() + if err == nil { + return + } + + reason := fmt.Sprintf("Fatal error in goroutine `%s`: %+v", goroutineName, err) + exit(log, reason, debug.Stack(), goroutineStackTrace) +} + +var goroutineLastID uint64 + +// GoroutineWrapperFunc returns a goroutine wrapper function that handles panics and writes them to the log. +func GoroutineWrapperFunc(log *logger.Logger) func(name string, spawnedFunction func()) { + return func(name string, f func()) { + stackTrace := debug.Stack() + go func() { + handleSpawnedFunction(log, stackTrace, name, f) + }() + } +} + +// AfterFuncWrapperFunc returns a time.AfterFunc wrapper function that handles panics. +func AfterFuncWrapperFunc(log *logger.Logger) func(name string, d time.Duration, f func()) *time.Timer { + return func(name string, d time.Duration, f func()) *time.Timer { + stackTrace := debug.Stack() + return time.AfterFunc(d, func() { + handleSpawnedFunction(log, stackTrace, name, f) + }) + } +} + +// Exit prints the given reason to log and initiates a clean shutdown. +func Exit(log *logger.Logger, reason string) { + exit(log, reason, nil, nil) +} + +// exit prints the given reason, prints either of the given stack traces (if not nil), +// waits for them to finish writing, and exits. 
+func exit(log *logger.Logger, reason string, currentThreadStackTrace []byte, goroutineStackTrace []byte) { + exitHandlerDone := make(chan struct{}) + go func() { + log.Criticalf("Exiting: %s", reason) + if goroutineStackTrace != nil { + log.Criticalf("Goroutine stack trace: %s", goroutineStackTrace) + } + if currentThreadStackTrace != nil { + log.Criticalf("Stack trace: %s", currentThreadStackTrace) + } + log.Backend().Close() + close(exitHandlerDone) + }() + + select { + case <-time.After(exitHandlerTimeout): + fmt.Fprintln(os.Stderr, "Couldn't exit gracefully.") + case <-exitHandlerDone: + } + fmt.Println("Exiting...") + os.Exit(1) +} + +func handleSpawnedFunction(log *logger.Logger, stackTrace []byte, spawnedFunctionName string, spawnedFunction func()) { + goroutineID := atomic.AddUint64(&goroutineLastID, 1) + goroutineName := fmt.Sprintf("%s %d", spawnedFunctionName, goroutineID) + utilLog.Tracef("Started goroutine `%s`", goroutineName) + defer utilLog.Tracef("Ended goroutine `%s`", goroutineName) + defer HandlePanic(log, goroutineName, stackTrace) + spawnedFunction() +} diff --git a/util/profiling/profiling.go b/util/profiling/profiling.go new file mode 100644 index 0000000..1a6580b --- /dev/null +++ b/util/profiling/profiling.go @@ -0,0 +1,79 @@ +package profiling + +import ( + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/spectre-project/spectred/infrastructure/logger" + + // Required for profiling + _ "net/http/pprof" + + "runtime" + "runtime/pprof" + + "github.com/spectre-project/spectred/util/panics" +) + +// heapDumpFileName is the name of the heap dump file. We want every run to have its own +// file, so we append the timestamp of the program launch time to the file name (note the +// custom format for compliance with file name rules on all OSes). 
+var heapDumpFileName = fmt.Sprintf("heap-%s.pprof", time.Now().Format("01-02-2006T15.04.05")) + +// Start starts the profiling server +func Start(port string, log *logger.Logger) { + spawn := panics.GoroutineWrapperFunc(log) + spawn("profiling.Start", func() { + listenAddr := net.JoinHostPort("", port) + log.Infof("Profile server listening on %s", listenAddr) + profileRedirect := http.RedirectHandler("/debug/pprof", http.StatusSeeOther) + http.Handle("/", profileRedirect) + log.Error(http.ListenAndServe(listenAddr, nil)) + }) +} + +// TrackHeap tracks the size of the heap and dumps a profile if it passes a limit +func TrackHeap(appDir string, log *logger.Logger) { + spawn := panics.GoroutineWrapperFunc(log) + spawn("profiling.TrackHeap", func() { + dumpFolder := filepath.Join(appDir, "dumps") + err := os.MkdirAll(dumpFolder, 0700) + if err != nil { + log.Errorf("Could not create heap dumps folder at %s", dumpFolder) + return + } + const limitInGigabytes = 7 // We want to support 8 GB RAM, so we profile at 7 + trackHeapSize(limitInGigabytes*1024*1024*1024, dumpFolder, log) + }) +} + +func trackHeapSize(heapLimit uint64, dumpFolder string, log *logger.Logger) { + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for range ticker.C { + memStats := &runtime.MemStats{} + runtime.ReadMemStats(memStats) + // If we passed the expected heap limit, dump the heap profile to a file + if memStats.HeapAlloc > heapLimit { + dumpHeapProfile(heapLimit, dumpFolder, memStats, log) + } + } +} + +func dumpHeapProfile(heapLimit uint64, dumpFolder string, memStats *runtime.MemStats, log *logger.Logger) { + heapFile := filepath.Join(dumpFolder, heapDumpFileName) + log.Infof("Saving heap statistics into %s (HeapAlloc=%d > %d=heapLimit)", heapFile, memStats.HeapAlloc, heapLimit) + f, err := os.Create(heapFile) + defer f.Close() + if err != nil { + log.Infof("Could not create heap profile: %s", err) + return + } + if err := pprof.WriteHeapProfile(f); err != nil { + 
log.Infof("Could not write heap profile: %s", err) + } +} diff --git a/util/random/random.go b/util/random/random.go new file mode 100644 index 0000000..78d3cf9 --- /dev/null +++ b/util/random/random.go @@ -0,0 +1,16 @@ +package random + +import ( + "crypto/rand" + "encoding/binary" +) + +// Uint64 returns a cryptographically random uint64 value. +func Uint64() (uint64, error) { + var buf [8]byte + _, err := rand.Read(buf[:]) + if err != nil { + return 0, err + } + return binary.LittleEndian.Uint64(buf[:]), nil +} diff --git a/util/random/random_test.go b/util/random/random_test.go new file mode 100644 index 0000000..db1d6cd --- /dev/null +++ b/util/random/random_test.go @@ -0,0 +1,76 @@ +package random + +import ( + "crypto/rand" + "fmt" + "github.com/pkg/errors" + "io" + "testing" +) + +// fakeRandReader implements the io.Reader interface and is used to force +// errors in the RandomUint64 function. +type fakeRandReader struct { + n int + err error +} + +// Read returns the fake reader error and the lesser of the fake reader value +// and the length of p. +func (r *fakeRandReader) Read(p []byte) (int, error) { + n := r.n + if n > len(p) { + n = len(p) + } + return n, r.err +} + +// TestRandomUint64 exercises the randomness of the random number generator on +// the system by ensuring the probability of the generated numbers. If the RNG +// is evenly distributed as a proper cryptographic RNG should be, there really +// should only be 1 number < 2^56 in 2^8 tries for a 64-bit number. However, +// use a higher number of 5 to really ensure the test doesn't fail unless the +// RNG is just horrendous. 
+func TestRandomUint64(t *testing.T) { + tries := 1 << 8 // 2^8 + watermark := uint64(1 << 56) // 2^56 + maxHits := 5 + + numHits := 0 + for i := 0; i < tries; i++ { + nonce, err := Uint64() + if err != nil { + t.Errorf("RandomUint64 iteration %d failed - err %v", + i, err) + return + } + if nonce < watermark { + numHits++ + } + if numHits > maxHits { + str := fmt.Sprintf("The random number generator on this system is clearly "+ + "terrible since we got %d values less than %d in %d runs "+ + "when only %d was expected", numHits, watermark, tries, maxHits) + t.Errorf("Random Uint64 iteration %d failed - %v %v", i, + str, numHits) + return + } + } +} + +// TestRandomUint64Errors uses a fake reader to force error paths to be executed +// and checks the results accordingly. +func TestRandomUint64Errors(t *testing.T) { + // Test short reads. + reader := rand.Reader + rand.Reader = &fakeRandReader{n: 2, err: io.EOF} + nonce, err := Uint64() + if !errors.Is(err, io.ErrUnexpectedEOF) { + t.Errorf("Error not expected value of %v [%v]", + io.ErrUnexpectedEOF, err) + } + if nonce != 0 { + t.Errorf("Nonce is not 0 [%v]", nonce) + } + rand.Reader = reader +} diff --git a/util/staging/commit_all_changes.go b/util/staging/commit_all_changes.go new file mode 100644 index 0000000..d3b30ac --- /dev/null +++ b/util/staging/commit_all_changes.go @@ -0,0 +1,33 @@ +package staging + +import ( + "sync/atomic" + + "github.com/spectre-project/spectred/domain/consensus/model" + "github.com/spectre-project/spectred/infrastructure/logger" +) + +// CommitAllChanges creates a transaction in `databaseContext`, and commits all changes in `stagingArea` through it. 
+func CommitAllChanges(databaseContext model.DBManager, stagingArea *model.StagingArea) error { + onEnd := logger.LogAndMeasureExecutionTime(utilLog, "commitAllChanges") + defer onEnd() + + dbTx, err := databaseContext.Begin() + if err != nil { + return err + } + + err = stagingArea.Commit(dbTx) + if err != nil { + return err + } + + return dbTx.Commit() +} + +var lastShardingID uint64 + +// GenerateShardingID generates a unique staging sharding ID. +func GenerateShardingID() model.StagingShardID { + return model.StagingShardID(atomic.AddUint64(&lastShardingID, 1)) +} diff --git a/util/staging/log.go b/util/staging/log.go new file mode 100644 index 0000000..ebf2e2d --- /dev/null +++ b/util/staging/log.go @@ -0,0 +1,7 @@ +package staging + +import ( + "github.com/spectre-project/spectred/infrastructure/logger" +) + +var utilLog = logger.RegisterSubSystem("UTIL") diff --git a/util/txmass/calculator.go b/util/txmass/calculator.go new file mode 100644 index 0000000..dd6d9fc --- /dev/null +++ b/util/txmass/calculator.go @@ -0,0 +1,119 @@ +package txmass + +import ( + "github.com/spectre-project/spectred/domain/consensus/model/externalapi" + "github.com/spectre-project/spectred/domain/consensus/utils/transactionhelper" +) + +// Calculator exposes methods to calculate the mass of a transaction +type Calculator struct { + massPerTxByte uint64 + massPerScriptPubKeyByte uint64 + massPerSigOp uint64 +} + +// NewCalculator creates a new instance of Calculator +func NewCalculator(massPerTxByte, massPerScriptPubKeyByte, massPerSigOp uint64) *Calculator { + return &Calculator{ + massPerTxByte: massPerTxByte, + massPerScriptPubKeyByte: massPerScriptPubKeyByte, + massPerSigOp: massPerSigOp, + } +} + +// MassPerTxByte returns the mass per transaction byte configured for this Calculator +func (c *Calculator) MassPerTxByte() uint64 { return c.massPerTxByte } + +// MassPerScriptPubKeyByte returns the mass per ScriptPublicKey byte configured for this Calculator +func (c *Calculator) 
MassPerScriptPubKeyByte() uint64 { return c.massPerScriptPubKeyByte } + +// MassPerSigOp returns the mass per SigOp byte configured for this Calculator +func (c *Calculator) MassPerSigOp() uint64 { return c.massPerSigOp } + +// CalculateTransactionMass calculates the mass of the given transaction +func (c *Calculator) CalculateTransactionMass(transaction *externalapi.DomainTransaction) uint64 { + if transactionhelper.IsCoinBase(transaction) { + return 0 + } + + // calculate mass for size + size := transactionEstimatedSerializedSize(transaction) + massForSize := size * c.massPerTxByte + + // calculate mass for scriptPubKey + totalScriptPubKeySize := uint64(0) + for _, output := range transaction.Outputs { + totalScriptPubKeySize += 2 //output.ScriptPublicKey.Version (uint16) + totalScriptPubKeySize += uint64(len(output.ScriptPublicKey.Script)) + } + massForScriptPubKey := totalScriptPubKeySize * c.massPerScriptPubKeyByte + + // calculate mass for SigOps + totalSigOpCount := uint64(0) + for _, input := range transaction.Inputs { + totalSigOpCount += uint64(input.SigOpCount) + } + massForSigOps := totalSigOpCount * c.massPerSigOp + + // Sum all components of mass + return massForSize + massForScriptPubKey + massForSigOps +} + +// transactionEstimatedSerializedSize is the estimated size of a transaction in some +// serialization. This has to be deterministic, but not necessarily accurate, since +// it's only used as the size component in the transaction and block mass limit +// calculation. 
+func transactionEstimatedSerializedSize(tx *externalapi.DomainTransaction) uint64 { + if transactionhelper.IsCoinBase(tx) { + return 0 + } + size := uint64(0) + size += 2 // Txn Version + size += 8 // number of inputs (uint64) + for _, input := range tx.Inputs { + size += transactionInputEstimatedSerializedSize(input) + } + + size += 8 // number of outputs (uint64) + for _, output := range tx.Outputs { + size += TransactionOutputEstimatedSerializedSize(output) + } + + size += 8 // lock time (uint64) + size += externalapi.DomainSubnetworkIDSize + size += 8 // gas (uint64) + size += externalapi.DomainHashSize // payload hash + + size += 8 // length of the payload (uint64) + size += uint64(len(tx.Payload)) + + return size +} + +func transactionInputEstimatedSerializedSize(input *externalapi.DomainTransactionInput) uint64 { + size := uint64(0) + size += outpointEstimatedSerializedSize() + + size += 8 // length of signature script (uint64) + size += uint64(len(input.SignatureScript)) + + size += 8 // sequence (uint64) + return size +} + +func outpointEstimatedSerializedSize() uint64 { + size := uint64(0) + size += externalapi.DomainHashSize // ID + size += 4 // index (uint32) + return size +} + +// TransactionOutputEstimatedSerializedSize is the same as transactionEstimatedSerializedSize but for outputs only +func TransactionOutputEstimatedSerializedSize(output *externalapi.DomainTransactionOutput) uint64 { + size := uint64(0) + size += 8 // value (uint64) + size += 2 // output.ScriptPublicKey.Version (uint 16) + size += 8 // length of script public key (uint64) + size += uint64(len(output.ScriptPublicKey.Script)) + return size +} diff --git a/version/version.go b/version/version.go new file mode 100644 index 0000000..13f0253 --- /dev/null +++ b/version/version.go @@ -0,0 +1,52 @@ +package version + +import ( + "fmt" + "strings" +) + +// validCharacters is a list of characters valid in the appBuild string +const validCharacters = 
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-" + +const ( + appMajor uint = 0 + appMinor uint = 3 + appPatch uint = 14 +) + +// appBuild is defined as a variable so it can be overridden during the build +// process with '-ldflags "-X github.com/spectre-project/spectred/version.appBuild=foo"' if needed. +// It MUST only contain characters from validCharacters. +var appBuild string + +var version = "" // string used for memoization of version + +func init() { + if version == "" { + // Start with the major, minor, and patch versions. + version = fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch) + + // Append build metadata if there is any. + // Panic if any invalid characters are encountered + if appBuild != "" { + checkAppBuild(appBuild) + + version = fmt.Sprintf("%s-%s", version, appBuild) + } + } +} + +// Version returns the application version as a properly formed string +func Version() string { + return version +} + +// checkAppBuild verifies that appBuild does not contain any characters outside of validCharacters. +// In case of any invalid characters checkAppBuild panics +func checkAppBuild(appBuild string) { + for _, r := range appBuild { + if !strings.ContainsRune(validCharacters, r) { + panic(fmt.Errorf("appBuild string (%s) contains forbidden characters. Only alphanumeric characters and dashes are allowed", appBuild)) + } + } +}