feat: integration tests #440 #452 (#467)
feat: added integration tests, new image in the github actions pipeline, documentation #440
brandonpille authored Oct 5, 2022
1 parent 409e97a commit 38d91f4
Showing 11 changed files with 1,617 additions and 16 deletions.
7 changes: 6 additions & 1 deletion .github/workflows/README.md
@@ -2,7 +2,12 @@
This page contains some documentation on the workflows for this repository.

## build_test
The workflow build_test is used to build and test the code (see build_test.yaml). We are using a custom docker image for building and testing the code. You can find the image on our [Docker Hub](https://hub.docker.com/repository/docker/threefolddev/tfchain). The dockerfile build_test.Dockerfile was used to build that image. If the image no longer meets the expectations please follow these steps:
The workflow build_test is used to build and test the code (see build_test.yaml). Notice that the binaries are cached to speed up the build process. Once the binaries are built, the pipeline runs both the unit tests and the integration tests, which can take up to 30 minutes. The pipeline runs on every commit to a PR and also when the PR has been merged into development. PRs should only be merged if the pipeline is green (if all tests passed).

For performance reasons we are using a self-hosted runner for the pipeline. The runner only runs one pipeline at a time, which means that all other runs are queued. As the pipeline runs on every commit, consecutive pushed commits also queue additional runs. We strongly advise adding `[skip ci]` to the commit message whenever a pipeline run can be skipped, e.g. `git commit -m "docs: fix typo [skip ci]"`. A pipeline can also be canceled [here](https://github.com/threefoldtech/tfchain/actions).

### Docker image
We are using a custom Docker image for building and testing the code. You can find the image on our [Docker Hub](https://hub.docker.com/repository/docker/threefolddev/tfchain). The image was built from build_test.Dockerfile. If the image no longer meets expectations, please follow these steps:

1) Update the Dockerfile as required (add what you need)
2) Build the new image (execute the command with .github/workflows as the working directory and make sure to increment the version):
19 changes: 11 additions & 8 deletions .github/workflows/build_test.Dockerfile
@@ -1,8 +1,7 @@
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive
COPY clean_disk_space.sh clean_disk_space.sh
RUN apt-get update && \
apt-get install -y \
RUN apt update && \
apt install -y \
build-essential \
clang \
cmake \
@@ -12,16 +11,20 @@ RUN apt-get update && \
libclang-dev \
lld \
lldb \
python3 \
python3-pip \
software-properties-common \
tar \
zstd && \
add-apt-repository ppa:deadsnakes/ppa && \
apt install -y python3.10 && \
curl https://bootstrap.pypa.io/get-pip.py > get-pip.py && \
python3.10 get-pip.py && \
rm -rf get-pip.py && \
curl https://sh.rustup.rs -sSf | sh -s -- -y && \
$HOME/.cargo/bin/rustup install nightly-2022-05-11 && \
# cleanup image
rm -rf /var/lib/apt/lists/* && \
apt-get clean && \
apt-get autoclean && \
apt-get autoremove && \
apt -y clean && \
apt -y autoclean && \
apt -y autoremove && \
rm -rf /tmp/*
RUN /bin/bash
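Note that SubstrateNetwork.py below uses the `str | None` union syntax, which requires Python 3.10, hence the deadsnakes PPA installation above. A quick sanity check that can be run inside the image (a sketch; nothing in the repository ships this file):

```python
# Sketch: verify the CI image provides the toolchain the tests and build need.
# Run with python3.10 inside the threefolddev/tfchain image.
import subprocess
import sys

# SubstrateNetwork.py uses `str | None` annotations (Python 3.10+ only).
assert sys.version_info >= (3, 10), "Python 3.10+ is required by the integration tests"

# The pinned Rust nightly from the Dockerfile must be installed.
subprocess.run(["rustup", "run", "nightly-2022-05-11", "cargo", "--version"], check=True)
```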
12 changes: 10 additions & 2 deletions .github/workflows/build_test.yaml
@@ -8,14 +8,16 @@ jobs:
build-and-test:
runs-on: [self-hosted, poc]
container:
image: threefolddev/tfchain:0
image: threefolddev/tfchain:1
env:
DEBIAN_FRONTEND: noninteractive
steps:
- uses: actions/checkout@v3

- name: Cache build
uses: actions/cache@v3
timeout-minutes: 6
continue-on-error: true
with:
path: |
~/.cargo/bin/
@@ -26,7 +28,6 @@ jobs:
key: ${{ runner.os }}-tfchain-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: ${{ runner.os }}-tfchain-cargo-


- name: Build
run: |
cd substrate-node
@@ -39,3 +40,10 @@ jobs:
$HOME/.cargo/bin/cargo +nightly-2022-05-11 test --no-fail-fast
cd pallets
$HOME/.cargo/bin/cargo +nightly-2022-05-11 test --no-fail-fast
- name: Integration tests
run: |
python3.10 -m pip install robotframework cryptography substrate-interface
cd substrate-node/tests
robot -d _output_tests/ .
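For local debugging, Robot Framework can also be invoked from Python instead of the `robot` CLI. A minimal sketch, assuming the working directory is substrate-node/tests and the dependencies above are installed:

```python
# Programmatic equivalent of `robot -d _output_tests/ .`: run all suites in
# the current directory and write logs and reports to _output_tests/.
from robot import run

return_code = run(".", outputdir="_output_tests")
raise SystemExit(return_code)
```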
11 changes: 10 additions & 1 deletion substrate-node/node/src/chain_spec.rs
@@ -7,7 +7,8 @@ use tfchain_runtime::opaque::SessionKeys;
use tfchain_runtime::{
AccountId, AuraConfig, BalancesConfig, CouncilConfig, CouncilMembershipConfig, GenesisConfig,
GrandpaConfig, SessionConfig, Signature, SudoConfig, SystemConfig, TFTBridgeModuleConfig,
TFTPriceModuleConfig, TfgridModuleConfig, ValidatorSetConfig, WASM_BINARY,
SmartContractModuleConfig, TFTPriceModuleConfig, TfgridModuleConfig, ValidatorSetConfig,
WASM_BINARY,
};

// The URL for the telemetry server.
@@ -121,6 +122,8 @@ pub fn development_config() -> Result<ChainSpec, String> {
10,
// TFT price pallet max price
1000,
// billing frequency
10
)
},
// Bootnodes
@@ -209,6 +212,8 @@ pub fn local_testnet_config() -> Result<ChainSpec, String> {
10,
// TFT price pallet max price
1000,
// billing frequency
5
)
},
// Bootnodes
@@ -239,6 +244,7 @@ fn testnet_genesis(
tft_price_allowed_account: AccountId,
min_tft_price: u32,
max_tft_price: u32,
billing_frequency: u64
) -> GenesisConfig {
GenesisConfig {
system: SystemConfig {
@@ -329,5 +335,8 @@ fn testnet_genesis(
min_tft_price,
max_tft_price,
},
smart_contract_module: SmartContractModuleConfig {
billing_frequency: billing_frequency
},
}
}
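The new `billing_frequency` genesis parameter (10 blocks on the development chain, 5 on the local testnet) can be checked from the test side with substrate-interface. A hedged sketch, assuming a development node listening on the default WebSocket port 9944:

```python
# Sketch: verify the genesis billing frequency was applied (URL is an assumption).
from substrateinterface import SubstrateInterface

substrate = SubstrateInterface(url="ws://127.0.0.1:9944", ss58_format=42,
                               type_registry_preset="polkadot")
billing_frequency = substrate.query("SmartContractModule", "BillingFrequency")
assert billing_frequency.value == 10  # development_config() above passes 10
```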
33 changes: 31 additions & 2 deletions substrate-node/pallets/pallet-smart-contract/src/lib.rs
@@ -157,6 +157,13 @@
#[pallet::getter(fn pallet_version)]
pub type PalletVersion<T> = StorageValue<_, types::StorageVersion, ValueQuery>;

#[pallet::type_value]
pub fn DefaultBillingFrequency<T: Config>() -> u64 { T::BillingFrequency::get() }

#[pallet::storage]
#[pallet::getter(fn billing_frequency)]
pub type BillingFrequency<T> = StorageValue<_, u64, ValueQuery, DefaultBillingFrequency<T>>;

#[pallet::config]
pub trait Config:
frame_system::Config
@@ -297,6 +304,28 @@
SolutionProviderNotApproved,
}

#[pallet::genesis_config]
pub struct GenesisConfig {
pub billing_frequency: u64,
}

// The default value for the genesis config type.
#[cfg(feature = "std")]
impl Default for GenesisConfig {
fn default() -> Self {
Self {
billing_frequency: 600,
}
}
}

#[pallet::genesis_build]
impl<T: Config> GenesisBuild<T> for GenesisConfig {
fn build(&self) {
BillingFrequency::<T>::put(self.billing_frequency);
}
}

#[pallet::call]
impl<T: Config> Pallet<T> {
#[pallet::weight(10_000 + T::DbWeight::get().writes(1))]
@@ -901,7 +930,7 @@
pallet_tfgrid::Twins::<T>::get(contract.twin_id).ok_or(Error::<T>::TwinNotExists)?;
let usable_balance = Self::get_usable_balance(&twin.account_id);

let mut seconds_elapsed = T::BillingFrequency::get() * 6;
let mut seconds_elapsed = BillingFrequency::<T>::get() * 6;
// Calculate amount of seconds elapsed based on the contract lock struct

let now = <timestamp::Pallet<T>>::get().saturated_into::<u64>() / 1000;
@@ -1324,7 +1353,7 @@

let now = <frame_system::Pallet<T>>::block_number().saturated_into::<u64>();
// Save the contract to be billed in now + BILLING_FREQUENCY_IN_BLOCKS
let future_block = now + T::BillingFrequency::get();
let future_block = now + BillingFrequency::<T>::get();
let mut contracts = ContractsToBillAt::<T>::get(future_block);
contracts.push(contract_id);
ContractsToBillAt::<T>::insert(future_block, &contracts);
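Because the billing frequency is now a storage value, the billing schedule can be inspected at runtime as well. A sketch mirroring the `now + BillingFrequency::<T>::get()` logic above (node URL again an assumption):

```python
# Sketch: list the contracts scheduled for billing one billing period ahead.
from substrateinterface import SubstrateInterface

substrate = SubstrateInterface(url="ws://127.0.0.1:9944", ss58_format=42,
                               type_registry_preset="polkadot")
now = substrate.get_block_number(substrate.get_chain_head())
frequency = substrate.query("SmartContractModule", "BillingFrequency").value
future_block = now + frequency  # mirrors now + BillingFrequency::<T>::get()
contracts = substrate.query("SmartContractModule", "ContractsToBillAt", [future_block])
print(f"Contracts to bill at block {future_block}: {contracts.value}")
```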
2 changes: 1 addition & 1 deletion substrate-node/pallets/pallet-tfgrid/src/lib.rs
@@ -1134,7 +1134,7 @@

ensure!(
NodeIdByTwinID::<T>::contains_key(twin_id),
Error::<T>::TwinNotExists
Error::<T>::NodeNotExists
);
let node_id = NodeIdByTwinID::<T>::get(twin_id);

2 changes: 1 addition & 1 deletion substrate-node/runtime/src/lib.rs
@@ -708,7 +708,7 @@ construct_runtime!(
Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent},
TfgridModule: pallet_tfgrid::{Pallet, Call, Storage, Event<T>, Config<T>},
SmartContractModule: pallet_smart_contract::{Pallet, Call, Storage, Event<T>},
SmartContractModule: pallet_smart_contract::{Pallet, Call, Config, Storage, Event<T>},
TFTBridgeModule: pallet_tft_bridge::{Pallet, Call, Config<T>, Storage, Event<T>},
TFTPriceModule: pallet_tft_price::{Pallet, Call, Storage, Config<T>, Event<T>},
Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event<T>},
185 changes: 185 additions & 0 deletions substrate-node/tests/SubstrateNetwork.py
@@ -0,0 +1,185 @@
import argparse
from datetime import datetime
import logging
import os
from os.path import dirname, isdir, isfile, join
import re
from shutil import rmtree
import signal
import subprocess
from substrateinterface import SubstrateInterface, Keypair
import tempfile
import time


SUBSTRATE_NODE_DIR = dirname(os.getcwd())
TFCHAIN_EXE = join(SUBSTRATE_NODE_DIR, "target", "release", "tfchain")

RE_NODE_STARTED = re.compile("Running JSON-RPC WS server")

TIMEOUT_STARTUP_IN_SECONDS = 600
TIMEOUT_TERMINATE_IN_SECONDS = 1

OUTPUT_TESTS = os.environ.get(
"TEST_OUTPUT_DIR", join(os.getcwd(), "_output_tests"))

PREDEFINED_KEYS = {
"Alice": Keypair.create_from_uri("//Alice"),
"Bob": Keypair.create_from_uri("//Bob"),
"Charlie": Keypair.create_from_uri("//Charlie"),
"Dave": Keypair.create_from_uri("//Dave"),
"Eve": Keypair.create_from_uri("//Eve"),
"Ferdie": Keypair.create_from_uri("//Ferdie")
}


def wait_till_node_ready(log_file: str, timeout_in_seconds=TIMEOUT_STARTUP_IN_SECONDS):
start = datetime.now()
while True:
elapsed = datetime.now() - start

        if elapsed.total_seconds() >= timeout_in_seconds:
raise Exception(f"Timeout on starting the node! See {log_file}")

        with open(log_file, "r") as fd:
            for line in reversed(fd.readlines()):
                if RE_NODE_STARTED.search(line):
                    return

        time.sleep(1)  # avoid busy-looping while waiting for the node to start

def setup_offchain_workers(port: int, worker_tft: str = "Alice", worker_smct: str = "Bob"):
logging.info("Setting up offchain workers")
substrate = SubstrateInterface(url=f"ws://127.0.0.1:{port}", ss58_format=42, type_registry_preset='polkadot')

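    # "tft!" and "smct" are the key type identifiers under which the runtime's
    # offchain workers (the TFT price oracle and the smart contract billing
    # worker, respectively) look up their signing keys.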
insert_key_params = [
"tft!", f"//{worker_tft}", PREDEFINED_KEYS[worker_tft].public_key.hex()]
substrate.rpc_request("author_insertKey", insert_key_params)

insert_key_params = [
"smct", f"//{worker_smct}", PREDEFINED_KEYS[worker_smct].public_key.hex()]
substrate.rpc_request("author_insertKey", insert_key_params)

def execute_command(cmd: list, log_file: str | None = None):
if log_file is None:
log_file = tempfile.mktemp()

dir_of_log_file = dirname(log_file)
if not isdir(dir_of_log_file):
os.makedirs(dir_of_log_file)

fd = open(log_file, 'w')
logging.info("Running command\n\t> %s\nand saving output in file %s",
" ".join([f"{arg}" for arg in cmd]), log_file)
p = subprocess.Popen(cmd, stdout=fd, stderr=fd)

return p, fd


def run_node(log_file: str, base_path: str, predefined_account: str, port: int, ws_port: int, rpc_port: int, node_key: str | None = None, bootnodes: str | None = None):
logging.info("Starting node with logfile %s", log_file)

if not isfile(TFCHAIN_EXE):
raise Exception(
f"Executable {TFCHAIN_EXE} doesn't exist! Did you build the code?")

cmd = [TFCHAIN_EXE,
"--base-path", f"{base_path}",
"--chain", "local",
f"--{predefined_account.lower()}",
"--port", f"{port}",
"--ws-port", f"{ws_port}",
"--rpc-port", f"{rpc_port}",
"--telemetry-url", "wss://telemetry.polkadot.io/submit/ 0",
"--validator",
"--rpc-methods", "Unsafe",
"--rpc-cors", "all"
]

if node_key is not None:
cmd.extend(["--node-key", f"{node_key}"])

if bootnodes is not None:
cmd.extend(["--bootnodes", f"{bootnodes}"])

rmtree(base_path, ignore_errors=True)

return execute_command(cmd, log_file)


class SubstrateNetwork:
def __init__(self):
self._nodes = {}

def __del__(self):
if len(self._nodes) > 0:
self.tear_down_multi_node_network()

def setup_multi_node_network(self, log_name: str = "", amt: int = 2):
        assert amt >= 2, "at least 2 nodes are required for a multi node network"
assert amt <= len(PREDEFINED_KEYS), "maximum amount of nodes reached"

output_dir_network = join(OUTPUT_TESTS, log_name)

rmtree(output_dir_network, ignore_errors=True)

port = 30333
ws_port = 9945
rpc_port = 9933
log_file_alice = join(output_dir_network, "node_alice.log")
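        # Alice gets a fixed node key so her peer id is deterministic and can be
        # used as the bootnode address by the other nodes below.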
self._nodes["alice"] = run_node(log_file_alice, "/tmp/alice", "alice", port, ws_port,
rpc_port, node_key="0000000000000000000000000000000000000000000000000000000000000001")
wait_till_node_ready(log_file_alice)
setup_offchain_workers(ws_port)

log_file = ""
for x in range(1, amt):
port += 1
ws_port += 1
rpc_port += 1
name = list(PREDEFINED_KEYS.keys())[x].lower()
log_file = join(output_dir_network, f"node_{name}.log")
self._nodes[name] = run_node(log_file, f"/tmp/{name}", name, port, ws_port, rpc_port, node_key=None,
bootnodes="/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp")
wait_till_node_ready(log_file)
setup_offchain_workers(ws_port)

logging.info("Network is up and running.")

def tear_down_multi_node_network(self):
for (account, (process, log_file)) in self._nodes.items():
logging.info("Terminating node %s", account)
process.terminate()
            try:
                process.wait(timeout=TIMEOUT_TERMINATE_IN_SECONDS)
            except subprocess.TimeoutExpired:
                # force kill if the node did not terminate gracefully in time
                process.kill()
logging.info("Node for %s has terminated.", account)
if log_file is not None:
log_file.close()
self._nodes = {}
logging.info("Teardown network completed!")


def main():
parser = argparse.ArgumentParser(
description="This tool allows you to start a multi node network.")

    parser.add_argument("--amount", required=False, type=int, default=2,
                        help=f"The number of nodes to start. Minimum 2, maximum {len(PREDEFINED_KEYS)}.")
args = parser.parse_args()

logging.basicConfig(
format="%(asctime)s %(levelname)s %(message)s", level=logging.DEBUG)

network = SubstrateNetwork()
    network.setup_multi_node_network(amt=args.amount)

def handler(signum, frame):
network.tear_down_multi_node_network()
exit(0)

signal.signal(signal.SIGINT, handler)
logging.info("Press Ctrl-c to teardown the network.")
while True:
time.sleep(0.1)


if __name__ == "__main__":
main()
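A minimal usage sketch (assuming the tfchain binary has been built in release mode first, see the Build step above):

```python
# Start a two-node network, interact with it, then tear it down again.
import logging
from SubstrateNetwork import SubstrateNetwork

logging.basicConfig(level=logging.INFO)
network = SubstrateNetwork()
network.setup_multi_node_network(log_name="example_run", amt=2)
# ... run checks against ws://127.0.0.1:9945 (Alice) / ws://127.0.0.1:9946 ...
network.tear_down_multi_node_network()
```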