Compare commits

...

6 Commits

Author           SHA1        Message                                                       Date
MiniFrenchBread  1e4ab01bc0  Merge 605a71e826 into f2abb98d6c                              2024-11-04 11:02:01 +00:00
MiniFrenchBread  605a71e826  test: tx (some checks failed: Continuous Integration (Commit) / lint (push) has been cancelled)  2024-11-04 19:01:53 +08:00
MiniFrenchBread  1602c96a40  test: query                                                   2024-11-04 16:20:59 +08:00
Solovyov1796     f2abb98d6c  Merge pull request #85 from Solovyov1796/dev2: fix: bump cosmos-sdk to enable logger for store and pruning  2024-10-27 18:29:20 +08:00
Solovyov1796     77bfade203  fix: bump cosmos-sdk to enable logger for store and pruning  2024-10-27 02:03:26 +08:00
0g-wh            58875c1bc9  update module version, clean github actions                   2024-10-25 11:45:52 +08:00
32 changed files with 1186 additions and 1129 deletions

View File

@@ -1,54 +0,0 @@
name: Manual Deployment (Internal Testnet)
# allow this workflow to be triggered manually
on: workflow_dispatch
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-internal-testnet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.internal.testnet.us-east.production.kava.io:443
chain-id: kava_2221-17000
seed-script-filename: seed-internal-testnet.sh
erc20-deployer-network-name: internal_testnet
genesis_validator_addresses: "kavavaloper1xcgtffvv2yeqmgs3yz4gv29kgjrj8usxrnrlwp kavavaloper1w66m9hdzwgd6uc8g93zqkcumgwzrpcw958sh3s"
kava_version_filepath: ./ci/env/kava-internal-testnet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.internal
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]

View File

@@ -1,79 +0,0 @@
name: Continuous Deployment (Internal Testnet)
# run after every successful CI job of new commits to the master branch
# if deploy version or config has changed
on:
workflow_run:
workflows: [Continuous Integration (Kava Master)]
types:
- completed
jobs:
changed_files:
runs-on: ubuntu-latest
# forward the output of the changed-internal-testnet-config step as a job output
outputs:
changedInternalTestnetConfig: ${{ steps.changed-internal-testnet-config.outputs.any_changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # OR "2" -> To retrieve the preceding commit.
- name: Get all changed internal testnet files
id: changed-internal-testnet-config
uses: tj-actions/changed-files@v42
with:
# Avoid using single or double quotes for multiline patterns
files: |
ci/env/kava-internal-testnet/**
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
needs: [changed_files]
# only start cd pipeline if last ci run was successful
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.changed_files.outputs.changedInternalTestnetConfig == 'true' }}
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-internal-testnet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: kava_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.internal.testnet.us-east.production.kava.io:443
chain-id: kava_2221-17000
seed-script-filename: seed-internal-testnet.sh
erc20-deployer-network-name: internal_testnet
genesis_validator_addresses: "kavavaloper1xcgtffvv2yeqmgs3yz4gv29kgjrj8usxrnrlwp kavavaloper1w66m9hdzwgd6uc8g93zqkcumgwzrpcw958sh3s"
kava_version_filepath: ./ci/env/kava-internal-testnet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.internal
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]

View File

@@ -1,54 +0,0 @@
name: Manual Deployment (Protonet)
# allow this workflow to be triggered manually
on: workflow_dispatch
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-protonet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
chain-id: proto_2221-17000
seed-script-filename: seed-protonet.sh
erc20-deployer-network-name: protonet
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.proto
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]

View File

@@ -1,60 +0,0 @@
name: Continuous Deployment (Protonet)
# run after every successful CI job of new commits to the master branch
on:
workflow_run:
workflows: [Continuous Integration (Kava Master)]
types:
- completed
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
# only start cd pipeline if last ci run was successful
if: ${{ github.event.workflow_run.conclusion == 'success' }}
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-protonet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
chain-id: proto_2221-17000
seed-script-filename: seed-protonet.sh
erc20-deployer-network-name: protonet
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.proto
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]

View File

@@ -1,79 +0,0 @@
name: Reset Internal Testnet
on:
workflow_call:
inputs:
chain-id:
required: true
type: string
aws-region:
required: true
type: string
ssm-document-name:
required: true
type: string
playbook-name:
required: true
type: string
playbook-infrastructure-branch:
required: true
type: string
secrets:
CI_AWS_KEY_ID:
required: true
CI_AWS_KEY_SECRET:
required: true
KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
required: true
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# download updated binary and genesis
# reset application database state (only done on internal testnet)
jobs:
place-chain-nodes-on-standby:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: take the chain offline
run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
env:
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
- name: checkout infrastructure repo
uses: actions/checkout@v4
with:
repository: Kava-Labs/infrastructure
token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
path: infrastructure
ref: master
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava node updater
run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
- name: run reset playbook on all chain nodes
run: |
kava-node-updater \
--debug \
--max-retries=2 \
--aws-ssm-document-name=$SSM_DOCUMENT_NAME \
--infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--wait-for-node-sync-after-upgrade=false
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
PLAYBOOK_NAME: ${{ inputs.playbook-name }}
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
AWS_SDK_LOAD_CONFIG: 1
PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}

View File

@@ -1,98 +0,0 @@
name: Seed Chain
on:
workflow_call:
inputs:
chain-api-url:
required: true
type: string
chain-id:
required: true
type: string
seed-script-filename:
required: true
type: string
erc20-deployer-network-name:
required: true
type: string
genesis_validator_addresses:
required: true
type: string
kava_version_filepath:
required: true
type: string
secrets:
DEV_WALLET_MNEMONIC:
required: true
KAVA_TESTNET_GOD_MNEMONIC:
required: true
jobs:
seed-chain-state:
runs-on: ubuntu-latest
steps:
- name: checkout repo from master
uses: actions/checkout@v4
with:
ref: master
- name: get desired version of network
id: kava-version
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
env:
KAVA_VERSION_FILEPATH: ${{ inputs.kava_version_filepath }}
- name: checkout version of kava used by network
uses: actions/checkout@v4
with:
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava binary
run: make install
- name: checkout go evm tools repo
uses: actions/checkout@v4
with:
repository: ethereum/go-ethereum
path: go-ethereum
ref: v1.10.26
- name: install go evm tools
run: |
make
make devtools
working-directory: go-ethereum
- name: checkout kava bridge repo for deploying evm contracts
uses: actions/checkout@v4
with:
repository: Kava-Labs/kava-bridge
path: kava-bridge
ref: main
- name: install nodeJS
uses: actions/setup-node@v3
with:
cache: npm
node-version-file: .tool-versions
cache-dependency-path: kava-bridge/contract/package.json
- name: "install ERC20 contract deployment dependencies"
run: "npm install"
working-directory: kava-bridge/contract
- name: compile default erc20 contracts
run: make compile-contracts
working-directory: kava-bridge
- name: download seed script from current commit
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/${GITHUB_SHA}/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
working-directory: kava-bridge/contract
env:
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
- name: run seed scripts
run: bash ./${SEED_SCRIPT_FILENAME}
working-directory: kava-bridge/contract
env:
CHAIN_API_URL: ${{ inputs.chain-api-url }}
CHAIN_ID: ${{ inputs.chain-id }}
DEV_WALLET_MNEMONIC: ${{ secrets.DEV_WALLET_MNEMONIC }}
KAVA_TESTNET_GOD_MNEMONIC: ${{ secrets.KAVA_TESTNET_GOD_MNEMONIC }}
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
ERC20_DEPLOYER_NETWORK_NAME: ${{ inputs.erc20-deployer-network-name }}
GENESIS_VALIDATOR_ADDRESSES: ${{ inputs.genesis_validator_addresses }}

View File

@@ -1,77 +0,0 @@
name: Start Chain
on:
workflow_call:
inputs:
chain-id:
required: true
type: string
aws-region:
required: true
type: string
ssm-document-name:
required: true
type: string
playbook-name:
required: true
type: string
playbook-infrastructure-branch:
required: true
type: string
secrets:
CI_AWS_KEY_ID:
required: true
CI_AWS_KEY_SECRET:
required: true
KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
required: true
jobs:
# start kava, allow nodes to start processing requests from users once they are synced to live
serve-traffic:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: take the chain offline
run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
env:
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
- name: checkout infrastructure repo
uses: actions/checkout@v4
with:
repository: Kava-Labs/infrastructure
token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
path: infrastructure
ref: master
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava node updater
run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
- name: run start-chain playbook on all chain nodes
run: |
kava-node-updater \
--debug \
--max-retries=2 \
--aws-ssm-document-name=$SSM_DOCUMENT_NAME \
--infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--wait-for-node-sync-after-upgrade=true
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
PLAYBOOK_NAME: ${{ inputs.playbook-name }}
CHAIN_ID: ${{ inputs.chain-id }}
AWS_REGION: ${{ inputs.aws-region }}
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
AWS_SDK_LOAD_CONFIG: 1
PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}
- name: bring the chain online
run: bash ${GITHUB_WORKSPACE}/.github/scripts/exit-standby-all-chain-nodes.sh

View File

@@ -1,7 +0,0 @@
name: Continuous Integration (Commit)
on:
push:
# run per commit ci checks against this commit
jobs:
lint:
uses: ./.github/workflows/ci-lint.yml

View File

@@ -1,102 +0,0 @@
name: Continuous Integration (Default Checks)
on:
workflow_call:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
tests/e2e/kvtool/go.sum
- name: build application
run: make build
test:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
tests/e2e/kvtool/go.sum
- name: run unit tests
run: make test
- name: run e2e tests
run: make docker-build test-e2e
fuzz:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
- name: run fuzz tests
run: make test-fuzz
ibc-test:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: tests/e2e-ibc/go.mod
cache-dependency-path: |
tests/e2e-ibc/go.sum
go.sum
- name: run ibc e2e tests
run: make test-ibc
validate-internal-testnet-genesis:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: save version of kava that will be deployed if this pr is merged
id: kava-version
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
- name: checkout version of kava that will be deployed if this pr is merged
uses: actions/checkout@v4
with:
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava cli
run: make install
- name: checkout repo from current commit to validate current branch's genesis
uses: actions/checkout@v4
- name: validate testnet genesis
run: kava validate-genesis ci/env/kava-internal-testnet/genesis.json
validate-protonet-genesis:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build kava cli
run: make install
- name: validate protonet genesis
run: kava validate-genesis ci/env/kava-protonet/genesis.json

View File

@@ -1,124 +0,0 @@
name: Build & Publish Docker Images
on:
workflow_call:
inputs:
dockerhub-username:
required: true
type: string
# this workflow publishes a rocksdb & goleveldb docker images with these tags:
# - <commit-hash>-goleveldb
# - <extra-image-tag>-goleveldb
# - <commit-hash>-rocksdb
# - <extra-image-tag>-rocksdb
extra-image-tag:
required: true
type: string
secrets:
CI_DOCKERHUB_TOKEN:
required: true
# runs in ci-master after successful checks
# you can use images built by this action in future jobs.
# https://docs.docker.com/build/ci/github-actions/examples/#share-built-image-between-jobs
jobs:
docker-goleveldb:
# https://github.com/marketplace/actions/build-and-push-docker-images
runs-on: ubuntu-latest
steps:
# ensure working with latest code
- name: Checkout
uses: actions/checkout@v4
# generate a git commit hash to be used as image tag
- name: Generate short hash
id: commit-hash
run: echo "short=$( git rev-parse --short $GITHUB_SHA )" >> $GITHUB_OUTPUT
# qemu is used to emulate different platform architectures
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# cross-platform build of the image
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# authenticate for publish to docker hub
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
- name: Go Build Cache for Docker
uses: actions/cache@v3
with:
path: go-build-cache
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
- name: inject go-build-cache into docker
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
with:
cache-source: go-build-cache
# publish to docker hub, tag with short git hash
- name: Build and push (goleveldb)
uses: docker/build-push-action@v5
with:
context: .
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
push: true
tags: kava/kava:${{ steps.commit-hash.outputs.short }}-goleveldb,kava/kava:${{ inputs.extra-image-tag }}-goleveldb
docker-rocksdb:
# https://github.com/marketplace/actions/build-and-push-docker-images
runs-on: ubuntu-latest
steps:
# ensure working with latest code
- name: Checkout
uses: actions/checkout@v4
# generate a git commit hash to be used as image tag
- name: Generate short hash
id: commit-hash
run: echo "short=$( git rev-parse --short $GITHUB_SHA )" >> $GITHUB_OUTPUT
# qemu is used to emulate different platform architectures
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# cross-platform build of the image
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# authenticate for publish to docker hub
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
- name: Go Build Cache for Docker
uses: actions/cache@v3
with:
path: go-build-cache
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
- name: inject go-build-cache into docker
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
with:
cache-source: go-build-cache
# publish to docker hub, tag with short git hash
- name: Build and push (rocksdb)
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile-rocksdb
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
push: true
tags: kava/kava:${{ steps.commit-hash.outputs.short }}-rocksdb,kava/kava:${{ inputs.extra-image-tag }}-rocksdb

View File

@@ -1,31 +0,0 @@
name: Lint Checks
on:
workflow_call:
# run per commit ci checks against this commit
jobs:
proto-lint:
uses: ./.github/workflows/proto.yml
golangci-lint:
runs-on: ubuntu-latest
permissions:
checks: write # allow write access to checks to allow the action to annotate code in the PR.
steps:
- name: Checkout code
uses: actions/checkout@v4
with: { fetch-depth: 0 }
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: Load Version
id: load-version
run: |
GOLANGCI_VERSION=$(cat .golangci-version)
REV=$(git merge-base origin/master HEAD)
echo "GOLANGCI_VERSION=$GOLANGCI_VERSION" >> $GITHUB_ENV
echo "REV=$REV" >> $GITHUB_ENV
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: ${{ env.GOLANGCI_VERSION }}
args: -v -c .golangci.yml --new-from-rev ${{ env.REV }}

View File

@@ -1,59 +0,0 @@
name: Continuous Integration (Kava Master)
on:
push:
# run CI on any push to the master branch
branches:
- master
jobs:
# run per commit ci checks against master branch
lint-checks:
uses: ./.github/workflows/ci-lint.yml
# run default ci checks against master branch
default-checks:
uses: ./.github/workflows/ci-default.yml
# build and upload versions of kava for use on internal infrastructure
# configurations for databases, cpu architectures and operating systems
publish-internal:
# only run if all checks pass
needs: [lint-checks, default-checks]
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: set build tag
run: echo "BUILD_TAG=$(date +%s)-$(git rev-parse HEAD | cut -c 1-8)" >> $GITHUB_ENV
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
env:
ROCKSDB_VERSION: v8.10.0
- name: Build and upload release artifacts
run: bash ${GITHUB_WORKSPACE}/.github/scripts/publish-internal-release-artifacts.sh
env:
BUILD_TAG: ${{ env.BUILD_TAG }}
AWS_REGION: us-east-1
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
docker:
# only run if all checks pass
needs: [lint-checks, default-checks]
uses: ./.github/workflows/ci-docker.yml
with:
dockerhub-username: kavaops
extra-image-tag: master
secrets: inherit
rosetta:
uses: ./.github/workflows/ci-rosetta.yml
secrets: inherit
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.releases.merge
namespace: Kava/ContinuousIntegration
secrets: inherit
needs: [publish-internal]

View File

@@ -1,27 +0,0 @@
# this workflow is responsible for ensuring quality titles are given to all PRs
# for PR checks to pass, the title must follow the Conventional Commits standard
# https://www.conventionalcommits.org/en/v1.0.0/
# e.g. "fix: bump cosmos-sdk to enable logger for store and pruning"
# this workflow was adapted from a similar workflow in https://github.com/cosmos/cosmos-sdk
name: "Lint PR Title"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
contents: read
jobs:
main:
permissions:
pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs
statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR
runs-on: ubuntu-latest
steps:
# https://github.com/marketplace/actions/semantic-pull-request
- uses: amannn/action-semantic-pull-request@v5.5.3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,23 +0,0 @@
name: Continuous Integration (PR)
on:
pull_request:
# run CI on pull requests to master or a release branch
branches:
- master
- 'release/**'
- 'releases/**'
# run default ci checks against current PR
jobs:
default:
uses: ./.github/workflows/ci-default.yml
rocksdb:
uses: ./.github/workflows/ci-rocksdb-build.yml
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.releases.pr
namespace: Kava/ContinuousIntegration
secrets: inherit
needs: [default]

View File

@@ -1,32 +0,0 @@
name: Continuous Integration (Release)
on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+*"
jobs:
# run default ci checks against released version
default-checks:
uses: ./.github/workflows/ci-default.yml
# get the version tag that triggered this workflow
get-version-tag:
# prep version release only if all checks pass
needs: default-checks
runs-on: ubuntu-latest
outputs:
git-tag: ${{ steps.git-tag.outputs.tag }}
steps:
- uses: actions/checkout@v4
- id: git-tag
run: echo "tag=$(git describe --always --tags --match='v*')" >> $GITHUB_OUTPUT
# build and upload versions of kava for use on internal infrastructure
# configurations for databases, cpu architectures and operating systems
docker:
# only run if all checks pass
needs: get-version-tag
uses: ./.github/workflows/ci-docker.yml
with:
dockerhub-username: kavaops
extra-image-tag: ${{ needs.get-version-tag.outputs.git-tag }}
secrets: inherit

View File

@@ -1,21 +0,0 @@
name: Continuous Integration (Rocksdb Build)
env:
ROCKSDB_VERSION: v8.10.0
on:
workflow_call:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
- name: build application
run: make build COSMOS_BUILD_OPTIONS=rocksdb

View File

@@ -1,16 +0,0 @@
name: Dispatch run-rosetta-tests event to rosetta-kava
on:
workflow_call:
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Dispatch run-rosetta-tests event to rosetta-kava
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.KAVA_PUBLIC_GITHUB_ACCESS_TOKEN }}
repository: Kava-Labs/rosetta-kava
event-type: run-rosetta-tests
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

View File

@@ -1,45 +0,0 @@
name: Metric Pipeline
on:
workflow_call:
inputs:
aws-region:
required: true
type: string
metric-name:
required: true
type: string
namespace:
required: true
type: string
secrets:
CI_AWS_KEY_ID:
required: true
CI_AWS_KEY_SECRET:
required: true
jobs:
metric-pipeline-result:
runs-on: ubuntu-latest
if: always() # always run to capture workflow success or failure
steps:
# Make sure the secrets are stored in your repo settings
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.CI_AWS_KEY_ID }}
aws-secret-access-key: ${{ secrets.CI_AWS_KEY_SECRET }}
aws-region: ${{ inputs.aws-region }}
- name: Calculate Pipeline Success
# run this action to get the workflow conclusion
# You can get the conclusion via env (env.WORKFLOW_CONCLUSION)
# values: neutral, success, skipped, cancelled, timed_out,
# action_required, failure
uses: technote-space/workflow-conclusion-action@v3
- name: Metric Pipeline Success
# replace TAG by the latest tag in the repository
uses: ros-tooling/action-cloudwatch-metrics@0.0.5
with:
metric-value: ${{ env.WORKFLOW_CONCLUSION == 'success' }}
metric-name: ${{ inputs.metric-name }}
namespace: ${{ inputs.namespace }}

View File

@@ -1,26 +0,0 @@
name: Protobuf Checks
on:
workflow_call:
jobs:
check-proto:
name: "Check Proto"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- run: go mod download
- run: make install-build-deps
- run: make check-proto-deps
- run: make check-proto-lint
- run: make check-proto-format
- run: make check-proto-breaking-remote
- run: BUF_CHECK_BREAKING_AGAINST_REMOTE="branch=$GITHUB_BASE_REF" make check-proto-breaking-remote
if: github.event_name == 'pull_request'
- run: make check-proto-gen
- run: make check-proto-gen-doc
- run: make check-proto-gen-swagger

View File

@@ -1,76 +0,0 @@
package chaincfg
import (
"github.com/shopspring/decimal"
sdk "github.com/cosmos/cosmos-sdk/types"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
)
var (
Xmax, _ = sdk.NewDecFromStr("1.0") // upper limit on staked supply (as % of circ supply)
Ymin, _ = sdk.NewDecFromStr("0.05") // target APY at upper limit
Xmin, _ = sdk.NewDecFromStr("0.2") // lower limit on staked supply (as % of circ supply)
Ymax, _ = sdk.NewDecFromStr("0.15") // target APY at lower limit
decayRate, _ = sdk.NewDecFromStr("10")
)
func decExp(x sdk.Dec) sdk.Dec {
xDec := decimal.NewFromBigInt(x.BigInt(), -18)
expDec, _ := xDec.ExpTaylor(18)
expInt := expDec.Shift(18).BigInt()
return sdk.NewDecFromBigIntWithPrec(expInt, 18)
}
func NextInflationRate(ctx sdk.Context, minter minttypes.Minter, params minttypes.Params, bondedRatio sdk.Dec, circulatingRatio sdk.Dec) sdk.Dec {
X := bondedRatio.Quo(circulatingRatio)
var apy sdk.Dec
if X.LT(Xmin) {
apy = Ymax
} else {
exp := decayRate.Neg().Mul(Xmax.Sub(Xmin))
c := decExp(exp)
d := Ymin.Sub(Ymax.Mul(c)).Quo(sdk.OneDec().Sub(c))
expBonded := decayRate.Neg().Mul(X.Sub(Xmin))
cBonded := decExp(expBonded)
e := Ymax.Sub(d).Mul(cBonded)
apy = d.Add(e)
}
inflation := apy.Mul(bondedRatio)
// // The target annual inflation rate is recalculated for each previsions cycle. The
// // inflation is also subject to a rate change (positive or negative) depending on
// // the distance from the desired ratio (67%). The maximum rate change possible is
// // defined to be 13% per year, however the annual inflation is capped as between
// // 7% and 20%.
// // (1 - bondedRatio/GoalBonded) * InflationRateChange
// inflationRateChangePerYear := sdk.OneDec().
// Sub(bondedRatio.Quo(params.GoalBonded)).
// Mul(params.InflationRateChange)
// inflationRateChange := inflationRateChangePerYear.Quo(sdk.NewDec(int64(params.BlocksPerYear)))
// // adjust the new annual inflation for this next cycle
// inflation := minter.Inflation.Add(inflationRateChange) // note inflationRateChange may be negative
// if inflation.GT(params.InflationMax) {
// inflation = params.InflationMax
// }
// if inflation.LT(params.InflationMin) {
// inflation = params.InflationMin
// }
ctx.Logger().Info(
"nextInflationRate",
"bondedRatio", bondedRatio,
"circulatingRatio", circulatingRatio,
"apy", apy,
"inflation", inflation,
"params", params,
"minter", minter,
)
return inflation
}
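For reference, the APY curve implemented by the removed NextInflationRate above (a restatement of the code itself, not an external spec) is, with X = bondedRatio / circulatingRatio and r = decayRate = 10:

\mathrm{APY}(X) = Y_{\max} \quad \text{for } X < X_{\min}, \qquad \text{otherwise} \quad
\mathrm{APY}(X) = d + \left(Y_{\max} - d\right) e^{-r\,(X - X_{\min})},
\qquad d = \frac{Y_{\min} - Y_{\max}\, e^{-r\,(X_{\max} - X_{\min})}}{1 - e^{-r\,(X_{\max} - X_{\min})}}

so the curve passes through (X_min, Y_max) and (X_max, Y_min), and the final rate is inflation = APY(X) * bondedRatio. The decExp helper evaluates e^x via decimal.ExpTaylor at 18-digit precision before converting back to sdk.Dec.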

go.mod (4 lines changed)
View File

@@ -242,7 +242,7 @@ replace (
github.com/cometbft/cometbft-db => github.com/kava-labs/cometbft-db v0.9.1-kava.2
// Use cosmos-sdk fork with backported fix for unsafe-reset-all, staking transfer events, and custom tally handler support
// github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.46.11-kava.3
github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.47.10-0glabs.5
github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.47.10-0glabs.7
github.com/cosmos/iavl => github.com/kava-labs/iavl v1.2.0-kava.1
// See https://github.com/cosmos/cosmos-sdk/pull/13093
github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.2
@@ -250,7 +250,7 @@ replace (
// TODO: Tag before release
github.com/ethereum/go-ethereum => github.com/evmos/go-ethereum v1.10.26-evmos-rc2
// Use ethermint fork that respects min-gas-price with NoBaseFee true and london enabled, and includes eip712 support
github.com/evmos/ethermint => github.com/0glabs/ethermint v0.21.0-0g.v3.1.5
github.com/evmos/ethermint => github.com/0glabs/ethermint v0.21.0-0g.v3.1.6
// See https://github.com/cosmos/cosmos-sdk/pull/10401, https://github.com/cosmos/cosmos-sdk/commit/0592ba6158cd0bf49d894be1cef4faeec59e8320
github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.9.0
// Downgraded to avoid bugs in following commits which causes "version does not exist" errors

go.sum (8 lines changed)
View File

@@ -211,10 +211,10 @@ git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFN
git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA=
github.com/0glabs/cometbft v0.37.9-0glabs.1 h1:KQJG17Y21suKP3QNICLto4b5Ak73XbSmKxeLbg0ZM68=
github.com/0glabs/cometbft v0.37.9-0glabs.1/go.mod h1:j0Q3RqrCd+cztWCugs3obbzC4NyHGBPZZjtm/fWV00I=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.5 h1:CwL3i7ccMjOXpKnJ9w0WkM7UpTWLXa/W2ZAwpxFUfb8=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.5/go.mod h1:KskIVnhXTFqrw7CDccMvx7To5KzUsOomIsQV7sPGOog=
github.com/0glabs/ethermint v0.21.0-0g.v3.1.5 h1:aYqzUqq0F5j2b6Qa0P64oA0ibMAbWruS1lZQit+juqs=
github.com/0glabs/ethermint v0.21.0-0g.v3.1.5/go.mod h1:S1Ahmqpzo1XUsfmmpGT7ok0hu5Fekz/pD6EDtXaBg9Q=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.7 h1:6+JquK9BaZZdIA3gx1AXhPBAdYCG+FQ94Y7FN35CvB4=
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.7/go.mod h1:KskIVnhXTFqrw7CDccMvx7To5KzUsOomIsQV7sPGOog=
github.com/0glabs/ethermint v0.21.0-0g.v3.1.6 h1:js58IERJXdR/arMz6RaGWswiuE6EWkl+2t99ijhr3yI=
github.com/0glabs/ethermint v0.21.0-0g.v3.1.6/go.mod h1:S1Ahmqpzo1XUsfmmpGT7ok0hu5Fekz/pD6EDtXaBg9Q=
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs=
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=

View File

@@ -72,8 +72,8 @@ func (suite *DASignersTestSuite) SetupTest() {
suite.Assert().EqualValues(ok, true)
suite.dasigners = precompile.(*dasignersprecompile.DASignersPrecompile)
suite.signerOne = testutil.GenSigner()
suite.signerTwo = testutil.GenSigner()
suite.signerOne = suite.GenSigner()
suite.signerTwo = suite.GenSigner()
abi, err := abi.JSON(strings.NewReader(dasignersprecompile.DASignersABI))
suite.Assert().NoError(err)
suite.abi = abi

View File

@@ -22,7 +22,6 @@ struct CommissionRates {
uint maxChangeRate; // 18 decimals
}
/**
* @dev Commission defines the commission parameters.
*/
@@ -207,7 +206,7 @@ interface IStaking {
function delegate(
string memory validatorAddress,
uint amount // in bond denom
) external returns (bool success);
) external;
/**
* @dev BeginRedelegate defines a method for performing a redelegation

View File

@@ -142,13 +142,7 @@
}
],
"name": "delegate",
"outputs": [
{
"internalType": "bool",
"name": "success",
"type": "bool"
}
],
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
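Both the IStaking interface and the generated ABI above drop the bool return from delegate, so success is now signaled only by the call not reverting. A minimal caller-side sketch in Go; the ABI fragment and validator address below are illustrations, not repo API:

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// Hypothetical ABI fragment mirroring the change above: delegate now has an
// empty "outputs" array.
const delegateABI = `[{"type":"function","name":"delegate","stateMutability":"nonpayable",
"inputs":[{"name":"validatorAddress","type":"string"},{"name":"amount","type":"uint256"}],
"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(delegateABI))
	if err != nil {
		panic(err)
	}
	// Pack calldata exactly as before; amount is in the bond denom.
	input, err := parsed.Pack("delegate", "examplevaloper1qqqq", big.NewInt(1000000))
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: 0x%x\n", input)
	// Nothing to unpack anymore: the method declares zero outputs, so callers
	// rely on the transaction succeeding rather than a returned bool.
	fmt.Println("declared outputs:", len(parsed.Methods["delegate"].Outputs)) // prints 0
}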

File diff suppressed because one or more lines are too long

View File

@@ -1,9 +1,12 @@
package staking_test
import (
"math/big"
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
sdk "github.com/cosmos/cosmos-sdk/types"
query "github.com/cosmos/cosmos-sdk/types/query"
"github.com/ethereum/go-ethereum/common"
)
func (s *StakingTestSuite) TestValidators() {
@@ -63,3 +66,736 @@
})
}
}
func (s *StakingTestSuite) TestValidator() {
method := stakingprecompile.StakingFunctionValidator
testCases := []struct {
name string
malleate func(operatorAddress string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(operatorAddress string) []byte {
input, err := s.abi.Pack(
method,
operatorAddress,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
validator := out[0].(stakingprecompile.Validator)
s.Require().EqualValues(common.HexToAddress(validator.OperatorAddress), common.BytesToAddress(operatorAddress.Bytes()))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestValidatorDelegations() {
method := stakingprecompile.StakingFunctionValidatorDelegations
testCases := []struct {
name string
malleate func(operatorAddress string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(operatorAddress string) []byte {
input, err := s.abi.Pack(
method,
operatorAddress,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
delegations := out[0].([]stakingprecompile.DelegationResponse)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
s.Require().EqualValues(len(delegations), len(d))
// jsonData, _ := json.MarshalIndent(delegations, "", " ")
// fmt.Printf("delegations: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestValidatorUnbondingDelegations() {
method := stakingprecompile.StakingFunctionValidatorUnbondingDelegations
testCases := []struct {
name string
malleate func(operatorAddress string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(operatorAddress string) []byte {
input, err := s.abi.Pack(
method,
operatorAddress,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
unbonding := out[0].([]stakingprecompile.UnbondingDelegation)
s.Require().EqualValues(len(unbonding), 1)
// jsonData, _ := json.MarshalIndent(unbonding, "", " ")
// fmt.Printf("delegations: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
_, err = s.stakingKeeper.Undelegate(s.Ctx, delAddr, operatorAddress, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegation() {
method := stakingprecompile.StakingFunctionDelegation
testCases := []struct {
name string
malleate func(delAddr, valAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, valAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
valAddr,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].(stakingprecompile.Delegation)
b := out[1].(*big.Int)
_ = d
_ = b
/*
jsonData, _ := json.MarshalIndent(d, "", " ")
fmt.Printf("delegation: %s\n", string(jsonData))
fmt.Printf("balance: %v\n", b)
*/
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestUnbondingDelegation() {
method := stakingprecompile.StakingFunctionUnbondingDelegation
testCases := []struct {
name string
malleate func(delAddr, valAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, valAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
valAddr,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
u := out[0].(stakingprecompile.UnbondingDelegation)
_ = u
// jsonData, _ := json.MarshalIndent(u, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
_, err = s.stakingKeeper.Undelegate(s.Ctx, delAddr, operatorAddress, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorDelegations() {
method := stakingprecompile.StakingFunctionDelegatorDelegations
testCases := []struct {
name string
malleate func(delAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].([]stakingprecompile.DelegationResponse)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(d))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorUnbondingDelegations() {
method := stakingprecompile.StakingFunctionDelegatorUnbondingDelegations
testCases := []struct {
name string
malleate func(delAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].([]stakingprecompile.UnbondingDelegation)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(d))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
_, err = s.stakingKeeper.Undelegate(s.Ctx, delAddr, operatorAddress, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestRedelegations() {
method := stakingprecompile.StakingFunctionRedelegations
testCases := []struct {
name string
malleate func(delAddr, srcValAddr, dstValAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, srcValAddr, dstValAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
srcValAddr,
dstValAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d := out[0].([]stakingprecompile.RedelegationResponse)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(d))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegations: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
// setup redelegations
s.setupValidator(s.signerOne)
_, err = s.stakingKeeper.BeginRedelegation(s.Ctx, delAddr, operatorAddress, s.signerOne.ValAddr, sdk.NewDec(1))
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String(), s.signerOne.ValAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorValidators() {
method := stakingprecompile.StakingFunctionDelegatorValidators
testCases := []struct {
name string
malleate func(delAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
query.PageRequest{
Limit: 10,
CountTotal: true,
},
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
v := out[0].([]stakingprecompile.Validator)
paginationResult := out[1].(stakingprecompile.PageResponse)
s.Assert().EqualValues(1, len(v))
s.Assert().EqualValues(1, paginationResult.Total)
// jsonData, _ := json.MarshalIndent(v, "", " ")
// fmt.Printf("validators: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestDelegatorValidator() {
method := stakingprecompile.StakingFunctionDelegatorValidator
testCases := []struct {
name string
malleate func(delAddr, valAddr string) []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func(delAddr, valAddr string) []byte {
input, err := s.abi.Pack(
method,
delAddr,
valAddr,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
v := out[0].(stakingprecompile.Validator)
_ = v
// jsonData, _ := json.MarshalIndent(v, "", " ")
// fmt.Printf("validators: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
d := s.stakingKeeper.GetValidatorDelegations(s.Ctx, operatorAddress)
delAddr, err := sdk.AccAddressFromBech32(d[0].DelegatorAddress)
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(delAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestPool() {
method := stakingprecompile.StakingFunctionPool
testCases := []struct {
name string
malleate func() []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
bonded := out[0].(*big.Int)
unbonded := out[1].(*big.Int)
s.Assert().Equal(bonded.Int64(), int64(0))
s.Assert().Equal(unbonded.Int64(), int64(0))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}
func (s *StakingTestSuite) TestParams() {
method := stakingprecompile.StakingFunctionParams
testCases := []struct {
name string
malleate func() []byte
postCheck func(bz []byte)
gas uint64
expErr bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
)
s.Assert().NoError(err)
return input
},
func(data []byte) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
params := out[0].(stakingprecompile.Params)
_ = params
// jsonData, _ := json.MarshalIndent(params, "", " ")
// fmt.Printf("params: %s\n", string(jsonData))
},
100000,
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expErr {
s.Require().Error(err)
s.Require().Contains(err.Error(), tc.errContains)
} else {
s.Require().NoError(err)
s.Require().NotNil(bz)
tc.postCheck(bz)
}
})
}
}

View File

@@ -1,6 +1,7 @@
package staking_test
import (
"errors"
"math/big"
"strings"
"testing"
@@ -74,6 +75,48 @@ func (suite *StakingTestSuite) AddDelegation(from string, to string, amount math
})
}
func (suite *StakingTestSuite) setupValidator(signer *testutil.TestSigner) {
method := stakingprecompile.StakingFunctionCreateValidator
description := stakingprecompile.Description{
Moniker: "test node",
Identity: "test node identity",
Website: "http://test.node.com",
SecurityContact: "test node security contact",
Details: "test node details",
}
commission := stakingprecompile.CommissionRates{
Rate: math.LegacyOneDec().BigInt(),
MaxRate: math.LegacyOneDec().BigInt(),
MaxChangeRate: math.LegacyOneDec().BigInt(),
}
minSelfDelegation := big.NewInt(1)
pubkey := "eh/aR8BGUBIYI/Ust0NVBxZafLDAm7344F9dKzZU+7g="
value := big.NewInt(100000000)
input, err := suite.abi.Pack(
method,
description,
commission,
minSelfDelegation,
pubkey,
value,
)
suite.Assert().NoError(err)
_, err = suite.runTx(input, signer, 10000000)
suite.Assert().NoError(err)
_, err = suite.stakingKeeper.ApplyAndReturnValidatorSetUpdates(suite.Ctx)
suite.Assert().NoError(err)
}
func (suite *StakingTestSuite) firstBondedValidator() (sdk.ValAddress, error) {
validators := suite.stakingKeeper.GetValidators(suite.Ctx, 10)
for _, v := range validators {
if v.IsBonded() {
return sdk.ValAddressFromBech32(v.OperatorAddress)
}
}
return nil, errors.New("no bonded validator")
}
func (suite *StakingTestSuite) runTx(input []byte, signer *testutil.TestSigner, gas uint64) ([]byte, error) {
contract := vm.NewPrecompile(vm.AccountRef(signer.Addr), vm.AccountRef(suite.addr), big.NewInt(0), gas)
contract.Input = input

View File

@@ -2,6 +2,7 @@ package staking
import (
"fmt"
"math/big"
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
sdk "github.com/cosmos/cosmos-sdk/types"
@@ -103,12 +104,12 @@ func (s *StakingPrecompile) BeginRedelegate(
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = stakingkeeper.NewMsgServerImpl(s.stakingKeeper).BeginRedelegate(ctx, msg)
response, err := stakingkeeper.NewMsgServerImpl(s.stakingKeeper).BeginRedelegate(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack()
return method.Outputs.Pack(big.NewInt(response.CompletionTime.UTC().Unix()))
}
func (s *StakingPrecompile) Undelegate(
@@ -128,12 +129,12 @@ func (s *StakingPrecompile) Undelegate(
return nil, fmt.Errorf(precopmiles_common.ErrSenderNotOrigin)
}
// execute
_, err = stakingkeeper.NewMsgServerImpl(s.stakingKeeper).Undelegate(ctx, msg)
response, err := stakingkeeper.NewMsgServerImpl(s.stakingKeeper).Undelegate(ctx, msg)
if err != nil {
return nil, err
}
// emit events
return method.Outputs.Pack()
return method.Outputs.Pack(big.NewInt(response.CompletionTime.UTC().Unix()))
}
func (s *StakingPrecompile) CancelUnbondingDelegation(
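The two hunks above change BeginRedelegate and Undelegate to pack the msg server response's CompletionTime (Unix seconds, UTC) as their single output. A caller-side decoding sketch; the package name, helper function, and method-name argument are assumptions for illustration, not repo API:

package stakingcaller

import (
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// CompletionTime decodes the single uint256 output now returned by the
// undelegate / beginRedelegate precompile methods: the completion time of
// the unbonding or redelegation, packed as Unix seconds.
func CompletionTime(stakingAbi abi.ABI, method string, ret []byte) (time.Time, error) {
	out, err := stakingAbi.Methods[method].Outputs.Unpack(ret)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(out[0].(*big.Int).Int64(), 0).UTC(), nil
}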

View File

@@ -3,10 +3,13 @@ package staking_test
import (
"encoding/base64"
"math/big"
"time"
"cosmossdk.io/math"
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/ethereum/go-ethereum/common"
"github.com/evmos/ethermint/x/evm/statedb"
)
func (s *StakingTestSuite) TestCreateValidator() {
@@ -83,6 +86,7 @@ func (s *StakingTestSuite) TestCreateValidator() {
s.SetupTest()
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
s.stakingKeeper.ApplyAndReturnValidatorSetUpdates(s.Ctx)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
@ -97,7 +101,7 @@ func (s *StakingTestSuite) TestCreateValidator() {
tc.postCheck(bz)
isBonded := validator.IsBonded()
s.Require().Equal(false, isBonded, "expected validator bonded to be %t; got %t", false, isBonded)
s.Require().Equal(true, isBonded, "expected validator bonded to be %t; got %t", true, isBonded)
consPubKey, err := validator.ConsPubKey()
s.Require().NoError(err)
@ -122,3 +126,360 @@ func (s *StakingTestSuite) TestCreateValidator() {
})
}
}
func (s *StakingTestSuite) TestEditValidator() {
method := stakingprecompile.StakingFunctionEditValidator
description := stakingprecompile.Description{
Moniker: "test node",
Identity: "test node identity",
Website: "http://test.node.com",
SecurityContact: "test node security contact",
Details: "test node details",
}
newRate := math.LegacyOneDec().BigInt()
newRate.Div(newRate, big.NewInt(2))
minSelfDelegation := big.NewInt(2)
testCases := []struct {
name string
malleate func() []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte)
expError bool
errContains string
}{
{
"success",
func() []byte {
input, err := s.abi.Pack(
method,
description,
stakingprecompile.NullableUint{
IsNull: false,
Value: newRate,
},
stakingprecompile.NullableUint{
IsNull: true,
Value: math.LegacyOneDec().BigInt(),
},
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte) {},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
s.setupValidator(s.signerOne)
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
bz, err := s.runTx(tc.malleate(), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
// query the validator in the staking keeper
validator := s.StakingKeeper.Validator(s.Ctx, s.signerOne.ValAddr)
s.Require().NotNil(validator, "expected validator not to be nil")
tc.postCheck(bz)
isBonded := validator.IsBonded()
s.Require().Equal(true, isBonded, "expected validator bonded to be %t; got %t", true, isBonded)
operator := validator.GetOperator()
s.Require().Equal(s.signerOne.ValAddr, operator, "expected validator operator to be %s; got %s", s.signerOne.ValAddr, operator)
commissionRate := validator.GetCommission()
s.Require().Equal(newRate.String(), commissionRate.BigInt().String(), "expected validator commission rate to be %s; got %s", newRate.String(), commissionRate.BigInt().String())
valMinSelfDelegation := validator.GetMinSelfDelegation()
s.Require().Equal(big.NewInt(1).String(), valMinSelfDelegation.String(), "expected validator min self delegation to be %s; got %s", big.NewInt(1).String(), valMinSelfDelegation.String())
moniker := validator.GetMoniker()
s.Require().Equal(description.Moniker, moniker, "expected validator moniker to be %s; got %s", description.Moniker, moniker)
jailed := validator.IsJailed()
s.Require().Equal(false, jailed, "expected validator jailed to be %t; got %t", false, jailed)
}
})
}
}
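
NullableUint encodes the optional editValidator parameters: IsNull: true means "leave this field unchanged", and the accompanying Value is ignored. In the success case above the commission rate is halved while min self delegation keeps its original value of 1:

stakingprecompile.NullableUint{IsNull: false, Value: newRate}                      // update commission rate to 0.5
stakingprecompile.NullableUint{IsNull: true, Value: math.LegacyOneDec().BigInt()}  // min self delegation: unchanged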
func (s *StakingTestSuite) TestDelegate() {
method := stakingprecompile.StakingFunctionDelegate
testCases := []struct {
name string
malleate func(valAddr string) []byte
gas uint64
callerAddress *common.Address
postCheck func(valAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(valAddr string) []byte {
input, err := s.abi.Pack(
method,
valAddr,
big.NewInt(1000000),
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(valAddr sdk.ValAddress) {
d, found := s.stakingKeeper.GetDelegation(s.Ctx, s.signerOne.AccAddr, valAddr)
s.Assert().EqualValues(found, true)
s.Assert().EqualValues(d.ValidatorAddress, valAddr.String())
s.Assert().EqualValues(d.DelegatorAddress, s.signerOne.AccAddr.String())
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("delegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
bz, err := s.runTx(tc.malleate(operatorAddress.String()), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(operatorAddress)
}
})
}
}
func (s *StakingTestSuite) TestBeginRedelegate() {
method := stakingprecompile.StakingFunctionBeginRedelegate
testCases := []struct {
name string
malleate func(srcAddr, dstAddr string) []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte, srcAddr, dstAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(srcAddr, dstAddr string) []byte {
input, err := s.abi.Pack(
method,
srcAddr,
dstAddr,
big.NewInt(1000000),
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte, srcAddr, dstAddr sdk.ValAddress) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d, found := s.stakingKeeper.GetRedelegation(s.Ctx, s.signerOne.AccAddr, srcAddr, dstAddr)
s.Assert().EqualValues(found, true)
s.Assert().EqualValues(d.DelegatorAddress, s.signerOne.AccAddr.String())
s.Assert().EqualValues(d.ValidatorSrcAddress, srcAddr.String())
s.Assert().EqualValues(d.ValidatorDstAddress, dstAddr.String())
completionTime := out[0].(*big.Int)
params := s.stakingKeeper.GetParams(s.Ctx)
s.Assert().EqualValues(completionTime.Int64(), s.Ctx.BlockHeader().Time.Add(params.UnbondingTime).UTC().Unix())
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
operatorAddress, err := s.firstBondedValidator()
s.Require().NoError(err)
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
s.setupValidator(s.signerOne)
bz, err := s.runTx(tc.malleate(s.signerOne.ValAddr.String(), operatorAddress.String()), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(bz, s.signerOne.ValAddr, operatorAddress)
}
})
}
}
func (s *StakingTestSuite) TestUndelegate() {
method := stakingprecompile.StakingFunctionUndelegate
testCases := []struct {
name string
malleate func(valAddr string) []byte
gas uint64
callerAddress *common.Address
postCheck func(data []byte, valAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(valAddr string) []byte {
input, err := s.abi.Pack(
method,
valAddr,
big.NewInt(1000000),
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(data []byte, valAddr sdk.ValAddress) {
out, err := s.abi.Methods[method].Outputs.Unpack(data)
s.Require().NoError(err, "failed to unpack output")
d, found := s.stakingKeeper.GetUnbondingDelegation(s.Ctx, s.signerOne.AccAddr, valAddr)
s.Assert().EqualValues(found, true)
s.Assert().EqualValues(d.DelegatorAddress, s.signerOne.AccAddr.String())
s.Assert().EqualValues(d.ValidatorAddress, valAddr.String())
completionTime := out[0].(*big.Int)
params := s.stakingKeeper.GetParams(s.Ctx)
s.Assert().EqualValues(completionTime.Int64(), s.Ctx.BlockHeader().Time.Add(params.UnbondingTime).UTC().Unix())
// jsonData, _ := json.MarshalIndent(d, "", " ")
// fmt.Printf("redelegation: %s\n", string(jsonData))
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
s.setupValidator(s.signerOne)
bz, err := s.runTx(tc.malleate(s.signerOne.ValAddr.String()), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(bz, s.signerOne.ValAddr)
}
})
}
}
func (s *StakingTestSuite) TestCancelUnbondingDelegation() {
method := stakingprecompile.StakingFunctionCancelUnbondingDelegation
testCases := []struct {
name string
malleate func(valAddr string, height *big.Int) []byte
gas uint64
callerAddress *common.Address
postCheck func(valAddr sdk.ValAddress)
expError bool
errContains string
}{
{
"success",
func(valAddr string, height *big.Int) []byte {
input, err := s.abi.Pack(
method,
valAddr,
big.NewInt(1),
height,
)
s.Assert().NoError(err)
return input
},
200000,
nil,
func(valAddr sdk.ValAddress) {
_, found := s.stakingKeeper.GetUnbondingDelegation(s.Ctx, s.signerOne.AccAddr, valAddr)
s.Assert().EqualValues(found, false)
},
false,
"",
},
}
for _, tc := range testCases {
s.Run(tc.name, func() {
s.SetupTest()
// move block time forward
s.Ctx = s.Ctx.WithBlockTime(time.Now().Add(time.Hour * 100))
s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
s.setupValidator(s.signerOne)
// unbond
_, err := s.stakingKeeper.Undelegate(s.Ctx, s.signerOne.AccAddr, s.signerOne.ValAddr, sdk.NewDec(1))
s.Require().NoError(err)
u, _ := s.stakingKeeper.GetUnbondingDelegation(s.Ctx, s.signerOne.AccAddr, s.signerOne.ValAddr)
height := u.Entries[0].CreationHeight
bz, err := s.runTx(tc.malleate(s.signerOne.ValAddr.String(), big.NewInt(height)), s.signerOne, 10000000)
if tc.expError {
s.Require().ErrorContains(err, tc.errContains)
s.Require().Empty(bz)
} else {
s.Require().NoError(err)
tc.postCheck(s.signerOne.ValAddr)
}
})
}
}
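
Each mutation test above repeats the same two lines to advance block time and rebuild the statedb, since commission changes and unbonding entries are time-gated and the EVM statedb is bound to the context it was created with. A hypothetical helper (name and receiver assumed, not part of this diff) that would factor out the repetition:

func (s *StakingTestSuite) advanceBlockTime(d time.Duration) {
	s.Ctx = s.Ctx.WithBlockTime(s.Ctx.BlockTime().Add(d))
	s.Statedb = statedb.New(s.Ctx, s.EvmKeeper, statedb.NewEmptyTxConfig(common.BytesToHash(s.Ctx.HeaderHash().Bytes())))
}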

View File

@ -181,6 +181,12 @@ func convertQueryPageRequest(pagination PageRequest) *query.PageRequest {
}
func convertPageResponse(pagination *query.PageResponse) PageResponse {
if pagination == nil {
return PageResponse{
NextKey: make([]byte, 0),
Total: 1,
}
}
return PageResponse{
NextKey: pagination.NextKey,
Total: pagination.Total,
@ -329,7 +335,7 @@ func NewMsgCreateValidator(args []interface{}, sender common.Address, denom stri
}
value := args[4].(*big.Int)
return &stakingtypes.MsgCreateValidator{
msg := &stakingtypes.MsgCreateValidator{
Description: convertStakingDescription(description),
Commission: convertStakingCommissionRates(commission),
MinSelfDelegation: math.NewIntFromBigInt(minSelfDelegation),
@ -337,7 +343,8 @@ func NewMsgCreateValidator(args []interface{}, sender common.Address, denom stri
ValidatorAddress: sdk.ValAddress(sender.Bytes()).String(),
Pubkey: pkAny,
Value: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(value)},
}, nil
}
return msg, msg.ValidateBasic()
}
func NewMsgEditValidator(args []interface{}, sender common.Address) (*stakingtypes.MsgEditValidator, error) {
@ -360,12 +367,13 @@ func NewMsgEditValidator(args []interface{}, sender common.Address) (*stakingtyp
minSelfDelegation = &value
}
return &stakingtypes.MsgEditValidator{
msg := &stakingtypes.MsgEditValidator{
Description: convertStakingDescription(description),
CommissionRate: commissionRate,
ValidatorAddress: sdk.ValAddress(sender.Bytes()).String(),
MinSelfDelegation: minSelfDelegation,
}, nil
}
return msg, msg.ValidateBasic()
}
func NewMsgDelegate(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgDelegate, error) {
@ -375,11 +383,12 @@ func NewMsgDelegate(args []interface{}, sender common.Address, denom string) (*s
validatorAddress := args[0].(string)
amount := args[1].(*big.Int)
return &stakingtypes.MsgDelegate{
msg := &stakingtypes.MsgDelegate{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: validatorAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
}, nil
}
return msg, msg.ValidateBasic()
}
func NewMsgBeginRedelegate(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgBeginRedelegate, error) {
@ -390,12 +399,13 @@ func NewMsgBeginRedelegate(args []interface{}, sender common.Address, denom stri
validatorDstAddress := args[1].(string)
amount := args[2].(*big.Int)
return &stakingtypes.MsgBeginRedelegate{
msg := &stakingtypes.MsgBeginRedelegate{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorSrcAddress: validatorSrcAddress,
ValidatorDstAddress: validatorDstAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
}, nil
}
return msg, msg.ValidateBasic()
}
func NewMsgUndelegate(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgUndelegate, error) {
@ -405,11 +415,12 @@ func NewMsgUndelegate(args []interface{}, sender common.Address, denom string) (
validatorAddress := args[0].(string)
amount := args[1].(*big.Int)
return &stakingtypes.MsgUndelegate{
msg := &stakingtypes.MsgUndelegate{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: validatorAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
}, nil
}
return msg, msg.ValidateBasic()
}
func NewMsgCancelUnbondingDelegation(args []interface{}, sender common.Address, denom string) (*stakingtypes.MsgCancelUnbondingDelegation, error) {
@ -420,12 +431,13 @@ func NewMsgCancelUnbondingDelegation(args []interface{}, sender common.Address,
amount := args[1].(*big.Int)
creationHeight := args[2].(*big.Int)
return &stakingtypes.MsgCancelUnbondingDelegation{
msg := &stakingtypes.MsgCancelUnbondingDelegation{
DelegatorAddress: sdk.AccAddress(sender.Bytes()).String(),
ValidatorAddress: validatorAddress,
Amount: sdk.Coin{Denom: denom, Amount: math.NewIntFromBigInt(amount)},
CreationHeight: creationHeight.Int64(),
}, nil
}
return msg, msg.ValidateBasic()
}
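
Every NewMsg* constructor now ends with return msg, msg.ValidateBasic() instead of returning nil unconditionally, so malformed input (bad bech32 addresses, non-positive amounts, out-of-range commission rates) is rejected inside the precompile before the message reaches the staking msg server. The calling side needs no change; the handlers shown earlier already propagate the error:

msg, err := NewMsgDelegate(args, sender, denom)
if err != nil {
	return nil, err // now also covers ValidateBasic failures
}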
func NewQueryValidatorsRequest(args []interface{}) (*stakingtypes.QueryValidatorsRequest, error) {

View File

@ -3,6 +3,7 @@ package testutil
import (
"math/big"
"strings"
"time"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
@ -81,7 +82,7 @@ func (suite *PrecompileTestSuite) SetupTest() {
hexAddr := strings.ToLower(crypto.PubkeyToAddress(key.PublicKey).Hex()[2:])
valAddr, err := sdk.ValAddressFromHex(hexAddr)
suite.Assert().NoError(err)
suite.Ctx = suite.App.NewContext(true, tmproto.Header{Height: 1, ChainID: app.TestChainId, ProposerAddress: consAddress})
suite.Ctx = suite.App.NewContext(true, tmproto.Header{Height: 1, ChainID: app.TestChainId, ProposerAddress: consAddress, Time: time.Now()})
newValidator, err := stakingtypes.NewValidator(valAddr, privkey.PubKey(), stakingtypes.Description{})
suite.Assert().NoError(err)
err = suite.StakingKeeper.SetValidatorByConsAddr(suite.Ctx, newValidator)