Compare commits

..

98 Commits

Author SHA1 Message Date
0g-wh
14401bd2fb
add Upload Release Assets workflow (#49)
* Create upload-release-assets.yml
2024-07-30 18:31:48 +08:00
Solovyov1796
3b67405fe2
Merge pull request #48 from 0glabs/dev_merge_testnet
merge testnet script
2024-07-30 11:02:24 +08:00
Solovyov1796
b4b51d2ef8 update gitignore 2024-07-30 10:33:56 +08:00
Solovyov1796
faa7000efe merge testnet script 2024-07-30 10:30:56 +08:00
0g-wh
4d8001de22 add cosmovisor init script 2024-07-29 17:49:54 +08:00
Solovyov1796
2a63bffa89 use 0glabs' cometbft 2024-07-02 16:06:48 +08:00
Solovyov1796
f590e8b738
Merge pull request #37 from 0glabs/enable_vesting_msg
enable vesting msgs
2024-06-20 00:34:58 +08:00
0xsatoshi
e5f54f891e enable vesting msgs 2024-06-20 00:13:31 +08:00
MiniFrenchBread
d382cd10ec Merge remote-tracking branch 'origin/testnet/v0.1.x' into dev 2024-06-17 13:49:48 +08:00
0xsatoshi
be974c4b0c
Merge pull request #35 from Solovyov1796/testnet_merge_vesting
Merge pull request #34 from 0glabs/vesting
2024-06-17 11:16:37 +08:00
Solovyov1796
ee61e39769 Merge pull request #34 from 0glabs/vesting
calc inflation based on staking apy
2024-06-17 11:02:26 +08:00
0xsatoshi
c42d1f1181
Merge pull request #34 from 0glabs/vesting
calc inflation based on staking apy
2024-06-17 08:56:21 +08:00
0xsatoshi
8b0604651f fix 2024-06-16 23:18:18 +08:00
0xsatoshi
563d255930 fix 2024-06-16 17:26:38 +08:00
MiniFrenchBread
32ca84c90b
Merge pull request #31 from 0glabs/opt-quorum
refactor: epoch quorum storage
2024-06-14 19:10:06 +08:00
MiniFrenchBread
f3f9b706ea refactor: epoch quorum storage 2024-06-14 18:29:01 +08:00
0xsatoshi
b37e27e58b
Merge pull request #28 from 0glabs/dev
testnet: merge dev for relaunch version
2024-06-14 17:06:29 +08:00
MiniFrenchBread
455d173924
Merge pull request #30 from 0glabs/add-quorum-row
feat: getQuorumRow
2024-06-12 13:31:07 +08:00
MiniFrenchBread
344f8b0565 feat: getQuorumRow 2024-06-12 13:27:48 +08:00
Solovyov1796
0f2f82d98f
Merge pull request #29 from Solovyov1796/pr28
for review of Pr28
2024-06-12 10:25:49 +08:00
Solovyov1796
b8ed70c775 keep the EthSecp256k1 from cosmos for compatible 2024-06-12 02:00:16 +08:00
Solovyov1796
d2acd8bf79 recover "rename denoms" in 3 files 2024-06-12 01:47:42 +08:00
Solovyov1796
141796f158 use chaincfg.MakeCoinForGasDenom 2024-06-12 01:36:29 +08:00
MiniFrenchBread
f22eaea78a
Merge pull request #27 from 0glabs/dev-merge
dev: merge testnet
2024-06-11 15:39:07 +08:00
MiniFrenchBread
bb71f37b49 merge: testnet 2024-06-11 15:35:48 +08:00
MiniFrenchBread
2ab492da82
Merge pull request #26 from 0glabs/fix-decimals
fix: decimals in DASigner module
2024-06-11 15:25:52 +08:00
MiniFrenchBread
429946d0f2 chore: remove tmp output 2024-06-11 15:24:39 +08:00
MiniFrenchBread
ff01dec980 fix: decimals 2024-06-11 15:20:30 +08:00
MiniFrenchBread
7feec1d03b
Merge pull request #24 from 0glabs/signer-delegator
refactor: designers precompile signer registration
2024-06-11 12:06:09 +08:00
0xsatoshi
35a9c8376e
Merge pull request #25 from Solovyov1796/custom_inflation_calculation
custom inflation calculation function
2024-06-10 16:55:02 +08:00
Solovyov1796
1e01260353 custom inflation calculation function 2024-06-09 15:40:37 +08:00
MiniFrenchBread
4762d5acda refactor: delegator 2024-06-07 15:47:56 +08:00
MiniFrenchBread
9216f3dfc9 fix: localtestnet.sh 2024-06-07 13:35:36 +08:00
Solovyov1796
dd17b8f33d
Merge pull request #23 from 0glabs/dev-merge
merge testnet/v0.1.x
2024-06-07 12:55:25 +08:00
MiniFrenchBread
217d4cb048 tidy 2024-06-07 12:50:45 +08:00
MiniFrenchBread
8bdba3d46d merge testnet/v0.1.x 2024-06-07 12:47:44 +08:00
Solovyov1796
e84a7b02ff
Merge pull request #20 from 0glabs/fix_secp256k1
use eth's secp256k1 for cli keys add
2024-05-31 11:59:01 +08:00
Solovyov1796
eb1614dfde use eth's secp256k1 for cli keys add 2024-05-31 11:54:15 +08:00
Solovyov1796
63c1689224
Merge pull request #19 from Solovyov1796/v0.1.0_log_no_color
using 0glabs' cosmos-sdk, v0.46.11-0glabs.4
2024-05-30 14:07:06 +08:00
Solovyov1796
5389e161fe using 0glabs' cosmos-sdk, v0.46.11-0glabs.4 2024-05-30 13:05:18 +08:00
Solovyov1796
5db21cba18
Merge pull request #18 from Solovyov1796/remove_das
Remove das
2024-05-22 16:08:05 +08:00
Solovyov1796
578897f69f remove the EthSecp256k1 from cosmos 2024-05-21 18:45:42 +08:00
Solovyov1796
838a2d9ae9 rename denoms 2024-05-21 18:45:00 +08:00
Solovyov1796
a4ed55e9f3 fix unit test 2024-05-21 18:44:38 +08:00
Solovyov1796
b7ae5a4254 remove module's legacy code 2024-05-21 18:44:21 +08:00
Solovyov1796
f8cf8525a4 recover go mod file 2024-05-21 18:29:53 +08:00
Solovyov1796
db3d8316ef remove das module 2024-05-21 18:15:02 +08:00
0xsatoshi
5489b140da
Merge pull request #16 from 0glabs/quorum
Feat: Add Quorum in DASigners Precompile
2024-05-21 16:12:50 +08:00
MiniFrenchBread
cb39713e5d feat: add get functions 2024-05-18 23:01:28 +08:00
MiniFrenchBread
a501d1dae1 fix: da signers begin block 2024-05-16 23:46:12 +08:00
MiniFrenchBread
4f8d519a4d feat: max quorum num 2024-05-16 23:39:07 +08:00
MiniFrenchBread
93595837ca fix: quorum 2024-05-16 23:25:43 +08:00
MiniFrenchBread
0cc08ddba9 feat: quorum 2024-05-16 22:49:29 +08:00
MiniFrenchBread
b14d8118fc
Merge pull request #13 from 0glabs/precompile
feat: Precompile and DASigners module
2024-05-11 17:38:39 +08:00
MiniFrenchBread
dd986de448 fix: defaultGenesis 2024-05-11 17:33:24 +08:00
MiniFrenchBread
3f7ae266fb feat: update dasigners proto api 2024-05-11 17:13:54 +08:00
MiniFrenchBread
015f6224bd fix: dasigners module 2024-05-11 02:41:14 +08:00
MiniFrenchBread
c7a31a1a9f chore: dependency 2024-05-10 03:44:37 +08:00
MiniFrenchBread
0709f2e129 feat: precompile 2024-05-10 02:54:47 +08:00
Solovyov1796
7bc25a060f
rename the app name showed in usage (#10) 2024-05-06 22:23:18 +08:00
Peter Zhang
2a1d44c2d1 update max validator count 2024-05-06 15:26:24 +08:00
Peter Zhang
14e7e097fb update checkout branch 2024-05-06 08:16:53 +08:00
Solovyov1796
df8931b401 update init-genesis.sh for devnet and testnet 2024-05-05 15:06:31 +08:00
Solovyov1796
18b6199c1d fix unit test 2024-05-05 15:00:03 +08:00
Solovyov1796
119762996e recv both cosmos denom and evm denom from bank keeper 2024-05-05 14:17:37 +08:00
Solovyov1796
70b145ed41
Merge pull request #9 from 0glabs/fix_localtestnet
fix localtestnet.sh
2024-05-04 22:58:00 +08:00
0xsatoshi
758c81e481 fix 2024-05-04 19:30:24 +08:00
0xsatoshi
71b48e64ad fix 2024-05-04 19:22:22 +08:00
Solovyov1796
aef6312f70
Merge pull request #8 from 0glabs/fix_denom
fix denom
2024-05-04 15:36:31 +08:00
0xsatoshi
9b4a1e6b32 fix 2024-05-04 14:26:54 +08:00
Solovyov1796
b4064b7d9f update scripts 2024-05-03 23:26:17 +08:00
Solovyov1796
9926bd7169 update env vars 2024-05-03 23:14:04 +08:00
Solovyov1796
e012f3b35e Merge branch 'patch_testnet_1' of https://github.com/0glabs/0g-chain into patch_testnet_1 2024-05-03 22:54:18 +08:00
Solovyov1796
d98f766000 update 2024-05-03 22:54:07 +08:00
Peter Zhang
62fe3fb50e modify deploy script 2024-05-02 13:54:28 +08:00
Solovyov1796
8eb3b881fc fix unit test for x 2024-05-02 01:43:59 +08:00
Solovyov1796
ebff3e752d add scripts for devnet 2024-05-01 17:00:05 +08:00
Solovyov1796
de94f36095 fix panic 2024-05-01 16:56:23 +08:00
Solovyov1796
d7101481c0 merge script from branch v0.1.0 2024-05-01 16:11:39 +08:00
Solovyov1796
7991406367 fix test 2024-05-01 16:07:32 +08:00
Solovyov1796
c0f3951b40 add 0g code 2024-05-01 14:08:58 +08:00
Solovyov1796
5d11dcab48 rename kava 2024-05-01 13:53:58 +08:00
Solovyov1796
522d69e0a8 add vrf 2024-05-01 12:33:44 +08:00
Solovyov1796
6eb38a0aa4 revise file structure in cmd 2024-05-01 12:26:39 +08:00
Solovyov1796
4c21606bb5 add chaincfg to save all configuration of chain 2024-05-01 12:26:29 +08:00
Solovyov1796
b995c16abd update build file 2024-05-01 12:25:00 +08:00
Solovyov1796
c2af9df57f revise proto files 2024-05-01 11:56:00 +08:00
Solovyov1796
55dee00b2c remove useless modules 2024-05-01 11:46:33 +08:00
Solovyov1796
916f251143 rename go mod path 2024-05-01 11:17:24 +08:00
Peter Zhang
e6244b7e89 add deploy scripts 2024-04-25 19:10:53 +08:00
Peter Zhang
2f42984449 add deploy scripts 2024-04-21 20:06:41 +08:00
Robert Pirtle
1f82949c56 ci: update CI workflows and pipelines (#1768)
* build & publish rocksdb docker images merge to master
* publish docker images on push of release version tags

NOTE: New docker image tag pattern. ALL tags now include database suffix
ex. <githash>-goleveldb, v0.25.0-alpha.1-rocksdb, master-rocksdb, etc

* update dockerfiles for better caching
* update all github action workflow versions
* improve caching of go packages
* cache docker image layers for reuse between runs
* update dockerignore to remove non-essential files
2023-11-21 11:58:09 -08:00
drklee3
803f54113a
Add v0.25.0 upgrade guide and staking rewards doc (#1769)
* Add upgrade guide and staking rewards doc

* Update date

* Update date for upgrade vs failure

* Use height 7637650 for upgrade

* Adjust bolding

* Use linux/amd64 in docs

* Upgrade height at 7637070
2023-11-21 11:37:46 -08:00
Draco
821d67a20b
formatting fixes (#1783) 2023-11-21 11:13:04 -08:00
drklee3
c1e6321179
Update changelog with v0.25.0 (#1773) 2023-11-17 15:34:09 -08:00
drklee3
1db1e8da8a
fix: update ledger-cosmos-go v0.13.1 to resolve signing error with cosmos ledger app 2.34.12 (#1770)
* Update ledger-cosmos-go v0.13.1 with cosmos fork update

* Bump cosmos-sdk v0.46.11-kava.2

* Update changelog

* Update cosmos-sdk tag v0.46.11-kava.3

Incorrect kava.2 tag
2023-11-16 12:45:40 -08:00
Nick DeLuca
72063ddda9
correct rocksdb path -- ensure we use KAVA_HOME/data/application.db (#1767)
and not a nested application.db within that path
2023-11-06 17:06:50 -08:00
drklee3
802f1c8112
Add upgrade handler and upgrade e2e tests (#1739)
- Add upgrade handler for mainnet, testnet, and e2e test
- Set validator minimum commission to 5%
- Initialize `x/community` parameters
- Add `banktypes.MsgSend` authz grant for `x/kavadist` for gov proposals
- Set `x/gov` Quorum param to 20%
- Set `x/incentive` earn rewards param for bkava to 600K KAVA per year
2023-11-01 17:22:38 -07:00
578 changed files with 19252 additions and 52421 deletions

View File

@ -11,10 +11,5 @@ docs/
networks/
scratch/
# Ignore build cache directories to avoid
# errors when adding these to docker images
build/.cache
build/.golangci-lint
go.work
go.work.sum

3
.github/CODEOWNERS vendored
View File

@ -1,3 +0,0 @@
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# Global rule:
* @rhuairahrighairidh @karzak @pirtleshell @drklee3 @nddeluca @DracoLi @evgeniy-scherbina @sesheffield @boodyvo @lbayas

20
.github/mergify.yml vendored
View File

@ -25,8 +25,6 @@ pull_request_rules:
- release/v0.21.x
- release/v0.23.x
- release/v0.24.x
- release/v0.25.x
- release/v0.26.x
- name: Backport patches to the release/v0.17.x branch
conditions:
@ -81,21 +79,3 @@ pull_request_rules:
backport:
branches:
- release/v0.24.x
- name: Backport patches to the release/v0.25.x branch
conditions:
- base=master
- label=A:backport/v0.25.x
actions:
backport:
branches:
- release/v0.25.x
- name: Backport patches to the release/v0.26.x branch
conditions:
- base=master
- label=A:backport/v0.26.x
actions:
backport:
branches:
- release/v0.26.x

View File

@ -33,7 +33,7 @@ kava config chain-id "${CHAIN_ID}"
kava config keyring-backend test
# wait for transactions to be committed per CLI command
kava config broadcast-mode sync
kava config broadcast-mode block
# setup god's wallet
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god

View File

@ -1,14 +1,6 @@
#!/bin/bash
set -ex
# by sleeping 1 block in between tx's
# we can emulate the behavior of the
# deprecated and now removed (as of Kava 16)
# broadcast mode of `block` in order to
# minimize the chance tx's fail due to an
# account sequence number mismatch
AVG_SECONDS_BETWEEN_BLOCKS=6.5
# configure kava binary to talk to the desired chain endpoint
kava config node "${CHAIN_API_URL}"
kava config chain-id "${CHAIN_ID}"
@ -17,7 +9,7 @@ kava config chain-id "${CHAIN_ID}"
kava config keyring-backend test
# wait for transactions to be committed per CLI command
kava config broadcast-mode sync
kava config broadcast-mode block
# setup dev wallet
echo "${DEV_WALLET_MNEMONIC}" | kava keys add --recover dev-wallet
@ -31,8 +23,6 @@ echo "sweet ocean blush coil mobile ten floor sample nuclear power legend where
# fund evm-contract-deployer account (using issuance)
kava tx issuance issue 200000000ukava kava1van3znl6597xgwwh46jgquutnqkwvwszjg04fz --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# deploy and fund USDC ERC20 contract
MULTICHAIN_USDC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "USD Coin" USDC 6)
MULTICHAIN_USDC_CONTRACT_ADDRESS=${MULTICHAIN_USDC_CONTRACT_DEPLOY: -42}
@ -83,31 +73,6 @@ TETHER_USDT_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NA
TETHER_USDT_CONTRACT_ADDRESS=${TETHER_USDT_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000
# deploy and fund axlBNB ERC20 contract
AXL_BNB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBNB" axlBNB 18)
AXL_BNB_CONTRACT_ADDRESS=${AXL_BNB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlBUSD ERC20 contract
AXL_BUSD_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBUSD" axlBUSD 18)
AXL_BUSD_CONTRACT_ADDRESS=${AXL_BUSD_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlXRPB ERC20 contract
AXL_XRPB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlXRPB" axlXRPB 18)
AXL_XRPB_CONTRACT_ADDRESS=${AXL_XRPB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlBTC ERC20 contract
AXL_BTCB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBTCB" axlBTCB 18)
AXL_BTCB_CONTRACT_ADDRESS=${AXL_BTCB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund native wBTC ERC20 contract
WBTC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "wBTC" wBTC 8)
WBTC_CONTRACT_ADDRESS=${WBTC_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 100000000000000000
# seed some evm wallets
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
@ -116,11 +81,6 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
# seed webapp E2E whale account
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 100000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
@ -129,11 +89,6 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$WBTC_CONTRACT_ADDRESS" 10000000000000
# give dev-wallet enough delegation power to pass proposals by itself
@ -141,8 +96,6 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRAC
kava tx issuance issue 6000000000ukava kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq \
--from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# parse space separated list of validators
# into bash array
read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<<"$GENESIS_VALIDATOR_ADDRESSES"
@ -150,14 +103,11 @@ read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<<"$GENESIS_VALIDATOR_ADDRESSES"
# delegate 300KAVA to each validator
for validator in "${GENESIS_VALIDATOR_ADDRESS_ARRAY[@]}"; do
kava tx staking delegate "${validator}" 300000000ukava --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
done
# create a text proposal
kava tx gov submit-legacy-proposal --deposit 1000000000ukava --type "Text" --title "Example Proposal" --description "This is an example proposal" --gas auto --gas-adjustment 1.2 --from dev-wallet --gas-prices 0.01ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# setup god's wallet
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
@ -173,7 +123,7 @@ PARAM_CHANGE_PROP_TEMPLATE=$(
{
"subspace": "evmutil",
"key": "EnabledConversionPairs",
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"},{\"kava_erc20_address\":\"AXL_BNB_CONTRACT_ADDRESS\",\"denom\":\"bnb\"},{\"kava_erc20_address\":\"AXL_BUSD_CONTRACT_ADDRESS\",\"denom\":\"busd\"},{\"kava_erc20_address\":\"AXL_BTCB_CONTRACT_ADDRESS\",\"denom\":\"btcb\"},{\"kava_erc20_address\":\"AXL_XRPB_CONTRACT_ADDRESS\",\"denom\":\"xrpb\"},{\"kava_erc20_address\":\"WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/bitgo/wbtc\"}]"
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"}]"
}
]
}
@ -190,11 +140,6 @@ finalProposal="${finalProposal/AXL_USDC_CONTRACT_ADDRESS/$AXL_USDC_CONTRACT_ADDR
finalProposal="${finalProposal/AXL_WBTC_CONTRACT_ADDRESS/$AXL_WBTC_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/wETH_CONTRACT_ADDRESS/$wETH_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/TETHER_USDT_CONTRACT_ADDRESS/$TETHER_USDT_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BNB_CONTRACT_ADDRESS/$AXL_BNB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BUSD_CONTRACT_ADDRESS/$AXL_BUSD_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BTCB_CONTRACT_ADDRESS/$AXL_BTCB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_XRPB_CONTRACT_ADDRESS/$AXL_XRPB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/WBTC_CONTRACT_ADDRESS/$WBTC_CONTRACT_ADDRESS}"
# create unique proposal filename
proposalFileName="$(date +%s)-proposal.json"
@ -214,37 +159,16 @@ printf "original evm util module params\n %s" , "$originalEvmUtilParams"
# committee 1 is the stability committee. on internal testnet, this has only one member.
kava tx committee submit-proposal 1 "$proposalFileName" --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# vote on the proposal. this assumes no other committee proposal has ever been submitted (id=1)
kava tx committee vote 1 yes --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# fetch current module params
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"
# submit a kava token committee proposal
COMMITTEE_PROP_TEMPLATE=$(
cat <<'END_HEREDOC'
{
"@type": "/cosmos.gov.v1beta1.TextProposal",
"title": "The next big thing signaling proposal.",
"description": "The purpose of this proposal is to signal support/opposition to the next big thing"
}
END_HEREDOC
)
committeeProposalFileName="$(date +%s)-committee-proposal.json"
echo "$COMMITTEE_PROP_TEMPLATE" >$committeeProposalFileName
tokenCommitteeId=4
kava tx committee submit-proposal "$tokenCommitteeId" "$committeeProposalFileName" --gas auto --gas-adjustment 1.5 --gas-prices 0.01ukava --from god -y
# if adding more cosmos coins -> er20s, ensure that the deployment order below remains the same.
# convert 1 HARD to an erc20. doing this ensures the contract is deployed.
kava tx evmutil convert-cosmos-coin-to-erc20 \
"$DEV_TEST_WALLET_ADDRESS" \
1000000hard \
--from dev-wallet --gas 2000000 --gas-prices 0.001ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS

View File

@ -1,14 +1,6 @@
#!/bin/bash
set -ex
# by sleeping 1 block in between tx's
# we can emulate the behavior of the
# deprecated and now removed (as of Kava 16)
# broadcast mode of `block` in order to
# minimize the chance tx's fail due to an
# account sequence number mismatch
AVG_SECONDS_BETWEEN_BLOCKS=6.5
# configure kava binary to talk to the desired chain endpoint
kava config node "${CHAIN_API_URL}"
kava config chain-id "${CHAIN_ID}"
@ -17,7 +9,7 @@ kava config chain-id "${CHAIN_ID}"
kava config keyring-backend test
# wait for transactions to be committed per CLI command
kava config broadcast-mode sync
kava config broadcast-mode block
# setup dev wallet
echo "${DEV_WALLET_MNEMONIC}" | kava keys add --recover dev-wallet
@ -31,13 +23,9 @@ echo "sweet ocean blush coil mobile ten floor sample nuclear power legend where
# fund evm-contract-deployer account (using issuance)
kava tx issuance issue 200000000ukava kava1van3znl6597xgwwh46jgquutnqkwvwszjg04fz --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# fund 5k kava to x/community account
kava tx community fund-community-pool 5000000000ukava --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# deploy and fund USDC ERC20 contract
MULTICHAIN_USDC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "USD Coin" USDC 6)
MULTICHAIN_USDC_CONTRACT_ADDRESS=${MULTICHAIN_USDC_CONTRACT_DEPLOY: -42}
@ -101,8 +89,6 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CON
kava tx issuance issue 6000000000ukava kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq \
--from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# parse space separated list of validators
# into bash array
read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<< "$GENESIS_VALIDATOR_ADDRESSES"
@ -111,14 +97,11 @@ read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<< "$GENESIS_VALIDATOR_ADDRESSES"
for validator in "${GENESIS_VALIDATOR_ADDRESS_ARRAY[@]}"
do
kava tx staking delegate "${validator}" 300000000ukava --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
done
# create a text proposal
kava tx gov submit-legacy-proposal --deposit 1000000000ukava --type "Text" --title "Example Proposal" --description "This is an example proposal" --gas auto --gas-adjustment 1.2 --from dev-wallet --gas-prices 0.01ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# setup god's wallet
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
@ -167,13 +150,9 @@ printf "original evm util module params\n %s" , "$originalEvmUtilParams"
# https://github.com/0glabs/0g-chain/pull/1556/files#diff-0bd6043650c708661f37bbe6fa5b29b52149e0ec0069103c3954168fc9f12612R900-R903
kava tx committee submit-proposal 1 "$proposalFileName" --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# vote on the proposal. this assumes no other committee proposal has ever been submitted (id=1)
kava tx committee vote 1 yes --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# fetch current module params
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"

View File

@ -1,6 +1,5 @@
name: Continuous Deployment (Internal Testnet)
# run after every successful CI job of new commits to the master branch
# if deploy version or config has changed
on:
workflow_run:
workflows: [Continuous Integration (Kava Master)]
@ -8,23 +7,6 @@ on:
- completed
jobs:
changed_files:
runs-on: ubuntu-latest
# define output for first job forwarding output of changedInternalTestnetConfig job
outputs:
changedInternalTestnetConfig: ${{ steps.changed-internal-testnet-config.outputs.any_changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # OR "2" -> To retrieve the preceding commit.
- name: Get all changed internal testnet files
id: changed-internal-testnet-config
uses: tj-actions/changed-files@v42
with:
# Avoid using single or double quotes for multiline patterns
files: |
ci/env/kava-internal-testnet/**
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
@ -32,9 +14,8 @@ jobs:
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
needs: [changed_files]
# only start cd pipeline if last ci run was successful
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.changed_files.outputs.changedInternalTestnetConfig == 'true' }}
if: ${{ github.event.workflow_run.conclusion == 'success' }}
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1

View File

@ -1,54 +0,0 @@
name: Manual Deployment (Protonet)
# allow to be triggered manually
on: workflow_dispatch
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-protonet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
chain-id: proto_2221-17000
seed-script-filename: seed-protonet.sh
erc20-deployer-network-name: protonet
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we metric failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.proto
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]

View File

@ -67,6 +67,7 @@ jobs:
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--node-states=Standby \
--wait-for-node-sync-after-upgrade=false
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}

View File

@ -35,16 +35,12 @@ jobs:
uses: actions/checkout@v4
with:
ref: master
- name: get desired version of network
id: kava-version
- name: checkout version of kava used by network
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
git pull -p
git checkout $(cat ${KAVA_VERSION_FILEPATH})
env:
KAVA_VERSION_FILEPATH: ${{ inputs.kava_version_filepath }}
- name: checkout version of kava used by network
uses: actions/checkout@v4
with:
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:
@ -72,7 +68,7 @@ jobs:
uses: actions/setup-node@v3
with:
cache: npm
node-version-file: .tool-versions
node-version: 18
cache-dependency-path: kava-bridge/contract/package.json
- name: "install ERC20 contract deployment dependencies"
run: "npm install"
@ -80,8 +76,8 @@ jobs:
- name: compile default erc20 contracts
run: make compile-contracts
working-directory: kava-bridge
- name: download seed script from current commit
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/${GITHUB_SHA}/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
- name: download seed script from master
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/master/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
working-directory: kava-bridge/contract
env:
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}

View File

@ -63,6 +63,7 @@ jobs:
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--node-states=Standby \
--wait-for-node-sync-after-upgrade=true
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}

View File

@ -35,35 +35,6 @@ jobs:
run: make test
- name: run e2e tests
run: make docker-build test-e2e
fuzz:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
- name: run fuzz tests
run: make test-fuzz
ibc-test:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: tests/e2e-ibc/go.mod
cache-dependency-path: |
tests/e2e-ibc/go.sum
go.sum
- name: run ibc e2e tests
run: make test-ibc
validate-internal-testnet-genesis:
runs-on: ubuntu-latest
steps:
@ -73,10 +44,16 @@ jobs:
id: kava-version
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
- name: checkout version of kava that will be deployed if this pr is merged
- name: checkout repo from master
uses: actions/checkout@v4
with:
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
ref: master
- name: checkout version of kava that will be deployed if this pr is merged
run: |
git pull -p
git checkout $KAVA_VERSION
env:
KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:

View File

@ -50,17 +50,6 @@ jobs:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
- name: Go Build Cache for Docker
uses: actions/cache@v3
with:
path: go-build-cache
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
- name: inject go-build-cache into docker
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
with:
cache-source: go-build-cache
# publish to docker hub, tag with short git hash
- name: Build and push (goleveldb)
uses: docker/build-push-action@v5
@ -100,17 +89,6 @@ jobs:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
- name: Go Build Cache for Docker
uses: actions/cache@v3
with:
path: go-build-cache
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
- name: inject go-build-cache into docker
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
with:
cache-source: go-build-cache
# publish to docker hub, tag with short git hash
- name: Build and push (rocksdb)
uses: docker/build-push-action@v5

View File

@ -7,25 +7,11 @@ jobs:
uses: ./.github/workflows/proto.yml
golangci-lint:
runs-on: ubuntu-latest
permissions:
checks: write # allow write access to checks to allow the action to annotate code in the PR.
steps:
- name: Checkout code
uses: actions/checkout@v4
with: { fetch-depth: 0 }
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: Load Version
id: load-version
run: |
GOLANGCI_VERSION=$(cat .golangci-version)
REV=$(git merge-base origin/master HEAD)
echo "GOLANGCI_VERSION=$GOLANGCI_VERSION" >> $GITHUB_ENV
echo "REV=$REV" >> $GITHUB_ENV
- uses: actions/checkout@v4
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
uses: reviewdog/action-golangci-lint@v2
with:
version: ${{ env.GOLANGCI_VERSION }}
args: -v -c .golangci.yml --new-from-rev ${{ env.REV }}
github_token: ${{ secrets.github_token }}
reporter: github-pr-review
golangci_lint_flags: --timeout 10m

View File

@ -29,7 +29,7 @@ jobs:
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
env:
ROCKSDB_VERSION: v8.10.0
ROCKSDB_VERSION: v8.1.1
- name: Build and upload release artifacts
run: bash ${GITHUB_WORKSPACE}/.github/scripts/publish-internal-release-artifacts.sh
env:
@ -45,9 +45,6 @@ jobs:
dockerhub-username: kavaops
extra-image-tag: master
secrets: inherit
rosetta:
uses: ./.github/workflows/ci-rosetta.yml
secrets: inherit
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we metric failures and successes

View File

@ -1,27 +0,0 @@
# this workflow is responsible for ensuring quality titles are given to all PRs
# for PR checks to pass, the title must follow the Conventional Commits standard
# https://www.conventionalcommits.org/en/v1.0.0/
# this workflow was adapted from a similar workflow in https://github.com/cosmos/cosmos-sdk
name: "Lint PR Title"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
contents: read
jobs:
main:
permissions:
pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs
statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR
runs-on: ubuntu-latest
steps:
# https://github.com/marketplace/actions/semantic-pull-request
- uses: amannn/action-semantic-pull-request@v5.5.3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -4,6 +4,9 @@ on:
tags:
- "v[0-9]+.[0-9]+.[0-9]+*"
jobs:
# run per commit ci checks against released version
lint-checks:
uses: ./.github/workflows/ci-lint.yml
# run default ci checks against released version
default-checks:
uses: ./.github/workflows/ci-default.yml
@ -11,7 +14,7 @@ jobs:
# get the version tag that triggered this workflow
get-version-tag:
# prep version release only if all checks pass
needs: default-checks
needs: [lint-checks, default-checks]
runs-on: ubuntu-latest
outputs:
git-tag: ${{ steps.git-tag.outputs.tag }}

View File

@ -1,7 +1,7 @@
name: Continuous Integration (Rocksdb Build)
env:
ROCKSDB_VERSION: v8.10.0
ROCKSDB_VERSION: v8.1.1
on:
workflow_call:
@ -19,3 +19,25 @@ jobs:
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
- name: build application
run: make build COSMOS_BUILD_OPTIONS=rocksdb
test:
runs-on: ubuntu-latest
steps:
- name: install RocksDB dependencies
run: sudo apt-get update
&& sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
- name: install RocksDB as shared library
run: git clone https://github.com/facebook/rocksdb.git
&& cd rocksdb
&& git checkout $ROCKSDB_VERSION
&& sudo make -j$(nproc) install-shared
&& sudo ldconfig
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: run unit tests
run: make test-rocksdb

View File

@ -1,16 +0,0 @@
name: Dispatch run-rosetta-tests event to rosetta-kava
on:
workflow_call:
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Dispatch run-rosetta-tests event to rosetta-kava
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.KAVA_PUBLIC_GITHUB_ACCESS_TOKEN }}
repository: Kava-Labs/rosetta-kava
event-type: run-rosetta-tests
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

View File

@ -12,11 +12,11 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
go-version: '1.20'
- name: Build
run: sudo LINK_STATICALLY=true make build-release
run: make build
- name: Rename file
run: sudo mv ./out/linux/0gchaind ./out/linux/0gchaind-linux-${{ github.ref_name }}
run: mv ./out/linux/0gchaind ./out/linux/0gchaind-linux-${{ github.ref_name }}
- name: Upload Release Asset
uses: softprops/action-gh-release@v2
with:

3
.gitignore vendored
View File

@ -31,9 +31,6 @@ out
# Ignore build cache dir
build/.cache
# Ignore make lint cache
build/.golangci-lint
# Ignore installed binaires
build/bin

View File

@ -1 +0,0 @@
v1.59

View File

@ -1,130 +0,0 @@
run:
timeout: 20m # set maximum time allowed for the linter to run. If the linting process exceeds this duration, it will be terminated
modules-download-mode: readonly # Ensures that modules are not modified during the linting process
allow-parallel-runners: true # enables parallel execution of linters to speed up linting process
linters:
disable-all: true
enable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- containedctx
- contextcheck
- decorder
- dogsled
# - dupl
# - dupword
- durationcheck
- errcheck
- errchkjson
- errname
- errorlint
# - exhaustive
- exportloopref
- funlen
- gci
- ginkgolinter
- gocheckcompilerdirectives
# - gochecknoglobals
# - gochecknoinits
- goconst
- gocritic
- godox
- gofmt
# - gofumpt
- goheader
- goimports
- mnd
# - gomodguard
- goprintffuncname
- gosec
- gosimple
- govet
- grouper
- importas
- ineffassign
# - interfacebloat
- lll
- loggercheck
- makezero
- mirror
- misspell
- musttag
# - nakedret
# - nestif
- nilerr
# - nilnil
# - noctx
- nolintlint
# - nonamedreturns
- nosprintfhostport
- prealloc
- predeclared
- promlinter
# - reassign
- revive
- rowserrcheck
- staticcheck
# - stylecheck
- tagalign
# - testpackage
# - thelper
# - tparallel
- typecheck
# - unconvert
- unparam
- unused
# - usestdlibvars
- wastedassign
# - whitespace
- wrapcheck
issues:
exclude-rules:
# Disable funlen for "func Test..." or func (suite *Suite) Test..." type functions
# These functions tend to be descriptive and exceed length limits.
- source: "^func (\\(.*\\) )?Test"
linters:
- funlen
linters-settings:
errcheck:
check-blank: true # check for assignments to the blank identifier '_' when errors are returned
check-type-assertions: false # check type assertion
errorlint:
check-generated: false # disabled linting of generated files
default-signifies-exhaustive: false # exhaustive handling of error types
exhaustive:
default-signifies-exhaustive: false # exhaustive handling of error types
gci:
sections: # defines the order of import sections
- standard
- default
- localmodule
goconst:
min-len: 3 # min length for string constants to be checked
min-occurrences: 3 # min occurrences of the same constant before it's flagged
godox:
keywords: # specific keywords to flag for further action
- BUG
- FIXME
- HACK
gosec:
exclude-generated: true
lll:
line-length: 120
misspell:
locale: US
ignore-words: expect
nolintlint:
allow-leading-space: false
require-explanation: true
require-specific: true
prealloc:
simple: true # enables simple preallocation checks
range-loops: true # enabled preallocation checks in range loops
for-loops: false # disables preallocation checks in for loops
unparam:
check-exported: true # checks exported functions and methods for unused params

View File

@ -1,16 +0,0 @@
# Generate EXPECT() methods, type-safe methods to generate call expectations
with-expecter: true
# Generate mocks in adjacent mocks directory to the interfaces
dir: "{{.InterfaceDir}}/mocks"
mockname: "Mock{{.InterfaceName}}"
outpkg: "mocks"
filename: "Mock{{.InterfaceName}}.go"
packages:
github.com/0glabs/0g-chain/x/precisebank/types:
# package-specific config
config:
interfaces:
AccountKeeper:
BankKeeper:

View File

@ -1,2 +1,2 @@
golang 1.21.9
nodejs 20.16.0
golang 1.20
nodejs 18.16.0

View File

@ -36,29 +36,6 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
## [v0.26.0]
### Features
- (precisebank) [#1906] Add new `x/precisebank` module with bank decimal extension for EVM usage.
- (cli) [#1922] Add `iavlviewer` CLI command for low-level iavl db debugging.
### Improvements
- (rocksdb) [#1903] Bump cometbft-db dependency for use with rocksdb v8.10.0
- (deps) [#1988] Bump cometbft to v0.37.9-kava.1
## [v0.26.0]
### Features
- (cli) [#1785] Add `shard` CLI command to support creating partitions of data for standalone nodes
- (cdp) [#1818] Add module param and logic for running x/cdp begin blocker every `n` blocks
- (cli) [#1804] Add `rocksdb compact` command for manual DB compaction of state or blockstore
- (cosmos-sdk) [#1811] [#1846] Upgrades app to cosmos-sdk v0.47.10 with iavl v1 support
- (validator-vesting) [#1832] Add grpc query service to replace removed legacy querier
- (incentive) [#1836] Update x/incentive cli to use grpc query client
- (ibc) [#1839] Add ibc packet forward middleware for ibc transfers
- (evmutil) [#1848] Update evm native conversion logic to handle bep3 assets
## [v0.25.0]
### Features
@ -66,12 +43,11 @@ Ref: https://keepachangelog.com/en/1.0.0/
- (community) [#1704] Add module params
- (community) [#1706] Add disable inflation upgrade
- (community) [#1745] Enable params update via governance with `MsgUpdateParams`
- (client) [#1784] Add Kava gRPC client
### Bug Fixes
- (ethermint) [#1788] Fixes issue where tracing a transaction could show it's status as successful when isolated in simulation even if the tx when executed on the chain failed due to an error such as exhausting the block gas meter
- (evmutil) [#1655] Initialize x/evmutil module account in InitGenesis
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
cosmos ledger app 2.34.12
## State Machine Breaking
@ -84,19 +60,9 @@ Ref: https://keepachangelog.com/en/1.0.0/
- (community) [#1755] Keep funds in `x/community` in `CommunityPoolLendWithdrawProposal` handler
- (staking) [#1761] Set validator minimum commission to 5% for all validators under 5%
## [v0.24.3]
### Bug Fixes
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
- (rocksdb) [#1767] Fix resolution of rocksdb database path introduced in v0.24.2
**Note**: There was a bug released as v0.24.2. The tag has been removed and the commit should not be used.
## [v0.24.1]
### Features
- (metrics) [#1668] Adds non-state breaking x/metrics module for custom telemetry.
- (metrics) [#1669] Add performance timing metrics to all Begin/EndBlockers
- (community) [#1751] Add `AnnualizedRewards` query endpoint
@ -340,19 +306,6 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
- [#257](https://github.com/Kava-Labs/kava/pulls/257) Include scripts to run
large-scale simulations remotely using aws-batch
[#1988]: https://github.com/Kava-Labs/kava/pull/1988
[#1922]: https://github.com/Kava-Labs/kava/pull/1922
[#1906]: https://github.com/Kava-Labs/kava/pull/1906
[#1903]: https://github.com/Kava-Labs/kava/pull/1903
[#1846]: https://github.com/Kava-Labs/kava/pull/1846
[#1848]: https://github.com/Kava-Labs/kava/pull/1848
[#1839]: https://github.com/Kava-Labs/kava/pull/1839
[#1836]: https://github.com/Kava-Labs/kava/pull/1836
[#1832]: https://github.com/Kava-Labs/kava/pull/1832
[#1811]: https://github.com/Kava-Labs/kava/pull/1811
[#1804]: https://github.com/Kava-Labs/kava/pull/1804
[#1785]: https://github.com/Kava-Labs/kava/pull/1785
[#1784]: https://github.com/Kava-Labs/kava/pull/1784
[#1770]: https://github.com/Kava-Labs/kava/pull/1770
[#1755]: https://github.com/Kava-Labs/kava/pull/1755
[#1761]: https://github.com/Kava-Labs/kava/pull/1761
@ -406,13 +359,14 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
[#750]: https://github.com/Kava-Labs/kava/pull/750
[#751]: https://github.com/Kava-Labs/kava/pull/751
[#780]: https://github.com/Kava-Labs/kava/pull/780
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.26.0...HEAD
[v0.26.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.26.0
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.25.0
[v0.24.3]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.24.1
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.24.0
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.23.2
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.25.0...HEAD
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.25.0
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.24.1
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.23.2...v0.24.0
[v0.23.2]: https://github.com/Kava-Labs/kava/compare/v0.23.1...v0.23.2
[v0.23.1]: https://github.com/Kava-Labs/kava/compare/v0.23.0...v0.23.1
[v0.23.0]: https://github.com/Kava-Labs/kava/compare/v0.21.1...v0.23.0
[v0.16.1]: https://github.com/Kava-Labs/kava/compare/v0.16.0...v0.16.1
[v0.16.0]: https://github.com/Kava-Labs/kava/compare/v0.15.2...v0.16.0

View File

@ -1,4 +1,4 @@
FROM golang:1.21-alpine AS build-env
FROM golang:1.20-alpine AS build-env
# Set up dependencies
# bash, jq, curl for debugging
@ -19,15 +19,6 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go version && go mod download
# Cosmwasm - Download correct libwasmvm version
RUN ARCH=$(uname -m) && WASMVM_VERSION=$(go list -m github.com/CosmWasm/wasmvm | sed 's/.* //') && \
wget https://github.com/CosmWasm/wasmvm/releases/download/$WASMVM_VERSION/libwasmvm_muslc.$ARCH.a \
-O /lib/libwasmvm.$ARCH.a && \
# verify checksum
wget https://github.com/CosmWasm/wasmvm/releases/download/$WASMVM_VERSION/checksums.txt -O /tmp/checksums.txt && \
sha256sum /lib/libwasmvm.$ARCH.a | grep $(cat /tmp/checksums.txt | grep libwasmvm_muslc.$ARCH | cut -d ' ' -f 1)
# Add source files
COPY . .
@ -36,7 +27,6 @@ COPY . .
# Mount go build and mod caches as container caches, persisted between builder invocations
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
LINK_STATICALLY=true \
make install
FROM alpine:3.15

View File

@ -1,42 +0,0 @@
FROM --platform=linux/amd64 ubuntu:24.04
# Install dependencies
RUN apt-get update && \
apt-get install -y \
git \
sudo \
wget \
jq \
make \
gcc \
unzip && \
rm -rf /var/lib/apt/lists/*
# Install Go
RUN wget https://golang.org/dl/go1.22.5.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go1.22.5.linux-amd64.tar.gz && \
rm go1.22.5.linux-amd64.tar.gz
# Set Go environment variables
ENV GOPATH=/root/go
ENV PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
# Create Go workspace directory
RUN mkdir -p /root/go
WORKDIR /root
# https://docs.0g.ai/0g-doc/run-a-node/validator-node
RUN git clone -b v0.2.3 https://github.com/0glabs/0g-chain.git
RUN ./0g-chain/networks/testnet/install.sh
RUN 0gchaind config chain-id zgtendermint_16600-2
RUN 0gchaind init testnetnode --chain-id zgtendermint_16600-2
RUN rm ~/.0gchain/config/genesis.json
RUN wget -P ~/.0gchain/config https://github.com/0glabs/0g-chain/releases/download/v0.2.3/genesis.json
RUN 0gchaind validate-genesis
RUN sed -i 's|seeds = ""|seeds = "81987895a11f6689ada254c6b57932ab7ed909b6@54.241.167.190:26656,010fb4de28667725a4fef26cdc7f9452cc34b16d@54.176.175.48:26656,e9b4bc203197b62cc7e6a80a64742e752f4210d5@54.193.250.204:26656,68b9145889e7576b652ca68d985826abd46ad660@18.166.164.232:26656"|' $HOME/.0gchain/config/config.toml
ENTRYPOINT ["0gchaind", "start"]

View File

@ -1,6 +1,23 @@
FROM kava/rocksdb:v8.10.1-go1.21 AS kava-builder
FROM golang:1.20-bullseye AS chain-builder
RUN apt-get update
# Set up dependencies
RUN apt-get update \
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
&& rm -rf /var/lib/apt/lists/*
# Set working directory for the build
WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v8.1.1
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \
&& cd rocksdb \
&& git checkout $ROCKSDB_VERSION \
&& make -j$(nproc) install-shared \
&& ldconfig
WORKDIR /root/0gchain
# Copy dependency files first to facilitate dependency caching

View File

@ -1,22 +0,0 @@
# published to https://hub.docker.com/repository/docker/kava/rocksdb/tags
# docker buildx build --platform linux/amd64,linux/arm64 -t kava/rocksdb:v8.10.1-go1.21 -f Dockerfile-rocksdb-base . --push
FROM golang:1.21-bullseye
# Set up dependencies
RUN apt-get update \
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
&& rm -rf /var/lib/apt/lists/*
# Set working directory for the build
WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v8.10.0
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \
&& cd rocksdb \
&& git checkout $ROCKSDB_VERSION \
&& make -j$(nproc) install-shared \
&& ldconfig

View File

@ -6,8 +6,6 @@ BINARY_NAME := 0gchaind
MAIN_ENTRY := ./cmd/$(BINARY_NAME)
DOCKER_IMAGE_NAME := 0glabs/$(PROJECT_NAME)
GO_BIN ?= go
ARCH := $(shell uname -m)
WASMVM_VERSION := $(shell $(GO_BIN) list -m github.com/CosmWasm/wasmvm | sed 's/.* //')
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
GIT_COMMIT := $(shell git rev-parse HEAD)
@ -32,7 +30,7 @@ VERSION := $(GIT_COMMIT_SHORT)
VERSION_NUMBER := $(VERSION)
endif
TENDERMINT_VERSION := $(shell $(GO_BIN) list -m github.com/cometbft/cometbft | sed 's:.* ::')
TENDERMINT_VERSION := $(shell $(GO_BIN) list -m github.com/tendermint/tendermint | sed 's:.* ::')
COSMOS_SDK_VERSION := $(shell $(GO_BIN) list -m github.com/cosmos/cosmos-sdk | sed 's:.* ::')
.PHONY: print-git-info
@ -105,8 +103,6 @@ include $(BUILD_DIR)/deps.mk
include $(BUILD_DIR)/proto.mk
include $(BUILD_DIR)/proto-deps.mk
include $(BUILD_DIR)/lint.mk
#export GO111MODULE = on
# process build tags
build_tags = netgo
@ -153,7 +149,7 @@ ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=$(PROJECT_NAME) \
-X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION_NUMBER) \
-X github.com/cosmos/cosmos-sdk/version.Commit=$(GIT_COMMIT) \
-X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \
-X github.com/cometbft/cometbft/version.TMCoreSemVer=$(TENDERMINT_VERSION)
-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(TENDERMINT_VERSION)
# DB backend selection
ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
@ -178,10 +174,6 @@ endif
ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
ldflags += -w -s
endif
ifeq ($(LINK_STATICALLY),true)
ldflags += -linkmode=external -extldflags "-Wl,-z,muldefs -static -lm"
endif
ldflags += $(LDFLAGS)
ldflags := $(strip $(ldflags))
@ -203,21 +195,9 @@ else
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/$(BINARY_NAME) $(MAIN_ENTRY)
endif
build-release: go.sum
wget -q https://github.com/CosmWasm/wasmvm/releases/download/$(WASMVM_VERSION)/libwasmvm_muslc.$(ARCH).a -O /lib/libwasmvm.$(ARCH).a
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/$(BINARY_NAME) $(MAIN_ENTRY)
build-linux: go.sum
LEDGER_ENABLED=false GOOS=linux GOARCH=amd64 $(MAKE) build
# build on rocksdb-backed kava on macOS with shared libs from brew
# this assumes you are on macOS & these deps have been installed with brew:
# rocksdb, snappy, lz4, and zstd
# use like `make build-rocksdb-brew COSMOS_BUILD_OPTIONS=rocksdb`
build-rocksdb-brew:
export CGO_CFLAGS := -I$(shell brew --prefix rocksdb)/include
export CGO_LDFLAGS := -L$(shell brew --prefix rocksdb)/lib -lrocksdb -lstdc++ -lm -lz -L$(shell brew --prefix snappy)/lib -L$(shell brew --prefix lz4)/lib -L$(shell brew --prefix zstd)/lib
install: go.sum
$(GO_BIN) install -mod=readonly $(BUILD_FLAGS) $(MAIN_ENTRY)
@ -244,6 +224,13 @@ link-check:
# TODO: replace kava in following line with project name
liche -r . --exclude "^http://127.*|^https://riot.im/app*|^http://kava-testnet*|^https://testnet-dex*|^https://kava3.data.kava.io*|^https://ipfs.io*|^https://apps.apple.com*|^https://kava.quicksync.io*"
lint:
golangci-lint run
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" | xargs gofmt -d -s
$(GO_BIN) mod verify
.PHONY: lint
format:
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs gofmt -w -s
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs misspell -w
@ -268,11 +255,11 @@ build-docker-local-0gchain:
# Run a 4-node testnet locally
localnet-start: build-linux localnet-stop
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z kava/kavanode testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
$(DOCKER) compose up -d
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z $(DOCKER_IMAGE_NAME)-node testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
docker-compose up -d
localnet-stop:
$(DOCKER) compose down
docker-compose down
# Launch a new single validator chain
start:
@ -314,14 +301,12 @@ test-basic: test
test-e2e: docker-build
$(GO_BIN) test -failfast -count=1 -v ./tests/e2e/...
# run interchaintest tests (./tests/e2e-ibc)
test-ibc: docker-build
cd tests/e2e-ibc && KAVA_TAG=local $(GO_BIN) test -timeout 10m .
.PHONY: test-ibc
test:
@$(GO_BIN) test $$($(GO_BIN) list ./... | grep -v 'contrib' | grep -v 'tests/e2e')
test-rocksdb:
@go test -tags=rocksdb $(MAIN_ENTRY)/opendb
# Run cli integration tests
# `-p 4` to use 4 cores, `-tags cli_test` to tell $(GO_BIN) not to ignore the cli package
# These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set.
@ -332,18 +317,6 @@ test-cli: build
test-migrate:
@$(GO_BIN) test -v -count=1 ./migrate/...
# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169
ifeq ($(OS_FAMILY),Darwin)
FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic
endif
test-fuzz:
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzMintCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzBurnCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzSendCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_NonZeroRemainder ./x/precisebank/types
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_ZeroRemainder ./x/precisebank/types
# Kick start lots of sims on an AWS cluster.
# This submits an AWS Batch job to run a lot of sims, each within a docker image. Results are uploaded to S3
start-remote-sims:
@ -354,14 +327,13 @@ start-remote-sims:
# submit an array job on AWS Batch, using 1000 seeds, spot instances
aws batch submit-job \
-—job-name "master-$(VERSION)" \
-—job-queue "simulation-1-queue-spot" \
-—job-queue “simulation-1-queue-spot" \
-—array-properties size=1000 \
-—job-definition $(BINARY_NAME)-sim-master \
-—container-override environment=[{SIM_NAME=master-$(VERSION)}]
update-kvtool:
git submodule init || true
git submodule update --remote
git submodule update
cd tests/e2e/kvtool && make install
.PHONY: all build-linux install build test test-cli test-all test-rest test-basic test-fuzz start-remote-sims
.PHONY: all build-linux install clean build test test-cli test-all test-rest test-basic start-remote-sims

View File

@ -17,13 +17,13 @@ Reference implementation of 0G Chain, the first modular AI chain. Built using th
<!---
## Mainnet
The current recommended version of the software for mainnet is [v0.26.2](https://github.com/Kava-Labs/kava/releases/tag/v0.26.2) The `master` branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
The current recommended version of the software for mainnet is [v0.25.0](https://github.com/Kava-Labs/kava/releases/tag/v0.25.0) The master branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
### Installation and Setup
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/nodes-and-validators/validator-node).
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/participate/validator-node).
```bash
git checkout v0.26.2
git checkout v0.25.0
make install
```
@ -49,7 +49,7 @@ If you have technical questions or concerns, ask a developer or community member
## Security
If you find a security issue, please report it to security [at] kavalabs.io. Depending on the verification and severity, a bug bounty may be available.
If you find a security issue, please report it to security [at] kava.io. Depending on the verification and severity, a bug bounty may be available.
## License

View File

@ -11,15 +11,15 @@ import (
"github.com/stretchr/testify/require"
dbm "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
"cosmossdk.io/simapp"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/store"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
"github.com/cosmos/cosmos-sdk/x/auth"
@ -275,7 +275,7 @@ func TestAppStateDeterminism(t *testing.T) {
config.ExportParamsPath = ""
config.OnOperation = false
config.AllInvariants = false
config.ChainID = sims.SimAppChainID
config.ChainID = helpers.SimAppChainID
numTimesToRunPerSeed := 2
appHashList := make([]json.RawMessage, numTimesToRunPerSeed)

View File

@ -11,10 +11,6 @@ import (
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
abci "github.com/cometbft/cometbft/abci/types"
tmbytes "github.com/cometbft/cometbft/libs/bytes"
ctypes "github.com/cometbft/cometbft/rpc/core/types"
jsonrpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types"
"github.com/cosmos/cosmos-sdk/client/context"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/rest"
@ -23,6 +19,10 @@ import (
"github.com/gorilla/mux"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
tmbytes "github.com/tendermint/tendermint/libs/bytes"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
jsonrpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)
type SimulateRequestTestSuite struct {

View File

@ -5,16 +5,16 @@ import (
"runtime/debug"
errorsmod "cosmossdk.io/errors"
tmlog "github.com/cometbft/cometbft/libs/log"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
authante "github.com/cosmos/cosmos-sdk/x/auth/ante"
authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
ibcante "github.com/cosmos/ibc-go/v7/modules/core/ante"
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
ibcante "github.com/cosmos/ibc-go/v6/modules/core/ante"
ibckeeper "github.com/cosmos/ibc-go/v6/modules/core/keeper"
evmante "github.com/evmos/ethermint/app/ante"
evmtypes "github.com/evmos/ethermint/x/evm/types"
tmlog "github.com/tendermint/tendermint/libs/log"
)
// HandlerOptions extend the SDK's AnteHandler options by requiring the IBC

View File

@ -7,13 +7,9 @@ import (
"time"
sdkmath "cosmossdk.io/math"
tmdb "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/testutil/sims"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
@ -21,6 +17,9 @@ import (
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmdb "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
@ -58,11 +57,10 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
nil,
encodingConfig,
opts,
baseapp.SetChainID(app.TestChainId),
),
}
chainID := app.TestChainId
chainID := "kavatest_1-1"
tApp = tApp.InitializeFromGenesisStatesWithTimeAndChainID(
time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC),
chainID,
@ -109,7 +107,7 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
stdTx, err := sims.GenSignedMockTx(
stdTx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
encodingConfig.TxConfig,
[]sdk.Msg{
@ -120,7 +118,7 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
chainID,
[]uint64{0},
[]uint64{0}, // fixed sequence numbers will cause tests to fail sig verification if the same address is used twice
@ -212,7 +210,7 @@ func TestAppAnteHandler_RejectMsgsInAuthz(t *testing.T) {
return msg
}
chainID := app.TestChainId
chainID := "kavatest_1-1"
encodingConfig := app.MakeEncodingConfig()
testcases := []struct {
@ -241,12 +239,12 @@ func TestAppAnteHandler_RejectMsgsInAuthz(t *testing.T) {
chainID,
)
stdTx, err := sims.GenSignedMockTx(
stdTx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
encodingConfig.TxConfig,
[]sdk.Msg{tc.msg},
sdk.NewCoins(), // no fee
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
chainID,
[]uint64{0},
[]uint64{0},

View File

@ -5,7 +5,7 @@ import (
"testing"
"time"
"github.com/cosmos/cosmos-sdk/testutil/sims"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
sdk "github.com/cosmos/cosmos-sdk/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/stretchr/testify/require"
@ -39,7 +39,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_NotCheckTx(t *testing.T) {
fetcher := mockAddressFetcher(testAddresses[1:]...)
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
tx, err := sims.GenSignedMockTx(
tx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
@ -50,7 +50,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_NotCheckTx(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},
@ -74,7 +74,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Pass(t *testing.T) {
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
tx, err := sims.GenSignedMockTx(
tx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
@ -90,7 +90,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Pass(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0, 123},
[]uint64{0, 123},
@ -115,7 +115,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Reject(t *testing.T) {
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
tx, err := sims.GenSignedMockTx(
tx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
@ -126,7 +126,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Reject(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},

View File

@ -5,7 +5,7 @@ import (
"testing"
"time"
"github.com/cosmos/cosmos-sdk/testutil/sims"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/x/authz"
@ -213,12 +213,12 @@ func TestAuthzLimiterDecorator(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tx, err := sims.GenSignedMockTx(
tx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
tc.msgs,
sdk.NewCoins(),
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},

View File

@ -9,7 +9,7 @@ import (
"github.com/cosmos/cosmos-sdk/client"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/testutil/sims"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
@ -20,11 +20,6 @@ import (
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/crypto/tmhash"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmversion "github.com/cometbft/cometbft/proto/tendermint/version"
"github.com/cometbft/cometbft/version"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/evmos/ethermint/ethereum/eip712"
"github.com/evmos/ethermint/tests"
@ -32,6 +27,11 @@ import (
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
"github.com/tendermint/tendermint/version"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
@ -42,7 +42,7 @@ import (
)
const (
ChainID = app.TestChainId
ChainID = "kavatest_1-1"
USDCCoinDenom = "erc20/usdc"
USDCCDPType = "erc20-usdc"
)
@ -137,7 +137,6 @@ func (suite *EIP712TestSuite) createTestEIP712CosmosTxBuilder(
func (suite *EIP712TestSuite) SetupTest() {
tApp := app.NewTestApp()
suite.tApp = tApp
cdc := tApp.AppCodec()
suite.evmutilKeeper = tApp.GetEvmutilKeeper()
@ -291,11 +290,6 @@ func (suite *EIP712TestSuite) SetupTest() {
)
suite.usdcEVMAddr = pair.GetAddress()
// update consensus params
cParams := tApp.GetConsensusParams(suite.ctx)
cParams.Block.MaxGas = sims.DefaultGenTxGas * 20
tApp.StoreConsensusParams(suite.ctx, cParams)
// Add a contract to evmutil conversion pair
evmutilParams := suite.evmutilKeeper.GetParams(suite.ctx)
evmutilParams.EnabledConversionPairs =
@ -405,7 +399,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
// usdxToMintAmt: 99,
// },
{
name: "fails when conversion more erc20 usdc than balance",
name: "fails when convertion more erc20 usdc than balance",
usdcDepositAmt: 51_000,
usdxToMintAmt: 100,
errMsg: "transfer amount exceeds balance",
@ -461,7 +455,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
var option *codectypes.Any
option, _ = codectypes.NewAnyWithValue(&etherminttypes.ExtensionOptionsWeb3Tx{
FeePayer: suite.testAddr.String(),
TypedDataChainID: 2221,
TypedDataChainID: 1,
FeePayerSig: []byte("sig"),
})
builder, _ := txBuilder.(authtx.ExtensionOptionsTxBuilder)
@ -490,7 +484,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
updateTx: func(txBuilder client.TxBuilder, msgs []sdk.Msg) client.TxBuilder {
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
return suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, "kavatest_12-1", uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
suite.testAddr, suite.testPrivKey, "kavatest_12-1", uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
)
},
},
@ -503,7 +497,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
updateTx: func(txBuilder client.TxBuilder, msgs []sdk.Msg) client.TxBuilder {
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
return suite.createTestEIP712CosmosTxBuilder(
suite.testAddr2, suite.testPrivKey2, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
suite.testAddr2, suite.testPrivKey2, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
)
},
},
@ -531,7 +525,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
txBuilder := suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
)
if tc.updateTx != nil {
txBuilder = tc.updateTx(txBuilder, msgs)
@ -605,7 +599,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx_DepositAndWithdraw() {
// deliver deposit msg
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
txBuilder := suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, depositMsgs,
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, depositMsgs,
)
txBytes, err := encodingConfig.TxConfig.TxEncoder()(txBuilder.GetTx())
suite.Require().NoError(err)
@ -639,7 +633,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx_DepositAndWithdraw() {
// deliver withdraw msg
txBuilder = suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, withdrawMsgs,
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, withdrawMsgs,
)
txBytes, err = encodingConfig.TxConfig.TxEncoder()(txBuilder.GetTx())
suite.Require().NoError(err)

View File

@ -4,12 +4,12 @@ import (
"strings"
"testing"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmtime "github.com/cometbft/cometbft/types/time"
sdk "github.com/cosmos/cosmos-sdk/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/ante"

View File

@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/testutil/sims"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
sdk "github.com/cosmos/cosmos-sdk/types"
vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
@ -73,14 +73,14 @@ func TestVestingMempoolDecorator_MsgCreateVestingAccount_Unauthorized(t *testing
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tx, err := sims.GenSignedMockTx(
tx, err := helpers.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
tt.msg,
},
sdk.NewCoins(),
sims.DefaultGenTxGas,
helpers.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},

View File

@ -5,10 +5,6 @@ import (
"io"
"net/http"
dbm "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
tmjson "github.com/cometbft/cometbft/libs/json"
tmlog "github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
@ -24,7 +20,6 @@ import (
"github.com/cosmos/cosmos-sdk/version"
"github.com/cosmos/cosmos-sdk/x/auth"
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
"github.com/cosmos/cosmos-sdk/x/auth/vesting"
@ -39,13 +34,11 @@ import (
"github.com/cosmos/cosmos-sdk/x/capability"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
consensus "github.com/cosmos/cosmos-sdk/x/consensus"
consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
"github.com/cosmos/cosmos-sdk/x/crisis"
crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper"
crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
distr "github.com/cosmos/cosmos-sdk/x/distribution"
distrclient "github.com/cosmos/cosmos-sdk/x/distribution/client"
distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
"github.com/cosmos/cosmos-sdk/x/evidence"
@ -77,25 +70,16 @@ import (
upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client"
upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
"github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward"
packetforwardkeeper "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward/keeper"
packetforwardtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward/types"
ibcwasm "github.com/cosmos/ibc-go/modules/light-clients/08-wasm"
ibcwasmkeeper "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/keeper"
ibcwasmtypes "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/types"
transfer "github.com/cosmos/ibc-go/v7/modules/apps/transfer"
ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper"
ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
ibc "github.com/cosmos/ibc-go/v7/modules/core"
ibcclient "github.com/cosmos/ibc-go/v7/modules/core/02-client"
ibcclientclient "github.com/cosmos/ibc-go/v7/modules/core/02-client/client"
ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
ibcporttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types"
ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported"
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
"github.com/ethereum/go-ethereum/core/vm"
transfer "github.com/cosmos/ibc-go/v6/modules/apps/transfer"
ibctransferkeeper "github.com/cosmos/ibc-go/v6/modules/apps/transfer/keeper"
ibctransfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
ibc "github.com/cosmos/ibc-go/v6/modules/core"
ibcclient "github.com/cosmos/ibc-go/v6/modules/core/02-client"
ibcclientclient "github.com/cosmos/ibc-go/v6/modules/core/02-client/client"
ibcclienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
porttypes "github.com/cosmos/ibc-go/v6/modules/core/05-port/types"
ibchost "github.com/cosmos/ibc-go/v6/modules/core/24-host"
ibckeeper "github.com/cosmos/ibc-go/v6/modules/core/keeper"
evmante "github.com/evmos/ethermint/app/ante"
ethermintconfig "github.com/evmos/ethermint/server/config"
"github.com/evmos/ethermint/x/evm"
@ -106,6 +90,11 @@ import (
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/gorilla/mux"
abci "github.com/tendermint/tendermint/abci/types"
tmjson "github.com/tendermint/tendermint/libs/json"
tmlog "github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/app/ante"
chainparams "github.com/0glabs/0g-chain/app/params"
"github.com/0glabs/0g-chain/chaincfg"
@ -130,9 +119,6 @@ import (
issuance "github.com/0glabs/0g-chain/x/issuance"
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
"github.com/0glabs/0g-chain/x/precisebank"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
pricefeed "github.com/0glabs/0g-chain/x/pricefeed"
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
@ -140,13 +126,14 @@ import (
validatorvestingrest "github.com/0glabs/0g-chain/x/validator-vesting/client/rest"
validatorvestingtypes "github.com/0glabs/0g-chain/x/validator-vesting/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
)
var (
// ModuleBasics manages simple versions of full app modules.
// It's used for things such as codec registration and genesis file verification.
ModuleBasics = module.NewBasicManager(
genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
genutil.AppModuleBasic{},
auth.AppModuleBasic{},
bank.AppModuleBasic{},
capability.AppModuleBasic{},
@ -154,6 +141,7 @@ var (
distr.AppModuleBasic{},
gov.NewAppModuleBasic([]govclient.ProposalHandler{
paramsclient.ProposalHandler,
distrclient.ProposalHandler,
upgradeclient.LegacyProposalHandler,
upgradeclient.LegacyCancelProposalHandler,
ibcclientclient.UpdateClientProposalHandler,
@ -164,9 +152,6 @@ var (
crisis.AppModuleBasic{},
slashing.AppModuleBasic{},
ibc.AppModuleBasic{},
ibctm.AppModuleBasic{},
solomachine.AppModuleBasic{},
packetforward.AppModuleBasic{},
upgrade.AppModuleBasic{},
evidence.AppModuleBasic{},
authzmodule.AppModuleBasic{},
@ -181,11 +166,8 @@ var (
validatorvesting.AppModuleBasic{},
evmutil.AppModuleBasic{},
mint.AppModuleBasic{},
precisebank.AppModuleBasic{},
council.AppModuleBasic{},
dasigners.AppModuleBasic{},
consensus.AppModuleBasic{},
ibcwasm.AppModuleBasic{},
)
// module account permissions
@ -203,13 +185,13 @@ var (
issuancetypes.ModuleAccountName: {authtypes.Minter, authtypes.Burner},
bep3types.ModuleName: {authtypes.Burner, authtypes.Minter},
minttypes.ModuleName: {authtypes.Minter},
precisebanktypes.ModuleName: {authtypes.Minter, authtypes.Burner}, // used for reserve account to back fractional amounts
}
)
// Verify app interface at compile time
var (
_ servertypes.Application = (*App)(nil)
_ servertypes.Application = (*App)(nil)
_ servertypes.ApplicationQueryService = (*App)(nil)
)
// Options bundles several configuration params for an App.
@ -245,35 +227,31 @@ type App struct {
memKeys map[string]*storetypes.MemoryStoreKey
// keepers from all the modules
accountKeeper authkeeper.AccountKeeper
bankKeeper bankkeeper.Keeper
capabilityKeeper *capabilitykeeper.Keeper
stakingKeeper *stakingkeeper.Keeper
distrKeeper distrkeeper.Keeper
govKeeper govkeeper.Keeper
paramsKeeper paramskeeper.Keeper
authzKeeper authzkeeper.Keeper
crisisKeeper crisiskeeper.Keeper
slashingKeeper slashingkeeper.Keeper
ibcWasmClientKeeper ibcwasmkeeper.Keeper
ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
packetForwardKeeper *packetforwardkeeper.Keeper
evmKeeper *evmkeeper.Keeper
evmutilKeeper evmutilkeeper.Keeper
feeMarketKeeper feemarketkeeper.Keeper
upgradeKeeper upgradekeeper.Keeper
evidenceKeeper evidencekeeper.Keeper
transferKeeper ibctransferkeeper.Keeper
CouncilKeeper councilkeeper.Keeper
issuanceKeeper issuancekeeper.Keeper
bep3Keeper bep3keeper.Keeper
pricefeedKeeper pricefeedkeeper.Keeper
committeeKeeper committeekeeper.Keeper
vestingKeeper vestingkeeper.VestingKeeper
mintKeeper mintkeeper.Keeper
dasignersKeeper dasignerskeeper.Keeper
consensusParamsKeeper consensusparamkeeper.Keeper
precisebankKeeper precisebankkeeper.Keeper
accountKeeper authkeeper.AccountKeeper
bankKeeper bankkeeper.Keeper
capabilityKeeper *capabilitykeeper.Keeper
stakingKeeper stakingkeeper.Keeper
distrKeeper distrkeeper.Keeper
govKeeper govkeeper.Keeper
paramsKeeper paramskeeper.Keeper
authzKeeper authzkeeper.Keeper
crisisKeeper crisiskeeper.Keeper
slashingKeeper slashingkeeper.Keeper
ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
evmKeeper *evmkeeper.Keeper
evmutilKeeper evmutilkeeper.Keeper
feeMarketKeeper feemarketkeeper.Keeper
upgradeKeeper upgradekeeper.Keeper
evidenceKeeper evidencekeeper.Keeper
transferKeeper ibctransferkeeper.Keeper
CouncilKeeper councilkeeper.Keeper
issuanceKeeper issuancekeeper.Keeper
bep3Keeper bep3keeper.Keeper
pricefeedKeeper pricefeedkeeper.Keeper
committeeKeeper committeekeeper.Keeper
vestingKeeper vestingkeeper.VestingKeeper
mintKeeper mintkeeper.Keeper
dasignersKeeper dasignerskeeper.Keeper
// make scoped keepers public for test purposes
ScopedIBCKeeper capabilitykeeper.ScopedKeeper
@ -313,8 +291,8 @@ func NewApp(
keys := sdk.NewKVStoreKeys(
authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
distrtypes.StoreKey, slashingtypes.StoreKey, packetforwardtypes.StoreKey,
govtypes.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey,
distrtypes.StoreKey, slashingtypes.StoreKey,
govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey,
upgradetypes.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey,
evmtypes.StoreKey, feemarkettypes.StoreKey, authzkeeper.StoreKey,
capabilitytypes.StoreKey,
@ -324,15 +302,12 @@ func NewApp(
counciltypes.StoreKey,
dasignerstypes.StoreKey,
vestingtypes.StoreKey,
consensusparamtypes.StoreKey, crisistypes.StoreKey, precisebanktypes.StoreKey,
ibcwasmtypes.StoreKey,
)
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey)
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
// Authority for gov proposals, using the x/gov module account address
govAuthAddr := authtypes.NewModuleAddress(govtypes.ModuleName)
govAuthAddrStr := govAuthAddr.String()
govAuthorityAddr := authtypes.NewModuleAddress(govtypes.ModuleName)
app := &App{
BaseApp: bApp,
@ -361,20 +336,18 @@ func NewApp(
issuanceSubspace := app.paramsKeeper.Subspace(issuancetypes.ModuleName)
bep3Subspace := app.paramsKeeper.Subspace(bep3types.ModuleName)
pricefeedSubspace := app.paramsKeeper.Subspace(pricefeedtypes.ModuleName)
ibcSubspace := app.paramsKeeper.Subspace(ibcexported.ModuleName)
ibcSubspace := app.paramsKeeper.Subspace(ibchost.ModuleName)
ibctransferSubspace := app.paramsKeeper.Subspace(ibctransfertypes.ModuleName)
packetforwardSubspace := app.paramsKeeper.Subspace(packetforwardtypes.ModuleName).WithKeyTable(packetforwardtypes.ParamKeyTable())
feemarketSubspace := app.paramsKeeper.Subspace(feemarkettypes.ModuleName)
evmSubspace := app.paramsKeeper.Subspace(evmtypes.ModuleName)
evmutilSubspace := app.paramsKeeper.Subspace(evmutiltypes.ModuleName)
mintSubspace := app.paramsKeeper.Subspace(minttypes.ModuleName)
// set the BaseApp's parameter store
app.consensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, keys[consensusparamtypes.StoreKey], govAuthAddrStr)
bApp.SetParamStore(&app.consensusParamsKeeper)
bApp.SetParamStore(
app.paramsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()),
)
app.capabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey])
scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibcexported.ModuleName)
scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibchost.ModuleName)
scopedTransferKeeper := app.capabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName)
app.capabilityKeeper.Seal()
@ -382,17 +355,17 @@ func NewApp(
app.accountKeeper = authkeeper.NewAccountKeeper(
appCodec,
keys[authtypes.StoreKey],
authSubspace,
authtypes.ProtoBaseAccount,
mAccPerms,
sdk.GetConfig().GetBech32AccountAddrPrefix(),
govAuthAddrStr,
)
app.bankKeeper = bankkeeper.NewBaseKeeper(
appCodec,
keys[banktypes.StoreKey],
app.accountKeeper,
bankSubspace,
app.loadBlockedMaccAddrs(),
govAuthAddrStr,
)
app.vestingKeeper = vestingkeeper.NewVestingKeeper(app.accountKeeper, app.bankKeeper, keys[vestingtypes.StoreKey])
@ -402,7 +375,7 @@ func NewApp(
app.accountKeeper,
app.bankKeeper,
app.vestingKeeper,
govAuthAddrStr,
stakingSubspace,
)
app.authzKeeper = authzkeeper.NewKeeper(
keys[authzkeeper.StoreKey],
@ -413,68 +386,52 @@ func NewApp(
app.distrKeeper = distrkeeper.NewKeeper(
appCodec,
keys[distrtypes.StoreKey],
distrSubspace,
app.accountKeeper,
app.bankKeeper,
app.stakingKeeper,
&app.stakingKeeper,
authtypes.FeeCollectorName,
govAuthAddrStr,
)
app.slashingKeeper = slashingkeeper.NewKeeper(
appCodec,
app.legacyAmino,
keys[slashingtypes.StoreKey],
app.stakingKeeper,
govAuthAddrStr,
&app.stakingKeeper,
slashingSubspace,
)
app.crisisKeeper = *crisiskeeper.NewKeeper(
appCodec,
keys[crisistypes.StoreKey],
app.crisisKeeper = crisiskeeper.NewKeeper(
crisisSubspace,
options.InvariantCheckPeriod,
app.bankKeeper,
authtypes.FeeCollectorName,
govAuthAddrStr,
)
app.upgradeKeeper = *upgradekeeper.NewKeeper(
app.upgradeKeeper = upgradekeeper.NewKeeper(
options.SkipUpgradeHeights,
keys[upgradetypes.StoreKey],
appCodec,
homePath,
app.BaseApp,
govAuthAddrStr,
govAuthorityAddr.String(),
)
app.evidenceKeeper = *evidencekeeper.NewKeeper(
appCodec,
keys[evidencetypes.StoreKey],
app.stakingKeeper,
&app.stakingKeeper,
app.slashingKeeper,
)
app.ibcKeeper = ibckeeper.NewKeeper(
appCodec,
keys[ibcexported.StoreKey],
keys[ibchost.StoreKey],
ibcSubspace,
app.stakingKeeper,
app.upgradeKeeper,
scopedIBCKeeper,
)
app.ibcWasmClientKeeper = ibcwasmkeeper.NewKeeperWithConfig(
appCodec,
keys[ibcwasmtypes.StoreKey],
app.ibcKeeper.ClientKeeper,
authtypes.NewModuleAddress(govtypes.ModuleName).String(),
ibcwasmtypes.WasmConfig{
DataDir: "ibc_08-wasm",
SupportedCapabilities: "iterator,stargate",
ContractDebugMode: false,
},
app.GRPCQueryRouter(),
)
// Create Ethermint keepers
app.feeMarketKeeper = feemarketkeeper.NewKeeper(
appCodec,
govAuthAddr,
govAuthorityAddr,
keys[feemarkettypes.StoreKey],
tkeys[feemarkettypes.TransientKey],
feemarketSubspace,
@ -488,15 +445,9 @@ func NewApp(
app.accountKeeper,
)
app.precisebankKeeper = precisebankkeeper.NewKeeper(
app.appCodec,
keys[precisebanktypes.StoreKey],
app.bankKeeper,
app.accountKeeper,
)
evmBankKeeper := evmutilkeeper.NewEvmBankKeeper(app.evmutilKeeper, app.bankKeeper, app.accountKeeper)
// dasigners keeper
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper, govAuthAddrStr)
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper)
// precopmiles
precompiles := make(map[common.Address]vm.PrecompiledContract)
daSignersPrecompile, err := dasignersprecompile.NewDASignersPrecompile(app.dasignersKeeper)
@ -504,63 +455,35 @@ func NewApp(
panic("initialize precompile failed")
}
precompiles[daSignersPrecompile.Address()] = daSignersPrecompile
// evm keeper
app.evmKeeper = evmkeeper.NewKeeper(
appCodec, keys[evmtypes.StoreKey], tkeys[evmtypes.TransientKey],
govAuthAddr,
app.accountKeeper,
app.precisebankKeeper, // x/precisebank in place of x/bank
app.stakingKeeper,
app.feeMarketKeeper,
govAuthorityAddr,
app.accountKeeper, evmBankKeeper, app.stakingKeeper, app.feeMarketKeeper,
options.EVMTrace,
evmSubspace,
precompiles,
)
app.evmutilKeeper.SetEvmKeeper(app.evmKeeper)
// It's important to note that the PFM Keeper must be initialized before the Transfer Keeper
app.packetForwardKeeper = packetforwardkeeper.NewKeeper(
appCodec,
keys[packetforwardtypes.StoreKey],
nil, // will be zero-value here, reference is set later on with SetTransferKeeper.
app.ibcKeeper.ChannelKeeper,
app.distrKeeper,
app.bankKeeper,
app.ibcKeeper.ChannelKeeper,
govAuthAddrStr,
)
app.evmutilKeeper.SetEvmKeeper(app.evmKeeper)
app.transferKeeper = ibctransferkeeper.NewKeeper(
appCodec,
keys[ibctransfertypes.StoreKey],
ibctransferSubspace,
app.packetForwardKeeper,
app.ibcKeeper.ChannelKeeper,
app.ibcKeeper.ChannelKeeper,
&app.ibcKeeper.PortKeeper,
app.accountKeeper,
app.bankKeeper,
scopedTransferKeeper,
)
app.packetForwardKeeper.SetTransferKeeper(app.transferKeeper)
transferModule := transfer.NewAppModule(app.transferKeeper)
// allow ibc packet forwarding for ibc transfers.
// transfer stack contains (from top to bottom):
// - Packet Forward Middleware
// - Transfer
var transferStack ibcporttypes.IBCModule
transferStack = transfer.NewIBCModule(app.transferKeeper)
transferStack = packetforward.NewIBCMiddleware(
transferStack,
app.packetForwardKeeper,
0, // retries on timeout
packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp,
packetforwardkeeper.DefaultRefundTransferPacketTimeoutTimestamp,
)
transferIBCModule := transfer.NewIBCModule(app.transferKeeper)
// Create static IBC router, add transfer route, then set and seal it
ibcRouter := ibcporttypes.NewRouter()
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
ibcRouter := porttypes.NewRouter()
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferIBCModule)
app.ibcKeeper.SetRouter(ibcRouter)
app.issuanceKeeper = issuancekeeper.NewKeeper(
@ -587,11 +510,11 @@ func NewApp(
app.mintKeeper = mintkeeper.NewKeeper(
appCodec,
keys[minttypes.StoreKey],
mintSubspace,
app.stakingKeeper,
app.accountKeeper,
app.bankKeeper,
authtypes.FeeCollectorName,
govAuthAddrStr,
)
// create committee keeper with router
@ -599,7 +522,8 @@ func NewApp(
committeeGovRouter.
AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(&app.upgradeKeeper))
AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper))
// Note: the committee proposal handler is not registered on the committee router. This means committees cannot create or update other committees.
// Adding the committee proposal handler to the router is possible but awkward as the handler depends on the keeper which depends on the handler.
app.committeeKeeper = committeekeeper.NewKeeper(
@ -612,11 +536,12 @@ func NewApp(
)
// register the staking hooks
app.stakingKeeper.SetHooks(
// NOTE: These keepers are passed by reference above, so they will contain these hooks.
app.stakingKeeper = *(app.stakingKeeper.SetHooks(
stakingtypes.NewMultiStakingHooks(
app.distrKeeper.Hooks(),
app.slashingKeeper.Hooks(),
))
)))
// create gov keeper with router
// NOTE this must be done after any keepers referenced in the gov router (ie committee) are defined
@ -624,27 +549,27 @@ func NewApp(
govRouter.
AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(&app.upgradeKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper)).
AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.ibcKeeper.ClientKeeper)).
AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).
AddRoute(committeetypes.RouterKey, committee.NewProposalHandler(app.committeeKeeper))
govConfig := govtypes.DefaultConfig()
govKeeper := govkeeper.NewKeeper(
app.govKeeper = govkeeper.NewKeeper(
appCodec,
keys[govtypes.StoreKey],
govSubspace,
app.accountKeeper,
app.bankKeeper,
app.stakingKeeper,
&app.stakingKeeper,
govRouter,
app.MsgServiceRouter(),
govConfig,
govAuthAddrStr,
)
govKeeper.SetLegacyRouter(govRouter)
app.govKeeper = *govKeeper
// override x/gov tally handler with custom implementation
tallyHandler := NewTallyHandler(
app.govKeeper, *app.stakingKeeper, app.bankKeeper,
app.govKeeper, app.stakingKeeper, app.bankKeeper,
)
app.govKeeper.SetTallyHandler(tallyHandler)
@ -656,24 +581,22 @@ func NewApp(
// must be passed by reference here.)
app.mm = module.NewManager(
genutil.NewAppModule(app.accountKeeper, app.stakingKeeper, app.BaseApp.DeliverTx, encodingConfig.TxConfig),
auth.NewAppModule(appCodec, app.accountKeeper, authsims.RandomGenesisAccounts, authSubspace),
bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper, bankSubspace),
capability.NewAppModule(appCodec, *app.capabilityKeeper, false), // todo: confirm if this is okay to not be sealed
staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper, stakingSubspace),
distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper, distrSubspace),
gov.NewAppModule(appCodec, &app.govKeeper, app.accountKeeper, app.bankKeeper, govSubspace),
auth.NewAppModule(appCodec, app.accountKeeper, nil),
bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper),
capability.NewAppModule(appCodec, *app.capabilityKeeper),
staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper),
distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper),
gov.NewAppModule(appCodec, app.govKeeper, app.accountKeeper, app.bankKeeper),
params.NewAppModule(app.paramsKeeper),
crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants, crisisSubspace),
slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper, slashingSubspace),
consensus.NewAppModule(appCodec, app.consensusParamsKeeper),
crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants),
slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper),
ibc.NewAppModule(app.ibcKeeper),
packetforward.NewAppModule(app.packetForwardKeeper, packetforwardSubspace),
evm.NewAppModule(app.evmKeeper, app.accountKeeper),
feemarket.NewAppModule(app.feeMarketKeeper, feemarketSubspace),
upgrade.NewAppModule(&app.upgradeKeeper),
upgrade.NewAppModule(app.upgradeKeeper),
evidence.NewAppModule(app.evidenceKeeper),
transferModule,
vesting.NewAppModule(app.accountKeeper, app.bankKeeper, app.vestingKeeper),
vesting.NewAppModule(app.accountKeeper, app.vestingKeeper),
authzmodule.NewAppModule(appCodec, app.authzKeeper, app.accountKeeper, app.bankKeeper, app.interfaceRegistry),
issuance.NewAppModule(app.issuanceKeeper, app.accountKeeper, app.bankKeeper),
bep3.NewAppModule(app.bep3Keeper, app.accountKeeper, app.bankKeeper),
@ -682,11 +605,9 @@ func NewApp(
committee.NewAppModule(app.committeeKeeper, app.accountKeeper),
evmutil.NewAppModule(app.evmutilKeeper, app.bankKeeper, app.accountKeeper),
// nil InflationCalculationFn, use SDK's default inflation function
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, nil, mintSubspace),
precisebank.NewAppModule(app.precisebankKeeper, app.bankKeeper, app.accountKeeper),
council.NewAppModule(app.CouncilKeeper),
ibcwasm.NewAppModule(app.ibcWasmClientKeeper),
dasigners.NewAppModule(app.dasignersKeeper, *app.stakingKeeper),
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, chaincfg.NextInflationRate),
council.NewAppModule(app.CouncilKeeper, app.stakingKeeper),
dasigners.NewAppModule(app.dasignersKeeper, app.stakingKeeper),
)
// Warning: Some begin blockers must run before others. Ensure the dependencies are understood before modifying this list.
@ -714,7 +635,7 @@ func NewApp(
// It should be run before cdp begin blocker which cancels out debt with stable and starts more auctions.
bep3types.ModuleName,
issuancetypes.ModuleName,
ibcexported.ModuleName,
ibchost.ModuleName,
// Add all remaining modules with an empty begin blocker below since cosmos 0.45.0 requires it
vestingtypes.ModuleName,
pricefeedtypes.ModuleName,
@ -728,11 +649,8 @@ func NewApp(
paramstypes.ModuleName,
authz.ModuleName,
evmutiltypes.ModuleName,
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName,
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
@ -755,7 +673,7 @@ func NewApp(
upgradetypes.ModuleName,
evidencetypes.ModuleName,
vestingtypes.ModuleName,
ibcexported.ModuleName,
ibchost.ModuleName,
validatorvestingtypes.ModuleName,
authtypes.ModuleName,
banktypes.ModuleName,
@ -766,10 +684,6 @@ func NewApp(
evmutiltypes.ModuleName,
minttypes.ModuleName,
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName,
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
@ -783,7 +697,7 @@ func NewApp(
slashingtypes.ModuleName, // iterates over validators, run after staking
govtypes.ModuleName,
minttypes.ModuleName,
ibcexported.ModuleName,
ibchost.ModuleName,
evidencetypes.ModuleName,
authz.ModuleName,
ibctransfertypes.ModuleName,
@ -795,21 +709,18 @@ func NewApp(
committeetypes.ModuleName,
evmutiltypes.ModuleName,
genutiltypes.ModuleName, // runs arbitrary txs included in genisis state, so run after modules have been initialized
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
// Add all remaining modules with an empty InitGenesis below since cosmos 0.45.0 requires it
vestingtypes.ModuleName,
paramstypes.ModuleName,
upgradetypes.ModuleName,
validatorvestingtypes.ModuleName,
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName, // Must be run after x/bank to verify reserve balance
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
app.mm.RegisterInvariants(&app.crisisKeeper)
app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino)
app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
app.RegisterServices(app.configurator)
@ -881,15 +792,6 @@ func NewApp(
}
}
if manager := app.SnapshotManager(); manager != nil {
err := manager.RegisterExtensions(
ibcwasmkeeper.NewWasmSnapshotter(app.CommitMultiStore(), &app.ibcWasmClientKeeper),
)
if err != nil {
panic(fmt.Errorf("failed to register snapshot extension: %s", err))
}
}
app.ScopedIBCKeeper = scopedIBCKeeper
app.ScopedTransferKeeper = scopedTransferKeeper

View File

@ -3,26 +3,22 @@ package app
import (
"encoding/json"
"fmt"
"github.com/0glabs/0g-chain/chaincfg"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmtypes "github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
"os"
"sort"
"testing"
"time"
"github.com/0glabs/0g-chain/chaincfg"
db "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewApp(t *testing.T) {
@ -40,7 +36,7 @@ func TestNewApp(t *testing.T) {
func TestExport(t *testing.T) {
chaincfg.SetSDKConfig()
db := db.NewMemDB()
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions, baseapp.SetChainID(TestChainId))
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions)
genesisState := GenesisStateWithSingleValidator(&TestApp{App: *app}, NewDefaultGenesisState())
@ -49,23 +45,21 @@ func TestExport(t *testing.T) {
initRequest := abci.RequestInitChain{
Time: time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC),
ChainId: TestChainId,
ChainId: "kavatest_1-1",
InitialHeight: 1,
ConsensusParams: sims.DefaultConsensusParams,
ConsensusParams: tmtypes.TM2PB.ConsensusParams(tmtypes.DefaultConsensusParams()),
Validators: nil,
AppStateBytes: stateBytes,
}
app.InitChain(initRequest)
app.Commit()
exportedApp, err := app.ExportAppStateAndValidators(false, []string{}, []string{})
exportedApp, err := app.ExportAppStateAndValidators(false, []string{})
require.NoError(t, err)
// Assume each module is exported correctly, so only check modules in genesis are present in export
initialModules, err := unmarshalJSONKeys(initRequest.AppStateBytes)
require.NoError(t, err)
// note ibctm is only registered in the BasicManager and not module manager so can be ignored
initialModules = removeIbcTmModule(initialModules)
exportedModules, err := unmarshalJSONKeys(exportedApp.AppState)
require.NoError(t, err)
assert.ElementsMatch(t, initialModules, exportedModules)
@ -149,13 +143,3 @@ func unmarshalJSONKeys(jsonBytes []byte) ([]string, error) {
return keys, nil
}
func removeIbcTmModule(modules []string) []string {
var result []string
for _, str := range modules {
if str != ibctm.ModuleName && str != solomachine.ModuleName {
result = append(result, str)
}
}
return result
}

View File

@ -4,7 +4,7 @@ import (
"encoding/json"
"log"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
sdk "github.com/cosmos/cosmos-sdk/types"
@ -14,7 +14,7 @@ import (
)
// ExportAppStateAndValidators export the state of the app for a genesis file
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string, modulesToExport []string,
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string,
) (servertypes.ExportedApp, error) {
// as if they could withdraw from the start of the next block
// block time is not available and defaults to Jan 1st, 0001
@ -26,7 +26,7 @@ func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []
app.prepForZeroHeightGenesis(ctx, jailWhiteList)
}
genState := app.mm.ExportGenesisForModules(ctx, app.appCodec, modulesToExport)
genState := app.mm.ExportGenesis(ctx, app.appCodec)
newAppState, err := json.MarshalIndent(genState, "", " ")
if err != nil {
return servertypes.ExportedApp{}, err

View File

@ -3,9 +3,9 @@ Package params defines the simulation parameters for the 0gChain app.
It contains the default weights used for each transaction used on the module's
simulation. These weights define the chance for a transaction to be simulated at
any given operation.
any gived operation.
You can replace the default values for the weights by providing a params.json
You can repace the default values for the weights by providing a params.json
file with the weights defined for each of the transaction operations:
{

View File

@ -143,7 +143,7 @@ func (th TallyHandler) Tally(
totalVotingPower = totalVotingPower.Add(votingPower)
}
tallyParams := th.gk.GetParams(ctx)
tallyParams := th.gk.GetTallyParams(ctx)
tallyResults = govv1.NewTallyResultFromMap(results)
// TODO: Upgrade the spec to cover all of these cases & remove pseudocode.
@ -155,7 +155,7 @@ func (th TallyHandler) Tally(
// If there is not enough quorum of votes, the proposal fails
percentVoting := totalVotingPower.Quo(sdk.NewDecFromInt(th.stk.TotalBondedTokens(ctx)))
if percentVoting.LT(sdk.MustNewDecFromStr(tallyParams.Quorum)) {
return false, tallyParams.BurnVoteQuorum, tallyResults
return false, true, tallyResults
}
// If no one votes (everyone abstains), proposal fails
@ -165,7 +165,7 @@ func (th TallyHandler) Tally(
// If more than 1/3 of voters veto, proposal fails
if results[govv1.OptionNoWithVeto].Quo(totalVotingPower).GT(sdk.MustNewDecFromStr(tallyParams.VetoThreshold)) {
return false, tallyParams.BurnVoteVeto, tallyResults
return false, true, tallyResults
}
// If more than 1/2 of non-abstaining voters vote Yes, proposal passes

View File

@ -5,7 +5,6 @@ import (
"time"
sdkmath "cosmossdk.io/math"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
sdk "github.com/cosmos/cosmos-sdk/types"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
@ -16,6 +15,7 @@ import (
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/stretchr/testify/suite"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
// d is an alias for sdk.MustNewDecFromStr
@ -41,13 +41,12 @@ func (suite *tallyHandlerSuite) SetupTest() {
genesisTime := time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC)
suite.ctx = suite.app.NewContext(false, tmproto.Header{Height: 1, Time: genesisTime})
stakingKeeper := *suite.app.GetStakingKeeper()
suite.staking = stakingHelper{stakingKeeper}
suite.staking = stakingHelper{suite.app.GetStakingKeeper()}
suite.staking.setBondDenom(suite.ctx, "ukava")
suite.tallier = NewTallyHandler(
suite.app.GetGovKeeper(),
stakingKeeper,
suite.app.GetStakingKeeper(),
suite.app.GetBankKeeper(),
)
}
@ -130,7 +129,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
})
suite.Run("VetoedFails", func() {
suite.SetupTest()
@ -145,7 +144,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
})
suite.Run("UnvetoedAndYesAboveThresholdPasses", func() {
suite.SetupTest()
@ -162,7 +161,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Truef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
})
suite.Run("UnvetoedAndYesBelowThresholdFails", func() {
suite.SetupTest()
@ -179,7 +178,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
})
suite.Run("NotEnoughStakeFails", func() {
suite.SetupTest()
@ -191,7 +190,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
})
suite.Run("UnvetoedAndAllAbstainedFails", func() {
suite.SetupTest()
@ -204,18 +203,17 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
})
}
func (suite *tallyHandlerSuite) setTallyParams(quorum, threshold, veto sdk.Dec) {
params := suite.app.GetGovKeeper().GetParams(suite.ctx)
params.Quorum = quorum.String()
params.Threshold = threshold.String()
params.VetoThreshold = veto.String()
params.BurnVoteQuorum = true
suite.app.GetGovKeeper().SetParams(suite.ctx, params)
suite.app.GetGovKeeper().SetTallyParams(suite.ctx, govv1.TallyParams{
Quorum: quorum.String(),
Threshold: threshold.String(),
VetoThreshold: veto.String(),
})
}
func (suite *tallyHandlerSuite) voteOnProposal(
@ -236,7 +234,7 @@ func (suite *tallyHandlerSuite) voteOnProposal(
func (suite *tallyHandlerSuite) createProposal() govv1.Proposal {
gk := suite.app.GetGovKeeper()
deposit := gk.GetParams(suite.ctx).MinDeposit
deposit := gk.GetDepositParams(suite.ctx).MinDeposit
proposer := suite.createAccount(deposit...)
msg, err := govv1beta1.NewMsgSubmitProposal(
@ -246,7 +244,7 @@ func (suite *tallyHandlerSuite) createProposal() govv1.Proposal {
)
suite.Require().NoError(err)
msgServerv1 := govkeeper.NewMsgServerImpl(&gk)
msgServerv1 := govkeeper.NewMsgServerImpl(gk)
govAcct := gk.GetGovernanceAccount(suite.ctx).GetAddress()
msgServer := govkeeper.NewLegacyMsgServerImpl(govAcct.String(), msgServerv1)
@ -366,7 +364,7 @@ func (h stakingHelper) createUnbondedValidator(ctx sdk.Context, address sdk.ValA
return nil, err
}
msgServer := stakingkeeper.NewMsgServerImpl(&h.keeper)
msgServer := stakingkeeper.NewMsgServerImpl(h.keeper)
_, err = msgServer.CreateValidator(sdk.WrapSDKContext(ctx), msg)
if err != nil {
return nil, err
@ -386,7 +384,7 @@ func (h stakingHelper) delegate(ctx sdk.Context, delegator sdk.AccAddress, valid
h.newBondCoin(ctx, amount),
)
msgServer := stakingkeeper.NewMsgServerImpl(&h.keeper)
msgServer := stakingkeeper.NewMsgServerImpl(h.keeper)
_, err := msgServer.Delegate(sdk.WrapSDKContext(ctx), msg)
if err != nil {
return sdk.Dec{}, err

View File

@ -9,12 +9,6 @@ import (
"time"
sdkmath "cosmossdk.io/math"
dasignerskeeper "github.com/0glabs/0g-chain/x/dasigners/v1/keeper"
tmdb "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
@ -41,23 +35,26 @@ import (
evmkeeper "github.com/evmos/ethermint/x/evm/keeper"
feemarketkeeper "github.com/evmos/ethermint/x/feemarket/keeper"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtypes "github.com/tendermint/tendermint/types"
tmdb "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/chaincfg"
bep3keeper "github.com/0glabs/0g-chain/x/bep3/keeper"
committeekeeper "github.com/0glabs/0g-chain/x/committee/keeper"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
)
var (
emptyTime time.Time
testChainID = "kavatest_1-1"
defaultInitialHeight int64 = 1
)
const TestChainId = "zgchain_8888-1"
// TestApp is a simple wrapper around an App. It exposes internal keepers for use in integration tests.
// This file also contains test helpers. Ideally they would be in separate package.
// Basic Usage:
@ -92,41 +89,32 @@ func NewTestAppFromSealed() TestApp {
encCfg := MakeEncodingConfig()
app := NewApp(
log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil,
encCfg, DefaultOptions, baseapp.SetChainID(TestChainId),
)
app := NewApp(log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil, encCfg, DefaultOptions)
return TestApp{App: *app}
}
// nolint
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
func (tApp TestApp) GetPrecisebankKeeper() precisebankkeeper.Keeper { return tApp.precisebankKeeper }
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
func (tApp TestApp) GetStakingKeeper() stakingkeeper.Keeper { return tApp.stakingKeeper }
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
func (tApp TestApp) GetKVStoreKey(key string) *storetypes.KVStoreKey {
return tApp.keys[key]
}
func (tApp TestApp) GetBlockedMaccAddrs() map[string]bool {
return tApp.loadBlockedMaccAddrs()
}
// LegacyAmino returns the app's amino codec.
func (app *App) LegacyAmino() *codec.LegacyAmino {
return app.legacyAmino
@ -258,7 +246,6 @@ func genesisStateWithValSet(
balances,
totalSupply,
currentBankGenesis.DenomMetadata,
currentBankGenesis.SendEnabled,
)
// set genesis state
@ -272,13 +259,13 @@ func genesisStateWithValSet(
// InitializeFromGenesisStates calls InitChain on the app using the provided genesis states.
// If any module genesis states are missing, defaults are used.
func (tApp TestApp) InitializeFromGenesisStates(genesisStates ...GenesisState) TestApp {
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(emptyTime, TestChainId, defaultInitialHeight, true, genesisStates...)
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(emptyTime, testChainID, defaultInitialHeight, true, genesisStates...)
}
// InitializeFromGenesisStatesWithTime calls InitChain on the app using the provided genesis states and time.
// If any module genesis states are missing, defaults are used.
func (tApp TestApp) InitializeFromGenesisStatesWithTime(genTime time.Time, genesisStates ...GenesisState) TestApp {
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(genTime, TestChainId, defaultInitialHeight, true, genesisStates...)
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(genTime, testChainID, defaultInitialHeight, true, genesisStates...)
}
// InitializeFromGenesisStatesWithTimeAndChainID calls InitChain on the app using the provided genesis states, time, and chain id.
@ -335,8 +322,8 @@ func (tApp TestApp) InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(
AppStateBytes: stateBytes,
ChainId: chainID,
// Set consensus params, which is needed by x/feemarket
ConsensusParams: &tmproto.ConsensusParams{
Block: &tmproto.BlockParams{
ConsensusParams: &abci.ConsensusParams{
Block: &abci.BlockParams{
MaxBytes: 200000,
MaxGas: 20000000,
},
@ -471,7 +458,7 @@ func (tApp TestApp) SetInflation(ctx sdk.Context, value sdk.Dec) {
mk.SetParams(ctx, mintParams)
}
// GeneratePrivKeyAddressPairs generates (deterministically) a total of n private keys and addresses.
// GeneratePrivKeyAddressPairsFromRand generates (deterministically) a total of n private keys and addresses.
func GeneratePrivKeyAddressPairs(n int) (keys []cryptotypes.PrivKey, addrs []sdk.AccAddress) {
r := rand.New(rand.NewSource(12345)) // make the generation deterministic
keys = make([]cryptotypes.PrivKey, n)

View File

@ -4,40 +4,86 @@ import (
"fmt"
sdkmath "cosmossdk.io/math"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const (
UpgradeName_Testnet = "v0.4.0"
UpgradeName_Mainnet = "v0.25.0"
UpgradeName_Testnet = "v0.25.0-alpha.0"
UpgradeName_E2ETest = "v0.25.0-testing"
)
var (
// KAVA to ukava - 6 decimals
kavaConversionFactor = sdk.NewInt(1000_000)
secondsPerYear = sdk.NewInt(365 * 24 * 60 * 60)
// 10 Million KAVA per year in staking rewards, inflation disable time 2024-01-01T00:00:00 UTC
// CommunityParams_Mainnet = communitytypes.NewParams(
// time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
// // before switchover
// sdkmath.LegacyZeroDec(),
// // after switchover - 10M KAVA to ukava per year / seconds per year
// sdkmath.LegacyNewDec(10_000_000).
// MulInt(kavaConversionFactor).
// QuoInt(secondsPerYear),
// )
// Testnet -- 15 Trillion KAVA per year in staking rewards, inflation disable time 2023-11-16T00:00:00 UTC
// CommunityParams_Testnet = communitytypes.NewParams(
// time.Date(2023, 11, 16, 0, 0, 0, 0, time.UTC),
// // before switchover
// sdkmath.LegacyZeroDec(),
// // after switchover
// sdkmath.LegacyNewDec(15_000_000).
// MulInt64(1_000_000). // 15M * 1M = 15T
// MulInt(kavaConversionFactor).
// QuoInt(secondsPerYear),
// )
// CommunityParams_E2E = communitytypes.NewParams(
// time.Now().Add(10*time.Second).UTC(), // relative time for testing
// sdkmath.LegacyNewDec(0), // stakingRewardsPerSecond
// sdkmath.LegacyNewDec(1000), // upgradeTimeSetstakingRewardsPerSecond
// )
// ValidatorMinimumCommission is the new 5% minimum commission rate for validators
ValidatorMinimumCommission = sdk.NewDecWithPrec(5, 2)
)
// RegisterUpgradeHandlers registers the upgrade handlers for the app.
func (app App) RegisterUpgradeHandlers() {
app.upgradeKeeper.SetUpgradeHandler(
UpgradeName_Testnet,
upgradeHandler(app, UpgradeName_Testnet),
)
// app.upgradeKeeper.SetUpgradeHandler(
// UpgradeName_Mainnet,
// upgradeHandler(app, UpgradeName_Mainnet, CommunityParams_Mainnet),
// )
// app.upgradeKeeper.SetUpgradeHandler(
// UpgradeName_Testnet,
// upgradeHandler(app, UpgradeName_Testnet, CommunityParams_Testnet),
// )
// app.upgradeKeeper.SetUpgradeHandler(
// UpgradeName_E2ETest,
// upgradeHandler(app, UpgradeName_Testnet, CommunityParams_E2E),
// )
upgradeInfo, err := app.upgradeKeeper.ReadUpgradeInfoFromDisk()
if err != nil {
panic(err)
}
doUpgrade := upgradeInfo.Name == UpgradeName_Testnet
doUpgrade := upgradeInfo.Name == UpgradeName_Mainnet ||
upgradeInfo.Name == UpgradeName_Testnet ||
upgradeInfo.Name == UpgradeName_E2ETest
if doUpgrade && !app.upgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
storeUpgrades := storetypes.StoreUpgrades{
Added: []string{
precisebanktypes.ModuleName,
// x/community added store
// communitytypes.ModuleName,
},
}
@ -50,219 +96,163 @@ func (app App) RegisterUpgradeHandlers() {
func upgradeHandler(
app App,
name string,
// communityParams communitytypes.Params,
) upgradetypes.UpgradeHandler {
return func(
ctx sdk.Context,
plan upgradetypes.Plan,
fromVM module.VersionMap,
) (module.VersionMap, error) {
logger := app.Logger()
logger.Info(fmt.Sprintf("running %s upgrade handler", name))
app.Logger().Info(fmt.Sprintf("running %s upgrade handler", name))
// Run migrations for all modules and return new consensus version map.
versionMap, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
toVM, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
if err != nil {
return nil, err
return toVM, err
}
logger.Info("completed store migrations")
//
// Staking validator minimum commission
//
UpdateValidatorMinimumCommission(ctx, app)
// Migration of fractional balances from x/evmutil to x/precisebank
if err := MigrateEvmutilToPrecisebank(
ctx,
app.accountKeeper,
app.bankKeeper,
app.evmutilKeeper,
app.precisebankKeeper,
); err != nil {
return nil, err
}
//
// Community Params
//
// app.communityKeeper.SetParams(ctx, communityParams)
// app.Logger().Info(
// "initialized x/community params",
// "UpgradeTimeDisableInflation", communityParams.UpgradeTimeDisableInflation,
// "StakingRewardsPerSecond", communityParams.StakingRewardsPerSecond,
// "UpgradeTimeSetStakingRewardsPerSecond", communityParams.UpgradeTimeSetStakingRewardsPerSecond,
// )
logger.Info("completed x/evmutil to x/precisebank migration")
//
// Kavadist gov grant
//
// msgGrant, err := authz.NewMsgGrant(
// app.accountKeeper.GetModuleAddress(kavadisttypes.ModuleName), // granter
// app.accountKeeper.GetModuleAddress(govtypes.ModuleName), // grantee
// authz.NewGenericAuthorization(sdk.MsgTypeURL(&banktypes.MsgSend{})), // authorization
// nil, // expiration
// )
// if err != nil {
// return toVM, err
// }
// _, err = app.authzKeeper.Grant(ctx, msgGrant)
// if err != nil {
// return toVM, err
// }
// app.Logger().Info("created gov grant for kavadist funds")
return versionMap, nil
//
// Gov Quorum
//
govTallyParams := app.govKeeper.GetTallyParams(ctx)
oldQuorum := govTallyParams.Quorum
govTallyParams.Quorum = sdkmath.LegacyMustNewDecFromStr("0.2").String()
app.govKeeper.SetTallyParams(ctx, govTallyParams)
app.Logger().Info(fmt.Sprintf("updated tally quorum from %s to %s", oldQuorum, govTallyParams.Quorum))
//
// Incentive Params
//
UpdateIncentiveParams(ctx, app)
return toVM, nil
}
}
// MigrateEvmutilToPrecisebank migrates all required state from x/evmutil to
// x/precisebank and ensures the resulting state is correct.
// This migrates the following state:
// - Fractional balances
// - Fractional balance reserve
// Initializes the following state in x/precisebank:
// - Remainder amount
func MigrateEvmutilToPrecisebank(
// UpdateValidatorMinimumCommission updates the commission rate for all
// validators to be at least the new min commission rate, and sets the minimum
// commission rate in the staking params.
func UpdateValidatorMinimumCommission(
ctx sdk.Context,
accountKeeper evmutiltypes.AccountKeeper,
bankKeeper bankkeeper.Keeper,
evmutilKeeper evmutilkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) error {
logger := ctx.Logger()
app App,
) {
resultCount := make(map[stakingtypes.BondStatus]int)
aggregateSum, err := TransferFractionalBalances(
// Iterate over *all* validators including inactive
app.stakingKeeper.IterateValidators(
ctx,
evmutilKeeper,
precisebankKeeper,
)
if err != nil {
return fmt.Errorf("fractional balances transfer: %w", err)
}
logger.Info(
"fractional balances transferred from x/evmutil to x/precisebank",
"aggregate sum", aggregateSum,
func(index int64, validator stakingtypes.ValidatorI) (stop bool) {
// Skip if validator commission is already >= 5%
if validator.GetCommission().GTE(ValidatorMinimumCommission) {
return false
}
val, ok := validator.(stakingtypes.Validator)
if !ok {
panic("expected stakingtypes.Validator")
}
// Set minimum commission rate to 5%, when commission is < 5%
val.Commission.Rate = ValidatorMinimumCommission
val.Commission.UpdateTime = ctx.BlockTime()
// Update MaxRate if necessary
if val.Commission.MaxRate.LT(ValidatorMinimumCommission) {
val.Commission.MaxRate = ValidatorMinimumCommission
}
if err := app.stakingKeeper.BeforeValidatorModified(ctx, val.GetOperator()); err != nil {
panic(fmt.Sprintf("failed to call BeforeValidatorModified: %s", err))
}
app.stakingKeeper.SetValidator(ctx, val)
// Keep track of counts just for logging purposes
switch val.GetStatus() {
case stakingtypes.Bonded:
resultCount[stakingtypes.Bonded]++
case stakingtypes.Unbonded:
resultCount[stakingtypes.Unbonded]++
case stakingtypes.Unbonding:
resultCount[stakingtypes.Unbonding]++
}
return false
},
)
remainder := InitializeRemainder(ctx, precisebankKeeper, aggregateSum)
logger.Info("remainder amount initialized in x/precisebank", "remainder", remainder)
app.Logger().Info(
"updated validator minimum commission rate for all existing validators",
stakingtypes.BondStatusBonded, resultCount[stakingtypes.Bonded],
stakingtypes.BondStatusUnbonded, resultCount[stakingtypes.Unbonded],
stakingtypes.BondStatusUnbonding, resultCount[stakingtypes.Unbonding],
)
// Migrate fractional balances, reserve, and ensure reserve fully backs all
// fractional balances.
if err := TransferFractionalBalanceReserve(
ctx,
accountKeeper,
bankKeeper,
precisebankKeeper,
); err != nil {
return fmt.Errorf("reserve transfer: %w", err)
}
stakingParams := app.stakingKeeper.GetParams(ctx)
stakingParams.MinCommissionRate = ValidatorMinimumCommission
app.stakingKeeper.SetParams(ctx, stakingParams)
return nil
app.Logger().Info(
"updated x/staking params minimum commission rate",
"MinCommissionRate", stakingParams.MinCommissionRate,
)
}
// TransferFractionalBalances migrates fractional balances from x/evmutil to
// x/precisebank. It sets the fractional balance in x/precisebank and deletes
// the account from x/evmutil. Returns the aggregate sum of all fractional
// balances.
func TransferFractionalBalances(
// UpdateIncentiveParams modifies the earn rewards period for bkava to be 600K KAVA per year.
func UpdateIncentiveParams(
ctx sdk.Context,
evmutilKeeper evmutilkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) (sdkmath.Int, error) {
aggregateSum := sdkmath.ZeroInt()
app App,
) {
// incentiveParams := app.incentiveKeeper.GetParams(ctx)
var iterErr error
// bkava annualized rewards: 600K KAVA
// newAmount := sdkmath.LegacyNewDec(600_000).
// MulInt(kavaConversionFactor).
// QuoInt(secondsPerYear).
// TruncateInt()
evmutilKeeper.IterateAllAccounts(ctx, func(acc evmutiltypes.Account) bool {
// Set account balance in x/precisebank
precisebankKeeper.SetFractionalBalance(ctx, acc.Address, acc.Balance)
// for i := range incentiveParams.EarnRewardPeriods {
// if incentiveParams.EarnRewardPeriods[i].CollateralType != "bkava" {
// continue
// }
// Delete account from x/evmutil
iterErr := evmutilKeeper.SetAccount(ctx, evmutiltypes.Account{
Address: acc.Address,
// Set balance to 0 to delete it
Balance: sdkmath.ZeroInt(),
})
// // Update rewards per second via index
// incentiveParams.EarnRewardPeriods[i].RewardsPerSecond = sdk.NewCoins(
// sdk.NewCoin("ukava", newAmount),
// )
// }
// Halt iteration if there was an error
if iterErr != nil {
return true
}
// Aggregate sum of all fractional balances
aggregateSum = aggregateSum.Add(acc.Balance)
// Continue iterating
return false
})
return aggregateSum, iterErr
}
// InitializeRemainder initializes the remainder amount in x/precisebank. It
// calculates the remainder amount that is needed to ensure that the sum of all
// fractional balances is a multiple of the conversion factor. The remainder
// amount is stored in the store and returned.
func InitializeRemainder(
	ctx sdk.Context,
	precisebankKeeper precisebankkeeper.Keeper,
	aggregateSum sdkmath.Int,
) sdkmath.Int {
	conversionFactor := precisebanktypes.ConversionFactor()

	// Fractional amount left over beyond whole conversion-factor units.
	// remainder is chosen such that
	// (sum(fBalances) + remainder) % conversionFactor == 0.
	overflow := aggregateSum.Mod(conversionFactor)

	// The outer Mod keeps the result in [0, conversionFactor): when the
	// aggregate sum is already an exact multiple of the conversion factor,
	// the remainder is 0 rather than a full conversion factor.
	remainder := conversionFactor.Sub(overflow).Mod(conversionFactor)

	// SetRemainderAmount panics if the remainder is invalid. In a correct
	// chain state with only transfer-driven mint/burns, this would be 0.
	precisebankKeeper.SetRemainderAmount(ctx, remainder)

	return remainder
}
// TransferFractionalBalanceReserve migrates the fractional balance reserve from
// x/evmutil to x/precisebank. It transfers the reserve balance from x/evmutil
// to x/precisebank and ensures that the reserve fully backs all fractional
// balances. It mints or burns coins to back the fractional balances exactly.
func TransferFractionalBalanceReserve(
	ctx sdk.Context,
	accountKeeper evmutiltypes.AccountKeeper,
	bankKeeper bankkeeper.Keeper,
	precisebankKeeper precisebankkeeper.Keeper,
) error {
	logger := ctx.Logger()
	// Transfer x/evmutil reserve to x/precisebank.
	// The reserve is held as the integer denom in the x/evmutil module account.
	evmutilAddr := accountKeeper.GetModuleAddress(evmutiltypes.ModuleName)
	reserveBalance := bankKeeper.GetBalance(ctx, evmutilAddr, precisebanktypes.IntegerCoinDenom)
	if err := bankKeeper.SendCoinsFromModuleToModule(
		ctx,
		evmutiltypes.ModuleName,     // from x/evmutil
		precisebanktypes.ModuleName, // to x/precisebank
		sdk.NewCoins(reserveBalance),
	); err != nil {
		return fmt.Errorf("failed to transfer reserve from x/evmutil to x/precisebank: %w", err)
	}
	logger.Info(fmt.Sprintf("transferred reserve balance: %s", reserveBalance))
	// Ensure x/precisebank reserve fully backs all fractional balances.
	totalFractionalBalances := precisebankKeeper.GetTotalSumFractionalBalances(ctx)
	// Does NOT ensure state is correct: total fractional balances should be a
	// multiple of the conversion factor but is not guaranteed to be, due to
	// the remainder. Remainder initialization is handled by InitializeRemainder.
	// Determine how much the reserve is off by, e.g. unbacked amount.
	expectedReserveBalance := totalFractionalBalances.Quo(precisebanktypes.ConversionFactor())
	// If there is a remainder (totalFractionalBalances % conversionFactor != 0),
	// then expectedReserveBalance is rounded up to the nearest integer so the
	// partial unit is still fully backed.
	if totalFractionalBalances.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
		expectedReserveBalance = expectedReserveBalance.Add(sdkmath.OneInt())
	}
	// Positive => reserve is short; negative => reserve holds excess.
	unbackedAmount := expectedReserveBalance.Sub(reserveBalance.Amount)
	logger.Info(fmt.Sprintf("total account fractional balances: %s", totalFractionalBalances))
	// Three possible cases:
	// 1. Reserve is not enough, mint coins to back the fractional balances
	// 2. Reserve is too much, burn coins to back the fractional balances exactly
	// 3. Reserve is exactly enough, no action needed
	if unbackedAmount.IsPositive() {
		coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount))
		if err := bankKeeper.MintCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
			return fmt.Errorf("failed to mint extra reserve coins: %w", err)
		}
		logger.Info(fmt.Sprintf("unbacked amount minted to reserve: %s", unbackedAmount))
	} else if unbackedAmount.IsNegative() {
		// Negate to get a valid positive coin amount to burn.
		coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount.Neg()))
		if err := bankKeeper.BurnCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
			return fmt.Errorf("failed to burn extra reserve coins: %w", err)
		}
		logger.Info(fmt.Sprintf("extra reserve amount burned: %s", unbackedAmount.Neg()))
	} else {
		logger.Info("reserve exactly backs fractional balances, no mint/burn needed")
	}
	return nil
}

View File

@ -1,434 +1,239 @@
package app_test
import (
"strconv"
"testing"
"time"
sdkmath "cosmossdk.io/math"
"github.com/0glabs/0g-chain/app"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
sdk "github.com/cosmos/cosmos-sdk/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/stretchr/testify/require"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func TestMigrateEvmutilToPrecisebank(t *testing.T) {
// Full test case with all components together
tests := []struct {
name string
initialReserve sdkmath.Int
fractionalBalances []sdkmath.Int
// func TestUpgradeCommunityParams_Mainnet(t *testing.T) {
// require.Equal(
// t,
// sdkmath.LegacyZeroDec().String(),
// app.CommunityParams_Mainnet.StakingRewardsPerSecond.String(),
// )
// require.Equal(
// t,
// // Manually confirmed
// "317097.919837645865043125",
// app.CommunityParams_Mainnet.UpgradeTimeSetStakingRewardsPerSecond.String(),
// "mainnet kava per second should be correct",
// )
// }
// func TestUpgradeCommunityParams_Testnet(t *testing.T) {
// require.Equal(
// t,
// sdkmath.LegacyZeroDec().String(),
// app.CommunityParams_Testnet.StakingRewardsPerSecond.String(),
// )
// require.Equal(
// t,
// // Manually confirmed
// "475646879756.468797564687975646",
// app.CommunityParams_Testnet.UpgradeTimeSetStakingRewardsPerSecond.String(),
// "testnet kava per second should be correct",
// )
// }
func TestUpdateValidatorMinimumCommission(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
sk := tApp.GetStakingKeeper()
stakingParams := sk.GetParams(ctx)
stakingParams.MinCommissionRate = sdk.ZeroDec()
sk.SetParams(ctx, stakingParams)
// Set some validators with varying commission rates
vals := []struct {
name string
operatorAddr sdk.ValAddress
consPriv *ethsecp256k1.PrivKey
commissionRateMin sdk.Dec
commissionRateMax sdk.Dec
shouldBeUpdated bool
}{
{
"no fractional balances",
sdkmath.NewInt(0),
[]sdkmath.Int{},
name: "zero commission rate",
operatorAddr: sdk.ValAddress("val0"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.ZeroDec(),
commissionRateMax: sdk.ZeroDec(),
shouldBeUpdated: true,
},
{
"sufficient reserve, 0 remainder",
// Accounts adding up to 2 int units, same as reserve
sdkmath.NewInt(2),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
name: "0.01 commission rate",
operatorAddr: sdk.ValAddress("val1"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.01"),
commissionRateMax: sdk.MustNewDecFromStr("0.01"),
shouldBeUpdated: true,
},
{
"insufficient reserve, 0 remainder",
// Accounts adding up to 2 int units, but only 1 int unit in reserve
sdkmath.NewInt(1),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
name: "0.05 commission rate",
operatorAddr: sdk.ValAddress("val2"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.05"),
commissionRateMax: sdk.MustNewDecFromStr("0.05"),
shouldBeUpdated: false,
},
{
"excess reserve, 0 remainder",
// Accounts adding up to 2 int units, but 3 int unit in reserve
sdkmath.NewInt(3),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
name: "0.06 commission rate",
operatorAddr: sdk.ValAddress("val3"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.06"),
commissionRateMax: sdk.MustNewDecFromStr("0.06"),
shouldBeUpdated: false,
},
{
"sufficient reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, same as reserve
sdkmath.NewInt(2),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, less than reserve,
// Reserve should be 2 and remainder 0.5
sdkmath.NewInt(1),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"excess reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, 3 int units in reserve
sdkmath.NewInt(3),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
name: "0.5 commission rate",
operatorAddr: sdk.ValAddress("val4"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.5"),
commissionRateMax: sdk.MustNewDecFromStr("0.5"),
shouldBeUpdated: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
for _, v := range vals {
val, err := stakingtypes.NewValidator(
v.operatorAddr,
v.consPriv.PubKey(),
stakingtypes.Description{},
)
require.NoError(t, err)
val.Commission.Rate = v.commissionRateMin
val.Commission.MaxRate = v.commissionRateMax
ak := tApp.GetAccountKeeper()
bk := tApp.GetBankKeeper()
evmuk := tApp.GetEvmutilKeeper()
pbk := tApp.GetPrecisebankKeeper()
reserveCoin := sdk.NewCoin(precisebanktypes.IntegerCoinDenom, tt.initialReserve)
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(reserveCoin))
require.NoError(t, err)
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
// Double check balances
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
require.Equal(t, tt.initialReserve, oldReserveBalance.Amount, "initial x/evmutil reserve balance")
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
// Set accounts
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
err := evmuk.SetBalance(ctx, addr, balance)
require.NoError(t, err)
}
// Run full x/evmutil -> x/precisebank migration
err = app.MigrateEvmutilToPrecisebank(
ctx,
ak,
bk,
evmuk,
pbk,
)
require.NoError(t, err)
// Check old reserve is empty
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
// Check new reserve fully backs fractional balances
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
remainder := pbk.GetRemainderAmount(ctx)
expectedReserveBal := fractionalBalanceTotal.Add(remainder)
require.Equal(
t,
expectedReserveBal,
newReserveBalanceAfter.Amount.Mul(precisebanktypes.ConversionFactor()),
"new reserve should equal total fractional balances",
)
// Check balances are deleted in evmutil and migrated to precisebank
for i := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
acc := evmuk.GetAccount(ctx, addr)
require.Nil(t, acc, "account should be deleted")
balance := pbk.GetFractionalBalance(ctx, addr)
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
}
// Checks balances valid and remainder
res, stop := precisebankkeeper.AllInvariants(pbk)(ctx)
require.Falsef(t, stop, "invariants should pass: %s", res)
})
}
}
func TestTransferFractionalBalances(t *testing.T) {
tests := []struct {
name string
fractionalBalances []sdkmath.Int
}{
{
"no fractional balances",
[]sdkmath.Int{},
},
{
"balanced fractional balances",
[]sdkmath.Int{
// 4 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"unbalanced balances",
[]sdkmath.Int{
// 3 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
err = sk.SetValidatorByConsAddr(ctx, val)
require.NoError(t, err)
sk.SetValidator(ctx, val)
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
evmutilk := tApp.GetEvmutilKeeper()
pbk := tApp.GetPrecisebankKeeper()
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
err := evmutilk.SetBalance(ctx, addr, balance)
require.NoError(t, err)
}
// Run balance transfer
aggregateSum, err := app.TransferFractionalBalances(
ctx,
evmutilk,
pbk,
)
require.NoError(t, err)
// Check balances are deleted in evmutil and migrated to precisebank
sum := sdkmath.ZeroInt()
for i := range tt.fractionalBalances {
sum = sum.Add(tt.fractionalBalances[i])
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
acc := evmutilk.GetAccount(ctx, addr)
require.Nil(t, acc, "account should be deleted")
balance := pbk.GetFractionalBalance(ctx, addr)
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
}
require.Equal(t, sum, aggregateSum, "aggregate sum should be correct")
})
}
}
func TestInitializeRemainder(t *testing.T) {
tests := []struct {
name string
giveAggregateSum sdkmath.Int
wantRemainder sdkmath.Int
}{
{
"0 remainder, 1ukava",
precisebanktypes.ConversionFactor(),
sdkmath.NewInt(0),
require.NotPanics(
t, func() {
app.UpdateValidatorMinimumCommission(ctx, tApp.App)
},
{
"0 remainder, multiple ukava",
precisebanktypes.ConversionFactor().MulRaw(5),
sdkmath.NewInt(0),
},
{
"non-zero remainder, min",
precisebanktypes.ConversionFactor().SubRaw(1),
sdkmath.NewInt(1),
},
{
"non-zero remainder, max",
sdkmath.NewInt(1),
precisebanktypes.ConversionFactor().SubRaw(1),
},
{
"non-zero remainder, half",
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
}
)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
stakingParamsAfter := sk.GetParams(ctx)
require.Equal(t, stakingParamsAfter.MinCommissionRate, app.ValidatorMinimumCommission)
pbk := tApp.GetPrecisebankKeeper()
// Check that all validators have a commission rate >= 5%
for _, val := range vals {
t.Run(val.name, func(t *testing.T) {
validator, found := sk.GetValidator(ctx, val.operatorAddr)
require.True(t, found, "validator should be found")
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
remainder := app.InitializeRemainder(
ctx,
tApp.GetPrecisebankKeeper(),
tt.giveAggregateSum,
)
require.Equal(t, tt.wantRemainder, remainder)
// Check actual state
remainderAfter := pbk.GetRemainderAmount(ctx)
require.Equal(t, tt.wantRemainder, remainderAfter)
// Not checking invariants here since it requires actual balance state
aggregateSumWithRemainder := tt.giveAggregateSum.Add(remainder)
require.True(
t,
aggregateSumWithRemainder.
Mod(precisebanktypes.ConversionFactor()).
IsZero(),
"remainder + aggregate sum should be a multiple of the conversion factor",
validator.GetCommission().GTE(app.ValidatorMinimumCommission),
"commission rate should be >= 5%",
)
})
}
}
func TestTransferFractionalBalanceReserve(t *testing.T) {
tests := []struct {
name string
initialReserve sdk.Coin
fractionalBalances []sdkmath.Int
}{
{
"balanced reserve, no remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 2 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 4 accounts, total 2 int units
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"extra reserve funds",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(2)),
[]sdkmath.Int{
// 2 accounts, total 1 int units
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, with remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 5 accounts, total 2.5 int units
// Expected 3 int units in reserve, 0.5 remainder
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"extra reserve funds, with remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(3)),
[]sdkmath.Int{
// 3 accounts, total 1.5 int units.
// Expected 2 int units in reserve, 0.5 remainder
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
bk := tApp.GetBankKeeper()
pbk := tApp.GetPrecisebankKeeper()
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(tt.initialReserve))
require.NoError(t, err)
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
// Double check balances
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
require.Equal(t, tt.initialReserve, oldReserveBalance)
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte{byte(i)})
require.NotPanics(t, func() {
pbk.SetFractionalBalance(ctx, addr, balance)
}, "given fractional balances should be valid")
}
// Run reserve migration
err = app.TransferFractionalBalanceReserve(
ctx,
tApp.GetAccountKeeper(),
bk,
tApp.GetPrecisebankKeeper(),
)
require.NoError(t, err)
// Check old reserve is empty
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
// Check new reserve fully backs fractional balances
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
expectedReserveBal := fractionalBalanceTotal.
Quo(precisebanktypes.ConversionFactor())
// Check if theres a remainder
if fractionalBalanceTotal.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
expectedReserveBal = expectedReserveBal.Add(sdkmath.OneInt())
}
require.Equal(
require.True(
t,
expectedReserveBal,
newReserveBalanceAfter.Amount,
"new reserve should equal total fractional balances + remainder",
validator.Commission.MaxRate.GTE(app.ValidatorMinimumCommission),
"commission rate max should be >= 5%, got %s",
validator.Commission.MaxRate,
)
if val.shouldBeUpdated {
require.Equal(
t,
ctx.BlockTime(),
validator.Commission.UpdateTime,
"commission update time should be set to block time",
)
} else {
require.Equal(
t,
time.Unix(0, 0).UTC(),
validator.Commission.UpdateTime,
"commission update time should not be changed -- default value is 0",
)
}
})
}
}
// func TestUpdateIncentiveParams(t *testing.T) {
// tApp := app.NewTestApp()
// tApp.InitializeFromGenesisStates()
// ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
// ik := tApp.GetIncentiveKeeper()
// params := ik.GetParams(ctx)
// startPeriod := time.Date(2021, 10, 26, 15, 0, 0, 0, time.UTC)
// endPeriod := time.Date(2022, 10, 26, 15, 0, 0, 0, time.UTC)
// params.EarnRewardPeriods = incentivetypes.MultiRewardPeriods{
// incentivetypes.NewMultiRewardPeriod(
// true,
// "bkava",
// startPeriod,
// endPeriod,
// sdk.NewCoins(
// sdk.NewCoin("ukava", sdk.NewInt(159459)),
// ),
// ),
// }
// ik.SetParams(ctx, params)
// beforeParams := ik.GetParams(ctx)
// require.Equal(t, params, beforeParams, "initial incentive params should be set")
// // -- UPGRADE
// app.UpdateIncentiveParams(ctx, tApp.App)
// // -- After
// afterParams := ik.GetParams(ctx)
// require.Len(
// t,
// afterParams.EarnRewardPeriods[0].RewardsPerSecond,
// 1,
// "bkava earn reward period should only contain 1 coin",
// )
// require.Equal(
// t,
// // Manual calculation of
// // 600,000 * 1000,000 / (365 * 24 * 60 * 60)
// sdk.NewCoin("ukava", sdkmath.NewInt(19025)),
// afterParams.EarnRewardPeriods[0].RewardsPerSecond[0],
// "bkava earn reward period should be updated",
// )
// // Check that other params are not changed
// afterParams.EarnRewardPeriods[0].RewardsPerSecond[0] = beforeParams.EarnRewardPeriods[0].RewardsPerSecond[0]
// require.Equal(
// t,
// beforeParams,
// afterParams,
// "other param values should not be changed",
// )
// }
// generateConsKey returns a freshly generated ethsecp256k1 private key,
// failing the test immediately if key generation errors.
func generateConsKey(
	t *testing.T,
) *ethsecp256k1.PrivKey {
	t.Helper()

	privKey, err := ethsecp256k1.GenerateKey()
	require.NoError(t, err)

	return privKey
}

View File

@ -28,9 +28,9 @@ DIRS := $(BUILD_CACHE_DIR) $(BIN_DIR)
### Tool Versions ###
################################################################################
GO_BIN ?= go
PROTOC_VERSION ?= v25.1
BUF_VERSION ?= v1.28.1
PROTOC_GEN_GOCOSMOS_VERSION ?= $(shell $(GO_BIN) list -m -f '{{.Version}}' github.com/cosmos/gogoproto)
PROTOC_VERSION ?= v21.9
BUF_VERSION ?= v1.9.0
PROTOC_GEN_GOCOSMOS_VERSION ?= v0.3.1
PROTOC_GEN_GRPC_GATEWAY_VERSION ?= $(shell $(GO_BIN) list -m github.com/grpc-ecosystem/grpc-gateway| sed 's:.* ::')
PROTOC_GEN_DOC_VERSION ?= v1.5.1
SWAGGER_COMBINE_VERSION ?= v1.4.0
@ -68,7 +68,7 @@ $(PROTOC_VERSION_FILE):
mkdir -p protoc && cd protoc; \
curl -sOL $(PROTOC_DOWNLOAD_URL); \
unzip -q $(PROTOC_ARCHIVE_NAME) bin/protoc
@cp -f $(BUILD_CACHE_DIR)/protoc/bin/protoc $(BIN_DIR)/protoc
@cp $(BUILD_CACHE_DIR)/protoc/bin/protoc $(BIN_DIR)/protoc
@rm -rf $(BUILD_CACHE_DIR)/protoc
PROTOC := $(BIN_DIR)/protoc
@ -93,7 +93,7 @@ $(BUF_VERSION_FILE):
mkdir -p buf && cd buf; \
curl -sOL $(BUF_DOWNLOAD_URL); \
tar -xzf $(BUF_ARCHIVE_NAME) buf/bin/buf
@cp -f $(BUILD_CACHE_DIR)/buf/buf/bin/buf $(BIN_DIR)/buf
@cp $(BUILD_CACHE_DIR)/buf/buf/bin/buf $(BIN_DIR)/buf
@rm -rf $(BUILD_CACHE_DIR)/buf
BUF := $(BIN_DIR)/buf
@ -113,8 +113,8 @@ $(PROTOC_GEN_GOCOSMOS_VERSION_FILE):
@touch $(PROTOC_GEN_GOCOSMOS_VERSION_FILE)
@cd $(BUILD_CACHE_DIR); \
mkdir -p protoc-gen-gocosmos && cd protoc-gen-gocosmos; \
git clone -q https://github.com/cosmos/gogoproto.git; \
cd gogoproto; \
git clone -q https://github.com/regen-network/cosmos-proto.git; \
cd cosmos-proto; \
git checkout -q $(PROTOC_GEN_GOCOSMOS_VERSION); \
GOBIN=$(ROOT_DIR)/$(BIN_DIR) $(GO_BIN) install ./protoc-gen-gocosmos
@rm -rf $(BUILD_CACHE_DIR)/protoc-gen-gocosmos
@ -185,7 +185,7 @@ $(PROTOC_GEN_DOC_VERSION_FILE):
mkdir -p protoc-gen-doc && cd protoc-gen-doc; \
curl -sOL $(PROTOC_GEN_DOC_DOWNLOAD_URL); \
tar -xzf $(PROTOC_GEN_DOC_ARCHIVE_NAME) protoc-gen-doc
@cp -f $(BUILD_CACHE_DIR)/protoc-gen-doc/protoc-gen-doc $(BIN_DIR)/protoc-gen-doc
@cp $(BUILD_CACHE_DIR)/protoc-gen-doc/protoc-gen-doc $(BIN_DIR)/protoc-gen-doc
@rm -rf $(BUILD_CACHE_DIR)/protoc-gen-doc
PROTOC_GEN_DOC := $(BIN_DIR)/protoc-gen-doc

View File

@ -1,45 +0,0 @@
################################################################################
### Required Variables ###
################################################################################
ifndef DOCKER
$(error DOCKER not set)
endif
ifndef BUILD_DIR
$(error BUILD_DIR not set)
endif
################################################################################
### Lint Settings ###
################################################################################
LINT_FROM_REV ?= $(shell git merge-base origin/master HEAD)
GOLANGCI_VERSION ?= $(shell cat .golangci-version)
GOLANGCI_IMAGE_TAG ?= golangci/golangci-lint:$(GOLANGCI_VERSION)
GOLANGCI_DIR ?= $(CURDIR)/$(BUILD_DIR)/.golangci-lint
GOLANGCI_CACHE_DIR ?= $(GOLANGCI_DIR)/$(GOLANGCI_VERSION)-cache
GOLANGCI_MOD_CACHE_DIR ?= $(GOLANGCI_DIR)/go-mod
################################################################################
### Lint Target ###
################################################################################
.PHONY: lint
lint: $(GOLANGCI_CACHE_DIR) $(GOLANGCI_MOD_CACHE_DIR)
@echo "Running lint from rev $(LINT_FROM_REV), use LINT_FROM_REV var to override."
$(DOCKER) run -t --rm \
-v $(GOLANGCI_CACHE_DIR):/root/.cache \
-v $(GOLANGCI_MOD_CACHE_DIR):/go/pkg/mod \
-v $(CURDIR):/app \
-w /app \
$(GOLANGCI_IMAGE_TAG) \
golangci-lint run -v --new-from-rev $(LINT_FROM_REV)
$(GOLANGCI_CACHE_DIR):
@mkdir -p $@
$(GOLANGCI_MOD_CACHE_DIR):
@mkdir -p $@

View File

@ -14,23 +14,13 @@ PROTOBUF_ANY_DOWNLOAD_URL = https://raw.githubusercontent.com/protocolbuffers/pr
#
# Proto dependencies under go.mod
#
GOGO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/gogoproto)
TENDERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cometbft/cometbft)
GOGO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/gogo/protobuf)
TENDERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/tendermint/tendermint)
COSMOS_PROTO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/cosmos-proto)
COSMOS_SDK_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/cosmos-sdk)
IBC_GO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/ibc-go/v7)
IBC_GO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/ibc-go/v6)
ETHERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/evmos/ethermint)
#
# ICS23 Proof Proto
#
ICS23_VERSION := $(shell $(GO_BIN) list -m -f '{{.Version}}' github.com/cosmos/ics23/go)
ICS23_PROOFS_PROTO_PATH := cosmos/ics23/v1/proofs.proto
ICS23_PROOFS_PROTO_LOCAL_PATH := third_party/proto/$(ICS23_PROOFS_PROTO_PATH)
ICS23_PROOFS_PROTO_DOWNLOAD_URL := https://raw.githubusercontent.com/cosmos/ics23/go/$(ICS23_VERSION)/proto/$(ICS23_PROOFS_PROTO_PATH)
#
# Common target directories
#
@ -54,21 +44,18 @@ proto-update-deps: check-rsync ## Update all third party proto files
@curl -sSL $(PROTOBUF_ANY_DOWNLOAD_URL)/any.proto > $(PROTOBUF_GOOGLE_TYPES)/any.proto
@mkdir -p client/docs
@cp -f $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
@cp -f $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
@cp -f $(ETHERMINT_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/ethermint-swagger.yml
@cp $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
@cp $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
@mkdir -p $(COSMOS_PROTO_TYPES)
@cp -f $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
@mkdir -p $(dir $(ICS23_PROOFS_PROTO_LOCAL_PATH))
@curl -sSL $(ICS23_PROOFS_PROTO_DOWNLOAD_URL) > $(ICS23_PROOFS_PROTO_LOCAL_PATH)
@cp $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(GOGO_PATH)/gogoproto third_party/proto
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(TENDERMINT_PATH)/proto third_party
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(COSMOS_SDK_PATH)/proto third_party
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(IBC_GO_PATH)/proto third_party
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(ETHERMINT_PATH)/proto third_party
@cp -f $(IBC_GO_PATH)/third_party/proto/proofs.proto third_party/proto/proofs.proto
.PHONY: check-proto-deps
check-proto-deps: proto-update-deps ## Return error code 1 if proto dependencies are not changed

View File

@ -1,7 +1,7 @@
.PHONY: proto-lint check-proto-lint
proto-lint check-proto-lint: install-build-deps
@echo "Linting proto file"
@$(BUF) lint proto
@$(BUF) lint
.PHONY: proto-gen
proto-gen: install-build-deps

View File

@ -1,8 +1,6 @@
package chaincfg
import (
sdk "github.com/cosmos/cosmos-sdk/types"
)
import sdk "github.com/cosmos/cosmos-sdk/types"
const (
AppName = "0gchaind"

View File

@ -1 +1 @@
6862cde560c70cb82f7908e6cef22ca223465bd2
a967d2fdda299ec8e1e3b99fb55bd06ecfdb0469

View File

@ -22,8 +22,6 @@
},
"app_hash": "",
"app_state": {
"06-solomachine": null,
"07-tendermint": null,
"auction": {
"next_auction_id": "1",
"params": {
@ -507,10 +505,6 @@
{
"address": "kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq",
"coins": [
{
"denom": "bnb",
"amount": "500000000"
},
{
"denom": "btcb",
"amount": "200000000"
@ -531,10 +525,6 @@
"denom": "erc20/axelar/wbtc",
"amount": "1000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"amount": "200000000"
},
{
"denom": "erc20/multichain/usdc",
"amount": "1000000000000000000"
@ -566,20 +556,12 @@
{
"denom": "usdx",
"amount": "103000000000"
},
{
"denom": "xrpb",
"amount": "1000000000000000"
}
]
},
{
"address": "kava1krh7k30pc9rteejpl2zycj0vau58y8c69xkzws",
"coins": [
{
"denom": "bnb",
"amount": "100000000000000000"
},
{
"denom": "btcb",
"amount": "200000000"
@ -600,14 +582,6 @@
"denom": "erc20/axelar/wbtc",
"amount": "1000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"amount": "200000000"
},
{
"denom": "erc20/tether/usdt",
"amount": "100000000000"
},
{
"denom": "hard",
"amount": "1000000000"
@ -623,10 +597,6 @@
{
"denom": "usdx",
"amount": "103000000000"
},
{
"denom": "xrpb",
"amount": "103000000000"
}
]
},
@ -848,7 +818,6 @@
"gov_denom": "ukava",
"params": {
"circuit_breaker": false,
"liquidation_block_interval": 500,
"collateral_params": [
{
"denom": "bnb",
@ -1020,7 +989,8 @@
"check_collateralization_index_count": "10",
"conversion_factor": "6"
}
],
]
,
"debt_auction_lot": "10000000000",
"debt_auction_threshold": "100000000000",
"debt_param": {
@ -1267,15 +1237,7 @@
"votes": []
},
"community": {
"params": {
"upgrade_time_disable_inflation": "2023-11-01T00:00:00Z",
"upgrade_time_set_staking_rewards_per_second": "744191",
"staking_rewards_per_second": "0"
},
"staking_rewards_state": {
"last_accumulation_time": "0001-01-01T00:00:00Z",
"last_truncation_error": "0"
}
"params": {}
},
"crisis": {
"constant_fee": {
@ -2101,25 +2063,6 @@
}
],
"nested_types": []
},
{
"msg_type_url": "/kava.committee.v1beta1.MsgVote",
"msg_value_type_name": "MsgValueCommitteeVote",
"value_types": [
{
"name": "proposal_id",
"type": "uint64"
},
{
"name": "voter",
"type": "string"
},
{
"name": "vote_type",
"type": "int32"
}
],
"nested_types": []
}
],
"allow_unprotected_txs": false
@ -2282,27 +2225,22 @@
"deposits": [],
"votes": [],
"proposals": [],
"deposit_params": null,
"voting_params": {
"voting_period": "604800s"
},
"tally_params": null,
"params": {
"deposit_params": {
"min_deposit": [
{
"denom": "ukava",
"amount": "10000000"
}
],
"max_deposit_period": "172800s",
"voting_period": "604800s",
"max_deposit_period": "172800s"
},
"voting_params": {
"voting_period": "600s"
},
"tally_params": {
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000",
"min_initial_deposit_ratio": "0.000000000000000000",
"burn_vote_quorum": false,
"burn_proposal_deposit_prevote": false,
"burn_vote_veto": true
"veto_threshold": "0.334000000000000000"
}
},
"hard": {
@ -2577,24 +2515,6 @@
},
"reserve_factor": "0.025000000000000000",
"keeper_reward_percentage": "0.020000000000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"borrow_limit": {
"has_max_limit": true,
"maximum_limit": "0.000000000000000000",
"loan_to_value": "0.000000000000000000"
},
"spot_market_id": "btc:usd:30",
"conversion_factor": "100000000",
"interest_rate_model": {
"base_rate_apy": "0.000000000000000000",
"base_multiplier": "0.050000000000000000",
"kink": "0.800000000000000000",
"jump_multiplier": "5.000000000000000000"
},
"reserve_factor": "0.025000000000000000",
"keeper_reward_percentage": "0.020000000000000000"
}
],
"minimum_borrow_usd_value": "10.000000000000000000"
@ -2810,18 +2730,6 @@
"amount": "787"
}
]
},
{
"active": true,
"collateral_type": "erc20/bitgo/wbtc",
"start": "2022-11-11T15:00:00Z",
"end": "2025-11-11T15:00:00Z",
"rewards_per_second": [
{
"denom": "ukava",
"amount": "787"
}
]
}
],
"hard_borrow_reward_periods": [],
@ -3258,16 +3166,6 @@
}
},
"params": null,
"packetfowardmiddleware": {
"params": {
"fee_percentage": "0.000000000000000000"
},
"in_flight_packets": {}
},
"precisebank": {
"balances": [],
"remainder": "0"
},
"pricefeed": {
"params": {
"markets": [
@ -3741,7 +3639,6 @@
}
]
},
"router": {},
"savings": {
"params": {
"supported_denoms": [
@ -3913,8 +3810,7 @@
"params": {
"send_enabled": true,
"receive_enabled": true
},
"total_escrowed": []
}
},
"upgrade": {},
"validatorvesting": null,

View File

@ -837,7 +837,6 @@
"gov_denom": "ukava",
"params": {
"circuit_breaker": false,
"liquidation_block_interval": 500,
"collateral_params": [
{
"auction_size": "50000000000",
@ -2178,23 +2177,6 @@
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000"
},
"params": {
"min_deposit": [
{
"denom": "ukava",
"amount": "10000000"
}
],
"max_deposit_period": "172800s",
"voting_period": "600s",
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000",
"min_initial_deposit_ratio": "0.000000000000000000",
"burn_vote_quorum": false,
"burn_proposal_deposit_prevote": false,
"burn_vote_veto": true
}
},
"hard": {
@ -3000,15 +2982,6 @@
}
},
"params": null,
"packetfowardmiddleware": {
"params": {
"fee_percentage": "0.000000000000000000"
},
"in_flight_packets": {}
},
"precisebank": {
"remainder": "0"
},
"pricefeed": {
"params": {
"markets": [

View File

@ -14,9 +14,9 @@ import (
"strings"
"testing"
"github.com/cometbft/cometbft/crypto/ed25519"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/tests"
@ -813,7 +813,7 @@ func TestKvCLISubmitCommunityPoolSpendProposal(t *testing.T) {
}
func TestKvCLIQueryTxPagination(t *testing.T) {
// Skip until https://github.com/cometbft/cometbft/issues/4432 has been
// Skip until https://github.com/tendermint/tendermint/issues/4432 has been
// resolved and included in a release.
t.SkipNow()

View File

@ -13,13 +13,13 @@ import (
"github.com/stretchr/testify/require"
tmtypes "github.com/cometbft/cometbft/types"
tmtypes "github.com/tendermint/tendermint/types"
"cosmossdk.io/simapp"
clientkeys "github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys"
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/tests"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth"

View File

@ -182,23 +182,6 @@
]
}
},
{
"url": "./out/swagger/kava/precisebank/v1/query.swagger.json",
"tags": {
"rename": {
"Query": "Precisebank"
}
},
"operationIds": {
"rename": [
{
"type": "regex",
"from": "(.*)",
"to": "Precisebank$1"
}
]
}
},
{
"url": "./out/swagger/kava/pricefeed/v1beta1/query.swagger.json",
"tags": {
@ -312,30 +295,6 @@
]
}
},
{
"url": "./client/docs/ethermint-swagger.yml",
"dereference": {
"circular": "ignore"
},
"tags": {
"rename": {
"Query": "Ethermint"
}
},
"operationIds": {
"rename": [
{
"type": "regex",
"from": "(.*)",
"to": "Ethermint$1"
}
]
},
"paths": {
"exclude": [
]
}
},
{
"url": "./client/docs/legacy-swagger.yml",
"dereference": {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,381 +0,0 @@
[
{
"inputs": [
{
"internalType": "string",
"name": "name",
"type": "string"
},
{
"internalType": "string",
"name": "symbol",
"type": "string"
},
{
"internalType": "uint8",
"name": "decimals_",
"type": "uint8"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "burn",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "subtractedValue",
"type": "uint256"
}
],
"name": "decreaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "addedValue",
"type": "uint256"
}
],
"name": "increaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "mint",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -1,74 +0,0 @@
# Kava gRPC Client
The Kava gRPC client is a tool for making gRPC queries on a Kava chain.
## Features
- Easy-to-use gRPC client for the Kava chain.
- Access all query clients for Cosmos and Kava modules using `client.Query` (e.g., `client.Query.Bank.Balance`).
- Utilize utility functions for common queries (e.g., `client.BaseAccount(str)`).
## Usage
### Creating a new client
```go
package main
import (
kavaGrpc "github.com/0glabs/0g-chain/client/grpc"
)
grpcUrl := "https://grpc.kava.io:443"
client, err := kavaGrpc.NewClient(grpcUrl)
if err != nil {
panic(err)
}
```
### Making grpc queries
Query clients for both Cosmos and Kava modules are available via `client.Query`.
Example: Query Cosmos module `x/bank` for address balance
```go
import (
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
)
rsp, err := client.Query.Bank.Balance(context.Background(), &banktypes.QueryBalanceRequest{
Address: "kava19rjk5qmmwywnzfccwzyn02jywgpwjqf60afj92",
Denom: "ukava",
})
```
Example: Query Kava module `x/evmutil` for params
```go
import (
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
)
rsp, err := client.Query.Evmutil.Params(
context.Background(), &evmutiltypes.QueryParamsRequest{},
)
```
#### Query Utilities
Utility functions for common queries are available directly on the client.
Example: Util query to get a base account
```go
kavaAcc := "kava19rjk5qmmwywnzfccwzyn02jywgpwjqf60afj92"
rsp, err := client.BaseAccount(kavaAcc)
if err != nil {
panic(err)
}
fmt.Printf("account sequence for %s: %d\n", kavaAcc, rsp.Sequence)
```
## Query Tests
To test queries, a Kava node is required. Therefore, the e2e tests for the gRPC client queries can be found in the `tests/e2e` directory. Tests for new utility queries should be added as e2e tests under the `test/e2e` directory.

View File

@ -1,50 +0,0 @@
package grpc
import (
"errors"
"github.com/0glabs/0g-chain/client/grpc/query"
"github.com/0glabs/0g-chain/client/grpc/util"
)
// ZgChainGrpcClient enables the usage of kava grpc query clients and query utils
type ZgChainGrpcClient struct {
config ZgChainGrpcClientConfig
// Query clients for cosmos and kava modules
Query *query.QueryClient
// Utils for common queries (ie fetch an unpacked BaseAccount)
*util.Util
}
// ZgChainGrpcClientConfig is a configuration struct for a ZgChainGrpcClient
type ZgChainGrpcClientConfig struct {
// note: add future config options here
}
// NewClient creates a new ZgChainGrpcClient via a grpc url
func NewClient(grpcUrl string) (*ZgChainGrpcClient, error) {
return NewClientWithConfig(grpcUrl, NewDefaultConfig())
}
// NewClientWithConfig creates a new ZgChainGrpcClient via a grpc url and config
func NewClientWithConfig(grpcUrl string, config ZgChainGrpcClientConfig) (*ZgChainGrpcClient, error) {
if grpcUrl == "" {
return nil, errors.New("grpc url cannot be empty")
}
query, error := query.NewQueryClient(grpcUrl)
if error != nil {
return nil, error
}
client := &ZgChainGrpcClient{
Query: query,
Util: util.NewUtil(query),
config: config,
}
return client, nil
}
func NewDefaultConfig() ZgChainGrpcClientConfig {
return ZgChainGrpcClientConfig{}
}

View File

@ -1,15 +0,0 @@
package grpc_test
import (
"testing"
"github.com/0glabs/0g-chain/client/grpc"
"github.com/stretchr/testify/require"
)
func TestNewClient_InvalidEndpoint(t *testing.T) {
_, err := grpc.NewClient("invalid-url")
require.ErrorContains(t, err, "unknown grpc url scheme")
_, err = grpc.NewClient("")
require.ErrorContains(t, err, "grpc url cannot be empty")
}

View File

@ -1,52 +0,0 @@
package query
import (
"context"
"crypto/tls"
"fmt"
"net/url"
"github.com/0glabs/0g-chain/app"
"github.com/cosmos/cosmos-sdk/codec"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
// newGrpcConnection parses a GRPC endpoint and creates a connection to it
func newGrpcConnection(ctx context.Context, endpoint string) (*grpc.ClientConn, error) {
grpcUrl, err := url.Parse(endpoint)
if err != nil {
return nil, fmt.Errorf("failed to parse grpc connection \"%s\": %v", endpoint, err)
}
var creds credentials.TransportCredentials
switch grpcUrl.Scheme {
case "http":
creds = insecure.NewCredentials()
case "https":
creds = credentials.NewTLS(&tls.Config{})
default:
return nil, fmt.Errorf("unknown grpc url scheme: %s", grpcUrl.Scheme)
}
// Ensure the encoding config is set up correctly with the query client
// otherwise it will produce panics like:
// invalid Go type math.Int for field ...
encodingConfig := app.MakeEncodingConfig()
protoCodec := codec.NewProtoCodec(encodingConfig.InterfaceRegistry)
grpcCodec := protoCodec.GRPCCodec()
secureOpt := grpc.WithTransportCredentials(creds)
grpcConn, err := grpc.DialContext(
ctx,
grpcUrl.Host,
secureOpt,
grpc.WithDefaultCallOptions(grpc.ForceCodec(grpcCodec)),
)
if err != nil {
return nil, err
}
return grpcConn, nil
}

View File

@ -1,7 +0,0 @@
/*
The query package includes Cosmos and Kava gRPC query clients.
To ensure that the `QueryClient` stays updated, add new module query clients
to the `QueryClient` whenever new modules with grpc queries are added to the Kava app.
*/
package query

View File

@ -1,108 +0,0 @@
package query
import (
"context"
"github.com/cosmos/cosmos-sdk/client/grpc/tmservice"
txtypes "github.com/cosmos/cosmos-sdk/types/tx"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
authz "github.com/cosmos/cosmos-sdk/x/authz"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
govv1types "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
govv1beta1types "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
paramstypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal"
slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
bep3types "github.com/0glabs/0g-chain/x/bep3/types"
committeetypes "github.com/0glabs/0g-chain/x/committee/types"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
)
// QueryClient is a wrapper with all Cosmos and Kava grpc query clients
type QueryClient struct {
// cosmos-sdk query clients
Tm tmservice.ServiceClient
Tx txtypes.ServiceClient
Auth authtypes.QueryClient
Authz authz.QueryClient
Bank banktypes.QueryClient
Distribution disttypes.QueryClient
Evidence evidencetypes.QueryClient
Gov govv1types.QueryClient
GovBeta govv1beta1types.QueryClient
Mint minttypes.QueryClient
Params paramstypes.QueryClient
Slashing slashingtypes.QueryClient
Staking stakingtypes.QueryClient
Upgrade upgradetypes.QueryClient
Consensus consensustypes.QueryClient
// 3rd party query clients
Evm evmtypes.QueryClient
Feemarket feemarkettypes.QueryClient
IbcClient ibcclienttypes.QueryClient
IbcTransfer ibctransfertypes.QueryClient
// kava module query clients
Bep3 bep3types.QueryClient
Committee committeetypes.QueryClient
Evmutil evmutiltypes.QueryClient
Issuance issuancetypes.QueryClient
Pricefeed pricefeedtypes.QueryClient
Precisebank precisebanktypes.QueryClient
}
// NewQueryClient creates a new QueryClient and initializes all the module query clients
func NewQueryClient(grpcEndpoint string) (*QueryClient, error) {
conn, err := newGrpcConnection(context.Background(), grpcEndpoint)
if err != nil {
return &QueryClient{}, err
}
client := &QueryClient{
Tm: tmservice.NewServiceClient(conn),
Tx: txtypes.NewServiceClient(conn),
Auth: authtypes.NewQueryClient(conn),
Authz: authz.NewQueryClient(conn),
Bank: banktypes.NewQueryClient(conn),
Distribution: disttypes.NewQueryClient(conn),
Evidence: evidencetypes.NewQueryClient(conn),
Gov: govv1types.NewQueryClient(conn),
GovBeta: govv1beta1types.NewQueryClient(conn),
Mint: minttypes.NewQueryClient(conn),
Params: paramstypes.NewQueryClient(conn),
Slashing: slashingtypes.NewQueryClient(conn),
Staking: stakingtypes.NewQueryClient(conn),
Upgrade: upgradetypes.NewQueryClient(conn),
Consensus: consensustypes.NewQueryClient(conn),
Evm: evmtypes.NewQueryClient(conn),
Feemarket: feemarkettypes.NewQueryClient(conn),
IbcClient: ibcclienttypes.NewQueryClient(conn),
IbcTransfer: ibctransfertypes.NewQueryClient(conn),
Bep3: bep3types.NewQueryClient(conn),
Committee: committeetypes.NewQueryClient(conn),
Evmutil: evmutiltypes.NewQueryClient(conn),
Issuance: issuancetypes.NewQueryClient(conn),
Pricefeed: pricefeedtypes.NewQueryClient(conn),
Precisebank: precisebanktypes.NewQueryClient(conn),
}
return client, nil
}

View File

@ -1,64 +0,0 @@
package query_test
import (
"testing"
"github.com/0glabs/0g-chain/client/grpc/query"
"github.com/stretchr/testify/require"
)
func TestNewQueryClient_InvalidGprc(t *testing.T) {
t.Run("valid connection", func(t *testing.T) {
conn, err := query.NewQueryClient("http://localhost:1234")
require.NoError(t, err)
require.NotNil(t, conn)
})
t.Run("non-empty url", func(t *testing.T) {
_, err := query.NewQueryClient("")
require.ErrorContains(t, err, "unknown grpc url scheme")
})
t.Run("invalid url scheme", func(t *testing.T) {
_, err := query.NewQueryClient("ftp://localhost:1234")
require.ErrorContains(t, err, "unknown grpc url scheme")
})
}
func TestNewQueryClient_ValidClient(t *testing.T) {
t.Run("all clients are created", func(t *testing.T) {
client, err := query.NewQueryClient("http://localhost:1234")
require.NoError(t, err)
require.NotNil(t, client)
// validate cosmos clients
require.NotNil(t, client.Tm)
require.NotNil(t, client.Tx)
require.NotNil(t, client.Auth)
require.NotNil(t, client.Authz)
require.NotNil(t, client.Bank)
require.NotNil(t, client.Distribution)
require.NotNil(t, client.Evidence)
require.NotNil(t, client.Gov)
require.NotNil(t, client.GovBeta)
require.NotNil(t, client.Mint)
require.NotNil(t, client.Params)
require.NotNil(t, client.Slashing)
require.NotNil(t, client.Staking)
require.NotNil(t, client.Upgrade)
require.NotNil(t, client.Consensus)
// validate 3rd party clients
require.NotNil(t, client.Evm)
require.NotNil(t, client.Feemarket)
require.NotNil(t, client.IbcClient)
require.NotNil(t, client.IbcTransfer)
// validate kava clients
require.NotNil(t, client.Bep3)
require.NotNil(t, client.Committee)
require.NotNil(t, client.Evmutil)
require.NotNil(t, client.Issuance)
require.NotNil(t, client.Pricefeed)
})
}

View File

@ -1,41 +0,0 @@
package util
import (
"context"
"fmt"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
)
// Account fetches an account via an address and returns the unpacked account
func (u *Util) Account(addr string) (authtypes.AccountI, error) {
res, err := u.query.Auth.Account(context.Background(), &authtypes.QueryAccountRequest{
Address: addr,
})
if err != nil {
return nil, fmt.Errorf("failed to fetch account: %w", err)
}
var acc authtypes.AccountI
err = u.encodingConfig.Marshaler.UnpackAny(res.Account, &acc)
if err != nil {
return nil, fmt.Errorf("failed to unpack account: %w", err)
}
return acc, nil
}
// BaseAccount fetches a base account via an address or returns an error if
// the account is not a base account
func (u *Util) BaseAccount(addr string) (authtypes.BaseAccount, error) {
acc, err := u.Account(addr)
if err != nil {
return authtypes.BaseAccount{}, err
}
bAcc, ok := acc.(*authtypes.BaseAccount)
if !ok {
return authtypes.BaseAccount{}, fmt.Errorf("%s is not a base account", addr)
}
return *bAcc, nil
}

View File

@ -1,8 +0,0 @@
/*
The util package contains utility functions for the Kava gRPC client.
For example, `account.go` includes account-related query helpers.
In this file, utilities such as `client.Util.BaseAccount(addr)` is exposed to
query an account and return an unpacked `BaseAccount` instance.
*/
package util

View File

@ -1,32 +0,0 @@
package util
import (
"context"
"strconv"
grpctypes "github.com/cosmos/cosmos-sdk/types/grpc"
"google.golang.org/grpc/metadata"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"
query "github.com/0glabs/0g-chain/client/grpc/query"
)
// Util contains utility functions for the Kava gRPC client
type Util struct {
query *query.QueryClient
encodingConfig params.EncodingConfig
}
// NewUtil creates a new Util instance
func NewUtil(query *query.QueryClient) *Util {
return &Util{
query: query,
encodingConfig: app.MakeEncodingConfig(),
}
}
func (u *Util) CtxAtHeight(height int64) context.Context {
heightStr := strconv.FormatInt(height, 10)
return metadata.AppendToOutgoingContext(context.Background(), grpctypes.GRPCBlockHeightHeader, heightStr)
}

View File

@ -1,14 +1,14 @@
package main
package client
import (
"bufio"
"github.com/cometbft/cometbft/libs/cli"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/keys"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/cli"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
ethclient "github.com/evmos/ethermint/client"
@ -18,9 +18,9 @@ import (
var ethFlag = "eth"
// keyCommands registers a sub-tree of commands to interact with
// KeyCommands registers a sub-tree of commands to interact with
// local private key storage.
func keyCommands(defaultNodeHome string) *cobra.Command {
func KeyCommands(defaultNodeHome string) *cobra.Command {
cmd := &cobra.Command{
Use: "keys",
Short: "Manage your application's keys",
@ -52,7 +52,7 @@ The pass backend requires GnuPG: https://gnupg.org/
addCmd := keys.AddKeyCommand()
addCmd.Flags().Bool(ethFlag, false, "use default evm coin-type (60) and key signing algorithm (\"eth_secp256k1\")")
algoFlag := addCmd.Flag(flags.FlagKeyType)
algoFlag := addCmd.Flag(flags.FlagKeyAlgorithm)
algoFlag.DefValue = string(hd.EthSecp256k1Type)
err := algoFlag.Value.Set(string(hd.EthSecp256k1Type))
if err != nil {
@ -107,7 +107,7 @@ func runAddCmd(cmd *cobra.Command, args []string) error {
eth, _ := cmd.Flags().GetBool(ethFlag)
if eth {
cmd.Print("eth flag specified: using coin-type 60 and signing algorithm eth_secp256k1\n")
cmd.Flags().Set(flags.FlagKeyType, string(hd.EthSecp256k1Type))
cmd.Flags().Set(flags.FlagKeyAlgorithm, string(hd.EthSecp256k1Type))
cmd.Flags().Set("coin-type", "60")
}

View File

@ -13,7 +13,7 @@ import (
"strconv"
"strings"
"github.com/cometbft/cometbft/types"
"github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
@ -132,7 +132,7 @@ func (br BaseReq) ValidateBasic(w http.ResponseWriter) bool {
return true
}
// ReadRESTReq reads and unmarshals a Request's body to the BaseReq struct.
// ReadRESTReq reads and unmarshals a Request's body to the the BaseReq struct.
// Writes an error response to ResponseWriter and returns false if errors occurred.
func ReadRESTReq(w http.ResponseWriter, r *http.Request, cdc *codec.LegacyAmino, req interface{}) bool {
body, err := io.ReadAll(r.Body)

View File

@ -12,7 +12,6 @@ import (
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
simappparams "cosmossdk.io/simapp/params"
"github.com/0glabs/0g-chain/client/rest"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
@ -20,6 +19,7 @@ import (
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
simappparams "github.com/cosmos/cosmos-sdk/simapp/params"
"github.com/cosmos/cosmos-sdk/types"
)

View File

@ -7,10 +7,6 @@ import (
"path/filepath"
"strings"
"github.com/Kava-Labs/opendb"
cometbftdb "github.com/cometbft/cometbft-db"
"github.com/cometbft/cometbft/libs/log"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/server"
@ -23,6 +19,8 @@ import (
ethermintflags "github.com/evmos/ethermint/server/flags"
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/log"
db "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"
@ -31,7 +29,6 @@ import (
const (
flagMempoolEnableAuth = "mempool.enable-authentication"
flagMempoolAuthAddresses = "mempool.authorized-addresses"
flagSkipLoadLatest = "skip-load-latest"
)
// appCreator holds functions used by the sdk server to control the 0g-chain app.
@ -43,7 +40,7 @@ type appCreator struct {
// newApp loads config from AppOptions and returns a new app.
func (ac appCreator) newApp(
logger log.Logger,
db cometbftdb.DB,
db db.DB,
traceStore io.Writer,
appOpts servertypes.AppOptions,
) servertypes.Application {
@ -64,7 +61,7 @@ func (ac appCreator) newApp(
homeDir := cast.ToString(appOpts.Get(flags.FlagHome))
snapshotDir := filepath.Join(homeDir, "data", "snapshots") // TODO can these directory names be imported from somewhere?
snapshotDB, err := opendb.OpenDB(appOpts, snapshotDir, "metadata", server.GetAppDBBackend(appOpts))
snapshotDB, err := sdk.NewLevelDB("metadata", snapshotDir)
if err != nil {
panic(err)
}
@ -91,26 +88,10 @@ func (ac appCreator) newApp(
cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent)),
)
// Setup chainId
chainID := cast.ToString(appOpts.Get(flags.FlagChainID))
if len(chainID) == 0 {
// fallback to genesis chain-id
appGenesis, err := tmtypes.GenesisDocFromFile(filepath.Join(homeDir, "config", "genesis.json"))
if err != nil {
panic(err)
}
chainID = appGenesis.ChainID
}
skipLoadLatest := false
if appOpts.Get(flagSkipLoadLatest) != nil {
skipLoadLatest = cast.ToBool(appOpts.Get(flagSkipLoadLatest))
}
return app.NewApp(
logger, db, homeDir, traceStore, ac.encodingConfig,
app.Options{
SkipLoadLatest: skipLoadLatest,
SkipLoadLatest: false,
SkipUpgradeHeights: skipUpgradeHeights,
SkipGenesisInvariants: cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)),
InvariantCheckPeriod: cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
@ -131,20 +112,18 @@ func (ac appCreator) newApp(
baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))),
baseapp.SetIAVLDisableFastNode(cast.ToBool(iavlDisableFastNode)),
baseapp.SetIAVLLazyLoading(cast.ToBool(appOpts.Get(server.FlagIAVLLazyLoading))),
baseapp.SetChainID(chainID),
)
}
// appExport writes out an app's state to json.
func (ac appCreator) appExport(
logger log.Logger,
db cometbftdb.DB,
db db.DB,
traceStore io.Writer,
height int64,
forZeroHeight bool,
jailAllowedAddrs []string,
appOpts servertypes.AppOptions,
modulesToExport []string,
) (servertypes.ExportedApp, error) {
homePath, ok := appOpts.Get(flags.FlagHome).(string)
if !ok || homePath == "" {
@ -165,7 +144,7 @@ func (ac appCreator) appExport(
} else {
tempApp = app.NewApp(logger, db, homePath, traceStore, ac.encodingConfig, options)
}
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport)
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs)
}
// addStartCmdFlags adds flags to the server start command.

View File

@ -4,10 +4,10 @@ import (
"encoding/json"
"fmt"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/version"
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
"github.com/spf13/cobra"
tmtypes "github.com/tendermint/tendermint/types"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"

View File

@ -1,53 +0,0 @@
package iavlviewer
import (
"crypto/sha256"
"fmt"
"github.com/cosmos/iavl"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
// newDataCmd builds the "data" subcommand, which prints every key in the
// iavl tree (with values hashed) plus the tree's root hash and size.
func newDataCmd(opts ethermintserver.StartOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "data <prefix> [version number]",
		Short: "View all keys, hash, & size of tree.",
		Args:  cobra.RangeArgs(1, 2),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Optional second argument selects the tree version; 0 means latest.
			version := 0
			if len(args) == 2 {
				v, err := parseVersion(args[1])
				if err != nil {
					return err
				}
				version = v
			}
			tree, err := openPrefixTree(opts, cmd, args[0], version)
			if err != nil {
				return err
			}
			printKeys(tree)
			fmt.Printf("Hash: %X\n", tree.Hash())
			fmt.Printf("Size: %X\n", tree.Size())
			return nil
		},
	}
}
// printKeys walks the entire tree and prints each key together with the
// sha256 digest of its value, so two trees can be diffed by inspection.
func printKeys(tree *iavl.MutableTree) {
	fmt.Println("Printing all keys with hashed values (to detect diff)")
	tree.Iterate(func(key, value []byte) bool { //nolint:errcheck
		digest := sha256.Sum256(value)
		fmt.Printf(" %s\n %X\n", parseWeaveKey(key), digest)
		// Returning false tells Iterate to continue over the whole tree.
		return false
	})
}

View File

@ -1,38 +0,0 @@
package iavlviewer
import (
"fmt"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
// newHashCmd builds the "hash" subcommand, which prints only the root hash
// of the iavl tree at the requested version.
func newHashCmd(opts ethermintserver.StartOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "hash <prefix> [version number]",
		Short: "Print the root hash of the iavl tree.",
		Args:  cobra.RangeArgs(1, 2),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Optional second argument selects the tree version; 0 means latest.
			version := 0
			if len(args) == 2 {
				v, err := parseVersion(args[1])
				if err != nil {
					return err
				}
				version = v
			}
			tree, err := openPrefixTree(opts, cmd, args[0], version)
			if err != nil {
				return err
			}
			fmt.Printf("Hash: %X\n", tree.Hash())
			return nil
		},
	}
}

View File

@ -1,86 +0,0 @@
package iavlviewer
import (
"fmt"
"os"
"strconv"
"cosmossdk.io/log"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/store/wrapper"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
"github.com/cosmos/iavl"
iavldb "github.com/cosmos/iavl/db"
)
const (
	// DefaultCacheSize is the number of iavl tree nodes kept in the
	// in-memory cache when loading a tree for inspection.
	DefaultCacheSize int = 10000
)
// NewCmd returns the root "iavlviewer" command with all of its inspection
// subcommands (data, hash, shape, versions) attached.
func NewCmd(opts ethermintserver.StartOptions) *cobra.Command {
	root := &cobra.Command{
		Use:   "iavlviewer <data|hash|shape|versions> <prefix> [version number]",
		Short: "Output various data, hashes, and calculations for an iavl tree",
	}
	root.AddCommand(
		newDataCmd(opts),
		newHashCmd(opts),
		newShapeCmd(opts),
		newVersionsCmd(opts),
	)
	return root
}
// parseVersion converts a version argument into an int, returning a
// descriptive error when the argument is not a valid number.
func parseVersion(arg string) (int, error) {
	if version, err := strconv.Atoi(arg); err == nil {
		return version, nil
	}
	return 0, fmt.Errorf("invalid version number: '%s'", arg)
}
// openPrefixTree opens the application database for the configured home
// directory and loads the iavl tree stored under the given prefix at the
// requested version (0 = latest).
//
// NOTE(review): the database handle is closed by the defer below when this
// function RETURNS, i.e. before the caller ever uses the returned tree.
// Any tree operation that still needs to read nodes from disk afterwards
// would hit a closed DB — confirm the tree is fully usable after Close, or
// restructure so the caller owns the DB lifetime.
func openPrefixTree(opts ethermintserver.StartOptions, cmd *cobra.Command, prefix string, version int) (*iavl.MutableTree, error) {
	clientCtx := client.GetClientContextFromCmd(cmd)
	ctx := server.GetServerContextFromCmd(cmd)
	ctx.Config.SetRoot(clientCtx.HomeDir)
	// Open the application DB with the backend configured in the server context.
	db, err := opts.DBOpener(ctx.Viper, clientCtx.HomeDir, server.GetAppDBBackend(ctx.Viper))
	if err != nil {
		return nil, fmt.Errorf("failed to open database at %s: %s", clientCtx.HomeDir, err)
	}
	defer func() {
		if err := db.Close(); err != nil {
			ctx.Logger.Error("error closing db", "error", err.Error())
		}
	}()
	// Wrap the cometbft-style DB so the cosmos iavl library can consume it.
	cosmosdb := wrapper.NewCosmosDB(db)
	tree, err := readTree(cosmosdb, version, []byte(prefix))
	if err != nil {
		return nil, fmt.Errorf("failed to read tree with prefix %s: %s", prefix, err)
	}
	return tree, nil
}
// readTree loads the iavl tree stored under prefix from db.
// If version is 0, the latest committed version is loaded; otherwise the
// named version is loaded. The prefix selects which iavl tree to read —
// the viewer always sets one because the multistore namespaces its trees.
func readTree(db dbm.DB, version int, prefix []byte) (*iavl.MutableTree, error) {
	if len(prefix) > 0 {
		db = dbm.NewPrefixDB(db, prefix)
	}
	tree := iavl.NewMutableTree(iavldb.NewWrapper(db), DefaultCacheSize, false, log.NewLogger(os.Stdout))
	latestVersion, err := tree.LoadVersion(int64(version))
	if err != nil {
		return nil, err
	}
	fmt.Printf("Latest version: %d\n", latestVersion)
	fmt.Printf("Got version: %d\n", version)
	return tree, err
}

View File

@ -1,47 +0,0 @@
package iavlviewer
import (
"fmt"
"strings"
"github.com/cosmos/iavl"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
// newShapeCmd builds the "shape" subcommand, which renders the node
// structure of the iavl tree at the requested version.
func newShapeCmd(opts ethermintserver.StartOptions) *cobra.Command {
	return &cobra.Command{
		Use:   "shape <prefix> [version number]",
		Short: "View shape of iavl tree.",
		Args:  cobra.RangeArgs(1, 2),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Optional second argument selects the tree version; 0 means latest.
			version := 0
			if len(args) == 2 {
				v, err := parseVersion(args[1])
				if err != nil {
					return err
				}
				version = v
			}
			tree, err := openPrefixTree(opts, cmd, args[0], version)
			if err != nil {
				return err
			}
			printShape(tree)
			return nil
		},
	}
}
// printShape renders the structure of the iavl tree, one line per node,
// and writes it to stdout. Rendering errors are reported instead of being
// silently discarded (the previous code dropped the error with a TODO).
func printShape(tree *iavl.MutableTree) {
	shape, err := tree.RenderShape(" ", nodeEncoder)
	if err != nil {
		fmt.Printf("failed to render shape: %s\n", err)
		return
	}
	fmt.Println(strings.Join(shape, "\n"))
}

View File

@ -1,74 +0,0 @@
package iavlviewer
import (
"bytes"
"encoding/hex"
"fmt"
"strings"
"github.com/cosmos/iavl"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
// newVersionsCmd builds the "versions" subcommand, which lists every
// version of the iavl tree available in the database.
func newVersionsCmd(opts ethermintserver.StartOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "versions <prefix>",
		Short: "Print all versions of iavl tree",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			prefix := args[0]
			// Load the latest version (0) rather than the previously
			// hard-coded version 15, which looked like leftover debugging
			// code and fails on databases that do not contain exactly that
			// version. AvailableVersions does not depend on which version
			// is loaded.
			tree, err := openPrefixTree(opts, cmd, prefix, 0)
			if err != nil {
				return err
			}
			printVersions(tree)
			return nil
		},
	}
	return cmd
}
// printVersions writes every version number known to the tree to stdout.
func printVersions(tree *iavl.MutableTree) {
	fmt.Println("Available versions:")
	for _, version := range tree.AvailableVersions() {
		fmt.Printf(" %d\n", version)
	}
}
// parseWeaveKey assumes a separating : where all in front should be ascii,
// and all afterwards may be ascii or binary.
func parseWeaveKey(key []byte) string {
	cut := bytes.IndexRune(key, ':')
	if cut < 0 {
		// No separator: encode the whole key as one unit.
		return encodeID(key)
	}
	return fmt.Sprintf("%s:%s", encodeID(key[:cut]), encodeID(key[cut+1:]))
}
// encodeID casts to a string if every byte is printable ascii, and
// hex-encodes (uppercase) otherwise.
func encodeID(id []byte) string {
	printable := true
	for _, b := range id {
		if b < 0x20 || b >= 0x80 {
			printable = false
			break
		}
	}
	if printable {
		return string(id)
	}
	return strings.ToUpper(hex.EncodeToString(id))
}
// nodeEncoder renders one tree node for shape output: "-<depth>" for inner
// nodes, "*<depth>" for leaves, followed by the decoded key (or <nil>).
func nodeEncoder(id []byte, depth int, isLeaf bool) string {
	marker := "-"
	if isLeaf {
		marker = "*"
	}
	if len(id) == 0 {
		return fmt.Sprintf("%s%d <nil>", marker, depth)
	}
	return fmt.Sprintf("%s%d %s", marker, depth, parseWeaveKey(id))
}

View File

@ -11,7 +11,9 @@ import (
func main() {
chaincfg.SetSDKConfig().Seal()
rootCmd := NewRootCmd()
if err := svrcmd.Execute(rootCmd, chaincfg.EnvPrefix, chaincfg.DefaultNodeHome); err != nil {
switch e := err.(type) {
case server.ErrorCode:

View File

@ -1,216 +0,0 @@
//go:build rocksdb
// +build rocksdb
package rocksdb
import (
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/server"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"github.com/Kava-Labs/opendb"
)
const (
	// flagPrintStatsInterval controls how often compaction progress stats
	// are printed while a manual compaction is running.
	flagPrintStatsInterval = "print-stats-interval"
)

// allowedDBs lists the database names this command is willing to compact.
var allowedDBs = []string{"application", "blockstore", "state"}
// CompactRocksDBCmd returns the "compact" command, which performs a manual
// RocksDB compaction on one of the node's databases. The node must be
// stopped before running it.
func CompactRocksDBCmd() *cobra.Command {
	compactCmd := &cobra.Command{
		Use: fmt.Sprintf(
			"compact <%s>",
			strings.Join(allowedDBs, "|"),
		),
		Short: "force compacts RocksDB",
		Long: `This is a utility command that performs a force compaction on the state or
blockstore. This should only be run once the node has stopped.`,
		Args: cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
			// Read and parse the stats-reporting interval before anything else.
			rawInterval, err := cmd.Flags().GetString(flagPrintStatsInterval)
			if err != nil {
				return err
			}
			statsInterval, err := time.ParseDuration(rawInterval)
			if err != nil {
				return fmt.Errorf("failed to parse duration for --%s: %w", flagPrintStatsInterval, err)
			}
			clientCtx := client.GetClientContextFromCmd(cmd)
			ctx := server.GetServerContextFromCmd(cmd)
			// Compaction only makes sense against a rocksdb-backed node.
			if server.GetAppDBBackend(ctx.Viper) != "rocksdb" {
				return errors.New("compaction is currently only supported with rocksdb")
			}
			// Restrict the target to the known database names.
			if !slices.Contains(allowedDBs, args[0]) {
				return fmt.Errorf(
					"invalid db name, must be one of the following: %s",
					strings.Join(allowedDBs, ", "),
				)
			}
			return compactRocksDBs(clientCtx.HomeDir, logger, args[0], statsInterval)
		},
	}
	compactCmd.Flags().String(flagPrintStatsInterval, "1m", "duration string for how often to print compaction stats")
	return compactCmd
}
// compactRocksDBs performs a manual compaction on the given db.
// It opens the database with its most recently persisted options, starts
// periodic stats reporting and a Ctrl+C handler, then blocks until the
// full-range compaction finishes.
func compactRocksDBs(
	rootDir string,
	logger log.Logger,
	dbName string,
	statsInterval time.Duration,
) error {
	dbPath := filepath.Join(rootDir, "data", dbName+".db")
	dbOpts, cfOpts, err := opendb.LoadLatestOptions(dbPath)
	if err != nil {
		return err
	}
	logger.Info("opening db", "path", dbPath)
	db, _, err := grocksdb.OpenDbColumnFamilies(
		dbOpts,
		dbPath,
		[]string{opendb.DefaultColumnFamilyName},
		[]*grocksdb.Options{cfOpts},
	)
	// Previously there were two consecutive error checks here; the second
	// (with the informative log/wrap) was unreachable dead code. Keep the
	// informative one as the single check.
	if err != nil {
		logger.Error("failed to initialize cometbft db", "path", dbPath, "err", err)
		return fmt.Errorf("failed to open db %s %w", dbPath, err)
	}
	defer db.Close()
	logColumnFamilyMetadata(db, logger)
	logger.Info("starting compaction...", "db", dbPath)
	// done stops the stats goroutine; the signal handler may also write to
	// it when the user interrupts the compaction.
	done := make(chan bool)
	registerSignalHandler(db, logger, done)
	startCompactionStatsOutput(db, logger, done, statsInterval)
	// Actually run the compaction; a nil Start/Limit compacts the full range.
	db.CompactRange(grocksdb.Range{Start: nil, Limit: nil})
	logger.Info("done compaction", "db", dbPath)
	done <- true
	return nil
}
// bytesToMB converts a byte count into (binary) megabytes.
func bytesToMB(bytes uint64) float64 {
	const bytesPerMB = 1024 * 1024
	return float64(bytes) / bytesPerMB
}
// logColumnFamilyMetadata outputs the column family summary followed by
// per-level SST file counts and sizes.
func logColumnFamilyMetadata(db *grocksdb.DB, logger log.Logger) {
	meta := db.GetColumnFamilyMetadata()
	logger.Info(
		"column family metadata",
		"name", meta.Name(),
		"sizeMB", bytesToMB(meta.Size()),
		"fileCount", meta.FileCount(),
		"levels", len(meta.LevelMetas()),
	)
	// One log line per LSM level with its file count and total size.
	for _, level := range meta.LevelMetas() {
		logger.Info(
			fmt.Sprintf("level %d metadata", level.Level()),
			"sstMetas", strconv.Itoa(len(level.SstMetas())),
			"sizeMB", strconv.FormatFloat(bytesToMB(level.Size()), 'f', 2, 64),
		)
	}
}
// startCompactionStatsOutput starts a goroutine that outputs compaction stats
// every statsInterval until done is signalled.
// After done fires the goroutine keeps looping (without touching the db) so
// that later writes to done never block a sender; as a consequence the
// goroutine and its ticker live until the process exits, which is acceptable
// for this short-lived CLI command.
func startCompactionStatsOutput(
	db *grocksdb.DB,
	logger log.Logger,
	done chan bool,
	statsInterval time.Duration,
) {
	go func() {
		ticker := time.NewTicker(statsInterval)
		isClosed := false
		for {
			select {
			// Make sure we don't try reading from the closed db.
			// We continue the loop so that we can make sure the done channel
			// does not stall indefinitely from repeated writes and no reader.
			case <-done:
				logger.Debug("stopping compaction stats output")
				isClosed = true
			case <-ticker.C:
				// Only query the db while it is still open.
				if !isClosed {
					compactionStats := db.GetProperty("rocksdb.stats")
					fmt.Printf("%s\n", compactionStats)
				}
			}
		}
	}()
}
// registerSignalHandler registers a signal handler that will cancel any running
// compaction when the user presses Ctrl+C (SIGINT) or sends SIGTERM.
// After disabling manual compaction it signals done so the stats goroutine
// stops reading from the db.
func registerSignalHandler(
	db *grocksdb.DB,
	logger log.Logger,
	done chan bool,
) {
	// https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ
	// Q: Can I close the DB when a manual compaction is in progress?
	//
	// A: No, it's not safe to do that. However, you call
	// CancelAllBackgroundWork(db, true) in another thread to abort the
	// running compactions, so that you can close the DB sooner. Since
	// 6.5, you can also speed it up using
	// DB::DisableManualCompaction().
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		// Handle every delivered signal; repeated Ctrl+C just repeats the abort.
		for sig := range c {
			logger.Info(fmt.Sprintf(
				"received %s signal, aborting running compaction... Do NOT kill me before compaction is cancelled. I will exit when compaction is cancelled.",
				sig,
			))
			db.DisableManualCompaction()
			logger.Info("manual compaction disabled")
			// Stop the logging
			done <- true
		}
	}()
}

View File

@ -1,19 +0,0 @@
//go:build rocksdb
// +build rocksdb
package rocksdb
import (
"github.com/spf13/cobra"
)
// RocksDBCmd defines the root command containing subcommands that assist in
// rocksdb related tasks such as manual compaction.
// This variant is compiled only when the rocksdb build tag is set.
var RocksDBCmd = &cobra.Command{
	Use:   "rocksdb",
	Short: "RocksDB util commands",
}

// init attaches the manual-compaction subcommand to the root rocksdb command.
func init() {
	RocksDBCmd.AddCommand(CompactRocksDBCmd())
}

View File

@ -1,14 +0,0 @@
//go:build !rocksdb
// +build !rocksdb
package rocksdb
import (
"github.com/spf13/cobra"
)
// RocksDBCmd defines the root command when the rocksdb build tag is not set.
// It exists so the CLI always exposes a "rocksdb" command; without the build
// tag it carries no subcommands and only explains why it is disabled.
var RocksDBCmd = &cobra.Command{
	Use:   "rocksdb",
	Short: "RocksDB util commands, disabled because rocksdb build tag not set",
}

View File

@ -1,38 +1,32 @@
package main
import (
"fmt"
"os"
"path/filepath"
dbm "github.com/cometbft/cometbft-db"
tmcfg "github.com/cometbft/cometbft/config"
tmcli "github.com/cometbft/cometbft/libs/cli"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/config"
"github.com/cosmos/cosmos-sdk/client/debug"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
"github.com/cosmos/cosmos-sdk/server"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/cosmos/cosmos-sdk/x/genutil"
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
ethermintclient "github.com/evmos/ethermint/client"
"github.com/evmos/ethermint/crypto/hd"
ethermintserver "github.com/evmos/ethermint/server"
servercfg "github.com/evmos/ethermint/server/config"
"github.com/spf13/cobra"
tmcfg "github.com/tendermint/tendermint/config"
tmcli "github.com/tendermint/tendermint/libs/cli"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"
"github.com/0glabs/0g-chain/chaincfg"
"github.com/0glabs/0g-chain/cmd/0gchaind/iavlviewer"
"github.com/0glabs/0g-chain/cmd/0gchaind/rocksdb"
kavaclient "github.com/0glabs/0g-chain/client"
"github.com/0glabs/0g-chain/cmd/opendb"
"github.com/0glabs/0g-chain/crypto/vrf"
"github.com/Kava-Labs/opendb"
)
func customKeyringOptions() keyring.Option {
@ -52,10 +46,11 @@ func NewRootCmd() *cobra.Command {
WithLegacyAmino(encodingConfig.Amino).
WithInput(os.Stdin).
WithAccountRetriever(types.AccountRetriever{}).
WithBroadcastMode(flags.FlagBroadcastMode).
WithBroadcastMode(flags.BroadcastBlock).
WithHomeDir(chaincfg.DefaultNodeHome).
WithKeyringOptions(hd.EthSecp256k1Option()).
WithKeyringOptions(customKeyringOptions()).
WithViper(chaincfg.EnvPrefix)
rootCmd := &cobra.Command{
Use: chaincfg.AppName,
Short: "Daemon and CLI for the 0g-chain blockchain.",
@ -89,29 +84,18 @@ func NewRootCmd() *cobra.Command {
}
addSubCmds(rootCmd, encodingConfig, chaincfg.DefaultNodeHome)
return rootCmd
}
// dbOpener is a function to open `application.db`, potentially with customized options.
// dbOpener sets dataDir to "data", dbName to "application" and calls generic OpenDB function.
func dbOpener(opts servertypes.AppOptions, rootDir string, backend dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(rootDir, "data")
return opendb.OpenDB(opts, dataDir, "application", backend)
}
// addSubCmds registers all the sub commands used by kava.
// addSubCmds registers all the sub commands used by 0g-chain.
func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, defaultNodeHome string) {
gentxModule, ok := app.ModuleBasics[genutiltypes.ModuleName].(genutil.AppModuleBasic)
if !ok {
panic(fmt.Errorf("expected %s module to be an instance of type %T", genutiltypes.ModuleName, genutil.AppModuleBasic{}))
}
rootCmd.AddCommand(
StatusCommand(),
ethermintclient.ValidateChainID(
genutilcli.InitCmd(app.ModuleBasics, defaultNodeHome),
),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome, gentxModule.GenTxValidator),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome),
AssertInvariantsCmd(encodingConfig),
genutilcli.GenTxCmd(app.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, defaultNodeHome),
genutilcli.ValidateGenesisCmd(app.ModuleBasics),
@ -129,7 +113,7 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
opts := ethermintserver.StartOptions{
AppCreator: ac.newApp,
DefaultNodeHome: chaincfg.DefaultNodeHome,
DBOpener: dbOpener,
DBOpener: opendb.OpenDB,
}
// ethermintserver adds additional flags to start the JSON-RPC server for evm support
ethermintserver.AddCommands(
@ -139,13 +123,10 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
ac.addStartCmdFlags,
)
// add keybase, auxiliary RPC, query, and tx child commands
// add keybase, gas RPC, query, and tx child commands
rootCmd.AddCommand(
newQueryCmd(),
newTxCmd(),
keyCommands(chaincfg.DefaultNodeHome),
rocksdb.RocksDBCmd,
newShardCmd(opts),
iavlviewer.NewCmd(opts),
kavaclient.KeyCommands(chaincfg.DefaultNodeHome),
)
}

View File

@ -1,322 +0,0 @@
package main
import (
"fmt"
"strings"
"github.com/0glabs/0g-chain/app"
"github.com/spf13/cobra"
dbm "github.com/cometbft/cometbft-db"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/server"
pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
tmconfig "github.com/cometbft/cometbft/config"
"github.com/cometbft/cometbft/node"
tmstate "github.com/cometbft/cometbft/state"
"github.com/cometbft/cometbft/store"
ethermintserver "github.com/evmos/ethermint/server"
)
const (
	// CLI flag names for the shard command.
	flagShardStartBlock        = "start"
	flagShardEndBlock          = "end"
	flagShardOnlyAppState      = "only-app-state"
	flagShardForceAppVersion   = "force-app-version"
	flagShardOnlyCometbftState = "only-cometbft-state"
	// TODO: --preserve flag for creating & operating on a copy?

	// allow using -1 to mean "latest" (perform no rollbacks)
	shardEndBlockLatest = -1
)
// newShardCmd builds the "shard" command, which trims the local databases
// (application.db, blockstore.db, state.db) down to an inclusive block range
// by first rolling back state past the end block and then pruning all state
// before the start block. This is destructive and must run on a stopped node.
func newShardCmd(opts ethermintserver.StartOptions) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "shard --home <path-to-home-dir> --start <start-block> --end <end-block> [--only-app-state] [--only-cometbft-state] [--force-app-version <app-version>]",
		Short: "Strip all blocks from the database outside of a given range",
		Long: `shard opens a local kava home directory's databases and removes all blocks outside a range defined by --start and --end. The range is inclusive of the end block.
It works by first rolling back the latest state to the block before the end block, and then by pruning all state before the start block.
Setting the end block to -1 signals to keep the latest block (no rollbacks).
The application.db can be loaded at a particular height via the --force-app-version option. This is useful if the sharding process is prematurely terminated while the application.db is being sharded.
The --only-app-state flag can be used to skip the pruning of the blockstore and cometbft state. This matches the functionality of the cosmos-sdk's "prune" command. Note that rolled back blocks will still affect all stores.
Similarly, the --only-cometbft-state flag skips pruning app state. This can be useful if the shard command is prematurely terminated during the shard process.
The shard command only flags the iavl tree nodes for deletion. Actual removal from the databases will be performed when each database is compacted.
WARNING: this is a destructive action.`,
		Example: `Create a 1M block data shard (keeps blocks kava 1,000,000 to 2,000,000)
$ kava shard --home path/to/.kava --start 1000000 --end 2000000
Prune all blocks up to 5,000,000:
$ kava shard --home path/to/.kava --start 5000000 --end -1
Prune first 1M blocks _without_ affecting blockstore or cometBFT state:
$ kava shard --home path/to/.kava --start 1000000 --end -1 --only-app-state`,
		RunE: func(cmd *cobra.Command, args []string) error {
			//////////////////////////
			// parse & validate flags
			//////////////////////////
			startBlock, err := cmd.Flags().GetInt64(flagShardStartBlock)
			if err != nil {
				return err
			}
			endBlock, err := cmd.Flags().GetInt64(flagShardEndBlock)
			if err != nil {
				return err
			}
			// endBlock must come after startBlock, unless it is -1 ("latest").
			if (endBlock == 0 || endBlock < startBlock) && endBlock != shardEndBlockLatest {
				return fmt.Errorf("end block (%d) must be greater than start block (%d)", endBlock, startBlock)
			}
			onlyAppState, err := cmd.Flags().GetBool(flagShardOnlyAppState)
			if err != nil {
				return err
			}
			forceAppVersion, err := cmd.Flags().GetInt64(flagShardForceAppVersion)
			if err != nil {
				return err
			}
			onlyCometbftState, err := cmd.Flags().GetBool(flagShardOnlyCometbftState)
			if err != nil {
				return err
			}
			clientCtx := client.GetClientContextFromCmd(cmd)
			ctx := server.GetServerContextFromCmd(cmd)
			ctx.Config.SetRoot(clientCtx.HomeDir)
			////////////////////////
			// manage db connection
			////////////////////////
			// connect to database
			db, err := opts.DBOpener(ctx.Viper, clientCtx.HomeDir, server.GetAppDBBackend(ctx.Viper))
			if err != nil {
				return err
			}
			// close db connection when done
			defer func() {
				if err := db.Close(); err != nil {
					ctx.Logger.Error("error closing db", "error", err.Error())
				}
			}()
			///////////////////
			// load multistore
			///////////////////
			// create app in order to load the multistore
			// skip loading the latest version so the desired height can be manually loaded
			ctx.Viper.Set("skip-load-latest", true)
			app := opts.AppCreator(ctx.Logger, db, nil, ctx.Viper).(*app.App)
			if forceAppVersion == shardEndBlockLatest {
				if err := app.LoadLatestVersion(); err != nil {
					return err
				}
			} else {
				// Load a specific multistore version, e.g. to resume an
				// interrupted sharding run.
				if err := app.LoadVersion(forceAppVersion); err != nil {
					return err
				}
			}
			// get the multistore
			cms := app.CommitMultiStore()
			multistore, ok := cms.(*rootmulti.Store)
			if !ok {
				return fmt.Errorf("only sharding of rootmulti.Store type is supported")
			}
			////////////////////////
			// shard application.db
			////////////////////////
			if !onlyCometbftState {
				if err := shardApplicationDb(multistore, startBlock, endBlock); err != nil {
					return err
				}
			} else {
				fmt.Printf("[%s] skipping sharding of application.db\n", flagShardOnlyCometbftState)
			}
			//////////////////////////////////
			// shard blockstore.db & state.db
			//////////////////////////////////
			// open block store & cometbft state
			blockStore, stateStore, err := openCometBftDbs(ctx.Config)
			if err != nil {
				return fmt.Errorf("failed to open cometbft dbs: %s", err)
			}
			if !onlyAppState {
				if err := shardCometBftDbs(blockStore, stateStore, startBlock, endBlock); err != nil {
					return err
				}
			} else {
				fmt.Printf("[%s] skipping sharding of blockstore.db and state.db\n", flagShardOnlyAppState)
				fmt.Printf("blockstore contains blocks %d - %d\n", blockStore.Base(), blockStore.Height())
			}
			return nil
		},
	}
	cmd.Flags().String(flags.FlagHome, opts.DefaultNodeHome, "The application home directory")
	cmd.Flags().Int64(flagShardStartBlock, 1, "Start block of data shard (inclusive)")
	cmd.Flags().Int64(flagShardEndBlock, 0, "End block of data shard (inclusive)")
	cmd.Flags().Bool(flagShardOnlyAppState, false, "Skip pruning of blockstore & cometbft state")
	cmd.Flags().Bool(flagShardOnlyCometbftState, false, "Skip pruning of application state")
	cmd.Flags().Int64(flagShardForceAppVersion, shardEndBlockLatest, "Instead of loading latest, force set the version of the multistore that is loaded")
	return cmd
}
// shardApplicationDb prunes the multistore up to startBlock and rolls it back to endBlock.
// endBlock == shardEndBlockLatest (-1) means "keep through the latest version".
// Returns an error if the store has no committed version or the requested
// end block is beyond the latest version.
func shardApplicationDb(multistore *rootmulti.Store, startBlock, endBlock int64) error {
	//////////////////////////////
	// Rollback state to endBlock
	//////////////////////////////
	// handle desired endblock being latest
	latest := multistore.LastCommitID().Version
	if latest == 0 {
		return fmt.Errorf("failed to find latest height >0")
	}
	fmt.Printf("latest height: %d\n", latest)
	if endBlock == shardEndBlockLatest {
		endBlock = latest
	}
	shardSize := endBlock - startBlock + 1
	// error if requesting block range the database does not have
	if endBlock > latest {
		return fmt.Errorf("data does not contain end block (%d): latest version is %d", endBlock, latest)
	}
	fmt.Printf("pruning data down to heights %d - %d (%d blocks)\n", startBlock, endBlock, shardSize)
	// set pruning options to prevent no-ops from `PruneStores`
	multistore.SetPruning(pruningtypes.PruningOptions{KeepRecent: uint64(shardSize), Interval: 0})
	// rollback application state
	if err := multistore.RollbackToVersion(endBlock); err != nil {
		return fmt.Errorf("failed to rollback application state: %s", err)
	}
	//////////////////////////////
	// Prune blocks to startBlock
	//////////////////////////////
	// enumerate all heights to prune: exactly heights 1..startBlock-1.
	// NOTE: the previous capacity (latest-shardSize) was both inaccurate and
	// could go negative — make panics on a negative capacity — whenever the
	// shard covered more blocks than exist. Size by the actual count instead.
	numToPrune := startBlock - 1
	if numToPrune < 0 {
		numToPrune = 0
	}
	pruneHeights := make([]int64, 0, numToPrune)
	for i := int64(1); i < startBlock; i++ {
		pruneHeights = append(pruneHeights, i)
	}
	if len(pruneHeights) > 0 {
		// prune application state
		fmt.Printf("pruning application state to height %d\n", startBlock)
		for _, pruneHeight := range pruneHeights {
			if err := multistore.PruneStores(pruneHeight); err != nil {
				return fmt.Errorf("failed to prune application state: %s", err)
			}
		}
	}
	return nil
}
// shardCometBftDbs shrinks blockstore.db & state.db down to the desired block range
// by rolling back one block at a time above endBlock and then pruning all
// blocks below startBlock. endBlock == shardEndBlockLatest (-1) keeps the
// latest block (no rollbacks).
func shardCometBftDbs(blockStore *store.BlockStore, stateStore tmstate.Store, startBlock, endBlock int64) error {
	var err error
	latest := blockStore.Height()
	if endBlock == shardEndBlockLatest {
		endBlock = latest
	}
	//////////////////////////////
	// Rollback state to endBlock
	//////////////////////////////
	// prep for outputting progress repeatedly to same line
	needsRollback := endBlock < latest
	progress := "rolling back blockstore & cometbft state to height %d"
	numChars := len(fmt.Sprintf(progress, latest))
	clearLine := fmt.Sprintf("\r%s\r", strings.Repeat(" ", numChars))
	printRollbackProgress := func(h int64) {
		fmt.Print(clearLine)
		fmt.Printf(progress, h)
	}
	// rollback tendermint db
	// tmstate.Rollback only steps back one block per call, so loop until the
	// target height is reached.
	height := latest
	for height > endBlock {
		beforeRollbackHeight := height
		printRollbackProgress(height - 1)
		height, _, err = tmstate.Rollback(blockStore, stateStore, true)
		if err != nil {
			return fmt.Errorf("failed to rollback cometbft state: %w", err)
		}
		// guard against looping forever when no rollback was actually performed
		if beforeRollbackHeight == height {
			return fmt.Errorf("attempting to rollback cometbft state height %d failed (no rollback performed)", height)
		}
	}
	if needsRollback {
		fmt.Println()
	} else {
		fmt.Printf("latest store height is already %d\n", latest)
	}
	//////////////////////////////
	// Prune blocks to startBlock
	//////////////////////////////
	// get starting block of block store
	baseBlock := blockStore.Base()
	// only prune if data exists, otherwise blockStore.PruneBlocks will panic
	if baseBlock < startBlock {
		// prune block store
		fmt.Printf("pruning block store from %d - %d\n", baseBlock, startBlock)
		if _, err := blockStore.PruneBlocks(startBlock); err != nil {
			return fmt.Errorf("failed to prune block store (retainHeight=%d): %s", startBlock, err)
		}
		// prune cometbft state
		fmt.Printf("pruning cometbft state from %d - %d\n", baseBlock, startBlock)
		if err := stateStore.PruneStates(baseBlock, startBlock); err != nil {
			return fmt.Errorf("failed to prune cometbft state store (%d - %d): %s", baseBlock, startBlock, err)
		}
	} else {
		fmt.Printf("blockstore and cometbft state begins at block %d\n", baseBlock)
	}
	return nil
}
// inspired by https://github.com/Kava-Labs/cometbft/blob/277b0853db3f67865a55aa1c54f59790b5f591be/node/node.go#L234
// openCometBftDbs opens blockstore.db and state.db with the default DB
// provider and wraps them in their cometbft store types.
func openCometBftDbs(config *tmconfig.Config) (*store.BlockStore, tmstate.Store, error) {
	dbProvider := node.DefaultDBProvider

	blockStoreDB, err := dbProvider(&node.DBContext{ID: "blockstore", Config: config})
	if err != nil {
		return nil, nil, err
	}
	blockStore := store.NewBlockStore(blockStoreDB)

	stateDB, err := dbProvider(&node.DBContext{ID: "state", Config: config})
	if err != nil {
		// Match the original named-return behavior: the block store is
		// already constructed at this point and is returned alongside err.
		return blockStore, nil, err
	}
	stateStore := tmstate.NewStore(stateDB, tmstate.StoreOptions{
		DiscardABCIResponses: config.Storage.DiscardABCIResponses,
	})
	return blockStore, stateStore, nil
}

View File

@ -5,9 +5,9 @@ import (
"github.com/spf13/cobra"
"github.com/cometbft/cometbft/libs/bytes"
"github.com/cometbft/cometbft/p2p"
coretypes "github.com/cometbft/cometbft/rpc/core/types"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/p2p"
coretypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"

499
cmd/opendb/metrics.go Normal file
View File

@ -0,0 +1,499 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
)
// rocksdbMetrics will be initialized in registerMetrics() if enableRocksdbMetrics flag set to true
var rocksdbMetrics *Metrics

// Metrics contains all rocksdb metrics which will be reported to prometheus.
// Each field is exported as a prometheus gauge; registerMetrics() creates
// them under the "rocksdb" namespace, grouped into the subsystems below.
type Metrics struct {
	// Keys
	NumberKeysWritten metrics.Gauge
	NumberKeysRead    metrics.Gauge
	NumberKeysUpdated metrics.Gauge
	EstimateNumKeys   metrics.Gauge

	// Files
	NumberFileOpens  metrics.Gauge
	NumberFileErrors metrics.Gauge

	// Memory
	BlockCacheUsage         metrics.Gauge
	EstimateTableReadersMem metrics.Gauge
	CurSizeAllMemTables     metrics.Gauge
	BlockCachePinnedUsage   metrics.Gauge

	// Cache
	BlockCacheMiss        metrics.Gauge
	BlockCacheHit         metrics.Gauge
	BlockCacheAdd         metrics.Gauge
	BlockCacheAddFailures metrics.Gauge

	// Detailed Cache (per block type: index / filter / data)
	BlockCacheIndexMiss         metrics.Gauge
	BlockCacheIndexHit          metrics.Gauge
	BlockCacheIndexBytesInsert  metrics.Gauge
	BlockCacheFilterMiss        metrics.Gauge
	BlockCacheFilterHit         metrics.Gauge
	BlockCacheFilterBytesInsert metrics.Gauge
	BlockCacheDataMiss          metrics.Gauge
	BlockCacheDataHit           metrics.Gauge
	BlockCacheDataBytesInsert   metrics.Gauge

	// Latency (get/write histogram percentiles and sample counts)
	DBGetMicrosP50     metrics.Gauge
	DBGetMicrosP95     metrics.Gauge
	DBGetMicrosP99     metrics.Gauge
	DBGetMicrosP100    metrics.Gauge
	DBGetMicrosCount   metrics.Gauge
	DBWriteMicrosP50   metrics.Gauge
	DBWriteMicrosP95   metrics.Gauge
	DBWriteMicrosP99   metrics.Gauge
	DBWriteMicrosP100  metrics.Gauge
	DBWriteMicrosCount metrics.Gauge

	// Write Stall
	StallMicros       metrics.Gauge
	DBWriteStallP50   metrics.Gauge
	DBWriteStallP95   metrics.Gauge
	DBWriteStallP99   metrics.Gauge
	DBWriteStallP100  metrics.Gauge
	DBWriteStallCount metrics.Gauge
	DBWriteStallSum   metrics.Gauge

	// Bloom Filter
	BloomFilterUseful           metrics.Gauge
	BloomFilterFullPositive     metrics.Gauge
	BloomFilterFullTruePositive metrics.Gauge

	// LSM Tree Stats
	LastLevelReadBytes    metrics.Gauge
	LastLevelReadCount    metrics.Gauge
	NonLastLevelReadBytes metrics.Gauge
	NonLastLevelReadCount metrics.Gauge
	GetHitL0              metrics.Gauge
	GetHitL1              metrics.Gauge
	GetHitL2AndUp         metrics.Gauge
}
// registerMetrics registers metrics in prometheus and initializes rocksdbMetrics variable
func registerMetrics() {
if rocksdbMetrics != nil {
// metrics already registered
return
}
labels := make([]string, 0)
rocksdbMetrics = &Metrics{
// Keys
NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_written",
Help: "",
}, labels),
NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_read",
Help: "",
}, labels),
NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_updated",
Help: "",
}, labels),
EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "estimate_num_keys",
Help: "estimated number of total keys in the active and unflushed immutable memtables and storage",
}, labels),
// Files
NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_opens",
Help: "",
}, labels),
NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_errors",
Help: "",
}, labels),
// Memory
BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_usage",
Help: "memory size for the entries residing in block cache",
}, labels),
EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "estimate_table_readers_mem",
Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
}, labels),
CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "cur_size_all_mem_tables",
Help: "approximate size of active and unflushed immutable memtables (bytes)",
}, labels),
BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_pinned_usage",
Help: "returns the memory size for the entries being pinned",
}, labels),
// Cache
BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_miss",
Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
}, labels),
BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_hit",
Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
}, labels),
BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add",
Help: "number of blocks added to block cache",
}, labels),
BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add_failures",
Help: "number of failures when adding blocks to block cache",
}, labels),
// Detailed Cache
BlockCacheIndexMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_miss",
Help: "",
}, labels),
BlockCacheIndexHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_hit",
Help: "",
}, labels),
BlockCacheIndexBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_bytes_insert",
Help: "",
}, labels),
BlockCacheFilterMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_miss",
Help: "",
}, labels),
BlockCacheFilterHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_hit",
Help: "",
}, labels),
BlockCacheFilterBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_bytes_insert",
Help: "",
}, labels),
BlockCacheDataMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_miss",
Help: "",
}, labels),
BlockCacheDataHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_hit",
Help: "",
}, labels),
BlockCacheDataBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_bytes_insert",
Help: "",
}, labels),
// Latency
DBGetMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p50",
Help: "",
}, labels),
DBGetMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p95",
Help: "",
}, labels),
DBGetMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p99",
Help: "",
}, labels),
DBGetMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p100",
Help: "",
}, labels),
DBGetMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_count",
Help: "",
}, labels),
DBWriteMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p50",
Help: "",
}, labels),
DBWriteMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p95",
Help: "",
}, labels),
DBWriteMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p99",
Help: "",
}, labels),
DBWriteMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p100",
Help: "",
}, labels),
DBWriteMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_count",
Help: "",
}, labels),
// Write Stall
StallMicros: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "stall_micros",
Help: "Writer has to wait for compaction or flush to finish.",
}, labels),
DBWriteStallP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p50",
Help: "",
}, labels),
DBWriteStallP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p95",
Help: "",
}, labels),
DBWriteStallP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p99",
Help: "",
}, labels),
DBWriteStallP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p100",
Help: "",
}, labels),
DBWriteStallCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_count",
Help: "",
}, labels),
DBWriteStallSum: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_sum",
Help: "",
}, labels),
// Bloom Filter
BloomFilterUseful: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_useful",
Help: "number of times bloom filter has avoided file reads, i.e., negatives.",
}, labels),
BloomFilterFullPositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_positive",
Help: "number of times bloom FullFilter has not avoided the reads.",
}, labels),
BloomFilterFullTruePositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_true_positive",
Help: "number of times bloom FullFilter has not avoided the reads and data actually exist.",
}, labels),
// LSM Tree Stats
LastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_bytes",
Help: "",
}, labels),
LastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_count",
Help: "",
}, labels),
NonLastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_bytes",
Help: "",
}, labels),
NonLastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_count",
Help: "",
}, labels),
GetHitL0: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l0",
Help: "number of Get() queries served by L0",
}, labels),
GetHitL1: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l1",
Help: "number of Get() queries served by L1",
}, labels),
GetHitL2AndUp: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l2_and_up",
Help: "number of Get() queries served by L2 and up",
}, labels),
}
}
// report publishes the most recent rocksdb measurements to the prometheus
// gauges held on m. props carries point-in-time database properties; stats
// carries counters and histograms parsed from the serialized statistics.
// Integer counters are converted to float64 because gauges accept floats;
// histogram fields (P50..P100, Count, Sum) are passed through unchanged.
func (m *Metrics) report(props *properties, stats *stats) {
// Keys
m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))
// Files
m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
m.NumberFileErrors.Set(float64(stats.NumberFileErrors))
// Memory
m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))
// Cache
m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
// Detailed Cache
m.BlockCacheIndexMiss.Set(float64(stats.BlockCacheIndexMiss))
m.BlockCacheIndexHit.Set(float64(stats.BlockCacheIndexHit))
m.BlockCacheIndexBytesInsert.Set(float64(stats.BlockCacheIndexBytesInsert))
m.BlockCacheFilterMiss.Set(float64(stats.BlockCacheFilterMiss))
m.BlockCacheFilterHit.Set(float64(stats.BlockCacheFilterHit))
m.BlockCacheFilterBytesInsert.Set(float64(stats.BlockCacheFilterBytesInsert))
m.BlockCacheDataMiss.Set(float64(stats.BlockCacheDataMiss))
m.BlockCacheDataHit.Set(float64(stats.BlockCacheDataHit))
m.BlockCacheDataBytesInsert.Set(float64(stats.BlockCacheDataBytesInsert))
// Latency (histogram fields are already float64, no conversion needed)
m.DBGetMicrosP50.Set(stats.DBGetMicros.P50)
m.DBGetMicrosP95.Set(stats.DBGetMicros.P95)
m.DBGetMicrosP99.Set(stats.DBGetMicros.P99)
m.DBGetMicrosP100.Set(stats.DBGetMicros.P100)
m.DBGetMicrosCount.Set(stats.DBGetMicros.Count)
m.DBWriteMicrosP50.Set(stats.DBWriteMicros.P50)
m.DBWriteMicrosP95.Set(stats.DBWriteMicros.P95)
m.DBWriteMicrosP99.Set(stats.DBWriteMicros.P99)
m.DBWriteMicrosP100.Set(stats.DBWriteMicros.P100)
m.DBWriteMicrosCount.Set(stats.DBWriteMicros.Count)
// Write Stall
m.StallMicros.Set(float64(stats.StallMicros))
m.DBWriteStallP50.Set(stats.DBWriteStallHistogram.P50)
m.DBWriteStallP95.Set(stats.DBWriteStallHistogram.P95)
m.DBWriteStallP99.Set(stats.DBWriteStallHistogram.P99)
m.DBWriteStallP100.Set(stats.DBWriteStallHistogram.P100)
m.DBWriteStallCount.Set(stats.DBWriteStallHistogram.Count)
m.DBWriteStallSum.Set(stats.DBWriteStallHistogram.Sum)
// Bloom Filter
m.BloomFilterUseful.Set(float64(stats.BloomFilterUseful))
m.BloomFilterFullPositive.Set(float64(stats.BloomFilterFullPositive))
m.BloomFilterFullTruePositive.Set(float64(stats.BloomFilterFullTruePositive))
// LSM Tree Stats
m.LastLevelReadBytes.Set(float64(stats.LastLevelReadBytes))
m.LastLevelReadCount.Set(float64(stats.LastLevelReadCount))
m.NonLastLevelReadBytes.Set(float64(stats.NonLastLevelReadBytes))
m.NonLastLevelReadCount.Set(float64(stats.NonLastLevelReadCount))
m.GetHitL0.Set(float64(stats.GetHitL0))
m.GetHitL1.Set(float64(stats.GetHitL1))
m.GetHitL2AndUp.Set(float64(stats.GetHitL2AndUp))
}

18
cmd/opendb/opendb.go Normal file
View File

@ -0,0 +1,18 @@
//go:build !rocksdb
// +build !rocksdb
package opendb
import (
"path/filepath"
"github.com/cosmos/cosmos-sdk/server/types"
dbm "github.com/tendermint/tm-db"
)
// OpenDB is a copy of the default DBOpener function used by ethermint, see:
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647
//
// It opens the "application" database under <home>/data with the requested
// tm-db backend. The app-options argument is unused in the non-rocksdb build.
func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
	return dbm.NewDB("application", backendType, filepath.Join(home, "data"))
}

View File

@ -0,0 +1,398 @@
//go:build rocksdb
// +build rocksdb
// Copyright 2023 Kava Labs, Inc.
// Copyright 2023 Cronos Labs, Inc.
//
// Derived from https://github.com/crypto-org-chain/cronos@496ce7e
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opendb
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/cosmos/cosmos-sdk/server/types"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cast"
dbm "github.com/tendermint/tm-db"
)
// ErrUnexpectedConfiguration is returned when an existing database has a
// column family layout other than a single family named "default".
var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default")
const (
// default tm-db block cache size for RocksDB
defaultBlockCacheSize = 1 << 30
// name of the only column family the database is expected to contain
defaultColumnFamilyName = "default"
// metrics-reporting options
enableMetricsOptName = "rocksdb.enable-metrics"
reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
defaultReportMetricsIntervalSecs = 15
// database-level option keys (applied in overrideDBOpts)
maxOpenFilesDBOptName = "rocksdb.max-open-files"
maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads"
tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
allowMMAPWritesDBOptName = "rocksdb.allow_mmap_writes"
allowMMAPReadsDBOptName = "rocksdb.allow_mmap_reads"
useFsyncDBOptName = "rocksdb.use_fsync"
useAdaptiveMutexDBOptName = "rocksdb.use_adaptive_mutex"
bytesPerSyncDBOptName = "rocksdb.bytes_per_sync"
maxBackgroundJobsDBOptName = "rocksdb.max-background-jobs"
// column-family option keys (applied in overrideCFOpts)
writeBufferSizeCFOptName = "rocksdb.write-buffer-size"
numLevelsCFOptName = "rocksdb.num-levels"
maxWriteBufferNumberCFOptName = "rocksdb.max_write_buffer_number"
minWriteBufferNumberToMergeCFOptName = "rocksdb.min_write_buffer_number_to_merge"
maxBytesForLevelBaseCFOptName = "rocksdb.max_bytes_for_level_base"
maxBytesForLevelMultiplierCFOptName = "rocksdb.max_bytes_for_level_multiplier"
targetFileSizeBaseCFOptName = "rocksdb.target_file_size_base"
targetFileSizeMultiplierCFOptName = "rocksdb.target_file_size_multiplier"
level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
level0SlowdownWritesTriggerCFOptName = "rocksdb.level0_slowdown_writes_trigger"
// block-based-table option keys (applied in bbtoFromAppOpts)
blockCacheSizeBBTOOptName = "rocksdb.block_cache_size"
bitsPerKeyBBTOOptName = "rocksdb.bits_per_key"
blockSizeBBTOOptName = "rocksdb.block_size"
cacheIndexAndFilterBlocksBBTOOptName = "rocksdb.cache_index_and_filter_blocks"
pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
formatVersionBBTOOptName = "rocksdb.format_version"
// read option keys (applied in readOptsFromAppOpts)
asyncIOReadOptName = "rocksdb.read-async-io"
)
// OpenDB opens the "application" database under <home>/data. A rocksdb
// backend goes through openRocksdb so the overrides in appOpts are applied;
// any other backend is opened with tm-db defaults.
func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
	dataDir := filepath.Join(home, "data")
	switch backendType {
	case dbm.RocksDBBackend:
		return openRocksdb(dataDir, appOpts)
	default:
		return dbm.NewDB("application", backendType, dataDir)
	}
}
// openRocksdb loads the existing rocksdb options from disk, overrides them
// with whatever is explicitly specified in appOpts, and opens the database.
// An option keeps its stored/default value unless appOpts sets it explicitly.
func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
	dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
	if err != nil {
		return nil, err
	}

	// apply block-based-table, database-level and column-family-level overrides
	bbto := bbtoFromAppOpts(appOpts)
	dbOpts.SetBlockBasedTableFactory(bbto)
	cfOpts.SetBlockBasedTableFactory(bbto)
	dbOpts = overrideDBOpts(dbOpts, appOpts)
	cfOpts = overrideCFOpts(cfOpts, appOpts)
	readOpts := readOptsFromAppOpts(appOpts)

	// reporting interval falls back to the default when unset (cast yields 0)
	enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
	interval := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
	if interval == 0 {
		interval = defaultReportMetricsIntervalSecs
	}

	return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, readOpts, enableMetrics, interval)
}
// loadLatestOptions loads and returns the database and column family options
// persisted under dir. A "NotFound" error means the database has not been
// created yet; in that case default tm-db options are returned instead.
// An existing database must contain exactly one column family named "default",
// otherwise ErrUnexpectedConfiguration is returned.
func loadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
	latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
	if err != nil {
		// a missing options file simply means the database doesn't exist yet
		if strings.HasPrefix(err.Error(), "NotFound: ") {
			return newDefaultOptions(), newDefaultOptions(), nil
		}
		return nil, nil, err
	}

	cfNames := latestOpts.ColumnFamilyNames()
	cfOpts := latestOpts.ColumnFamilyOpts()
	// the db should have only one column family named default
	if len(cfNames) != 1 || cfNames[0] != defaultColumnFamilyName {
		return nil, nil, ErrUnexpectedConfiguration
	}

	return latestOpts.Options(), &cfOpts[0], nil
}
// overrideDBOpts merges dbOpts and appOpts, appOpts takes precedence:
// each database-level option is replaced only when it is explicitly present
// in appOpts; everything else keeps its current value.
func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
	if v := appOpts.Get(maxOpenFilesDBOptName); v != nil {
		dbOpts.SetMaxOpenFiles(cast.ToInt(v))
	}
	if v := appOpts.Get(maxFileOpeningThreadsDBOptName); v != nil {
		dbOpts.SetMaxFileOpeningThreads(cast.ToInt(v))
	}
	if v := appOpts.Get(tableCacheNumshardbitsDBOptName); v != nil {
		dbOpts.SetTableCacheNumshardbits(cast.ToInt(v))
	}
	if v := appOpts.Get(allowMMAPWritesDBOptName); v != nil {
		dbOpts.SetAllowMmapWrites(cast.ToBool(v))
	}
	if v := appOpts.Get(allowMMAPReadsDBOptName); v != nil {
		dbOpts.SetAllowMmapReads(cast.ToBool(v))
	}
	if v := appOpts.Get(useFsyncDBOptName); v != nil {
		dbOpts.SetUseFsync(cast.ToBool(v))
	}
	if v := appOpts.Get(useAdaptiveMutexDBOptName); v != nil {
		dbOpts.SetUseAdaptiveMutex(cast.ToBool(v))
	}
	if v := appOpts.Get(bytesPerSyncDBOptName); v != nil {
		dbOpts.SetBytesPerSync(cast.ToUint64(v))
	}
	if v := appOpts.Get(maxBackgroundJobsDBOptName); v != nil {
		dbOpts.SetMaxBackgroundJobs(cast.ToInt(v))
	}
	return dbOpts
}
// overrideCFOpts merges cfOpts and appOpts, appOpts takes precedence:
// each column-family option is replaced only when it is explicitly present
// in appOpts; everything else keeps its current value.
func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
	if v := appOpts.Get(writeBufferSizeCFOptName); v != nil {
		cfOpts.SetWriteBufferSize(cast.ToUint64(v))
	}
	if v := appOpts.Get(numLevelsCFOptName); v != nil {
		cfOpts.SetNumLevels(cast.ToInt(v))
	}
	if v := appOpts.Get(maxWriteBufferNumberCFOptName); v != nil {
		cfOpts.SetMaxWriteBufferNumber(cast.ToInt(v))
	}
	if v := appOpts.Get(minWriteBufferNumberToMergeCFOptName); v != nil {
		cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(v))
	}
	if v := appOpts.Get(maxBytesForLevelBaseCFOptName); v != nil {
		cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(v))
	}
	if v := appOpts.Get(maxBytesForLevelMultiplierCFOptName); v != nil {
		cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(v))
	}
	if v := appOpts.Get(targetFileSizeBaseCFOptName); v != nil {
		cfOpts.SetTargetFileSizeBase(cast.ToUint64(v))
	}
	if v := appOpts.Get(targetFileSizeMultiplierCFOptName); v != nil {
		cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(v))
	}
	if v := appOpts.Get(level0FileNumCompactionTriggerCFOptName); v != nil {
		cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(v))
	}
	if v := appOpts.Get(level0SlowdownWritesTriggerCFOptName); v != nil {
		cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(v))
	}
	return cfOpts
}
// readOptsFromAppOpts builds rocksdb read options, enabling async IO only
// when rocksdb.read-async-io is explicitly set in appOpts.
func readOptsFromAppOpts(appOpts types.AppOptions) *grocksdb.ReadOptions {
	readOpts := grocksdb.NewDefaultReadOptions()
	if v := appOpts.Get(asyncIOReadOptName); v != nil {
		readOpts.SetAsyncIO(cast.ToBool(v))
	}
	return readOpts
}
// bbtoFromAppOpts builds block-based-table options, starting from the
// defaults and applying any overrides explicitly present in appOpts.
func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions {
	bbto := defaultBBTO()
	if v := appOpts.Get(blockCacheSizeBBTOOptName); v != nil {
		bbto.SetBlockCache(grocksdb.NewLRUCache(cast.ToUint64(v)))
	}
	if v := appOpts.Get(bitsPerKeyBBTOOptName); v != nil {
		bbto.SetFilterPolicy(grocksdb.NewBloomFilter(cast.ToFloat64(v)))
	}
	if v := appOpts.Get(blockSizeBBTOOptName); v != nil {
		bbto.SetBlockSize(cast.ToInt(v))
	}
	if v := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName); v != nil {
		bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(v))
	}
	if v := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName); v != nil {
		bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(v))
	}
	if v := appOpts.Get(formatVersionBBTOOptName); v != nil {
		bbto.SetFormatVersion(cast.ToInt(v))
	}
	return bbto
}
// newRocksDBWithOptions opens rocksdb with the provided database and column
// family options. It expects the db to have only one column family, named
// "default". When enableMetrics is true, rocksdb statistics collection is
// enabled and a background goroutine reports them to prometheus every
// reportMetricsIntervalSecs seconds.
// NOTE(review): the metrics goroutine has no stop signal; it lives for the
// remaining lifetime of the process.
func newRocksDBWithOptions(
name string,
dir string,
dbOpts *grocksdb.Options,
cfOpts *grocksdb.Options,
readOpts *grocksdb.ReadOptions,
enableMetrics bool,
reportMetricsIntervalSecs int64,
) (*dbm.RocksDB, error) {
dbPath := filepath.Join(dir, name+".db")
// Ensure path exists
if err := os.MkdirAll(dbPath, 0755); err != nil {
return nil, fmt.Errorf("failed to create db path: %w", err)
}
// EnableStatistics adds overhead so shouldn't be enabled in production
if enableMetrics {
dbOpts.EnableStatistics()
}
db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{defaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
if err != nil {
return nil, err
}
if enableMetrics {
registerMetrics()
go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs))
}
// two write-option sets: wo for normal writes, woSync for synced writes
wo := grocksdb.NewDefaultWriteOptions()
woSync := grocksdb.NewDefaultWriteOptions()
woSync.SetSync(true)
return dbm.NewRocksDBWithRawDB(db, readOpts, wo, woSync), nil
}
// newDefaultOptions returns default tm-db options for RocksDB, see for details:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
//
// The defaults are good enough for most cases, including heavy workloads:
// 1GB table cache, 512MB write buffer (may use 50% more on heavy workloads),
// snappy compression by default (needs -lsnappy to enable).
func newDefaultOptions() *grocksdb.Options {
	dbOpts := grocksdb.NewDefaultOptions()
	dbOpts.SetBlockBasedTableFactory(defaultBBTO())
	// SetMaxOpenFiles to 4096 seems to provide a reliable performance boost
	dbOpts.SetMaxOpenFiles(4096)
	dbOpts.SetCreateIfMissing(true)
	dbOpts.IncreaseParallelism(runtime.NumCPU())
	// 1.5GB maximum memory use for writebuffer
	dbOpts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
	return dbOpts
}
// defaultBBTO returns default tm-db block-based-table options for RocksDB,
// see for details:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func defaultBBTO() *grocksdb.BlockBasedTableOptions {
	tableOpts := grocksdb.NewDefaultBlockBasedTableOptions()
	tableOpts.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize))
	tableOpts.SetFilterPolicy(grocksdb.NewBloomFilter(10))
	return tableOpts
}
// reportMetrics periodically requests stats from rocksdb and reports them to
// prometheus. Collection errors are skipped; the next tick retries.
// NOTE: should be launched as a goroutine. It has no stop signal and runs for
// the remaining lifetime of the process.
func reportMetrics(db *grocksdb.DB, interval time.Duration) {
	ticker := time.NewTicker(interval)
	// unreachable while the loop is infinite, but idiomatic and safe if a
	// termination path is ever added
	defer ticker.Stop()
	// ranging over the channel replaces the previous single-case select
	// (flagged by staticcheck S1000)
	for range ticker.C {
		props, stats, err := getPropsAndStats(db)
		if err != nil {
			continue
		}
		rocksdbMetrics.report(props, stats)
	}
}
// getPropsAndStats collects database properties and parsed statistics from
// rocksdb: properties are loaded first, then the serialized statistics string
// they contain is parsed into a map and decoded into a stats value.
func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
	props, err := newPropsLoader(db).load()
	if err != nil {
		return nil, nil, err
	}

	statMap, err := parseSerializedStats(props.OptionsStatistics)
	if err != nil {
		return nil, nil, err
	}

	parsedStats, err := newStatLoader(statMap).load()
	if err != nil {
		return nil, nil, err
	}

	return props, parsedStats, nil
}

View File

@ -0,0 +1,384 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"os"
"path/filepath"
"testing"
"github.com/linxGnu/grocksdb"
"github.com/stretchr/testify/require"
)
// mockAppOptions is a minimal map-backed implementation of the app-options
// interface, used to feed option overrides into the functions under test.
type mockAppOptions struct {
// opts maps option names to their configured values
opts map[string]interface{}
}
// newMockAppOptions wraps opts in a mockAppOptions.
func newMockAppOptions(opts map[string]interface{}) *mockAppOptions {
return &mockAppOptions{
opts: opts,
}
}
// Get returns the value stored for key, or nil when the key is absent.
func (m *mockAppOptions) Get(key string) interface{} {
return m.opts[key]
}
// TestOpenRocksdb verifies that openRocksdb applies overrides from the app
// options when opening a database, and that the effective options are
// persisted so loadLatestOptions can read them back after close.
func TestOpenRocksdb(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
maxOpenFiles int
maxFileOpeningThreads int
writeBufferSize uint64
numLevels int
}{
{
desc: "default options",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 2 options",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
writeBufferSizeCFOptName: 999_999,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 4 options",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
maxFileOpeningThreadsDBOptName: 9,
writeBufferSizeCFOptName: 999_999,
numLevelsCFOptName: 9,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
// fresh temp dir per case; removed when the subtest finishes
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
// open with overrides, then close so options are flushed to disk
db, err := openRocksdb(dir, tc.mockAppOptions)
require.NoError(t, err)
require.NoError(t, db.Close())
// reload persisted options and check the overrides took effect
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
})
t.Run("db doesn't exist yet", func(t *testing.T) {
defaultOpts := newDefaultOptions()
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
// no overrides: a freshly created db must end up with the defaults
mockAppOpts := newMockAppOptions(map[string]interface{}{})
db, err := openRocksdb(dir, mockAppOpts)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
})
}
// TestLoadLatestOptions verifies that loadLatestOptions round-trips options
// written by newRocksDBWithOptions, and returns the defaults when no
// database exists yet.
func TestLoadLatestOptions(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
const testCasesNum = 3
// build three db/cf option pairs: defaults, 2 overrides, 4 overrides
dbOptsList := make([]*grocksdb.Options, testCasesNum)
cfOptsList := make([]*grocksdb.Options, testCasesNum)
dbOptsList[0] = newDefaultOptions()
cfOptsList[0] = newDefaultOptions()
dbOptsList[1] = newDefaultOptions()
dbOptsList[1].SetMaxOpenFiles(999)
cfOptsList[1] = newDefaultOptions()
cfOptsList[1].SetWriteBufferSize(999_999)
dbOptsList[2] = newDefaultOptions()
dbOptsList[2].SetMaxOpenFiles(999)
dbOptsList[2].SetMaxFileOpeningThreads(9)
cfOptsList[2] = newDefaultOptions()
cfOptsList[2].SetWriteBufferSize(999_999)
cfOptsList[2].SetNumLevels(9)
for _, tc := range []struct {
desc string
dbOpts *grocksdb.Options
cfOpts *grocksdb.Options
maxOpenFiles int
maxFileOpeningThreads int
writeBufferSize uint64
numLevels int
}{
{
desc: "default options",
dbOpts: dbOptsList[0],
cfOpts: cfOptsList[0],
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 2 options",
dbOpts: dbOptsList[1],
cfOpts: cfOptsList[1],
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 4 options",
dbOpts: dbOptsList[2],
cfOpts: cfOptsList[2],
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
name := "application"
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
// create the db with the case's options, close to persist them
db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
require.NoError(t, err)
require.NoError(t, db.Close())
// reload from disk and compare with the expected values
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
})
t.Run("db doesn't exist yet", func(t *testing.T) {
defaultOpts := newDefaultOptions()
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
// no database on disk: loadLatestOptions must fall back to defaults
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
})
}
// TestOverrideDBOpts checks that database-level options explicitly present
// in the app options override the defaults, while unspecified options keep
// their default values.
func TestOverrideDBOpts(t *testing.T) {
	defaults := newDefaultOptions()
	testCases := []struct {
		desc                      string
		appOpts                   *mockAppOptions
		wantMaxOpenFiles          int
		wantMaxFileOpeningThreads int
	}{
		{
			desc:                      "override nothing",
			appOpts:                   newMockAppOptions(map[string]interface{}{}),
			wantMaxOpenFiles:          defaults.GetMaxOpenFiles(),
			wantMaxFileOpeningThreads: defaults.GetMaxFileOpeningThreads(),
		},
		{
			desc: "override max-open-files",
			appOpts: newMockAppOptions(map[string]interface{}{
				maxOpenFilesDBOptName: 999,
			}),
			wantMaxOpenFiles:          999,
			wantMaxFileOpeningThreads: defaults.GetMaxFileOpeningThreads(),
		},
		{
			desc: "override max-file-opening-threads",
			appOpts: newMockAppOptions(map[string]interface{}{
				maxFileOpeningThreadsDBOptName: 9,
			}),
			wantMaxOpenFiles:          defaults.GetMaxOpenFiles(),
			wantMaxFileOpeningThreads: 9,
		},
		{
			desc: "override max-open-files and max-file-opening-threads",
			appOpts: newMockAppOptions(map[string]interface{}{
				maxOpenFilesDBOptName:          999,
				maxFileOpeningThreadsDBOptName: 9,
			}),
			wantMaxOpenFiles:          999,
			wantMaxFileOpeningThreads: 9,
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			got := overrideDBOpts(newDefaultOptions(), tc.appOpts)
			require.Equal(t, tc.wantMaxOpenFiles, got.GetMaxOpenFiles())
			require.Equal(t, tc.wantMaxFileOpeningThreads, got.GetMaxFileOpeningThreads())
		})
	}
}
// TestOverrideCFOpts verifies that overrideCFOpts applies only the
// column-family settings present in the app options and leaves everything
// else at its default.
func TestOverrideCFOpts(t *testing.T) {
	defaults := newDefaultOptions()
	testCases := []struct {
		name           string
		appOpts        *mockAppOptions
		wantBufferSize uint64
		wantNumLevels  int
	}{
		{
			name:           "override nothing",
			appOpts:        newMockAppOptions(map[string]interface{}{}),
			wantBufferSize: defaults.GetWriteBufferSize(),
			wantNumLevels:  defaults.GetNumLevels(),
		},
		{
			name: "override write-buffer-size",
			appOpts: newMockAppOptions(map[string]interface{}{
				writeBufferSizeCFOptName: 999_999,
			}),
			wantBufferSize: 999_999,
			wantNumLevels:  defaults.GetNumLevels(),
		},
		{
			name: "override num-levels",
			appOpts: newMockAppOptions(map[string]interface{}{
				numLevelsCFOptName: 9,
			}),
			wantBufferSize: defaults.GetWriteBufferSize(),
			wantNumLevels:  9,
		},
		{
			name: "override write-buffer-size and num-levels",
			appOpts: newMockAppOptions(map[string]interface{}{
				writeBufferSizeCFOptName: 999_999,
				numLevelsCFOptName:       9,
			}),
			wantBufferSize: 999_999,
			wantNumLevels:  9,
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			// Start from a fresh default and apply the overrides in one step.
			opts := overrideCFOpts(newDefaultOptions(), tt.appOpts)
			require.Equal(t, tt.wantBufferSize, opts.GetWriteBufferSize())
			require.Equal(t, tt.wantNumLevels, opts.GetNumLevels())
		})
	}
}
// TestReadOptsFromAppOpts verifies that the async-IO read option defaults to
// false and can be enabled through the app options.
func TestReadOptsFromAppOpts(t *testing.T) {
	testCases := []struct {
		name        string
		appOpts     *mockAppOptions
		wantAsyncIO bool
	}{
		{
			name:        "default options",
			appOpts:     newMockAppOptions(map[string]interface{}{}),
			wantAsyncIO: false,
		},
		{
			name: "set asyncIO option to true",
			appOpts: newMockAppOptions(map[string]interface{}{
				asyncIOReadOptName: true,
			}),
			wantAsyncIO: true,
		},
	}
	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			got := readOptsFromAppOpts(tt.appOpts)
			require.Equal(t, tt.wantAsyncIO, got.IsAsyncIO())
		})
	}
}
// TestNewRocksDBWithOptions opens a database with customized DB and
// column-family options, closes it, and then reloads the persisted options
// from disk to verify that both the overridden values and the untouched
// defaults were written out correctly.
func TestNewRocksDBWithOptions(t *testing.T) {
	defaultOpts := newDefaultOptions()
	name := "application"
	dir, err := os.MkdirTemp("", "rocksdb")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, os.RemoveAll(dir))
	}()

	// Customize one DB-level option and one column-family option.
	dbOpts := newDefaultOptions()
	dbOpts.SetMaxOpenFiles(999)
	cfOpts := newDefaultOptions()
	cfOpts.SetWriteBufferSize(999_999)

	db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
	require.NoError(t, err)
	require.NoError(t, db.Close())

	// Reload the options RocksDB persisted and check they round-tripped.
	dbOpts, cfOpts, err = loadLatestOptions(filepath.Join(dir, "application.db"))
	require.NoError(t, err)
	require.Equal(t, 999, dbOpts.GetMaxOpenFiles())
	require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
	require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize())
	// Fix: NumLevels is a column-family option — assert on the loaded cfOpts
	// (as the other tests in this file do), not on dbOpts, where the check
	// passed vacuously and never validated the persisted CF options.
	require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
}
// TestNewDefaultOptions pins the expected default for max-open-files.
func TestNewDefaultOptions(t *testing.T) {
	opts := newDefaultOptions()
	require.Equal(t, 4096, opts.GetMaxOpenFiles())
}

View File

@ -0,0 +1,87 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strings"
"errors"
)
// propsGetter is the minimal property-reading surface of a RocksDB handle
// (matches grocksdb's DB methods), allowing the database to be mocked in tests.
type propsGetter interface {
	// GetProperty returns a string-valued property; empty when unavailable.
	GetProperty(propName string) (value string)
	// GetIntProperty returns an integer-valued property; success is false
	// when the property could not be read.
	GetIntProperty(propName string) (value uint64, success bool)
}
// propsLoader reads a fixed set of RocksDB properties from a propsGetter,
// collecting error messages as it goes instead of failing on the first
// missing property, so one load reports every problem at once.
type propsLoader struct {
	db        propsGetter // property source (the open database)
	errorMsgs []string    // accumulated failures; non-empty makes load() error
}
// newPropsLoader constructs a propsLoader over the given property source.
func newPropsLoader(db propsGetter) *propsLoader {
	loader := propsLoader{db: db, errorMsgs: []string{}}
	return &loader
}
// load reads the full set of RocksDB properties. If any individual property
// could not be read, it returns a single error joining every failure message;
// otherwise it returns the populated properties.
func (l *propsLoader) load() (*properties, error) {
	result := properties{
		BaseLevel:               l.getIntProperty("rocksdb.base-level"),
		BlockCacheCapacity:      l.getIntProperty("rocksdb.block-cache-capacity"),
		BlockCachePinnedUsage:   l.getIntProperty("rocksdb.block-cache-pinned-usage"),
		BlockCacheUsage:         l.getIntProperty("rocksdb.block-cache-usage"),
		CurSizeActiveMemTable:   l.getIntProperty("rocksdb.cur-size-active-mem-table"),
		CurSizeAllMemTables:     l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
		EstimateLiveDataSize:    l.getIntProperty("rocksdb.estimate-live-data-size"),
		EstimateNumKeys:         l.getIntProperty("rocksdb.estimate-num-keys"),
		EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
		LiveSSTFilesSize:        l.getIntProperty("rocksdb.live-sst-files-size"),
		SizeAllMemTables:        l.getIntProperty("rocksdb.size-all-mem-tables"),
		OptionsStatistics:       l.getProperty("rocksdb.options-statistics"),
	}
	if len(l.errorMsgs) > 0 {
		return nil, errors.New(strings.Join(l.errorMsgs, ";"))
	}
	return &result, nil
}
// getProperty fetches a string property, recording a failure message when the
// database returns an empty value.
func (l *propsLoader) getProperty(propName string) string {
	if value := l.db.GetProperty(propName); value != "" {
		return value
	}
	l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
	return ""
}
// getIntProperty fetches an integer property, recording a failure message and
// returning zero when the database cannot supply it.
func (l *propsLoader) getIntProperty(propName string) uint64 {
	if value, ok := l.db.GetIntProperty(propName); ok {
		return value
	}
	l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
	return 0
}
// properties is a snapshot of RocksDB runtime statistics, one field per
// "rocksdb.*" property read by propsLoader.load. All sizes are in bytes
// (presumably — as reported by RocksDB; confirm against the RocksDB docs).
type properties struct {
	BaseLevel               uint64 // rocksdb.base-level
	BlockCacheCapacity      uint64 // rocksdb.block-cache-capacity
	BlockCachePinnedUsage   uint64 // rocksdb.block-cache-pinned-usage
	BlockCacheUsage         uint64 // rocksdb.block-cache-usage
	CurSizeActiveMemTable   uint64 // rocksdb.cur-size-active-mem-table
	CurSizeAllMemTables     uint64 // rocksdb.cur-size-all-mem-tables
	EstimateLiveDataSize    uint64 // rocksdb.estimate-live-data-size
	EstimateNumKeys         uint64 // rocksdb.estimate-num-keys
	EstimateTableReadersMem uint64 // rocksdb.estimate-table-readers-mem
	LiveSSTFilesSize        uint64 // rocksdb.live-sst-files-size
	SizeAllMemTables        uint64 // rocksdb.size-all-mem-tables
	OptionsStatistics       string // rocksdb.options-statistics (free-form text)
}

Some files were not shown because too many files have changed in this diff Show More