mirror of
https://github.com/0glabs/0g-chain.git
synced 2025-04-04 15:55:23 +00:00
Compare commits
290 Commits
v0.3.1.alp
...
dev
Author | SHA1 | Date | |
---|---|---|---|
![]() |
80208ed9b7 | ||
![]() |
96926b4cbf | ||
![]() |
da2e0feffb | ||
![]() |
2c436a7d45 | ||
![]() |
4c48f7ea63 | ||
![]() |
2cc584be0b | ||
![]() |
d8e968146b | ||
![]() |
962943d32b | ||
![]() |
b42c82d59c | ||
![]() |
08000544c9 | ||
![]() |
248db0f47c | ||
![]() |
0b027e10ed | ||
![]() |
e41c65c92d | ||
![]() |
c70b0a1c2b | ||
![]() |
9b171dbd4c | ||
![]() |
4ff1ab24d1 | ||
![]() |
384d899eff | ||
![]() |
c2fdb3109e | ||
![]() |
45508f5954 | ||
![]() |
72e8508651 | ||
![]() |
c066af2a47 | ||
![]() |
d1b83b5ac8 | ||
![]() |
dc888ceb78 | ||
![]() |
ea3e4b84e8 | ||
![]() |
553d111f40 | ||
![]() |
351c2cb132 | ||
![]() |
6f83c22853 | ||
![]() |
4d95fd5f30 | ||
![]() |
e54d5ba99a | ||
![]() |
a0426c63f7 | ||
![]() |
a32dad8373 | ||
![]() |
4f53e59af7 | ||
![]() |
8ecfda7bd2 | ||
![]() |
bbd02215f7 | ||
![]() |
d4066b6a3d | ||
![]() |
7900795654 | ||
![]() |
6df54c84b5 | ||
![]() |
ec7e0c3163 | ||
![]() |
ac043ff438 | ||
![]() |
e6b13d85a1 | ||
![]() |
f04c87506b | ||
![]() |
65a4f9128b | ||
![]() |
0a79fe53a2 | ||
![]() |
aed6a6161a | ||
![]() |
75cccf2c8f | ||
![]() |
db1d6463ec | ||
![]() |
1a039a0d13 | ||
![]() |
6ed21ea3fb | ||
![]() |
8b43aa4064 | ||
![]() |
2500f6cb31 | ||
![]() |
88304562cc | ||
![]() |
58871957a8 | ||
![]() |
aff086bf7b | ||
![]() |
0ff16c798a | ||
![]() |
e2668ad80d | ||
![]() |
2fa73957a9 | ||
![]() |
32cd094bbc | ||
![]() |
6037a00869 | ||
![]() |
559d1beb03 | ||
![]() |
4db9ee49b4 | ||
![]() |
ac57a9b4b4 | ||
![]() |
5051f75b52 | ||
![]() |
5c8ced4b37 | ||
![]() |
47b6341324 | ||
![]() |
ae7d1ad2a7 | ||
![]() |
3e427e5bdd | ||
![]() |
9e1ec04010 | ||
![]() |
76eebc57c1 | ||
![]() |
100bad3471 | ||
![]() |
605a71e826 | ||
![]() |
1602c96a40 | ||
![]() |
759d08a6eb | ||
![]() |
f2abb98d6c | ||
![]() |
77bfade203 | ||
![]() |
e308e44dd6 | ||
![]() |
58875c1bc9 | ||
![]() |
c80874b0ea | ||
![]() |
96e70d3cbd | ||
![]() |
f9ef0bac6e | ||
![]() |
5f2089b4c5 | ||
![]() |
a4b8d77411 | ||
![]() |
f5ce18dd5f | ||
![]() |
890e858558 | ||
![]() |
afacc89c8d | ||
![]() |
38764453a5 | ||
![]() |
a90bd43999 | ||
![]() |
46d159a18c | ||
![]() |
840deea660 | ||
![]() |
6c3360f102 | ||
![]() |
80b2dacbc2 | ||
![]() |
1152537679 | ||
![]() |
5bd6ac39ee | ||
![]() |
e0fcd07a08 | ||
![]() |
70ac592012 | ||
![]() |
de22587a5b | ||
![]() |
0c02c27a9d | ||
![]() |
4409bfc996 | ||
![]() |
723241f484 | ||
![]() |
84d1a89bec | ||
![]() |
94ddf20305 | ||
![]() |
4ebbb886bf | ||
![]() |
57943ec0e0 | ||
![]() |
04ce67f6a9 | ||
![]() |
500e66733d | ||
![]() |
8b691e61f8 | ||
![]() |
a0bdd2a142 | ||
![]() |
53dcea2867 | ||
![]() |
d31a599c60 | ||
![]() |
07cf4ad258 | ||
![]() |
cb4e6e006e | ||
![]() |
0e37d518ec | ||
![]() |
822e374be6 | ||
![]() |
9ca8359202 | ||
![]() |
32bcc7f4e3 | ||
![]() |
f50a429527 | ||
![]() |
8ff2277450 | ||
![]() |
cdf029c87a | ||
![]() |
5f9325c2a0 | ||
![]() |
5f4f1851cb | ||
![]() |
4c28427089 | ||
![]() |
0f40b721ee | ||
![]() |
ec3733a2c6 | ||
![]() |
8df7625ac1 | ||
![]() |
31c96eeb93 | ||
![]() |
ac1af4ae92 | ||
![]() |
0d54bb9202 | ||
![]() |
73158cd738 | ||
![]() |
73b7d800a3 | ||
![]() |
27ddc91956 | ||
![]() |
9962b7b0db | ||
![]() |
f415fb1332 | ||
![]() |
28b9c07e02 | ||
![]() |
45b7920181 | ||
![]() |
56d337df16 | ||
![]() |
85059d734e | ||
![]() |
6b4e8415da | ||
![]() |
77b817f9b8 | ||
![]() |
46378d6157 | ||
![]() |
d0721fd172 | ||
![]() |
5e34f5b289 | ||
![]() |
d6bca1b221 | ||
![]() |
8dc89ad08d | ||
![]() |
e4989f10cd | ||
![]() |
9839a244bf | ||
![]() |
c9043ca158 | ||
![]() |
8d48dadb02 | ||
![]() |
c80be7bbf7 | ||
![]() |
17fa02b554 | ||
![]() |
e348bd3748 | ||
![]() |
f44d7cc94d | ||
![]() |
0bfbd114c9 | ||
![]() |
483a939724 | ||
![]() |
547b0057c7 | ||
![]() |
1da9745903 | ||
![]() |
e952a4a705 | ||
![]() |
69a4a6298e | ||
![]() |
d05c2f9563 | ||
![]() |
82f54a1974 | ||
![]() |
3f1140dcd4 | ||
![]() |
849c95d93e | ||
![]() |
eee50a3f75 | ||
![]() |
1d2820a3b6 | ||
![]() |
950e4766d2 | ||
![]() |
91698d388f | ||
![]() |
4cf57457a7 | ||
![]() |
337f1c5cc8 | ||
![]() |
a437523ea2 | ||
![]() |
77ec52e16b | ||
![]() |
b1365fb792 | ||
![]() |
d61f4e94fd | ||
![]() |
8bc3b15c46 | ||
![]() |
e8008c9a3a | ||
![]() |
28fa4b7993 | ||
![]() |
bd0acdbd4b | ||
![]() |
7f62518464 | ||
![]() |
0b4c5da294 | ||
![]() |
ad93042155 | ||
![]() |
a7dd451e44 | ||
![]() |
c99879e9f7 | ||
![]() |
820a676709 | ||
![]() |
27feb30bb9 | ||
![]() |
17bd9a6c71 | ||
![]() |
c172fb3c55 | ||
![]() |
0eb947b594 | ||
![]() |
a2746657a1 | ||
![]() |
493ce0516f | ||
![]() |
65d091d458 | ||
![]() |
8023be0067 | ||
![]() |
eaacd83de5 | ||
![]() |
6862cde560 | ||
![]() |
b8e6e584b8 | ||
![]() |
27d63f157c | ||
![]() |
7aede3390d | ||
![]() |
49f7be8486 | ||
![]() |
fbce24abef | ||
![]() |
7e50ce8142 | ||
![]() |
43dd1a7c41 | ||
![]() |
72d30dde8a | ||
![]() |
c18ca45188 | ||
![]() |
f50d847c4f | ||
![]() |
ab3cf7c994 | ||
![]() |
33932e8ad6 | ||
![]() |
ab10ce628c | ||
![]() |
edf2935f31 | ||
![]() |
a4583be44b | ||
![]() |
3c4d91a443 | ||
![]() |
774e2efce8 | ||
![]() |
272f82ec99 | ||
![]() |
e198eeb3b4 | ||
![]() |
bbfaa54ddf | ||
![]() |
4e66a56208 | ||
![]() |
9c629ad113 | ||
![]() |
b0d737d354 | ||
![]() |
a8df31b31a | ||
![]() |
6243944db6 | ||
![]() |
7f339d20ca | ||
![]() |
916ec6d30c | ||
![]() |
b4c04656ab | ||
![]() |
837e57ec2e | ||
![]() |
5f802fcfbd | ||
![]() |
f229afce1a | ||
![]() |
608f70b20a | ||
![]() |
74f76d125c | ||
![]() |
3853e276a6 | ||
![]() |
7aef2f09e9 | ||
![]() |
58d7c89f8e | ||
![]() |
d2d661276e | ||
![]() |
9de9de671e | ||
![]() |
ce6aac3a72 | ||
![]() |
23ce7d8169 | ||
![]() |
60a8073574 | ||
![]() |
2d07988994 | ||
![]() |
6a9eda8634 | ||
![]() |
4788c064bf | ||
![]() |
1743cf5275 | ||
![]() |
9aef8e4971 | ||
![]() |
38230d35e3 | ||
![]() |
af5eea690b | ||
![]() |
1c1db357f5 | ||
![]() |
409841c79c | ||
![]() |
4c3f6533a0 | ||
![]() |
e1bd6ffa2f | ||
![]() |
5b0e7c8c58 | ||
![]() |
8d85c1ae1e | ||
![]() |
80f2370d68 | ||
![]() |
16233d6031 | ||
![]() |
828f17897e | ||
![]() |
a79d852d1c | ||
![]() |
0306bec0ae | ||
![]() |
5c51530b8e | ||
![]() |
21dc0e21b3 | ||
![]() |
8d07d9cb3b | ||
![]() |
e7cc89a642 | ||
![]() |
2e8c7ce337 | ||
![]() |
110adcab2c | ||
![]() |
3d5f5902b8 | ||
![]() |
4cf41d18c2 | ||
![]() |
dbc3ad7fd2 | ||
![]() |
7990021431 | ||
![]() |
fa33947496 | ||
![]() |
4ff43eb270 | ||
![]() |
d66b7d2705 | ||
![]() |
025b7b2cdb | ||
![]() |
94914d4ca1 | ||
![]() |
3c53e72220 | ||
![]() |
871e26670c | ||
![]() |
da2f835bf7 | ||
![]() |
6a7fd4c8bd | ||
![]() |
f72b628b71 | ||
![]() |
3e877aca88 | ||
![]() |
360f21f9f8 | ||
![]() |
d981070ede | ||
![]() |
346f4be683 | ||
![]() |
1b6f1468ec | ||
![]() |
72e8641c8d | ||
![]() |
ac2e46f91e | ||
![]() |
4686a2a3e9 | ||
![]() |
543417c01f | ||
![]() |
41b79e44af | ||
![]() |
0ea92335de | ||
![]() |
2a93c41fcc | ||
![]() |
3033529d9f | ||
![]() |
198b620cb4 | ||
![]() |
d3233d65d5 | ||
![]() |
6ea518960a | ||
![]() |
673790465d | ||
![]() |
3afb656d1f | ||
![]() |
969614d555 | ||
![]() |
7866ee2f74 | ||
![]() |
66e41733e7 |
@ -11,5 +11,10 @@ docs/
|
||||
networks/
|
||||
scratch/
|
||||
|
||||
# Ignore build cache directories to avoid
|
||||
# errors when addings these to docker images
|
||||
build/.cache
|
||||
build/.golangci-lint
|
||||
|
||||
go.work
|
||||
go.work.sum
|
||||
|
3
.github/CODEOWNERS
vendored
Normal file
3
.github/CODEOWNERS
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
|
||||
# Global rule:
|
||||
* @rhuairahrighairidh @karzak @pirtleshell @drklee3 @nddeluca @DracoLi @evgeniy-scherbina @sesheffield @boodyvo @lbayas
|
57
.github/scripts/seed-internal-testnet.sh
vendored
57
.github/scripts/seed-internal-testnet.sh
vendored
@ -83,6 +83,31 @@ TETHER_USDT_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NA
|
||||
TETHER_USDT_CONTRACT_ADDRESS=${TETHER_USDT_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000
|
||||
|
||||
# deploy and fund axlBNB ERC20 contract
|
||||
AXL_BNB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBNB" axlBNB 18)
|
||||
AXL_BNB_CONTRACT_ADDRESS=${AXL_BNB_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund axlBUSD ERC20 contract
|
||||
AXL_BUSD_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBUSD" axlBUSD 18)
|
||||
AXL_BUSD_CONTRACT_ADDRESS=${AXL_BUSD_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund axlXRPB ERC20 contract
|
||||
AXL_XRPB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlXRPB" axlXRPB 18)
|
||||
AXL_XRPB_CONTRACT_ADDRESS=${AXL_XRPB_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund axlBTC ERC20 contract
|
||||
AXL_BTCB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBTCB" axlBTCB 18)
|
||||
AXL_BTCB_CONTRACT_ADDRESS=${AXL_BTCB_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
|
||||
|
||||
# deploy and fund native wBTC ERC20 contract
|
||||
WBTC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "wBTC" wBTC 8)
|
||||
WBTC_CONTRACT_ADDRESS=${WBTC_CONTRACT_DEPLOY: -42}
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 100000000000000000
|
||||
|
||||
# seed some evm wallets
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
|
||||
@ -91,6 +116,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
|
||||
# seed webapp E2E whale account
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 100000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
|
||||
@ -99,6 +129,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
|
||||
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$WBTC_CONTRACT_ADDRESS" 10000000000000
|
||||
|
||||
# give dev-wallet enough delegation power to pass proposals by itself
|
||||
|
||||
@ -138,7 +173,7 @@ PARAM_CHANGE_PROP_TEMPLATE=$(
|
||||
{
|
||||
"subspace": "evmutil",
|
||||
"key": "EnabledConversionPairs",
|
||||
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"}]"
|
||||
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"},{\"kava_erc20_address\":\"AXL_BNB_CONTRACT_ADDRESS\",\"denom\":\"bnb\"},{\"kava_erc20_address\":\"AXL_BUSD_CONTRACT_ADDRESS\",\"denom\":\"busd\"},{\"kava_erc20_address\":\"AXL_BTCB_CONTRACT_ADDRESS\",\"denom\":\"btcb\"},{\"kava_erc20_address\":\"AXL_XRPB_CONTRACT_ADDRESS\",\"denom\":\"xrpb\"},{\"kava_erc20_address\":\"WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/bitgo/wbtc\"}]"
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -155,6 +190,11 @@ finalProposal="${finalProposal/AXL_USDC_CONTRACT_ADDRESS/$AXL_USDC_CONTRACT_ADDR
|
||||
finalProposal="${finalProposal/AXL_WBTC_CONTRACT_ADDRESS/$AXL_WBTC_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/wETH_CONTRACT_ADDRESS/$wETH_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/TETHER_USDT_CONTRACT_ADDRESS/$TETHER_USDT_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_BNB_CONTRACT_ADDRESS/$AXL_BNB_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_BUSD_CONTRACT_ADDRESS/$AXL_BUSD_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_BTCB_CONTRACT_ADDRESS/$AXL_BTCB_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/AXL_XRPB_CONTRACT_ADDRESS/$AXL_XRPB_CONTRACT_ADDRESS}"
|
||||
finalProposal="${finalProposal/WBTC_CONTRACT_ADDRESS/$WBTC_CONTRACT_ADDRESS}"
|
||||
|
||||
# create unique proposal filename
|
||||
proposalFileName="$(date +%s)-proposal.json"
|
||||
@ -185,6 +225,21 @@ sleep $AVG_SECONDS_BETWEEN_BLOCKS
|
||||
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
|
||||
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"
|
||||
|
||||
# submit a kava token committee proposal
|
||||
COMMITTEE_PROP_TEMPLATE=$(
|
||||
cat <<'END_HEREDOC'
|
||||
{
|
||||
"@type": "/cosmos.gov.v1beta1.TextProposal",
|
||||
"title": "The next big thing signaling proposal.",
|
||||
"description": "The purpose of this proposal is to signal support/opposition to the next big thing"
|
||||
}
|
||||
END_HEREDOC
|
||||
)
|
||||
committeeProposalFileName="$(date +%s)-committee-proposal.json"
|
||||
echo "$COMMITTEE_PROP_TEMPLATE" >$committeeProposalFileName
|
||||
tokenCommitteeId=4
|
||||
kava tx committee submit-proposal "$tokenCommitteeId" "$committeeProposalFileName" --gas auto --gas-adjustment 1.5 --gas-prices 0.01ukava --from god -y
|
||||
|
||||
# if adding more cosmos coins -> er20s, ensure that the deployment order below remains the same.
|
||||
# convert 1 HARD to an erc20. doing this ensures the contract is deployed.
|
||||
kava tx evmutil convert-cosmos-coin-to-erc20 \
|
||||
|
54
.github/workflows/cd-internal-testnet-manual.yml
vendored
54
.github/workflows/cd-internal-testnet-manual.yml
vendored
@ -1,54 +0,0 @@
|
||||
name: Manual Deployment (Internal Testnet)
|
||||
# allow to be triggered manually
|
||||
on: workflow_dispatch
|
||||
|
||||
jobs:
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
# take ebs + zfs snapshots
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
reset-chain-to-zero-state:
|
||||
uses: ./.github/workflows/cd-reset-internal-testnet.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: kava_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: reset-internal-testnet-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
|
||||
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
|
||||
start-chain-api:
|
||||
uses: ./.github/workflows/cd-start-chain.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: kava_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: start-chain-api-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
needs: [reset-chain-to-zero-state]
|
||||
|
||||
# setup test and development accounts and balances, deploy contracts by calling the chain's api
|
||||
seed-chain-state:
|
||||
uses: ./.github/workflows/cd-seed-chain.yml
|
||||
with:
|
||||
chain-api-url: https://rpc.app.internal.testnet.us-east.production.kava.io:443
|
||||
chain-id: kava_2221-17000
|
||||
seed-script-filename: seed-internal-testnet.sh
|
||||
erc20-deployer-network-name: internal_testnet
|
||||
genesis_validator_addresses: "kavavaloper1xcgtffvv2yeqmgs3yz4gv29kgjrj8usxrnrlwp kavavaloper1w66m9hdzwgd6uc8g93zqkcumgwzrpcw958sh3s"
|
||||
kava_version_filepath: ./ci/env/kava-internal-testnet/KAVA.VERSION
|
||||
secrets: inherit
|
||||
needs: [start-chain-api]
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.deploys.testnet.internal
|
||||
namespace: Kava/ContinuousDeployment
|
||||
secrets: inherit
|
||||
needs: [seed-chain-state]
|
79
.github/workflows/cd-internal-testnet.yml
vendored
79
.github/workflows/cd-internal-testnet.yml
vendored
@ -1,79 +0,0 @@
|
||||
name: Continuous Deployment (Internal Testnet)
|
||||
# run after every successful CI job of new commits to the master branch
|
||||
# if deploy version or config has changed
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [Continuous Integration (Kava Master)]
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
changed_files:
|
||||
runs-on: ubuntu-latest
|
||||
# define output for first job forwarding output of changedInternalTestnetConfig job
|
||||
outputs:
|
||||
changedInternalTestnetConfig: ${{ steps.changed-internal-testnet-config.outputs.any_changed }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # OR "2" -> To retrieve the preceding commit.
|
||||
- name: Get all changed internal testnet files
|
||||
id: changed-internal-testnet-config
|
||||
uses: tj-actions/changed-files@v42
|
||||
with:
|
||||
# Avoid using single or double quotes for multiline patterns
|
||||
files: |
|
||||
ci/env/kava-internal-testnet/**
|
||||
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
# take ebs + zfs snapshots
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
reset-chain-to-zero-state:
|
||||
needs: [changed_files]
|
||||
# only start cd pipeline if last ci run was successful
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.changed_files.outputs.changedInternalTestnetConfig == 'true' }}
|
||||
uses: ./.github/workflows/cd-reset-internal-testnet.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: kava_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: reset-internal-testnet-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
|
||||
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
|
||||
start-chain-api:
|
||||
uses: ./.github/workflows/cd-start-chain.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: kava_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: start-chain-api-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
needs: [reset-chain-to-zero-state]
|
||||
|
||||
# setup test and development accounts and balances, deploy contracts by calling the chain's api
|
||||
seed-chain-state:
|
||||
uses: ./.github/workflows/cd-seed-chain.yml
|
||||
with:
|
||||
chain-api-url: https://rpc.app.internal.testnet.us-east.production.kava.io:443
|
||||
chain-id: kava_2221-17000
|
||||
seed-script-filename: seed-internal-testnet.sh
|
||||
erc20-deployer-network-name: internal_testnet
|
||||
genesis_validator_addresses: "kavavaloper1xcgtffvv2yeqmgs3yz4gv29kgjrj8usxrnrlwp kavavaloper1w66m9hdzwgd6uc8g93zqkcumgwzrpcw958sh3s"
|
||||
kava_version_filepath: ./ci/env/kava-internal-testnet/KAVA.VERSION
|
||||
secrets: inherit
|
||||
needs: [start-chain-api]
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.deploys.testnet.internal
|
||||
namespace: Kava/ContinuousDeployment
|
||||
secrets: inherit
|
||||
needs: [seed-chain-state]
|
54
.github/workflows/cd-protonet-manual.yml
vendored
54
.github/workflows/cd-protonet-manual.yml
vendored
@ -1,54 +0,0 @@
|
||||
name: Manual Deployment (Protonet)
|
||||
# allow to be triggered manually
|
||||
on: workflow_dispatch
|
||||
|
||||
jobs:
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
# take ebs + zfs snapshots
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
reset-chain-to-zero-state:
|
||||
uses: ./.github/workflows/cd-reset-internal-testnet.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: proto_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: reset-protonet-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
|
||||
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
|
||||
start-chain-api:
|
||||
uses: ./.github/workflows/cd-start-chain.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: proto_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: start-chain-api-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
needs: [reset-chain-to-zero-state]
|
||||
|
||||
# setup test and development accounts and balances, deploy contracts by calling the chain's api
|
||||
seed-chain-state:
|
||||
uses: ./.github/workflows/cd-seed-chain.yml
|
||||
with:
|
||||
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
|
||||
chain-id: proto_2221-17000
|
||||
seed-script-filename: seed-protonet.sh
|
||||
erc20-deployer-network-name: protonet
|
||||
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
|
||||
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
|
||||
secrets: inherit
|
||||
needs: [start-chain-api]
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.deploys.testnet.proto
|
||||
namespace: Kava/ContinuousDeployment
|
||||
secrets: inherit
|
||||
needs: [seed-chain-state]
|
60
.github/workflows/cd-protonet.yml
vendored
60
.github/workflows/cd-protonet.yml
vendored
@ -1,60 +0,0 @@
|
||||
name: Continuous Deployment (Protonet)
|
||||
# run after every successful CI job of new commits to the master branch
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: [Continuous Integration (Kava Master)]
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
# take ebs + zfs snapshots
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
reset-chain-to-zero-state:
|
||||
# only start cd pipeline if last ci run was successful
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
uses: ./.github/workflows/cd-reset-internal-testnet.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: proto_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: reset-protonet-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
|
||||
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
|
||||
start-chain-api:
|
||||
uses: ./.github/workflows/cd-start-chain.yml
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
chain-id: proto_2221-17000
|
||||
ssm-document-name: kava-testnet-internal-node-update
|
||||
playbook-name: start-chain-api-playbook.yml
|
||||
playbook-infrastructure-branch: master
|
||||
secrets: inherit
|
||||
needs: [reset-chain-to-zero-state]
|
||||
|
||||
# setup test and development accounts and balances, deploy contracts by calling the chain's api
|
||||
seed-chain-state:
|
||||
uses: ./.github/workflows/cd-seed-chain.yml
|
||||
with:
|
||||
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
|
||||
chain-id: proto_2221-17000
|
||||
seed-script-filename: seed-protonet.sh
|
||||
erc20-deployer-network-name: protonet
|
||||
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
|
||||
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
|
||||
secrets: inherit
|
||||
needs: [start-chain-api]
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.deploys.testnet.proto
|
||||
namespace: Kava/ContinuousDeployment
|
||||
secrets: inherit
|
||||
needs: [seed-chain-state]
|
80
.github/workflows/cd-reset-internal-testnet.yml
vendored
80
.github/workflows/cd-reset-internal-testnet.yml
vendored
@ -1,80 +0,0 @@
|
||||
name: Reset Internal Testnet
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
chain-id:
|
||||
required: true
|
||||
type: string
|
||||
aws-region:
|
||||
required: true
|
||||
type: string
|
||||
ssm-document-name:
|
||||
required: true
|
||||
type: string
|
||||
playbook-name:
|
||||
required: true
|
||||
type: string
|
||||
playbook-infrastructure-branch:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
CI_AWS_KEY_ID:
|
||||
required: true
|
||||
CI_AWS_KEY_SECRET:
|
||||
required: true
|
||||
KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
|
||||
required: true
|
||||
|
||||
# in order:
|
||||
# enter standby (prevents autoscaling group from killing node during deploy)
|
||||
# stop kava
|
||||
# download updated binary and genesis
|
||||
# reset application database state (only done on internal testnet)
|
||||
jobs:
|
||||
place-chain-nodes-on-standby:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: take the chain offline
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
|
||||
env:
|
||||
CHAIN_ID: ${{ inputs.chain-id }}
|
||||
AWS_REGION: ${{ inputs.aws-region }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
|
||||
- name: checkout infrastructure repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: Kava-Labs/infrastructure
|
||||
token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
|
||||
path: infrastructure
|
||||
ref: master
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: build kava node updater
|
||||
run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
|
||||
- name: run reset playbook on all chain nodes
|
||||
run: |
|
||||
kava-node-updater \
|
||||
--debug \
|
||||
--max-retries=2 \
|
||||
--aws-ssm-document-name=$SSM_DOCUMENT_NAME \
|
||||
--infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
|
||||
--update-playbook-filename=$PLAYBOOK_NAME \
|
||||
--chain-id=$CHAIN_ID \
|
||||
--max-upgrade-batch-size=0 \
|
||||
--node-states=Standby \
|
||||
--wait-for-node-sync-after-upgrade=false
|
||||
env:
|
||||
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
|
||||
PLAYBOOK_NAME: ${{ inputs.playbook-name }}
|
||||
CHAIN_ID: ${{ inputs.chain-id }}
|
||||
AWS_REGION: ${{ inputs.aws-region }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
|
||||
AWS_SDK_LOAD_CONFIG: 1
|
||||
PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}
|
94
.github/workflows/cd-seed-chain.yml
vendored
94
.github/workflows/cd-seed-chain.yml
vendored
@ -1,94 +0,0 @@
|
||||
name: Seed Chain
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
chain-api-url:
|
||||
required: true
|
||||
type: string
|
||||
chain-id:
|
||||
required: true
|
||||
type: string
|
||||
seed-script-filename:
|
||||
required: true
|
||||
type: string
|
||||
erc20-deployer-network-name:
|
||||
required: true
|
||||
type: string
|
||||
genesis_validator_addresses:
|
||||
required: true
|
||||
type: string
|
||||
kava_version_filepath:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
DEV_WALLET_MNEMONIC:
|
||||
required: true
|
||||
KAVA_TESTNET_GOD_MNEMONIC:
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
seed-chain-state:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from master
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: master
|
||||
- name: checkout version of kava used by network
|
||||
run: |
|
||||
git pull -p
|
||||
git checkout $(cat ${KAVA_VERSION_FILEPATH})
|
||||
env:
|
||||
KAVA_VERSION_FILEPATH: ${{ inputs.kava_version_filepath }}
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: build kava binary
|
||||
run: make install
|
||||
- name: checkout go evm tools repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ethereum/go-ethereum
|
||||
path: go-ethereum
|
||||
ref: v1.10.26
|
||||
- name: install go evm tools
|
||||
run: |
|
||||
make
|
||||
make devtools
|
||||
working-directory: go-ethereum
|
||||
- name: checkout kava bridge repo for deploying evm contracts
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: Kava-Labs/kava-bridge
|
||||
path: kava-bridge
|
||||
ref: main
|
||||
- name: install nodeJS
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
cache: npm
|
||||
node-version: 18
|
||||
cache-dependency-path: kava-bridge/contract/package.json
|
||||
- name: "install ERC20 contract deployment dependencies"
|
||||
run: "npm install"
|
||||
working-directory: kava-bridge/contract
|
||||
- name: compile default erc20 contracts
|
||||
run: make compile-contracts
|
||||
working-directory: kava-bridge
|
||||
- name: download seed script from current commit
|
||||
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/${GITHUB_SHA}/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
|
||||
working-directory: kava-bridge/contract
|
||||
env:
|
||||
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
|
||||
- name: run seed scripts
|
||||
run: bash ./${SEED_SCRIPT_FILENAME}
|
||||
working-directory: kava-bridge/contract
|
||||
env:
|
||||
CHAIN_API_URL: ${{ inputs.chain-api-url }}
|
||||
CHAIN_ID: ${{ inputs.chain-id }}
|
||||
DEV_WALLET_MNEMONIC: ${{ secrets.DEV_WALLET_MNEMONIC }}
|
||||
KAVA_TESTNET_GOD_MNEMONIC: ${{ secrets.KAVA_TESTNET_GOD_MNEMONIC }}
|
||||
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}
|
||||
ERC20_DEPLOYER_NETWORK_NAME: ${{ inputs.erc20-deployer-network-name }}
|
||||
GENESIS_VALIDATOR_ADDRESSES: ${{ inputs.genesis_validator_addresses }}
|
78
.github/workflows/cd-start-chain.yml
vendored
78
.github/workflows/cd-start-chain.yml
vendored
@ -1,78 +0,0 @@
|
||||
name: Start Chain
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
chain-id:
|
||||
required: true
|
||||
type: string
|
||||
aws-region:
|
||||
required: true
|
||||
type: string
|
||||
ssm-document-name:
|
||||
required: true
|
||||
type: string
|
||||
playbook-name:
|
||||
required: true
|
||||
type: string
|
||||
playbook-infrastructure-branch:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
CI_AWS_KEY_ID:
|
||||
required: true
|
||||
CI_AWS_KEY_SECRET:
|
||||
required: true
|
||||
KAVA_PRIVATE_GITHUB_ACCESS_TOKEN:
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
# start kava, allow nodes to start processing requests from users once they are synced to live
|
||||
serve-traffic:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: take the chain offline
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/put-all-chain-nodes-on-standby.sh
|
||||
env:
|
||||
CHAIN_ID: ${{ inputs.chain-id }}
|
||||
AWS_REGION: ${{ inputs.aws-region }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
|
||||
- name: checkout infrastructure repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: Kava-Labs/infrastructure
|
||||
token: ${{ secrets.KAVA_PRIVATE_GITHUB_ACCESS_TOKEN }}
|
||||
path: infrastructure
|
||||
ref: master
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: build kava node updater
|
||||
run: cd infrastructure/cli/kava-node-updater && make install && cd ../../../
|
||||
- name: run start-chain playbook on all chain nodes
|
||||
run: |
|
||||
kava-node-updater \
|
||||
--debug \
|
||||
--max-retries=2 \
|
||||
--aws-ssm-document-name=$SSM_DOCUMENT_NAME \
|
||||
--infrastructure-git-pointer=$PLAYBOOK_INFRASTRUCTURE_BRANCH \
|
||||
--update-playbook-filename=$PLAYBOOK_NAME \
|
||||
--chain-id=$CHAIN_ID \
|
||||
--max-upgrade-batch-size=0 \
|
||||
--node-states=Standby \
|
||||
--wait-for-node-sync-after-upgrade=true
|
||||
env:
|
||||
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}
|
||||
PLAYBOOK_NAME: ${{ inputs.playbook-name }}
|
||||
CHAIN_ID: ${{ inputs.chain-id }}
|
||||
AWS_REGION: ${{ inputs.aws-region }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
|
||||
AWS_SDK_LOAD_CONFIG: 1
|
||||
PLAYBOOK_INFRASTRUCTURE_BRANCH: ${{ inputs.playbook-infrastructure-branch }}
|
||||
- name: bring the chain online
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/exit-standby-all-chain-nodes.sh
|
7
.github/workflows/ci-commit.yml
vendored
7
.github/workflows/ci-commit.yml
vendored
@ -1,7 +0,0 @@
|
||||
name: Continuous Integration (Commit)
|
||||
on:
|
||||
push:
|
||||
# run per commit ci checks against this commit
|
||||
jobs:
|
||||
lint:
|
||||
uses: ./.github/workflows/ci-lint.yml
|
79
.github/workflows/ci-default.yml
vendored
79
.github/workflows/ci-default.yml
vendored
@ -1,79 +0,0 @@
|
||||
name: Continuous Integration (Default Checks)
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
tests/e2e/kvtool/go.sum
|
||||
- name: build application
|
||||
run: make build
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
tests/e2e/kvtool/go.sum
|
||||
- name: run unit tests
|
||||
run: make test
|
||||
- name: run e2e tests
|
||||
run: make docker-build test-e2e
|
||||
validate-internal-testnet-genesis:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: save version of kava that will be deployed if this pr is merged
|
||||
id: kava-version
|
||||
run: |
|
||||
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
|
||||
- name: checkout repo from master
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: master
|
||||
- name: checkout version of kava that will be deployed if this pr is merged
|
||||
run: |
|
||||
git pull -p
|
||||
git checkout $KAVA_VERSION
|
||||
env:
|
||||
KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: build kava cli
|
||||
run: make install
|
||||
- name: checkout repo from current commit to validate current branch's genesis
|
||||
uses: actions/checkout@v4
|
||||
- name: validate testnet genesis
|
||||
run: kava validate-genesis ci/env/kava-internal-testnet/genesis.json
|
||||
validate-protonet-genesis:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: build kava cli
|
||||
run: make install
|
||||
- name: validate protonet genesis
|
||||
run: kava validate-genesis ci/env/kava-protonet/genesis.json
|
102
.github/workflows/ci-docker.yml
vendored
102
.github/workflows/ci-docker.yml
vendored
@ -1,102 +0,0 @@
|
||||
name: Build & Publish Docker Images
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
dockerhub-username:
|
||||
required: true
|
||||
type: string
|
||||
# this workflow publishes a rocksdb & goleveldb docker images with these tags:
|
||||
# - <commit-hash>-goleveldb
|
||||
# - <extra-image-tag>-goleveldb
|
||||
# - <commit-hash>-rocksdb
|
||||
# - <extra-image-tag>-rocksdb
|
||||
extra-image-tag:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
CI_DOCKERHUB_TOKEN:
|
||||
required: true
|
||||
|
||||
# runs in ci-master after successful checks
|
||||
# you can use images built by this action in future jobs.
|
||||
# https://docs.docker.com/build/ci/github-actions/examples/#share-built-image-between-jobs
|
||||
jobs:
|
||||
docker-goleveldb:
|
||||
# https://github.com/marketplace/actions/build-and-push-docker-images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# ensure working with latest code
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# generate a git commit hash to be used as image tag
|
||||
- name: Generate short hash
|
||||
id: commit-hash
|
||||
run: echo "short=$( git rev-parse --short $GITHUB_SHA )" >> $GITHUB_OUTPUT
|
||||
|
||||
# qemu is used to emulate different platform architectures
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
# cross-platform build of the image
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
# authenticate for publish to docker hub
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ inputs.dockerhub-username }}
|
||||
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
|
||||
|
||||
# publish to docker hub, tag with short git hash
|
||||
- name: Build and push (goleveldb)
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: kava/kava:${{ steps.commit-hash.outputs.short }}-goleveldb,kava/kava:${{ inputs.extra-image-tag }}-goleveldb
|
||||
|
||||
docker-rocksdb:
|
||||
# https://github.com/marketplace/actions/build-and-push-docker-images
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# ensure working with latest code
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# generate a git commit hash to be used as image tag
|
||||
- name: Generate short hash
|
||||
id: commit-hash
|
||||
run: echo "short=$( git rev-parse --short $GITHUB_SHA )" >> $GITHUB_OUTPUT
|
||||
|
||||
# qemu is used to emulate different platform architectures
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
# cross-platform build of the image
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
# authenticate for publish to docker hub
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ inputs.dockerhub-username }}
|
||||
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
|
||||
|
||||
# publish to docker hub, tag with short git hash
|
||||
- name: Build and push (rocksdb)
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile-rocksdb
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: kava/kava:${{ steps.commit-hash.outputs.short }}-rocksdb,kava/kava:${{ inputs.extra-image-tag }}-rocksdb
|
17
.github/workflows/ci-lint.yml
vendored
17
.github/workflows/ci-lint.yml
vendored
@ -1,17 +0,0 @@
|
||||
name: Lint Checks
|
||||
on:
|
||||
workflow_call:
|
||||
# run per commit ci checks against this commit
|
||||
jobs:
|
||||
proto-lint:
|
||||
uses: ./.github/workflows/proto.yml
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: golangci-lint
|
||||
uses: reviewdog/action-golangci-lint@v2
|
||||
with:
|
||||
github_token: ${{ secrets.github_token }}
|
||||
reporter: github-pr-review
|
||||
golangci_lint_flags: --timeout 10m
|
56
.github/workflows/ci-master.yml
vendored
56
.github/workflows/ci-master.yml
vendored
@ -1,56 +0,0 @@
|
||||
name: Continuous Integration (Kava Master)
|
||||
on:
|
||||
push:
|
||||
# run CI on any push to the master branch
|
||||
branches:
|
||||
- master
|
||||
jobs:
|
||||
# run per commit ci checks against master branch
|
||||
lint-checks:
|
||||
uses: ./.github/workflows/ci-lint.yml
|
||||
# run default ci checks against master branch
|
||||
default-checks:
|
||||
uses: ./.github/workflows/ci-default.yml
|
||||
# build and upload versions of kava for use on internal infrastructure
|
||||
# configurations for databases, cpu architectures and operating systems
|
||||
publish-internal:
|
||||
# only run if all checks pass
|
||||
needs: [lint-checks, default-checks]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: set build tag
|
||||
run: echo "BUILD_TAG=$(date +%s)-$(git rev-parse HEAD | cut -c 1-8)" >> $GITHUB_ENV
|
||||
- name: build rocksdb dependency
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
|
||||
env:
|
||||
ROCKSDB_VERSION: v8.10.0
|
||||
- name: Build and upload release artifacts
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/publish-internal-release-artifacts.sh
|
||||
env:
|
||||
BUILD_TAG: ${{ env.BUILD_TAG }}
|
||||
AWS_REGION: us-east-1
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_KEY_SECRET }}
|
||||
docker:
|
||||
# only run if all checks pass
|
||||
needs: [lint-checks, default-checks]
|
||||
uses: ./.github/workflows/ci-docker.yml
|
||||
with:
|
||||
dockerhub-username: kavaops
|
||||
extra-image-tag: master
|
||||
secrets: inherit
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.releases.merge
|
||||
namespace: Kava/ContinuousIntegration
|
||||
secrets: inherit
|
||||
needs: [publish-internal]
|
23
.github/workflows/ci-pr.yml
vendored
23
.github/workflows/ci-pr.yml
vendored
@ -1,23 +0,0 @@
|
||||
name: Continuous Integration (PR)
|
||||
on:
|
||||
pull_request:
|
||||
# run CI on pull requests to master or a release branch
|
||||
branches:
|
||||
- master
|
||||
- 'release/**'
|
||||
- 'releases/**'
|
||||
# run default ci checks against current PR
|
||||
jobs:
|
||||
default:
|
||||
uses: ./.github/workflows/ci-default.yml
|
||||
rocksdb:
|
||||
uses: ./.github/workflows/ci-rocksdb-build.yml
|
||||
post-pipeline-metrics:
|
||||
uses: ./.github/workflows/metric-pipeline.yml
|
||||
if: always() # always run so we metric failures and successes
|
||||
with:
|
||||
aws-region: us-east-1
|
||||
metric-name: kava.releases.pr
|
||||
namespace: Kava/ContinuousIntegration
|
||||
secrets: inherit
|
||||
needs: [default]
|
35
.github/workflows/ci-release.yml
vendored
35
.github/workflows/ci-release.yml
vendored
@ -1,35 +0,0 @@
|
||||
name: Continuous Integration (Release)
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+*"
|
||||
jobs:
|
||||
# run per commit ci checks against released version
|
||||
lint-checks:
|
||||
uses: ./.github/workflows/ci-lint.yml
|
||||
# run default ci checks against released version
|
||||
default-checks:
|
||||
uses: ./.github/workflows/ci-default.yml
|
||||
|
||||
# get the version tag that triggered this workflow
|
||||
get-version-tag:
|
||||
# prep version release only if all checks pass
|
||||
needs: [lint-checks, default-checks]
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
git-tag: ${{ steps.git-tag.outputs.tag }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- id: git-tag
|
||||
run: echo "tag=$(git describe --always --tags --match='v*')" >> $GITHUB_OUTPUT
|
||||
|
||||
# build and upload versions of kava for use on internal infrastructure
|
||||
# configurations for databases, cpu architectures and operating systems
|
||||
docker:
|
||||
# only run if all checks pass
|
||||
needs: get-version-tag
|
||||
uses: ./.github/workflows/ci-docker.yml
|
||||
with:
|
||||
dockerhub-username: kavaops
|
||||
extra-image-tag: ${{ needs.get-version-tag.outputs.git-tag }}
|
||||
secrets: inherit
|
43
.github/workflows/ci-rocksdb-build.yml
vendored
43
.github/workflows/ci-rocksdb-build.yml
vendored
@ -1,43 +0,0 @@
|
||||
name: Continuous Integration (Rocksdb Build)
|
||||
|
||||
env:
|
||||
ROCKSDB_VERSION: v8.10.0
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: build rocksdb dependency
|
||||
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
|
||||
- name: build application
|
||||
run: make build COSMOS_BUILD_OPTIONS=rocksdb
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: install RocksDB dependencies
|
||||
run: sudo apt-get update
|
||||
&& sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
|
||||
- name: install RocksDB as shared library
|
||||
run: git clone https://github.com/facebook/rocksdb.git
|
||||
&& cd rocksdb
|
||||
&& git checkout $ROCKSDB_VERSION
|
||||
&& sudo make -j$(nproc) install-shared
|
||||
&& sudo ldconfig
|
||||
- name: checkout repo from current commit
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- name: run unit tests
|
||||
run: make test-rocksdb
|
45
.github/workflows/metric-pipeline.yml
vendored
45
.github/workflows/metric-pipeline.yml
vendored
@ -1,45 +0,0 @@
|
||||
name: Metric Pipeline
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
aws-region:
|
||||
required: true
|
||||
type: string
|
||||
metric-name:
|
||||
required: true
|
||||
type: string
|
||||
namespace:
|
||||
required: true
|
||||
type: string
|
||||
secrets:
|
||||
CI_AWS_KEY_ID:
|
||||
required: true
|
||||
CI_AWS_KEY_SECRET:
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
metric-pipeline-result:
|
||||
runs-on: ubuntu-latest
|
||||
if: always() # always run to capture workflow success or failure
|
||||
steps:
|
||||
# Make sure the secrets are stored in you repo settings
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v1
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.CI_AWS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.CI_AWS_KEY_SECRET }}
|
||||
aws-region: ${{ inputs.aws-region }}
|
||||
- name: Calculate Pipleline Success
|
||||
# run this action to get the workflow conclusion
|
||||
# You can get the conclusion via env (env.WORKFLOW_CONCLUSION)
|
||||
# values: neutral, success, skipped, cancelled, timed_out,
|
||||
# action_required, failure
|
||||
uses: technote-space/workflow-conclusion-action@v3
|
||||
- name: Metric Pipleline Success
|
||||
# replace TAG by the latest tag in the repository
|
||||
uses: ros-tooling/action-cloudwatch-metrics@0.0.5
|
||||
with:
|
||||
metric-value: ${{ env.WORKFLOW_CONCLUSION == 'success' }}
|
||||
metric-name: ${{ inputs.metric-name }}
|
||||
namespace: ${{ inputs.namespace }}
|
26
.github/workflows/proto.yml
vendored
26
.github/workflows/proto.yml
vendored
@ -1,26 +0,0 @@
|
||||
name: Protobuf Checks
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
check-proto:
|
||||
name: "Check Proto"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
- run: go mod download
|
||||
- run: make install-build-deps
|
||||
- run: make check-proto-deps
|
||||
- run: make check-proto-lint
|
||||
- run: make check-proto-format
|
||||
- run: make check-proto-breaking-remote
|
||||
- run: BUF_CHECK_BREAKING_AGAINST_REMOTE="branch=$GITHUB_BASE_REF" make check-proto-breaking-remote
|
||||
if: github.event_name == 'pull_request'
|
||||
- run: make check-proto-gen
|
||||
- run: make check-proto-gen-doc
|
||||
- run: make check-proto-gen-swagger
|
8
.gitignore
vendored
8
.gitignore
vendored
@ -31,6 +31,9 @@ out
|
||||
# Ignore build cache dir
|
||||
build/.cache
|
||||
|
||||
# Ignore make lint cache
|
||||
build/.golangci-lint
|
||||
|
||||
# Ignore installed binaires
|
||||
build/bin
|
||||
|
||||
@ -46,3 +49,8 @@ go.work.sum
|
||||
|
||||
# runtime
|
||||
run
|
||||
|
||||
# contracts
|
||||
precompiles/interfaces/build
|
||||
precompiles/interfaces/node_modules
|
||||
precompiles/interfaces/abis
|
1
.golangci-version
Normal file
1
.golangci-version
Normal file
@ -0,0 +1 @@
|
||||
v1.59
|
130
.golangci.yml
Normal file
130
.golangci.yml
Normal file
@ -0,0 +1,130 @@
|
||||
run:
|
||||
timeout: 20m # set maximum time allowed for the linter to run. If the linting process exceeds this duration, it will be terminated
|
||||
modules-download-mode: readonly # Ensures that modules are not modified during the linting process
|
||||
allow-parallel-runners: true # enables parallel execution of linters to speed up linting process
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- asasalint
|
||||
- asciicheck
|
||||
- bidichk
|
||||
- bodyclose
|
||||
- containedctx
|
||||
- contextcheck
|
||||
- decorder
|
||||
- dogsled
|
||||
# - dupl
|
||||
# - dupword
|
||||
- durationcheck
|
||||
- errcheck
|
||||
- errchkjson
|
||||
- errname
|
||||
- errorlint
|
||||
# - exhaustive
|
||||
- exportloopref
|
||||
- funlen
|
||||
- gci
|
||||
- ginkgolinter
|
||||
- gocheckcompilerdirectives
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
- goconst
|
||||
- gocritic
|
||||
- godox
|
||||
- gofmt
|
||||
# - gofumpt
|
||||
- goheader
|
||||
- goimports
|
||||
- mnd
|
||||
# - gomodguard
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- grouper
|
||||
- importas
|
||||
- ineffassign
|
||||
# - interfacebloat
|
||||
- lll
|
||||
- loggercheck
|
||||
- makezero
|
||||
- mirror
|
||||
- misspell
|
||||
- musttag
|
||||
# - nakedret
|
||||
# - nestif
|
||||
- nilerr
|
||||
# - nilnil
|
||||
# - noctx
|
||||
- nolintlint
|
||||
# - nonamedreturns
|
||||
- nosprintfhostport
|
||||
- prealloc
|
||||
- predeclared
|
||||
- promlinter
|
||||
# - reassign
|
||||
- revive
|
||||
- rowserrcheck
|
||||
- staticcheck
|
||||
# - stylecheck
|
||||
- tagalign
|
||||
# - testpackage
|
||||
# - thelper
|
||||
# - tparallel
|
||||
- typecheck
|
||||
# - unconvert
|
||||
- unparam
|
||||
- unused
|
||||
# - usestdlibvars
|
||||
- wastedassign
|
||||
# - whitespace
|
||||
- wrapcheck
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
# Disable funlen for "func Test..." or func (suite *Suite) Test..." type functions
|
||||
# These functions tend to be descriptive and exceed length limits.
|
||||
- source: "^func (\\(.*\\) )?Test"
|
||||
linters:
|
||||
- funlen
|
||||
|
||||
linters-settings:
|
||||
errcheck:
|
||||
check-blank: true # check for assignments to the blank identifier '_' when errors are returned
|
||||
check-type-assertions: false # check type assertion
|
||||
errorlint:
|
||||
check-generated: false # disabled linting of generated files
|
||||
default-signifies-exhaustive: false # exhaustive handling of error types
|
||||
exhaustive:
|
||||
default-signifies-exhaustive: false # exhaustive handling of error types
|
||||
gci:
|
||||
sections: # defines the order of import sections
|
||||
- standard
|
||||
- default
|
||||
- localmodule
|
||||
goconst:
|
||||
min-len: 3 # min length for string constants to be checked
|
||||
min-occurrences: 3 # min occurrences of the same constant before it's flagged
|
||||
godox:
|
||||
keywords: # specific keywords to flag for further action
|
||||
- BUG
|
||||
- FIXME
|
||||
- HACK
|
||||
gosec:
|
||||
exclude-generated: true
|
||||
lll:
|
||||
line-length: 120
|
||||
misspell:
|
||||
locale: US
|
||||
ignore-words: expect
|
||||
nolintlint:
|
||||
allow-leading-space: false
|
||||
require-explanation: true
|
||||
require-specific: true
|
||||
prealloc:
|
||||
simple: true # enables simple preallocation checks
|
||||
range-loops: true # enabled preallocation checks in range loops
|
||||
for-loops: false # disables preallocation checks in for loops
|
||||
unparam:
|
||||
check-exported: true # checks exported functions and methods for unused params
|
16
.mockery.yaml
Normal file
16
.mockery.yaml
Normal file
@ -0,0 +1,16 @@
|
||||
# Generate EXPECT() methods, type-safe methods to generate call expectations
|
||||
with-expecter: true
|
||||
|
||||
# Generate mocks in adjacent mocks directory to the interfaces
|
||||
dir: "{{.InterfaceDir}}/mocks"
|
||||
mockname: "Mock{{.InterfaceName}}"
|
||||
outpkg: "mocks"
|
||||
filename: "Mock{{.InterfaceName}}.go"
|
||||
|
||||
packages:
|
||||
github.com/0glabs/0g-chain/x/precisebank/types:
|
||||
# package-specific config
|
||||
config:
|
||||
interfaces:
|
||||
AccountKeeper:
|
||||
BankKeeper:
|
@ -1,2 +1,2 @@
|
||||
golang 1.21
|
||||
nodejs 18.16.0
|
||||
golang 1.21.9
|
||||
nodejs 20.16.0
|
||||
|
19
CHANGELOG.md
19
CHANGELOG.md
@ -38,6 +38,16 @@ Ref: https://keepachangelog.com/en/1.0.0/
|
||||
|
||||
## [v0.26.0]
|
||||
|
||||
### Features
|
||||
- (precisebank) [#1906] Add new `x/precisebank` module with bank decimal extension for EVM usage.
|
||||
- (cli) [#1922] Add `iavlviewer` CLI command for low-level iavl db debugging.
|
||||
|
||||
### Improvements
|
||||
- (rocksdb) [#1903] Bump cometbft-db dependency for use with rocksdb v8.10.0
|
||||
- (deps) [#1988] Bump cometbft to v0.37.9-kava.1
|
||||
|
||||
## [v0.26.0]
|
||||
|
||||
### Features
|
||||
|
||||
- (cli) [#1785] Add `shard` CLI command to support creating partitions of data for standalone nodes
|
||||
@ -330,6 +340,10 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
|
||||
- [#257](https://github.com/Kava-Labs/kava/pulls/257) Include scripts to run
|
||||
large-scale simulations remotely using aws-batch
|
||||
|
||||
[#1988]: https://github.com/Kava-Labs/kava/pull/1988
|
||||
[#1922]: https://github.com/Kava-Labs/kava/pull/1922
|
||||
[#1906]: https://github.com/Kava-Labs/kava/pull/1906
|
||||
[#1903]: https://github.com/Kava-Labs/kava/pull/1903
|
||||
[#1846]: https://github.com/Kava-Labs/kava/pull/1846
|
||||
[#1848]: https://github.com/Kava-Labs/kava/pull/1848
|
||||
[#1839]: https://github.com/Kava-Labs/kava/pull/1839
|
||||
@ -392,8 +406,9 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
|
||||
[#750]: https://github.com/Kava-Labs/kava/pull/750
|
||||
[#751]: https://github.com/Kava-Labs/kava/pull/751
|
||||
[#780]: https://github.com/Kava-Labs/kava/pull/780
|
||||
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.25.0...HEAD
|
||||
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.24.3
|
||||
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.26.0...HEAD
|
||||
[v0.26.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.26.0
|
||||
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.25.0
|
||||
[v0.24.3]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.24.1
|
||||
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.24.0
|
||||
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.23.2
|
||||
|
42
Dockerfile-node
Normal file
42
Dockerfile-node
Normal file
@ -0,0 +1,42 @@
|
||||
FROM --platform=linux/amd64 ubuntu:24.04
|
||||
|
||||
# Install dependencies
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
git \
|
||||
sudo \
|
||||
wget \
|
||||
jq \
|
||||
make \
|
||||
gcc \
|
||||
unzip && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Go
|
||||
RUN wget https://golang.org/dl/go1.22.5.linux-amd64.tar.gz && \
|
||||
tar -C /usr/local -xzf go1.22.5.linux-amd64.tar.gz && \
|
||||
rm go1.22.5.linux-amd64.tar.gz
|
||||
# Set Go environment variables
|
||||
ENV GOPATH=/root/go
|
||||
ENV PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
|
||||
# Create Go workspace directory
|
||||
RUN mkdir -p /root/go
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
# https://docs.0g.ai/0g-doc/run-a-node/validator-node
|
||||
RUN git clone -b v0.2.3 https://github.com/0glabs/0g-chain.git
|
||||
RUN ./0g-chain/networks/testnet/install.sh
|
||||
|
||||
RUN 0gchaind config chain-id zgtendermint_16600-2
|
||||
|
||||
RUN 0gchaind init testnetnode --chain-id zgtendermint_16600-2
|
||||
|
||||
RUN rm ~/.0gchain/config/genesis.json
|
||||
RUN wget -P ~/.0gchain/config https://github.com/0glabs/0g-chain/releases/download/v0.2.3/genesis.json
|
||||
|
||||
RUN 0gchaind validate-genesis
|
||||
|
||||
RUN sed -i 's|seeds = ""|seeds = "81987895a11f6689ada254c6b57932ab7ed909b6@54.241.167.190:26656,010fb4de28667725a4fef26cdc7f9452cc34b16d@54.176.175.48:26656,e9b4bc203197b62cc7e6a80a64742e752f4210d5@54.193.250.204:26656,68b9145889e7576b652ca68d985826abd46ad660@18.166.164.232:26656"|' $HOME/.0gchain/config/config.toml
|
||||
|
||||
ENTRYPOINT ["0gchaind", "start"]
|
@ -1,23 +1,6 @@
|
||||
FROM golang:1.20-bullseye AS chain-builder
|
||||
FROM kava/rocksdb:v8.10.1-go1.21 AS kava-builder
|
||||
|
||||
# Set up dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set working directory for the build
|
||||
WORKDIR /root
|
||||
# default home directory is /root
|
||||
|
||||
# install rocksdb
|
||||
ARG rocksdb_version=v8.10.0
|
||||
ENV ROCKSDB_VERSION=$rocksdb_version
|
||||
|
||||
RUN git clone https://github.com/facebook/rocksdb.git \
|
||||
&& cd rocksdb \
|
||||
&& git checkout $ROCKSDB_VERSION \
|
||||
&& make -j$(nproc) install-shared \
|
||||
&& ldconfig
|
||||
RUN apt-get update
|
||||
|
||||
WORKDIR /root/0gchain
|
||||
# Copy dependency files first to facilitate dependency caching
|
||||
|
22
Dockerfile-rocksdb-base
Normal file
22
Dockerfile-rocksdb-base
Normal file
@ -0,0 +1,22 @@
|
||||
# published to https://hub.docker.com/repository/docker/kava/rocksdb/tags
|
||||
# docker buildx build --platform linux/amd64,linux/arm64 -t kava/rocksdb:v8.10.1-go1.21 -f Dockerfile-rocksdb-base . --push
|
||||
FROM golang:1.21-bullseye
|
||||
|
||||
# Set up dependencies
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set working directory for the build
|
||||
WORKDIR /root
|
||||
# default home directory is /root
|
||||
|
||||
# install rocksdb
|
||||
ARG rocksdb_version=v8.10.0
|
||||
ENV ROCKSDB_VERSION=$rocksdb_version
|
||||
|
||||
RUN git clone https://github.com/facebook/rocksdb.git \
|
||||
&& cd rocksdb \
|
||||
&& git checkout $ROCKSDB_VERSION \
|
||||
&& make -j$(nproc) install-shared \
|
||||
&& ldconfig
|
49
Makefile
49
Makefile
@ -105,6 +105,8 @@ include $(BUILD_DIR)/deps.mk
|
||||
include $(BUILD_DIR)/proto.mk
|
||||
include $(BUILD_DIR)/proto-deps.mk
|
||||
|
||||
include $(BUILD_DIR)/lint.mk
|
||||
|
||||
#export GO111MODULE = on
|
||||
# process build tags
|
||||
build_tags = netgo
|
||||
@ -208,6 +210,14 @@ build-release: go.sum
|
||||
build-linux: go.sum
|
||||
LEDGER_ENABLED=false GOOS=linux GOARCH=amd64 $(MAKE) build
|
||||
|
||||
# build on rocksdb-backed kava on macOS with shared libs from brew
|
||||
# this assumes you are on macOS & these deps have been installed with brew:
|
||||
# rocksdb, snappy, lz4, and zstd
|
||||
# use like `make build-rocksdb-brew COSMOS_BUILD_OPTIONS=rocksdb`
|
||||
build-rocksdb-brew:
|
||||
export CGO_CFLAGS := -I$(shell brew --prefix rocksdb)/include
|
||||
export CGO_LDFLAGS := -L$(shell brew --prefix rocksdb)/lib -lrocksdb -lstdc++ -lm -lz -L$(shell brew --prefix snappy)/lib -L$(shell brew --prefix lz4)/lib -L$(shell brew --prefix zstd)/lib
|
||||
|
||||
install: go.sum
|
||||
$(GO_BIN) install -mod=readonly $(BUILD_FLAGS) $(MAIN_ENTRY)
|
||||
|
||||
@ -234,13 +244,6 @@ link-check:
|
||||
# TODO: replace kava in following line with project name
|
||||
liche -r . --exclude "^http://127.*|^https://riot.im/app*|^http://kava-testnet*|^https://testnet-dex*|^https://kava3.data.kava.io*|^https://ipfs.io*|^https://apps.apple.com*|^https://kava.quicksync.io*"
|
||||
|
||||
|
||||
lint:
|
||||
golangci-lint run
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" | xargs gofmt -d -s
|
||||
$(GO_BIN) mod verify
|
||||
.PHONY: lint
|
||||
|
||||
format:
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs gofmt -w -s
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs misspell -w
|
||||
@ -265,11 +268,11 @@ build-docker-local-0gchain:
|
||||
|
||||
# Run a 4-node testnet locally
|
||||
localnet-start: build-linux localnet-stop
|
||||
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z $(DOCKER_IMAGE_NAME)-node testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
|
||||
docker-compose up -d
|
||||
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z kava/kavanode testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
|
||||
$(DOCKER) compose up -d
|
||||
|
||||
localnet-stop:
|
||||
docker-compose down
|
||||
$(DOCKER) compose down
|
||||
|
||||
# Launch a new single validator chain
|
||||
start:
|
||||
@ -311,12 +314,14 @@ test-basic: test
|
||||
test-e2e: docker-build
|
||||
$(GO_BIN) test -failfast -count=1 -v ./tests/e2e/...
|
||||
|
||||
# run interchaintest tests (./tests/e2e-ibc)
|
||||
test-ibc: docker-build
|
||||
cd tests/e2e-ibc && KAVA_TAG=local $(GO_BIN) test -timeout 10m .
|
||||
.PHONY: test-ibc
|
||||
|
||||
test:
|
||||
@$(GO_BIN) test $$($(GO_BIN) list ./... | grep -v 'contrib' | grep -v 'tests/e2e')
|
||||
|
||||
test-rocksdb:
|
||||
@go test -tags=rocksdb $(MAIN_ENTRY)/opendb
|
||||
|
||||
# Run cli integration tests
|
||||
# `-p 4` to use 4 cores, `-tags cli_test` to tell $(GO_BIN) not to ignore the cli package
|
||||
# These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set.
|
||||
@ -327,6 +332,18 @@ test-cli: build
|
||||
test-migrate:
|
||||
@$(GO_BIN) test -v -count=1 ./migrate/...
|
||||
|
||||
# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169
|
||||
ifeq ($(OS_FAMILY),Darwin)
|
||||
FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic
|
||||
endif
|
||||
|
||||
test-fuzz:
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzMintCoins ./x/precisebank/keeper
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzBurnCoins ./x/precisebank/keeper
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzSendCoins ./x/precisebank/keeper
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_NonZeroRemainder ./x/precisebank/types
|
||||
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_ZeroRemainder ./x/precisebank/types
|
||||
|
||||
# Kick start lots of sims on an AWS cluster.
|
||||
# This submits an AWS Batch job to run a lot of sims, each within a docker image. Results are uploaded to S3
|
||||
start-remote-sims:
|
||||
@ -337,14 +354,14 @@ start-remote-sims:
|
||||
# submit an array job on AWS Batch, using 1000 seeds, spot instances
|
||||
aws batch submit-job \
|
||||
-—job-name "master-$(VERSION)" \
|
||||
-—job-queue “simulation-1-queue-spot" \
|
||||
-—job-queue "simulation-1-queue-spot" \
|
||||
-—array-properties size=1000 \
|
||||
-—job-definition $(BINARY_NAME)-sim-master \
|
||||
-—container-override environment=[{SIM_NAME=master-$(VERSION)}]
|
||||
|
||||
update-kvtool:
|
||||
git submodule init || true
|
||||
git submodule update
|
||||
git submodule update --remote
|
||||
cd tests/e2e/kvtool && make install
|
||||
|
||||
.PHONY: all build-linux install clean build test test-cli test-all test-rest test-basic start-remote-sims
|
||||
.PHONY: all build-linux install build test test-cli test-all test-rest test-basic test-fuzz start-remote-sims
|
||||
|
68
README.md
68
README.md
@ -1,59 +1,35 @@
|
||||
<!---
|
||||
<br />
|
||||
<p align="center">
|
||||
<img src="./0g-logo.svg" width="300">
|
||||
<img src="https://framerusercontent.com/images/JJi9BT4FAjp4W63c3jjNz0eezQ.png" alt="Logo" width="140" height="140">
|
||||
</p>
|
||||
--->
|
||||
<p align="center">
|
||||
<b><font size="5">0G is limitless scalability</font></b>
|
||||
</p>
|
||||
<br />
|
||||
|
||||
# 0G Chain
|
||||
Zero Gravity (0G) is the foundational infrastructure for high-performance dapps and chains particularly for AI.
|
||||
|
||||
<div align="center">
|
||||
It efficiently orchestrates utilization of hardware resources such as storage and compute and software assets such as data and models to handle the scale and complexity of AI workloads.
|
||||
|
||||
### [Telegram](https://t.me/web3_0glabs) | [Discord](https://discord.com/invite/0glabs)
|
||||
Continue reading [here](https://docs.0g.ai/intro) if you want to learn more about 0G dAIOS and how its various layers enable limitless scalability.
|
||||
|
||||
</div>
|
||||
## 0G Product Suite
|
||||
- DA: ultra high-performance data availability layer with KZG and quorum-based DAS
|
||||
- Storage: decentralized storage with erasure coding and replication
|
||||
- Inference Serving: flexible serving framework for inferences and finetuning
|
||||
- Network: high-performance, low-latency, and decentralized network
|
||||
|
||||
Reference implementation of 0G Chain, the first modular AI chain. Built using the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk).
|
||||
## Documentation
|
||||
- If you want to build with 0G's network, DA layer, inference serving, or storage SDK, please refer to the [Build with 0G Documentation](https://docs.0g.ai/build-with-0g/contracts).
|
||||
|
||||
<!---
|
||||
## Mainnet
|
||||
- If you want to run a validator node, DA node, or storage node, please refer to the [Run a Node Documentation](https://docs.0g.ai/run-a-node/overview).
|
||||
|
||||
The current recommended version of the software for mainnet is [v0.25.0](https://github.com/Kava-Labs/kava/releases/tag/v0.25.0) The master branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
|
||||
|
||||
### Installation and Setup
|
||||
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/participate/validator-node).
|
||||
## Support and Additional Resources
|
||||
We want to do everything we can to help you be successful while working on your contribution and projects. Here you'll find various resources and communities that may help you complete a project or contribute to 0G.
|
||||
|
||||
```bash
|
||||
git checkout v0.25.0
|
||||
make install
|
||||
```
|
||||
|
||||
End-to-end tests of Kava use a tool for generating networks with different configurations: [kvtool](https://github.com/Kava-Labs/kvtool).
|
||||
This is included as a git submodule at [`tests/e2e/kvtool`](tests/e2e/kvtool/).
|
||||
When first cloning the repository, if you intend to run the e2e integration tests, you must also
|
||||
clone the submodules:
|
||||
```bash
|
||||
git clone --recurse-submodules https://github.com/Kava-Labs/kava.git
|
||||
```
|
||||
|
||||
Or, if you have already cloned the repo: `git submodule update --init`
|
||||
|
||||
## Testnet
|
||||
|
||||
For further information on joining the testnet, head over to the [testnet repo](https://github.com/Kava-Labs/kava-testnets).
|
||||
|
||||
## Docs
|
||||
|
||||
Kava protocol and client documentation can be found in the [Kava docs](https://docs.kava.io).
|
||||
|
||||
If you have technical questions or concerns, ask a developer or community member in the [Kava discord](https://discord.com/invite/kQzh3Uv).
|
||||
|
||||
## Security
|
||||
|
||||
If you find a security issue, please report it to security [at] kava.io. Depending on the verification and severity, a bug bounty may be available.
|
||||
|
||||
## License
|
||||
|
||||
Copyright © Kava Labs, Inc. All rights reserved.
|
||||
|
||||
Licensed under the [Apache v2 License](LICENSE.md).
|
||||
--->
|
||||
### Communities
|
||||
- [0G Telegram](https://t.me/web3_0glabs)
|
||||
- [0G Discord](https://discord.com/invite/0glabs)
|
341
app/abci_utils.go
Normal file
341
app/abci_utils.go
Normal file
@ -0,0 +1,341 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/mempool"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/signing"
|
||||
)
|
||||
|
||||
type (
|
||||
// GasTx defines the contract that a transaction with a gas limit must implement.
|
||||
GasTx interface {
|
||||
GetGas() uint64
|
||||
}
|
||||
|
||||
// ProposalTxVerifier defines the interface that is implemented by BaseApp,
|
||||
// that any custom ABCI PrepareProposal and ProcessProposal handler can use
|
||||
// to verify a transaction.
|
||||
ProposalTxVerifier interface {
|
||||
PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error)
|
||||
ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error)
|
||||
}
|
||||
|
||||
// DefaultProposalHandler defines the default ABCI PrepareProposal and
|
||||
// ProcessProposal handlers.
|
||||
DefaultProposalHandler struct {
|
||||
mempool mempool.Mempool
|
||||
txVerifier ProposalTxVerifier
|
||||
txSelector TxSelector
|
||||
}
|
||||
)
|
||||
|
||||
func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler {
|
||||
return &DefaultProposalHandler{
|
||||
mempool: mp,
|
||||
txVerifier: txVerifier,
|
||||
txSelector: NewDefaultTxSelector(),
|
||||
}
|
||||
}
|
||||
|
||||
// SetTxSelector sets the TxSelector function on the DefaultProposalHandler.
|
||||
func (h *DefaultProposalHandler) SetTxSelector(ts TxSelector) {
|
||||
h.txSelector = ts
|
||||
}
|
||||
|
||||
// PrepareProposalHandler returns the default implementation for processing an
|
||||
// ABCI proposal. The application's mempool is enumerated and all valid
|
||||
// transactions are added to the proposal. Transactions are valid if they:
|
||||
//
|
||||
// 1) Successfully encode to bytes.
|
||||
// 2) Are valid (i.e. pass runTx, AnteHandler only).
|
||||
//
|
||||
// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is
|
||||
// reached or the mempool is exhausted.
|
||||
//
|
||||
// Note:
|
||||
//
|
||||
// - Step (2) is identical to the validation step performed in
|
||||
// DefaultProcessProposal. It is very important that the same validation logic
|
||||
// is used in both steps, and applications must ensure that this is the case in
|
||||
// non-default handlers.
|
||||
//
|
||||
// - If no mempool is set or if the mempool is a no-op mempool, the transactions
|
||||
// requested from CometBFT will simply be returned, which, by default, are in
|
||||
// FIFO order.
|
||||
func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHandler {
|
||||
return func(ctx sdk.Context, req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
|
||||
var maxBlockGas uint64
|
||||
if b := ctx.ConsensusParams().Block; b != nil {
|
||||
maxBlockGas = uint64(b.MaxGas)
|
||||
}
|
||||
|
||||
defer h.txSelector.Clear()
|
||||
|
||||
// If the mempool is nil or NoOp we simply return the transactions
|
||||
// requested from CometBFT, which, by default, should be in FIFO order.
|
||||
//
|
||||
// Note, we still need to ensure the transactions returned respect req.MaxTxBytes.
|
||||
_, isNoOp := h.mempool.(mempool.NoOpMempool)
|
||||
if h.mempool == nil || isNoOp {
|
||||
for _, txBz := range req.Txs {
|
||||
// XXX: We pass nil as the memTx because we have no way of decoding the
|
||||
// txBz. We'd need to break (update) the ProposalTxVerifier interface.
|
||||
// As a result, we CANNOT account for block max gas.
|
||||
stop := h.txSelector.SelectTxForProposal(uint64(req.MaxTxBytes), maxBlockGas, nil, txBz)
|
||||
if stop {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return abci.ResponsePrepareProposal{Txs: h.txSelector.SelectedTxs()}
|
||||
}
|
||||
|
||||
selectedTxsSignersSeqs := make(map[string]uint64)
|
||||
var selectedTxsNums int
|
||||
|
||||
var waitRemoveTxs []sdk.Tx
|
||||
|
||||
mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx) bool {
|
||||
sigs, err := memTx.(signing.SigVerifiableTx).GetSignaturesV2()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to get signatures: %w", err))
|
||||
}
|
||||
// If the signers aren't in selectedTxsSignersSeqs then we haven't seen them before
|
||||
// so we add them and continue given that we don't need to check the sequence.
|
||||
shouldAdd := true
|
||||
txSignersSeqs := make(map[string]uint64)
|
||||
if len(sigs) == 0 {
|
||||
msgs := memTx.GetMsgs()
|
||||
if len(msgs) == 1 {
|
||||
msgEthTx, ok := msgs[0].(*evmtypes.MsgEthereumTx)
|
||||
if ok {
|
||||
ethTx := msgEthTx.AsTransaction()
|
||||
signer := gethtypes.NewEIP2930Signer(ethTx.ChainId())
|
||||
ethSender, err := signer.Sender(ethTx)
|
||||
if err == nil {
|
||||
signer := sdk.AccAddress(ethSender.Bytes()).String()
|
||||
nonce := ethTx.Nonce()
|
||||
seq, ok := selectedTxsSignersSeqs[signer]
|
||||
if !ok {
|
||||
txSignersSeqs[signer] = nonce
|
||||
} else {
|
||||
// If we have seen this signer before in this block, we must make
|
||||
// sure that the current sequence is seq+1; otherwise is invalid
|
||||
// and we skip it.
|
||||
if seq+1 != nonce {
|
||||
shouldAdd = false
|
||||
} else {
|
||||
txSignersSeqs[signer] = nonce
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, sig := range sigs {
|
||||
signer := sdk.AccAddress(sig.PubKey.Address()).String()
|
||||
seq, ok := selectedTxsSignersSeqs[signer]
|
||||
if !ok {
|
||||
txSignersSeqs[signer] = sig.Sequence
|
||||
continue
|
||||
}
|
||||
|
||||
// If we have seen this signer before in this block, we must make
|
||||
// sure that the current sequence is seq+1; otherwise is invalid
|
||||
// and we skip it.
|
||||
if seq+1 != sig.Sequence {
|
||||
shouldAdd = false
|
||||
break
|
||||
}
|
||||
txSignersSeqs[signer] = sig.Sequence
|
||||
}
|
||||
}
|
||||
|
||||
if shouldAdd {
|
||||
// NOTE: Since transaction verification was already executed in CheckTx,
|
||||
// which calls mempool.Insert, in theory everything in the pool should be
|
||||
// valid. But some mempool implementations may insert invalid txs, so we
|
||||
// check again.
|
||||
txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx)
|
||||
if err != nil {
|
||||
waitRemoveTxs = append(waitRemoveTxs, memTx)
|
||||
} else {
|
||||
stop := h.txSelector.SelectTxForProposal(uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz)
|
||||
if stop {
|
||||
return false
|
||||
}
|
||||
|
||||
txsLen := len(h.txSelector.SelectedTxs())
|
||||
for sender, seq := range txSignersSeqs {
|
||||
// If txsLen != selectedTxsNums is true, it means that we've
|
||||
// added a new tx to the selected txs, so we need to update
|
||||
// the sequence of the sender.
|
||||
if txsLen != selectedTxsNums {
|
||||
selectedTxsSignersSeqs[sender] = seq
|
||||
} else if _, ok := selectedTxsSignersSeqs[sender]; !ok {
|
||||
// The transaction hasn't been added but it passed the
|
||||
// verification, so we know that the sequence is correct.
|
||||
// So we set this sender's sequence to seq-1, in order
|
||||
// to avoid unnecessary calls to PrepareProposalVerifyTx.
|
||||
selectedTxsSignersSeqs[sender] = seq - 1
|
||||
}
|
||||
}
|
||||
selectedTxsNums = txsLen
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
for i := range waitRemoveTxs {
|
||||
err := h.mempool.Remove(waitRemoveTxs[i])
|
||||
if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
return abci.ResponsePrepareProposal{Txs: h.txSelector.SelectedTxs()}
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessProposalHandler returns the default implementation for processing an
|
||||
// ABCI proposal. Every transaction in the proposal must pass 2 conditions:
|
||||
//
|
||||
// 1. The transaction bytes must decode to a valid transaction.
|
||||
// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only)
|
||||
//
|
||||
// If any transaction fails to pass either condition, the proposal is rejected.
|
||||
// Note that step (2) is identical to the validation step performed in
|
||||
// DefaultPrepareProposal. It is very important that the same validation logic
|
||||
// is used in both steps, and applications must ensure that this is the case in
|
||||
// non-default handlers.
|
||||
func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHandler {
|
||||
// If the mempool is nil or NoOp we simply return ACCEPT,
|
||||
// because PrepareProposal may have included txs that could fail verification.
|
||||
_, isNoOp := h.mempool.(mempool.NoOpMempool)
|
||||
if h.mempool == nil || isNoOp {
|
||||
return NoOpProcessProposal()
|
||||
}
|
||||
|
||||
return func(ctx sdk.Context, req abci.RequestProcessProposal) abci.ResponseProcessProposal {
|
||||
var totalTxGas uint64
|
||||
|
||||
var maxBlockGas int64
|
||||
if b := ctx.ConsensusParams().Block; b != nil {
|
||||
maxBlockGas = b.MaxGas
|
||||
}
|
||||
|
||||
for _, txBytes := range req.Txs {
|
||||
tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes)
|
||||
if err != nil {
|
||||
return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
|
||||
if maxBlockGas > 0 {
|
||||
gasTx, ok := tx.(GasTx)
|
||||
if ok {
|
||||
totalTxGas += gasTx.GetGas()
|
||||
}
|
||||
|
||||
if totalTxGas > uint64(maxBlockGas) {
|
||||
return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
}
|
||||
|
||||
// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always
|
||||
// return the transactions sent by the client's request.
|
||||
func NoOpPrepareProposal() sdk.PrepareProposalHandler {
|
||||
return func(_ sdk.Context, req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
|
||||
return abci.ResponsePrepareProposal{Txs: req.Txs}
|
||||
}
|
||||
}
|
||||
|
||||
// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always
|
||||
// return ACCEPT.
|
||||
func NoOpProcessProposal() sdk.ProcessProposalHandler {
|
||||
return func(_ sdk.Context, _ abci.RequestProcessProposal) abci.ResponseProcessProposal {
|
||||
return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
|
||||
}
|
||||
}
|
||||
|
||||
// TxSelector defines a helper type that assists in selecting transactions during
|
||||
// mempool transaction selection in PrepareProposal. It keeps track of the total
|
||||
// number of bytes and total gas of the selected transactions. It also keeps
|
||||
// track of the selected transactions themselves.
|
||||
type TxSelector interface {
|
||||
// SelectedTxs should return a copy of the selected transactions.
|
||||
SelectedTxs() [][]byte
|
||||
|
||||
// Clear should clear the TxSelector, nulling out all relevant fields.
|
||||
Clear()
|
||||
|
||||
// SelectTxForProposal should attempt to select a transaction for inclusion in
|
||||
// a proposal based on inclusion criteria defined by the TxSelector. It must
|
||||
// return <true> if the caller should halt the transaction selection loop
|
||||
// (typically over a mempool) or <false> otherwise.
|
||||
SelectTxForProposal(maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool
|
||||
}
|
||||
|
||||
type defaultTxSelector struct {
|
||||
totalTxBytes uint64
|
||||
totalTxGas uint64
|
||||
selectedTxs [][]byte
|
||||
}
|
||||
|
||||
func NewDefaultTxSelector() TxSelector {
|
||||
return &defaultTxSelector{}
|
||||
}
|
||||
|
||||
func (ts *defaultTxSelector) SelectedTxs() [][]byte {
|
||||
txs := make([][]byte, len(ts.selectedTxs))
|
||||
copy(txs, ts.selectedTxs)
|
||||
return txs
|
||||
}
|
||||
|
||||
func (ts *defaultTxSelector) Clear() {
|
||||
ts.totalTxBytes = 0
|
||||
ts.totalTxGas = 0
|
||||
ts.selectedTxs = nil
|
||||
}
|
||||
|
||||
func (ts *defaultTxSelector) SelectTxForProposal(maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool {
|
||||
txSize := uint64(len(txBz))
|
||||
|
||||
var txGasLimit uint64
|
||||
if memTx != nil {
|
||||
if gasTx, ok := memTx.(GasTx); ok {
|
||||
txGasLimit = gasTx.GetGas()
|
||||
}
|
||||
}
|
||||
|
||||
// only add the transaction to the proposal if we have enough capacity
|
||||
if (txSize + ts.totalTxBytes) <= maxTxBytes {
|
||||
// If there is a max block gas limit, add the tx only if the limit has
|
||||
// not been met.
|
||||
if maxBlockGas > 0 {
|
||||
if (txGasLimit + ts.totalTxGas) <= maxBlockGas {
|
||||
ts.totalTxGas += txGasLimit
|
||||
ts.totalTxBytes += txSize
|
||||
ts.selectedTxs = append(ts.selectedTxs, txBz)
|
||||
}
|
||||
} else {
|
||||
ts.totalTxBytes += txSize
|
||||
ts.selectedTxs = append(ts.selectedTxs, txBz)
|
||||
}
|
||||
}
|
||||
|
||||
// check if we've reached capacity; if so, we cannot select any more transactions
|
||||
return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas))
|
||||
}
|
@ -168,6 +168,7 @@ func newEthAnteHandler(options HandlerOptions) sdk.AnteHandler {
|
||||
evmante.NewEthSetUpContextDecorator(options.EvmKeeper), // outermost AnteDecorator. SetUpContext must be called first
|
||||
evmante.NewEthMempoolFeeDecorator(options.EvmKeeper), // Check eth effective gas price against minimal-gas-prices
|
||||
evmante.NewEthValidateBasicDecorator(options.EvmKeeper),
|
||||
evmante.NewEvmMinGasPriceDecorator(options.EvmKeeper),
|
||||
evmante.NewEthSigVerificationDecorator(options.EvmKeeper),
|
||||
evmante.NewEthAccountVerificationDecorator(options.AccountKeeper, options.EvmKeeper),
|
||||
evmante.NewCanTransferDecorator(options.EvmKeeper),
|
||||
|
@ -52,13 +52,16 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
|
||||
|
||||
tApp := app.TestApp{
|
||||
App: *app.NewApp(
|
||||
log.NewNopLogger(),
|
||||
tmdb.NewMemDB(),
|
||||
chaincfg.DefaultNodeHome,
|
||||
nil,
|
||||
encodingConfig,
|
||||
opts,
|
||||
baseapp.SetChainID(app.TestChainId),
|
||||
app.NewBaseApp(
|
||||
log.NewNopLogger(),
|
||||
tmdb.NewMemDB(),
|
||||
encodingConfig,
|
||||
baseapp.SetChainID(app.TestChainId),
|
||||
),
|
||||
),
|
||||
}
|
||||
|
||||
|
@ -405,7 +405,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
|
||||
// usdxToMintAmt: 99,
|
||||
// },
|
||||
{
|
||||
name: "fails when convertion more erc20 usdc than balance",
|
||||
name: "fails when conversion more erc20 usdc than balance",
|
||||
usdcDepositAmt: 51_000,
|
||||
usdxToMintAmt: 100,
|
||||
errMsg: "transfer amount exceeds balance",
|
||||
|
126
app/app.go
126
app/app.go
@ -95,6 +95,7 @@ import (
|
||||
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
|
||||
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
|
||||
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
evmante "github.com/evmos/ethermint/app/ante"
|
||||
ethermintconfig "github.com/evmos/ethermint/server/config"
|
||||
"github.com/evmos/ethermint/x/evm"
|
||||
@ -109,6 +110,8 @@ import (
|
||||
chainparams "github.com/0glabs/0g-chain/app/params"
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
dasignersprecompile "github.com/0glabs/0g-chain/precompiles/dasigners"
|
||||
stakingprecompile "github.com/0glabs/0g-chain/precompiles/staking"
|
||||
wrappeda0gibaseprecompile "github.com/0glabs/0g-chain/precompiles/wrapped-a0gi-base"
|
||||
|
||||
"github.com/0glabs/0g-chain/x/bep3"
|
||||
bep3keeper "github.com/0glabs/0g-chain/x/bep3/keeper"
|
||||
@ -129,14 +132,19 @@ import (
|
||||
issuance "github.com/0glabs/0g-chain/x/issuance"
|
||||
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
|
||||
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
|
||||
"github.com/0glabs/0g-chain/x/precisebank"
|
||||
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
|
||||
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
|
||||
pricefeed "github.com/0glabs/0g-chain/x/pricefeed"
|
||||
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
|
||||
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
|
||||
validatorvesting "github.com/0glabs/0g-chain/x/validator-vesting"
|
||||
validatorvestingrest "github.com/0glabs/0g-chain/x/validator-vesting/client/rest"
|
||||
validatorvestingtypes "github.com/0glabs/0g-chain/x/validator-vesting/types"
|
||||
wrappeda0gibase "github.com/0glabs/0g-chain/x/wrapped-a0gi-base"
|
||||
wrappeda0gibasekeeper "github.com/0glabs/0g-chain/x/wrapped-a0gi-base/keeper"
|
||||
wrappeda0gibasetypes "github.com/0glabs/0g-chain/x/wrapped-a0gi-base/types"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -178,10 +186,12 @@ var (
|
||||
validatorvesting.AppModuleBasic{},
|
||||
evmutil.AppModuleBasic{},
|
||||
mint.AppModuleBasic{},
|
||||
precisebank.AppModuleBasic{},
|
||||
council.AppModuleBasic{},
|
||||
dasigners.AppModuleBasic{},
|
||||
consensus.AppModuleBasic{},
|
||||
ibcwasm.AppModuleBasic{},
|
||||
wrappeda0gibase.AppModuleBasic{},
|
||||
)
|
||||
|
||||
// module account permissions
|
||||
@ -199,6 +209,8 @@ var (
|
||||
issuancetypes.ModuleAccountName: {authtypes.Minter, authtypes.Burner},
|
||||
bep3types.ModuleName: {authtypes.Burner, authtypes.Minter},
|
||||
minttypes.ModuleName: {authtypes.Minter},
|
||||
precisebanktypes.ModuleName: {authtypes.Minter, authtypes.Burner}, // used for reserve account to back fractional amounts
|
||||
wrappeda0gibasetypes.ModuleName: {authtypes.Minter, authtypes.Burner},
|
||||
}
|
||||
)
|
||||
|
||||
@ -255,7 +267,7 @@ type App struct {
|
||||
packetForwardKeeper *packetforwardkeeper.Keeper
|
||||
evmKeeper *evmkeeper.Keeper
|
||||
evmutilKeeper evmutilkeeper.Keeper
|
||||
feeMarketKeeper feemarketkeeper.Keeper
|
||||
feeMarketKeeper *feemarketkeeper.Keeper
|
||||
upgradeKeeper upgradekeeper.Keeper
|
||||
evidenceKeeper evidencekeeper.Keeper
|
||||
transferKeeper ibctransferkeeper.Keeper
|
||||
@ -268,6 +280,8 @@ type App struct {
|
||||
mintKeeper mintkeeper.Keeper
|
||||
dasignersKeeper dasignerskeeper.Keeper
|
||||
consensusParamsKeeper consensusparamkeeper.Keeper
|
||||
precisebankKeeper precisebankkeeper.Keeper
|
||||
wrappeda0gibaseKeeper wrappeda0gibasekeeper.Keeper
|
||||
|
||||
// make scoped keepers public for test purposes
|
||||
ScopedIBCKeeper capabilitykeeper.ScopedKeeper
|
||||
@ -286,21 +300,24 @@ type App struct {
|
||||
func init() {
|
||||
}
|
||||
|
||||
func NewBaseApp(logger tmlog.Logger, db dbm.DB, encodingConfig chainparams.EncodingConfig,
|
||||
baseAppOptions ...func(*baseapp.BaseApp)) *baseapp.BaseApp {
|
||||
bApp := baseapp.NewBaseApp(chaincfg.AppName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...)
|
||||
return bApp
|
||||
}
|
||||
|
||||
// NewApp returns a reference to an initialized App.
|
||||
func NewApp(
|
||||
logger tmlog.Logger,
|
||||
db dbm.DB,
|
||||
homePath string,
|
||||
traceStore io.Writer,
|
||||
encodingConfig chainparams.EncodingConfig,
|
||||
options Options,
|
||||
baseAppOptions ...func(*baseapp.BaseApp),
|
||||
bApp *baseapp.BaseApp,
|
||||
) *App {
|
||||
appCodec := encodingConfig.Marshaler
|
||||
legacyAmino := encodingConfig.Amino
|
||||
interfaceRegistry := encodingConfig.InterfaceRegistry
|
||||
|
||||
bApp := baseapp.NewBaseApp(chaincfg.AppName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...)
|
||||
bApp.SetCommitMultiStoreTracer(traceStore)
|
||||
bApp.SetVersion(version.Version)
|
||||
bApp.SetInterfaceRegistry(interfaceRegistry)
|
||||
@ -318,8 +335,9 @@ func NewApp(
|
||||
counciltypes.StoreKey,
|
||||
dasignerstypes.StoreKey,
|
||||
vestingtypes.StoreKey,
|
||||
consensusparamtypes.StoreKey, crisistypes.StoreKey,
|
||||
consensusparamtypes.StoreKey, crisistypes.StoreKey, precisebanktypes.StoreKey,
|
||||
ibcwasmtypes.StoreKey,
|
||||
wrappeda0gibasetypes.StoreKey,
|
||||
)
|
||||
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey)
|
||||
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
|
||||
@ -361,7 +379,6 @@ func NewApp(
|
||||
feemarketSubspace := app.paramsKeeper.Subspace(feemarkettypes.ModuleName)
|
||||
evmSubspace := app.paramsKeeper.Subspace(evmtypes.ModuleName)
|
||||
evmutilSubspace := app.paramsKeeper.Subspace(evmutiltypes.ModuleName)
|
||||
mintSubspace := app.paramsKeeper.Subspace(minttypes.ModuleName)
|
||||
|
||||
// set the BaseApp's parameter store
|
||||
app.consensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, keys[consensusparamtypes.StoreKey], govAuthAddrStr)
|
||||
@ -472,6 +489,7 @@ func NewApp(
|
||||
keys[feemarkettypes.StoreKey],
|
||||
tkeys[feemarkettypes.TransientKey],
|
||||
feemarketSubspace,
|
||||
bApp.Mempool(),
|
||||
)
|
||||
|
||||
app.evmutilKeeper = evmutilkeeper.NewKeeper(
|
||||
@ -482,26 +500,47 @@ func NewApp(
|
||||
app.accountKeeper,
|
||||
)
|
||||
|
||||
evmBankKeeper := evmutilkeeper.NewEvmBankKeeper(app.evmutilKeeper, app.bankKeeper, app.accountKeeper)
|
||||
// dasigners keeper
|
||||
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper)
|
||||
app.precisebankKeeper = precisebankkeeper.NewKeeper(
|
||||
app.appCodec,
|
||||
keys[precisebanktypes.StoreKey],
|
||||
app.bankKeeper,
|
||||
app.accountKeeper,
|
||||
)
|
||||
|
||||
// precopmiles
|
||||
precompiles := make(map[common.Address]vm.PrecompiledContract)
|
||||
// dasigners
|
||||
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper, govAuthAddrStr)
|
||||
daSignersPrecompile, err := dasignersprecompile.NewDASignersPrecompile(app.dasignersKeeper)
|
||||
if err != nil {
|
||||
panic("initialize precompile failed")
|
||||
panic(fmt.Sprintf("initialize dasigners precompile failed: %v", err))
|
||||
}
|
||||
precompiles[daSignersPrecompile.Address()] = daSignersPrecompile
|
||||
// evm keeper
|
||||
// staking
|
||||
stakingPrecompile, err := stakingprecompile.NewStakingPrecompile(app.stakingKeeper)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("initialize staking precompile failed: %v", err))
|
||||
}
|
||||
precompiles[stakingPrecompile.Address()] = stakingPrecompile
|
||||
// wrapped a0gi base
|
||||
app.wrappeda0gibaseKeeper = wrappeda0gibasekeeper.NewKeeper(keys[wrappeda0gibasetypes.StoreKey], appCodec, app.precisebankKeeper, govAuthAddrStr)
|
||||
wrappeda0gibasePrecompile, err := wrappeda0gibaseprecompile.NewWrappedA0giBasePrecompile(app.wrappeda0gibaseKeeper)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("initialize wrapped a0gi base precompile failed: %v", err))
|
||||
}
|
||||
precompiles[wrappeda0gibasePrecompile.Address()] = wrappeda0gibasePrecompile
|
||||
|
||||
app.evmKeeper = evmkeeper.NewKeeper(
|
||||
appCodec, keys[evmtypes.StoreKey], tkeys[evmtypes.TransientKey],
|
||||
govAuthAddr,
|
||||
app.accountKeeper, evmBankKeeper, app.stakingKeeper, app.feeMarketKeeper,
|
||||
app.accountKeeper,
|
||||
app.precisebankKeeper, // x/precisebank in place of x/bank
|
||||
app.stakingKeeper,
|
||||
app.feeMarketKeeper,
|
||||
options.EVMTrace,
|
||||
evmSubspace,
|
||||
precompiles,
|
||||
)
|
||||
|
||||
app.evmutilKeeper.SetEvmKeeper(app.evmKeeper)
|
||||
|
||||
// It's important to note that the PFM Keeper must be initialized before the Transfer Keeper
|
||||
@ -577,7 +616,6 @@ func NewApp(
|
||||
app.accountKeeper,
|
||||
app.bankKeeper,
|
||||
authtypes.FeeCollectorName,
|
||||
govAuthAddrStr,
|
||||
)
|
||||
|
||||
// create committee keeper with router
|
||||
@ -668,10 +706,12 @@ func NewApp(
|
||||
committee.NewAppModule(app.committeeKeeper, app.accountKeeper),
|
||||
evmutil.NewAppModule(app.evmutilKeeper, app.bankKeeper, app.accountKeeper),
|
||||
// nil InflationCalculationFn, use SDK's default inflation function
|
||||
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, nil, mintSubspace),
|
||||
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper),
|
||||
precisebank.NewAppModule(app.precisebankKeeper, app.bankKeeper, app.accountKeeper),
|
||||
council.NewAppModule(app.CouncilKeeper),
|
||||
ibcwasm.NewAppModule(app.ibcWasmClientKeeper),
|
||||
dasigners.NewAppModule(app.dasignersKeeper, *app.stakingKeeper),
|
||||
wrappeda0gibase.NewAppModule(app.wrappeda0gibaseKeeper),
|
||||
)
|
||||
|
||||
// Warning: Some begin blockers must run before others. Ensure the dependencies are understood before modifying this list.
|
||||
@ -716,8 +756,10 @@ func NewApp(
|
||||
counciltypes.ModuleName,
|
||||
consensusparamtypes.ModuleName,
|
||||
packetforwardtypes.ModuleName,
|
||||
precisebanktypes.ModuleName,
|
||||
ibcwasmtypes.ModuleName,
|
||||
dasignerstypes.ModuleName,
|
||||
wrappeda0gibasetypes.ModuleName,
|
||||
)
|
||||
|
||||
// Warning: Some end blockers must run before others. Ensure the dependencies are understood before modifying this list.
|
||||
@ -752,8 +794,10 @@ func NewApp(
|
||||
counciltypes.ModuleName,
|
||||
consensusparamtypes.ModuleName,
|
||||
packetforwardtypes.ModuleName,
|
||||
precisebanktypes.ModuleName,
|
||||
ibcwasmtypes.ModuleName,
|
||||
dasignerstypes.ModuleName,
|
||||
wrappeda0gibasetypes.ModuleName,
|
||||
)
|
||||
|
||||
// Warning: Some init genesis methods must run before others. Ensure the dependencies are understood before modifying this list
|
||||
@ -786,9 +830,11 @@ func NewApp(
|
||||
counciltypes.ModuleName,
|
||||
consensusparamtypes.ModuleName,
|
||||
packetforwardtypes.ModuleName,
|
||||
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
|
||||
precisebanktypes.ModuleName, // Must be run after x/bank to verify reserve balance
|
||||
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
|
||||
ibcwasmtypes.ModuleName,
|
||||
dasignerstypes.ModuleName,
|
||||
wrappeda0gibasetypes.ModuleName,
|
||||
)
|
||||
|
||||
app.mm.RegisterInvariants(&app.crisisKeeper)
|
||||
@ -1022,3 +1068,47 @@ func GetMaccPerms() map[string][]string {
|
||||
}
|
||||
return perms
|
||||
}
|
||||
|
||||
// accountNonceOp adapts the auth module's account keeper to the
// AccountNonceOp interface, exposing read/write access to account
// sequence numbers (nonces) keyed by bech32 address string.
type accountNonceOp struct {
	// ak is the account keeper used to load and store accounts.
	ak evmtypes.AccountKeeper
}

// AccountNonceOp reads and writes an account's sequence number (nonce),
// addressed by its bech32 string representation.
type AccountNonceOp interface {
	// GetAccountNonce returns the account's current sequence, or 0 when the
	// address cannot be parsed or the account does not exist (logged).
	GetAccountNonce(ctx sdk.Context, address string) uint64
	// SetAccountNonce overwrites the account's sequence; invalid addresses
	// and missing accounts are logged and otherwise ignored.
	SetAccountNonce(ctx sdk.Context, address string, nonce uint64)
}

// NewAccountNonceOp returns an AccountNonceOp backed by the app's account keeper.
func NewAccountNonceOp(app *App) AccountNonceOp {
	return &accountNonceOp{
		ak: app.accountKeeper,
	}
}
|
||||
|
||||
// GetAccountNonce returns the sequence (nonce) of the account identified by
// the bech32 address. Failures are not propagated: a parse error or a missing
// account is logged and 0 is returned, so callers cannot distinguish
// "nonce is 0" from "lookup failed".
func (ano *accountNonceOp) GetAccountNonce(ctx sdk.Context, address string) uint64 {
	bzAcc, err := sdk.AccAddressFromBech32(address)
	if err != nil {
		ctx.Logger().Error("GetAccountNonce: failed to parse address", "address", address, "error", err)
		return 0
	}
	acc := ano.ak.GetAccount(ctx, bzAcc)
	if acc == nil {
		ctx.Logger().Error("GetAccountNonce: account not found", "address", address)
		return 0
	}
	return acc.GetSequence()
}
|
||||
|
||||
func (ano *accountNonceOp) SetAccountNonce(ctx sdk.Context, address string, nonce uint64) {
|
||||
bzAcc, err := sdk.AccAddressFromBech32(address)
|
||||
if err != nil {
|
||||
ctx.Logger().Error("SetAccountNonce: failed to parse address", "address", address, "nonce", nonce, "error", err)
|
||||
return
|
||||
}
|
||||
acc := ano.ak.GetAccount(ctx, bzAcc)
|
||||
if acc != nil {
|
||||
acc.SetSequence(nonce)
|
||||
ano.ak.SetAccount(ctx, acc)
|
||||
} else {
|
||||
ctx.Logger().Error("SetAccountNonce: account not found", "address", address)
|
||||
}
|
||||
}
|
||||
|
@ -28,19 +28,34 @@ import (
|
||||
func TestNewApp(t *testing.T) {
|
||||
chaincfg.SetSDKConfig()
|
||||
NewApp(
|
||||
log.NewTMLogger(log.NewSyncWriter(os.Stdout)),
|
||||
db.NewMemDB(),
|
||||
chaincfg.DefaultNodeHome,
|
||||
nil,
|
||||
MakeEncodingConfig(),
|
||||
DefaultOptions,
|
||||
NewBaseApp(
|
||||
log.NewTMLogger(log.NewSyncWriter(os.Stdout)),
|
||||
db.NewMemDB(),
|
||||
MakeEncodingConfig(),
|
||||
baseapp.SetChainID(TestChainId),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
func TestExport(t *testing.T) {
|
||||
chaincfg.SetSDKConfig()
|
||||
db := db.NewMemDB()
|
||||
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions, baseapp.SetChainID(TestChainId))
|
||||
app := NewApp(
|
||||
chaincfg.DefaultNodeHome,
|
||||
nil,
|
||||
MakeEncodingConfig(),
|
||||
DefaultOptions,
|
||||
NewBaseApp(
|
||||
log.NewTMLogger(log.NewSyncWriter(os.Stdout)),
|
||||
db,
|
||||
MakeEncodingConfig(),
|
||||
baseapp.SetChainID(TestChainId),
|
||||
),
|
||||
)
|
||||
|
||||
genesisState := GenesisStateWithSingleValidator(&TestApp{App: *app}, NewDefaultGenesisState())
|
||||
|
||||
|
@ -3,9 +3,9 @@ Package params defines the simulation parameters for the 0gChain app.
|
||||
|
||||
It contains the default weights used for each transaction used on the module's
|
||||
simulation. These weights define the chance for a transaction to be simulated at
|
||||
any gived operation.
|
||||
any given operation.
|
||||
|
||||
You can repace the default values for the weights by providing a params.json
|
||||
You can replace the default values for the weights by providing a params.json
|
||||
file with the weights defined for each of the transaction operations:
|
||||
|
||||
{
|
||||
|
735
app/priority_nonce.go
Normal file
735
app/priority_nonce.go
Normal file
@ -0,0 +1,735 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/huandu/skiplist"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/mempool"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/signing"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
)
|
||||
|
||||
// MAX_TXS_PRE_SENDER_IN_MEMPOOL caps how many txs a single sender may have
// queued in the mempool at once (enforced via counterBySender/txRecord).
const MAX_TXS_PRE_SENDER_IN_MEMPOOL int = 48

var (
	// Compile-time interface conformance checks.
	_ mempool.Mempool  = (*PriorityNonceMempool)(nil)
	_ mempool.Iterator = (*PriorityNonceIterator)(nil)

	// Sentinel errors returned by Insert; call sites wrap them with
	// sender/nonce context via errors.Wrapf.
	errMempoolTxGasPriceTooLow = errors.New("gas price is too low")
	errMempoolTooManyTxs       = errors.New("tx sender has too many txs in mempool")
	errMempoolIsFull           = errors.New("mempool is full")
	errTxInMempool             = errors.New("tx already in mempool")
)
|
||||
|
||||
// PriorityNonceMempool is a mempool implementation that stores txs
// in a partially ordered set by 2 dimensions: priority, and sender-nonce
// (sequence number). Internally it uses one priority ordered skip list and one
// skip list per sender ordered by sender-nonce (sequence number). When there
// are multiple txs from the same sender, they are not always comparable by
// priority to other sender txs and must be partially ordered by both sender-nonce
// and priority.
type PriorityNonceMempool struct {
	// mtx guards the index/score state below.
	mtx sync.Mutex
	// priorityIndex orders all txs by (priority, weight, sender, nonce); see txMetaLess.
	priorityIndex *skiplist.SkipList
	// priorityCounts tracks how many resident txs share each priority value
	// (used to detect ties needing weight recomputation).
	priorityCounts map[int64]int
	// senderIndices holds one nonce-ordered skip list per sender.
	senderIndices map[string]*skiplist.SkipList
	// scores maps (nonce, sender) -> stored (priority, weight) for each resident tx.
	scores        map[txMeta]txMeta
	onRead        func(tx sdk.Tx)
	txReplacement func(op, np int64, oTx, nTx sdk.Tx) bool
	maxTx         int

	// senderTxCntLock guards counterBySender and txRecord, which enforce the
	// per-sender tx cap independently of mtx.
	senderTxCntLock sync.RWMutex
	counterBySender map[string]int
	txRecord        map[txMeta]struct{}

	// txReplacedCallback, when set, is invoked after a resident tx is replaced
	// (priority bump of the same (sender, nonce), or eviction when full).
	txReplacedCallback func(ctx context.Context, oldTx, newTx *TxInfo)
}
|
||||
|
||||
// PriorityNonceIterator walks the mempool in priority order, draining each
// sender's nonce-ordered index as long as it stays competitive with the next
// entry in the priority index.
type PriorityNonceIterator struct {
	// senderCursors tracks the current position in each sender's index.
	senderCursors map[string]*skiplist.Element
	// nextPriority caches the priority of the node after priorityNode, used to
	// decide when to leave the current sender's run.
	nextPriority int64
	// sender is the sender whose txs are currently being drained.
	sender       string
	priorityNode *skiplist.Element
	mempool      *PriorityNonceMempool
}

// txMeta stores transaction metadata used in indices
type txMeta struct {
	// nonce is the sender's sequence number
	nonce uint64
	// priority is the transaction's priority
	priority int64
	// sender is the transaction's sender
	sender string
	// weight is the transaction's weight, used as a tiebreaker for transactions with the same priority
	weight int64
	// senderElement is a pointer to the transaction's element in the sender index
	senderElement *skiplist.Element
}
|
||||
|
||||
// txMetaLess is a comparator for txKeys that first compares priority, then weight,
|
||||
// then sender, then nonce, uniquely identifying a transaction.
|
||||
//
|
||||
// Note, txMetaLess is used as the comparator in the priority index.
|
||||
func txMetaLess(a, b any) int {
|
||||
keyA := a.(txMeta)
|
||||
keyB := b.(txMeta)
|
||||
res := skiplist.Int64.Compare(keyA.priority, keyB.priority)
|
||||
if res != 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
// Weight is used as a tiebreaker for transactions with the same priority.
|
||||
// Weight is calculated in a single pass in .Select(...) and so will be 0
|
||||
// on .Insert(...).
|
||||
res = skiplist.Int64.Compare(keyA.weight, keyB.weight)
|
||||
if res != 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
// Because weight will be 0 on .Insert(...), we must also compare sender and
|
||||
// nonce to resolve priority collisions. If we didn't then transactions with
|
||||
// the same priority would overwrite each other in the priority index.
|
||||
res = skiplist.String.Compare(keyA.sender, keyB.sender)
|
||||
if res != 0 {
|
||||
return res
|
||||
}
|
||||
|
||||
return skiplist.Uint64.Compare(keyA.nonce, keyB.nonce)
|
||||
}
|
||||
|
||||
// PriorityNonceMempoolOption configures a PriorityNonceMempool at construction.
type PriorityNonceMempoolOption func(*PriorityNonceMempool)

// PriorityNonceWithOnRead sets a callback to be called when a tx is read from
// the mempool.
func PriorityNonceWithOnRead(onRead func(tx sdk.Tx)) PriorityNonceMempoolOption {
	return func(mp *PriorityNonceMempool) {
		mp.onRead = onRead
	}
}

// PriorityNonceWithTxReplacement sets a callback to be called when duplicated
// transaction nonce detected during mempool insert. An application can define a
// transaction replacement rule based on tx priority or certain transaction fields.
func PriorityNonceWithTxReplacement(txReplacementRule func(op, np int64, oTx, nTx sdk.Tx) bool) PriorityNonceMempoolOption {
	return func(mp *PriorityNonceMempool) {
		mp.txReplacement = txReplacementRule
	}
}

// PriorityNonceWithMaxTx sets the maximum number of transactions allowed in the
// mempool with the semantics:
//
//	<0: disabled, `Insert` is a no-op
//	0: unlimited
//	>0: maximum number of transactions allowed
func PriorityNonceWithMaxTx(maxTx int) PriorityNonceMempoolOption {
	return func(mp *PriorityNonceMempool) {
		mp.maxTx = maxTx
	}
}

// PriorityNonceWithTxReplacedCallback sets a callback invoked whenever a
// resident tx is replaced, either by a higher-priority duplicate or by
// eviction when the pool is full.
func PriorityNonceWithTxReplacedCallback(cb func(ctx context.Context, oldTx, newTx *TxInfo)) PriorityNonceMempoolOption {
	return func(mp *PriorityNonceMempool) {
		mp.txReplacedCallback = cb
	}
}
|
||||
|
||||
// DefaultPriorityMempool returns a priorityNonceMempool with no options.
func DefaultPriorityMempool() mempool.Mempool {
	return NewPriorityMempool()
}

// NewPriorityMempool returns the SDK's default mempool implementation which
// returns txs in a partial order by 2 dimensions; priority, and sender-nonce.
func NewPriorityMempool(opts ...PriorityNonceMempoolOption) *PriorityNonceMempool {
	// All index structures start empty; maxTx defaults to 0 unless overridden
	// via PriorityNonceWithMaxTx.
	mp := &PriorityNonceMempool{
		priorityIndex:   skiplist.New(skiplist.LessThanFunc(txMetaLess)),
		priorityCounts:  make(map[int64]int),
		senderIndices:   make(map[string]*skiplist.SkipList),
		scores:          make(map[txMeta]txMeta),
		counterBySender: make(map[string]int),
		txRecord:        make(map[txMeta]struct{}),
	}

	for _, opt := range opts {
		opt(mp)
	}

	return mp
}
|
||||
|
||||
// NextSenderTx returns the next transaction for a given sender by nonce order,
|
||||
// i.e. the next valid transaction for the sender. If no such transaction exists,
|
||||
// nil will be returned.
|
||||
func (mp *PriorityNonceMempool) NextSenderTx(sender string) sdk.Tx {
|
||||
senderIndex, ok := mp.senderIndices[sender]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
cursor := senderIndex.Front()
|
||||
return cursor.Value.(sdk.Tx)
|
||||
}
|
||||
|
||||
// Insert attempts to insert a Tx into the app-side mempool in O(log n) time,
|
||||
// returning an error if unsuccessful. Sender and nonce are derived from the
|
||||
// transaction's first signature.
|
||||
//
|
||||
// Transactions are unique by sender and nonce. Inserting a duplicate tx is an
|
||||
// O(log n) no-op.
|
||||
//
|
||||
// Inserting a duplicate tx with a different priority overwrites the existing tx,
|
||||
// changing the total order of the mempool.
|
||||
func (mp *PriorityNonceMempool) Insert(ctx context.Context, tx sdk.Tx) error {
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
|
||||
// if mp.maxTx > 0 && mp.CountTx() >= mp.maxTx {
|
||||
// return mempool.ErrMempoolTxMaxCapacity
|
||||
// } else
|
||||
if mp.maxTx < 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sdkContext := sdk.UnwrapSDKContext(ctx)
|
||||
priority := sdkContext.Priority()
|
||||
|
||||
txInfo, err := extractTxInfo(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !mp.canInsert(txInfo.Sender) {
|
||||
return errors.Wrapf(errMempoolTooManyTxs, "[%d@%s]sender has too many txs in mempool", txInfo.Nonce, txInfo.Sender)
|
||||
}
|
||||
|
||||
// init sender index if not exists
|
||||
senderIndex, ok := mp.senderIndices[txInfo.Sender]
|
||||
if !ok {
|
||||
senderIndex = skiplist.New(skiplist.LessThanFunc(func(a, b any) int {
|
||||
return skiplist.Uint64.Compare(b.(txMeta).nonce, a.(txMeta).nonce)
|
||||
}))
|
||||
|
||||
// initialize sender index if not found
|
||||
mp.senderIndices[txInfo.Sender] = senderIndex
|
||||
}
|
||||
|
||||
newKey := txMeta{nonce: txInfo.Nonce, priority: priority, sender: txInfo.Sender}
|
||||
|
||||
// Since mp.priorityIndex is scored by priority, then sender, then nonce, a
|
||||
// changed priority will create a new key, so we must remove the old key and
|
||||
// re-insert it to avoid having the same tx with different priorityIndex indexed
|
||||
// twice in the mempool.
|
||||
//
|
||||
// This O(log n) remove operation is rare and only happens when a tx's priority
|
||||
// changes.
|
||||
|
||||
sk := txMeta{nonce: txInfo.Nonce, sender: txInfo.Sender}
|
||||
if oldScore, txExists := mp.scores[sk]; txExists {
|
||||
if oldScore.priority < priority {
|
||||
oldTx := senderIndex.Get(newKey).Value.(sdk.Tx)
|
||||
return mp.doTxReplace(ctx, newKey, oldScore, oldTx, tx)
|
||||
}
|
||||
return errors.Wrapf(errTxInMempool, "[%d@%s] tx already in mempool", txInfo.Nonce, txInfo.Sender)
|
||||
} else {
|
||||
mempoolSize := mp.priorityIndex.Len()
|
||||
if mempoolSize >= mp.maxTx {
|
||||
lowestPriority := mp.getLowestPriority()
|
||||
// find one to replace
|
||||
if lowestPriority > 0 && priority <= lowestPriority {
|
||||
return errors.Wrapf(errMempoolTxGasPriceTooLow, "[%d@%s]tx with priority %d is too low, current lowest priority is %d", newKey.nonce, newKey.sender, priority, lowestPriority)
|
||||
}
|
||||
|
||||
var maxIndexSize int
|
||||
var lowerPriority int64 = math.MaxInt64
|
||||
var selectedElement *skiplist.Element
|
||||
for sender, index := range mp.senderIndices {
|
||||
indexSize := index.Len()
|
||||
if sender == txInfo.Sender {
|
||||
continue
|
||||
}
|
||||
|
||||
if indexSize > 0 {
|
||||
tail := index.Back()
|
||||
if tail != nil {
|
||||
tailKey := tail.Key().(txMeta)
|
||||
if tailKey.priority < lowerPriority {
|
||||
lowerPriority = tailKey.priority
|
||||
maxIndexSize = indexSize
|
||||
selectedElement = tail
|
||||
} else if tailKey.priority == lowerPriority {
|
||||
if indexSize > maxIndexSize {
|
||||
maxIndexSize = indexSize
|
||||
selectedElement = tail
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if selectedElement != nil {
|
||||
key := selectedElement.Key().(txMeta)
|
||||
replacedTx, _ := mp.doRemove(key, true)
|
||||
|
||||
// insert new tx
|
||||
mp.doInsert(newKey, tx, true)
|
||||
|
||||
if mp.txReplacedCallback != nil && replacedTx != nil {
|
||||
sdkContext.Logger().Debug("txn replaced caused by full of mempool", "old", fmt.Sprintf("%d@%s", key.nonce, key.sender), "new", fmt.Sprintf("%d@%s", newKey.nonce, newKey.sender), "mempoolSize", mempoolSize)
|
||||
mp.txReplacedCallback(ctx,
|
||||
&TxInfo{Sender: key.sender, Nonce: key.nonce, Tx: replacedTx},
|
||||
&TxInfo{Sender: newKey.sender, Nonce: newKey.nonce, Tx: tx},
|
||||
)
|
||||
}
|
||||
} else {
|
||||
return errors.Wrapf(errMempoolIsFull, "%d@%s with priority%d", newKey.nonce, newKey.sender, newKey.priority)
|
||||
}
|
||||
} else {
|
||||
mp.doInsert(newKey, tx, true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// doInsert writes the tx into the sender index, priority index, and scores
// map; callers must hold mp.mtx. When incrCnt is true the per-sender tx
// counter is bumped as well (its error result is discarded here, matching the
// existing call sites).
func (mp *PriorityNonceMempool) doInsert(newKey txMeta, tx sdk.Tx, incrCnt bool) {
	senderIndex, ok := mp.senderIndices[newKey.sender]
	if !ok {
		senderIndex = skiplist.New(skiplist.LessThanFunc(func(a, b any) int {
			return skiplist.Uint64.Compare(b.(txMeta).nonce, a.(txMeta).nonce)
		}))

		// initialize sender index if not found
		mp.senderIndices[newKey.sender] = senderIndex
	}

	mp.priorityCounts[newKey.priority]++
	newKey.senderElement = senderIndex.Set(newKey, tx)

	// scores records only the priority here; weight stays 0 until
	// reorderPriorityTies recomputes it during Select.
	mp.scores[txMeta{nonce: newKey.nonce, sender: newKey.sender}] = txMeta{priority: newKey.priority}
	mp.priorityIndex.Set(newKey, tx)

	if incrCnt {
		mp.incrSenderTxCnt(newKey.sender, newKey.nonce)
	}
}
|
||||
|
||||
// doRemove deletes the tx identified by oldKey's (nonce, sender) from both
// indices and the scores map; callers must hold mp.mtx. When decrCnt is true
// the per-sender counter is decremented too. Returns the removed tx, or an
// error if it was not resident.
func (mp *PriorityNonceMempool) doRemove(oldKey txMeta, decrCnt bool) (sdk.Tx, error) {
	scoreKey := txMeta{nonce: oldKey.nonce, sender: oldKey.sender}
	score, ok := mp.scores[scoreKey]
	if !ok {
		return nil, errors.Wrapf(mempool.ErrTxNotFound, "%d@%s not found", oldKey.nonce, oldKey.sender)
	}
	// Rebuild the full priority-index key from the stored score (priority and
	// weight are not part of oldKey's identity).
	tk := txMeta{nonce: oldKey.nonce, priority: score.priority, sender: oldKey.sender, weight: score.weight}

	senderTxs, ok := mp.senderIndices[oldKey.sender]
	if !ok {
		return nil, fmt.Errorf("%d@%s not found", oldKey.nonce, oldKey.sender)
	}

	mp.priorityIndex.Remove(tk)
	removedElem := senderTxs.Remove(tk)
	delete(mp.scores, scoreKey)
	mp.priorityCounts[score.priority]--

	if decrCnt {
		mp.decrSenderTxCnt(oldKey.sender, oldKey.nonce)
	}

	// The score existed but the sender index had no matching element; report
	// not-found after the bookkeeping above has been cleaned up.
	if removedElem == nil {
		return nil, mempool.ErrTxNotFound
	}

	return removedElem.Value.(sdk.Tx), nil
}
|
||||
|
||||
func (mp *PriorityNonceMempool) doTxReplace(ctx context.Context, newMate, oldMate txMeta, oldTx, newTx sdk.Tx) error {
|
||||
if mp.txReplacement != nil && !mp.txReplacement(oldMate.priority, newMate.priority, oldTx, newTx) {
|
||||
return fmt.Errorf(
|
||||
"tx doesn't fit the replacement rule, oldPriority: %v, newPriority: %v, oldTx: %v, newTx: %v",
|
||||
oldMate.priority,
|
||||
newMate.priority,
|
||||
oldTx,
|
||||
newTx,
|
||||
)
|
||||
}
|
||||
|
||||
e := mp.priorityIndex.Remove(txMeta{
|
||||
nonce: newMate.nonce,
|
||||
sender: newMate.sender,
|
||||
priority: oldMate.priority,
|
||||
weight: oldMate.weight,
|
||||
})
|
||||
replacedTx := e.Value.(sdk.Tx)
|
||||
mp.priorityCounts[oldMate.priority]--
|
||||
|
||||
mp.doInsert(newMate, newTx, false)
|
||||
|
||||
if mp.txReplacedCallback != nil && replacedTx != nil {
|
||||
sdkContext := sdk.UnwrapSDKContext(ctx)
|
||||
sdkContext.Logger().Debug("txn update", "txn", fmt.Sprintf("%d@%s", newMate.nonce, newMate.sender), "oldPriority", oldMate.priority, "newPriority", newMate.priority)
|
||||
mp.txReplacedCallback(ctx,
|
||||
&TxInfo{Sender: newMate.sender, Nonce: newMate.nonce, Tx: replacedTx},
|
||||
&TxInfo{Sender: newMate.sender, Nonce: newMate.nonce, Tx: newTx},
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// iteratePriority advances to the next node of the priority index, records
// which sender owns it and the priority of the node after it, then delegates
// to Next to position that sender's nonce cursor. Returns nil when the
// priority index is exhausted.
func (i *PriorityNonceIterator) iteratePriority() mempool.Iterator {
	// beginning of priority iteration
	if i.priorityNode == nil {
		i.priorityNode = i.mempool.priorityIndex.Front()
	} else {
		i.priorityNode = i.priorityNode.Next()
	}

	// end of priority iteration
	if i.priorityNode == nil {
		return nil
	}

	i.sender = i.priorityNode.Key().(txMeta).sender

	// Cache the following node's priority so Next can tell when the current
	// sender's nonce run stops being competitive.
	nextPriorityNode := i.priorityNode.Next()
	if nextPriorityNode != nil {
		i.nextPriority = nextPriorityNode.Key().(txMeta).priority
	} else {
		i.nextPriority = math.MinInt64
	}

	return i.Next()
}
|
||||
|
||||
// Next advances the iterator within the current sender's nonce-ordered index,
// falling back to iteratePriority when the sender run ends or stops being
// competitive with the rest of the pool. Returns nil when iteration is done.
func (i *PriorityNonceIterator) Next() mempool.Iterator {
	if i.priorityNode == nil {
		return nil
	}

	cursor, ok := i.senderCursors[i.sender]
	if !ok {
		// beginning of sender iteration
		cursor = i.mempool.senderIndices[i.sender].Front()
	} else {
		// middle of sender iteration
		cursor = cursor.Next()
	}

	// end of sender iteration
	if cursor == nil {
		return i.iteratePriority()
	}

	key := cursor.Key().(txMeta)

	// We've reached a transaction with a priority lower than the next highest
	// priority in the pool.
	if key.priority < i.nextPriority {
		return i.iteratePriority()
	} else if key.priority == i.nextPriority && i.priorityNode.Next() != nil {
		// Weight is incorporated into the priority index key only (not sender index)
		// so we must fetch it here from the scores map.
		weight := i.mempool.scores[txMeta{nonce: key.nonce, sender: key.sender}].weight
		if weight < i.priorityNode.Next().Key().(txMeta).weight {
			return i.iteratePriority()
		}
	}

	i.senderCursors[i.sender] = cursor
	return i
}
|
||||
|
||||
// Tx returns the tx at the iterator's current position. It assumes a prior
// Next/iteratePriority call positioned a valid sender cursor; calling it on an
// exhausted iterator would panic on the nil cursor.
func (i *PriorityNonceIterator) Tx() sdk.Tx {
	return i.senderCursors[i.sender].Value.(sdk.Tx)
}
|
||||
|
||||
// Select returns a set of transactions from the mempool, ordered by priority
// and sender-nonce in O(n) time. The passed in list of transactions are ignored.
// This is a readonly operation, the mempool is not modified.
//
// The maxBytes parameter defines the maximum number of bytes of transactions to
// return.
//
// NOTE: It is not safe to use this iterator while removing transactions from
// the underlying mempool.
//
// NOTE(review): doSelect invokes reorderPriorityTies, which rewrites priority
// index entries, so "readonly" above is not strictly accurate — confirm.
func (mp *PriorityNonceMempool) Select(ctx context.Context, txs [][]byte) mempool.Iterator {
	mp.mtx.Lock()
	defer mp.mtx.Unlock()

	return mp.doSelect(ctx, txs)
}
|
||||
|
||||
func (mp *PriorityNonceMempool) SelectBy(ctx context.Context, txs [][]byte, callback func(sdk.Tx) bool) {
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
|
||||
iter := mp.doSelect(ctx, txs)
|
||||
for iter != nil && callback(iter.Tx()) {
|
||||
iter = iter.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// doSelect builds a fresh iterator over the pool; callers must hold mp.mtx.
// Both parameters are unused (the mempool.Mempool interface supplies them).
func (mp *PriorityNonceMempool) doSelect(_ context.Context, _ [][]byte) mempool.Iterator {
	if mp.priorityIndex.Len() == 0 {
		return nil
	}

	// Recompute tie-breaker weights before handing out an iterator; this
	// rewrites priority-index entries for txs with shared priorities.
	mp.reorderPriorityTies()

	iterator := &PriorityNonceIterator{
		mempool:       mp,
		senderCursors: make(map[string]*skiplist.Element),
	}

	return iterator.iteratePriority()
}
|
||||
|
||||
func (mp *PriorityNonceMempool) GetSenderUncommittedTxnCount(ctx context.Context, sender string) int {
|
||||
mp.mtx.Lock()
|
||||
defer mp.mtx.Unlock()
|
||||
|
||||
if _, exists := mp.counterBySender[sender]; exists {
|
||||
return mp.counterBySender[sender]
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// reorderKey records one pending priority-index rewrite: the stale key to
// delete, the weighted key to insert, and the tx value carried across.
type reorderKey struct {
	deleteKey txMeta
	insertKey txMeta
	tx        sdk.Tx
}

// reorderPriorityTies recomputes tie-breaker weights for every tx whose
// priority is shared with at least one other tx, then rewrites those entries
// in the priority index and scores map. Called from doSelect; callers must
// hold mp.mtx. Rewrites are collected first and applied afterwards so the
// index is not mutated while being walked.
func (mp *PriorityNonceMempool) reorderPriorityTies() {
	node := mp.priorityIndex.Front()

	var reordering []reorderKey
	for node != nil {
		key := node.Key().(txMeta)
		if mp.priorityCounts[key.priority] > 1 {
			newKey := key
			newKey.weight = senderWeight(key.senderElement)
			reordering = append(reordering, reorderKey{deleteKey: key, insertKey: newKey, tx: node.Value.(sdk.Tx)})
		}

		node = node.Next()
	}

	for _, k := range reordering {
		mp.priorityIndex.Remove(k.deleteKey)
		delete(mp.scores, txMeta{nonce: k.deleteKey.nonce, sender: k.deleteKey.sender})
		mp.priorityIndex.Set(k.insertKey, k.tx)
		// Unlike doInsert, the full key (including weight) is stored here.
		mp.scores[txMeta{nonce: k.insertKey.nonce, sender: k.insertKey.sender}] = k.insertKey
	}
}
|
||||
|
||||
// senderWeight returns the weight of a given tx (t) at senderCursor. Weight is
// defined as the first (nonce-wise) same sender tx with a priority not equal to
// t. It is used to resolve priority collisions, that is when 2 or more txs from
// different senders have the same priority.
//
// NOTE(review): as written, the loop overwrites weight on every priority
// change, so the returned value is the priority of the sender's LAST tx in the
// index rather than the "first differing" one described above — confirm
// against the intended tie-breaking semantics.
func senderWeight(senderCursor *skiplist.Element) int64 {
	if senderCursor == nil {
		return 0
	}

	weight := senderCursor.Key().(txMeta).priority
	senderCursor = senderCursor.Next()
	for senderCursor != nil {
		p := senderCursor.Key().(txMeta).priority
		if p != weight {
			weight = p
		}

		senderCursor = senderCursor.Next()
	}

	return weight
}
|
||||
|
||||
// CountTx returns the number of transactions in the mempool.
func (mp *PriorityNonceMempool) CountTx() int {
	mp.mtx.Lock()
	defer mp.mtx.Unlock()
	// Every resident tx has exactly one entry in the priority index.
	return mp.priorityIndex.Len()
}
|
||||
|
||||
// Remove removes a transaction from the mempool in O(log n) time, returning an
// error if unsuccessful.
//
// The per-sender counter is decremented before the existence check;
// decrSenderTxCnt is a no-op for untracked (sender, nonce) pairs, so removing
// a non-resident tx does not corrupt the counters.
func (mp *PriorityNonceMempool) Remove(tx sdk.Tx) error {
	mp.mtx.Lock()
	defer mp.mtx.Unlock()

	txInfo, err := extractTxInfo(tx)
	if err != nil {
		return err
	}

	mp.decrSenderTxCnt(txInfo.Sender, txInfo.Nonce)

	scoreKey := txMeta{nonce: txInfo.Nonce, sender: txInfo.Sender}
	score, ok := mp.scores[scoreKey]
	if !ok {
		return mempool.ErrTxNotFound
	}
	// Rebuild the full priority-index key from the stored score.
	tk := txMeta{nonce: txInfo.Nonce, priority: score.priority, sender: txInfo.Sender, weight: score.weight}

	senderTxs, ok := mp.senderIndices[txInfo.Sender]
	if !ok {
		return fmt.Errorf("sender %s not found", txInfo.Sender)
	}

	mp.priorityIndex.Remove(tk)
	senderTxs.Remove(tk)
	delete(mp.scores, scoreKey)
	mp.priorityCounts[score.priority]--

	return nil
}
|
||||
|
||||
func (mp *PriorityNonceMempool) getLowestPriority() int64 {
|
||||
if mp.priorityIndex.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
min := int64(math.MaxInt64)
|
||||
for priority, count := range mp.priorityCounts {
|
||||
if count > 0 {
|
||||
if priority < min {
|
||||
min = priority
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return min
|
||||
}
|
||||
|
||||
func (mp *PriorityNonceMempool) canInsert(sender string) bool {
|
||||
mp.senderTxCntLock.RLock()
|
||||
defer mp.senderTxCntLock.RUnlock()
|
||||
|
||||
if _, exists := mp.counterBySender[sender]; exists {
|
||||
return mp.counterBySender[sender] < MAX_TXS_PRE_SENDER_IN_MEMPOOL
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// incrSenderTxCnt records the (sender, nonce) pair in txRecord and bumps the
// sender's tx counter, enforcing MAX_TXS_PRE_SENDER_IN_MEMPOOL. Re-recording
// an already-tracked pair is a no-op.
//
// NOTE(review): on the cap-exceeded path the txRecord entry has already been
// added but the counter is not bumped, leaving the two maps inconsistent —
// confirm whether the record should be rolled back. Callers (doInsert) also
// discard the returned error.
func (mp *PriorityNonceMempool) incrSenderTxCnt(sender string, nonce uint64) error {
	mp.senderTxCntLock.Lock()
	defer mp.senderTxCntLock.Unlock()

	existsKey := txMeta{nonce: nonce, sender: sender}
	if _, exists := mp.txRecord[existsKey]; !exists {
		mp.txRecord[existsKey] = struct{}{}

		if _, exists := mp.counterBySender[sender]; !exists {
			mp.counterBySender[sender] = 1
		} else {
			if mp.counterBySender[sender] < MAX_TXS_PRE_SENDER_IN_MEMPOOL {
				mp.counterBySender[sender] += 1
			} else {
				return fmt.Errorf("tx sender has too many txs in mempool")
			}
		}
	}
	return nil
}
|
||||
|
||||
func (mp *PriorityNonceMempool) decrSenderTxCnt(sender string, nonce uint64) {
|
||||
mp.senderTxCntLock.Lock()
|
||||
defer mp.senderTxCntLock.Unlock()
|
||||
|
||||
existsKey := txMeta{nonce: nonce, sender: sender}
|
||||
if _, exists := mp.txRecord[existsKey]; exists {
|
||||
delete(mp.txRecord, existsKey)
|
||||
|
||||
if _, exists := mp.counterBySender[sender]; exists {
|
||||
if mp.counterBySender[sender] > 1 {
|
||||
mp.counterBySender[sender] -= 1
|
||||
} else {
|
||||
delete(mp.counterBySender, sender)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func IsEmpty(mempool mempool.Mempool) error {
|
||||
mp := mempool.(*PriorityNonceMempool)
|
||||
if mp.priorityIndex.Len() != 0 {
|
||||
return fmt.Errorf("priorityIndex not empty")
|
||||
}
|
||||
|
||||
var countKeys []int64
|
||||
for k := range mp.priorityCounts {
|
||||
countKeys = append(countKeys, k)
|
||||
}
|
||||
|
||||
for _, k := range countKeys {
|
||||
if mp.priorityCounts[k] != 0 {
|
||||
return fmt.Errorf("priorityCounts not zero at %v, got %v", k, mp.priorityCounts[k])
|
||||
}
|
||||
}
|
||||
|
||||
var senderKeys []string
|
||||
for k := range mp.senderIndices {
|
||||
senderKeys = append(senderKeys, k)
|
||||
}
|
||||
|
||||
for _, k := range senderKeys {
|
||||
if mp.senderIndices[k].Len() != 0 {
|
||||
return fmt.Errorf("senderIndex not empty for sender %v", k)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TxInfo bundles a tx with the (sender, nonce) identity it is indexed under;
// it is returned by extractTxInfo and passed to txReplacedCallback.
type TxInfo struct {
	// Sender is the bech32 account address string.
	Sender string
	// Nonce is the sender's sequence number for this tx.
	Nonce uint64
	Tx    sdk.Tx
}
|
||||
|
||||
func extractTxInfo(tx sdk.Tx) (*TxInfo, error) {
|
||||
var sender string
|
||||
var nonce uint64
|
||||
|
||||
sigs, err := tx.(signing.SigVerifiableTx).GetSignaturesV2()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(sigs) == 0 {
|
||||
msgs := tx.GetMsgs()
|
||||
if len(msgs) != 1 {
|
||||
return nil, fmt.Errorf("tx must have at least one signer")
|
||||
}
|
||||
msgEthTx, ok := msgs[0].(*evmtypes.MsgEthereumTx)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("tx must have at least one signer")
|
||||
}
|
||||
ethTx := msgEthTx.AsTransaction()
|
||||
signer := gethtypes.NewEIP2930Signer(ethTx.ChainId())
|
||||
ethSender, err := signer.Sender(ethTx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tx must have at least one signer")
|
||||
}
|
||||
sender = sdk.AccAddress(ethSender.Bytes()).String()
|
||||
nonce = ethTx.Nonce()
|
||||
} else {
|
||||
sig := sigs[0]
|
||||
sender = sdk.AccAddress(sig.PubKey.Address()).String()
|
||||
nonce = sig.Sequence
|
||||
}
|
||||
|
||||
return &TxInfo{Sender: sender, Nonce: nonce, Tx: tx}, nil
|
||||
}
|
@ -48,9 +48,6 @@ func (suite *tallyHandlerSuite) SetupTest() {
|
||||
suite.tallier = NewTallyHandler(
|
||||
suite.app.GetGovKeeper(),
|
||||
stakingKeeper,
|
||||
suite.app.GetSavingsKeeper(),
|
||||
suite.app.GetEarnKeeper(),
|
||||
suite.app.GetLiquidKeeper(),
|
||||
suite.app.GetBankKeeper(),
|
||||
)
|
||||
}
|
||||
@ -133,7 +130,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
|
||||
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
|
||||
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
|
||||
})
|
||||
suite.Run("VetoedFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -148,7 +145,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
|
||||
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
|
||||
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
|
||||
})
|
||||
suite.Run("UnvetoedAndYesAboveThresholdPasses", func() {
|
||||
suite.SetupTest()
|
||||
@ -165,7 +162,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Truef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
suite.Run("UnvetoedAndYesBelowThresholdFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -182,7 +179,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
suite.Run("NotEnoughStakeFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -194,7 +191,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
suite.Run("UnvetoedAndAllAbstainedFails", func() {
|
||||
suite.SetupTest()
|
||||
@ -207,7 +204,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
|
||||
|
||||
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
|
||||
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
|
||||
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
|
||||
})
|
||||
|
||||
}
|
||||
|
@ -47,7 +47,9 @@ import (
|
||||
committeekeeper "github.com/0glabs/0g-chain/x/committee/keeper"
|
||||
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
|
||||
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
|
||||
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
|
||||
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
|
||||
wrappeda0gibasekeeper "github.com/0glabs/0g-chain/x/wrapped-a0gi-base/keeper"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -91,36 +93,45 @@ func NewTestAppFromSealed() TestApp {
|
||||
|
||||
encCfg := MakeEncodingConfig()
|
||||
|
||||
bApp := NewBaseApp(log.NewNopLogger(), db, encCfg, baseapp.SetChainID(TestChainId))
|
||||
app := NewApp(
|
||||
log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil,
|
||||
encCfg, DefaultOptions, baseapp.SetChainID(TestChainId),
|
||||
chaincfg.DefaultNodeHome, nil,
|
||||
encCfg, DefaultOptions, bApp,
|
||||
)
|
||||
return TestApp{App: *app}
|
||||
}
|
||||
|
||||
// nolint
|
||||
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
|
||||
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
|
||||
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
|
||||
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
|
||||
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
|
||||
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
|
||||
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
|
||||
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
|
||||
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
|
||||
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
|
||||
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
|
||||
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
|
||||
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
|
||||
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
|
||||
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
|
||||
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
|
||||
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
|
||||
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
|
||||
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
|
||||
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
|
||||
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
|
||||
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
|
||||
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
|
||||
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
|
||||
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
|
||||
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
|
||||
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
|
||||
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
|
||||
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
|
||||
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
|
||||
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
|
||||
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
|
||||
func (tApp TestApp) GetFeeMarketKeeper() *feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
|
||||
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
|
||||
func (tApp TestApp) GetPrecisebankKeeper() precisebankkeeper.Keeper { return tApp.precisebankKeeper }
|
||||
func (tApp TestApp) GetWrappedA0GIBaseKeeper() wrappeda0gibasekeeper.Keeper {
|
||||
return tApp.wrappeda0gibaseKeeper
|
||||
}
|
||||
|
||||
func (tApp TestApp) GetKVStoreKey(key string) *storetypes.KVStoreKey {
|
||||
return tApp.keys[key]
|
||||
}
|
||||
|
||||
func (tApp TestApp) GetBlockedMaccAddrs() map[string]bool {
|
||||
return tApp.loadBlockedMaccAddrs()
|
||||
}
|
||||
|
||||
// LegacyAmino returns the app's amino codec.
|
||||
func (app *App) LegacyAmino() *codec.LegacyAmino {
|
||||
return app.legacyAmino
|
||||
@ -451,21 +462,7 @@ func (tApp TestApp) CreateNewUnbondedValidator(ctx sdk.Context, valAddress sdk.V
|
||||
return err
|
||||
}
|
||||
|
||||
func (tApp TestApp) SetInflation(ctx sdk.Context, value sdk.Dec) {
|
||||
mk := tApp.GetMintKeeper()
|
||||
|
||||
mintParams := mk.GetParams(ctx)
|
||||
mintParams.InflationMax = sdk.ZeroDec()
|
||||
mintParams.InflationMin = sdk.ZeroDec()
|
||||
|
||||
if err := mintParams.Validate(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
mk.SetParams(ctx, mintParams)
|
||||
}
|
||||
|
||||
// GeneratePrivKeyAddressPairsFromRand generates (deterministically) a total of n private keys and addresses.
|
||||
// GeneratePrivKeyAddressPairs generates (deterministically) a total of n private keys and addresses.
|
||||
func GeneratePrivKeyAddressPairs(n int) (keys []cryptotypes.PrivKey, addrs []sdk.AccAddress) {
|
||||
r := rand.New(rand.NewSource(12345)) // make the generation deterministic
|
||||
keys = make([]cryptotypes.PrivKey, n)
|
||||
|
@ -3,13 +3,16 @@ package app
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
wrappeda0gibasetypes "github.com/0glabs/0g-chain/x/wrapped-a0gi-base/types"
|
||||
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/module"
|
||||
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
|
||||
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
|
||||
)
|
||||
|
||||
const (
|
||||
UpgradeName_Testnet = "v0.3.1"
|
||||
UpgradeName_Testnet = "v0.5.0"
|
||||
)
|
||||
|
||||
// RegisterUpgradeHandlers registers the upgrade handlers for the app.
|
||||
@ -18,6 +21,24 @@ func (app App) RegisterUpgradeHandlers() {
|
||||
UpgradeName_Testnet,
|
||||
upgradeHandler(app, UpgradeName_Testnet),
|
||||
)
|
||||
|
||||
upgradeInfo, err := app.upgradeKeeper.ReadUpgradeInfoFromDisk()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
doUpgrade := upgradeInfo.Name == UpgradeName_Testnet
|
||||
|
||||
if doUpgrade && !app.upgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
|
||||
storeUpgrades := storetypes.StoreUpgrades{
|
||||
Added: []string{
|
||||
wrappeda0gibasetypes.ModuleName,
|
||||
},
|
||||
}
|
||||
|
||||
// configure store loader that checks if version == upgradeHeight and applies store upgrades
|
||||
app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
|
||||
}
|
||||
}
|
||||
|
||||
// upgradeHandler returns an UpgradeHandler for the given upgrade parameters.
|
||||
@ -30,13 +51,21 @@ func upgradeHandler(
|
||||
plan upgradetypes.Plan,
|
||||
fromVM module.VersionMap,
|
||||
) (module.VersionMap, error) {
|
||||
app.Logger().Info(fmt.Sprintf("running %s upgrade handler", name))
|
||||
logger := app.Logger()
|
||||
logger.Info(fmt.Sprintf("running %s upgrade handler", name))
|
||||
|
||||
params := app.mintKeeper.GetParams(ctx)
|
||||
params.MintDenom = "ua0gi"
|
||||
app.mintKeeper.SetParams(ctx, params)
|
||||
// Run migrations for all modules and return new consensus version map.
|
||||
versionMap, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// run migrations for all modules and return new consensus version map
|
||||
return app.mm.RunMigrations(ctx, app.configurator, fromVM)
|
||||
app.mintKeeper.InitGenesis(ctx, app.accountKeeper, &minttypes.GenesisState{
|
||||
BondDenom: "ua0gi",
|
||||
})
|
||||
|
||||
logger.Info("completed store migrations")
|
||||
|
||||
return versionMap, nil
|
||||
}
|
||||
}
|
||||
|
1
app/upgrades_test.go
Normal file
1
app/upgrades_test.go
Normal file
@ -0,0 +1 @@
|
||||
package app_test
|
45
build/lint.mk
Normal file
45
build/lint.mk
Normal file
@ -0,0 +1,45 @@
|
||||
################################################################################
|
||||
### Required Variables ###
|
||||
################################################################################
|
||||
ifndef DOCKER
|
||||
$(error DOCKER not set)
|
||||
endif
|
||||
|
||||
ifndef BUILD_DIR
|
||||
$(error BUILD_DIR not set)
|
||||
endif
|
||||
|
||||
################################################################################
|
||||
### Lint Settings ###
|
||||
################################################################################
|
||||
|
||||
LINT_FROM_REV ?= $(shell git merge-base origin/master HEAD)
|
||||
|
||||
GOLANGCI_VERSION ?= $(shell cat .golangci-version)
|
||||
GOLANGCI_IMAGE_TAG ?= golangci/golangci-lint:$(GOLANGCI_VERSION)
|
||||
|
||||
GOLANGCI_DIR ?= $(CURDIR)/$(BUILD_DIR)/.golangci-lint
|
||||
|
||||
GOLANGCI_CACHE_DIR ?= $(GOLANGCI_DIR)/$(GOLANGCI_VERSION)-cache
|
||||
GOLANGCI_MOD_CACHE_DIR ?= $(GOLANGCI_DIR)/go-mod
|
||||
|
||||
################################################################################
|
||||
### Lint Target ###
|
||||
################################################################################
|
||||
|
||||
.PHONY: lint
|
||||
lint: $(GOLANGCI_CACHE_DIR) $(GOLANGCI_MOD_CACHE_DIR)
|
||||
@echo "Running lint from rev $(LINT_FROM_REV), use LINT_FROM_REV var to override."
|
||||
$(DOCKER) run -t --rm \
|
||||
-v $(GOLANGCI_CACHE_DIR):/root/.cache \
|
||||
-v $(GOLANGCI_MOD_CACHE_DIR):/go/pkg/mod \
|
||||
-v $(CURDIR):/app \
|
||||
-w /app \
|
||||
$(GOLANGCI_IMAGE_TAG) \
|
||||
golangci-lint run -v --new-from-rev $(LINT_FROM_REV)
|
||||
|
||||
$(GOLANGCI_CACHE_DIR):
|
||||
@mkdir -p $@
|
||||
|
||||
$(GOLANGCI_MOD_CACHE_DIR):
|
||||
@mkdir -p $@
|
@ -56,6 +56,7 @@ proto-update-deps: check-rsync ## Update all third party proto files
|
||||
@mkdir -p client/docs
|
||||
@cp -f $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
|
||||
@cp -f $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
|
||||
@cp -f $(ETHERMINT_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/ethermint-swagger.yml
|
||||
|
||||
@mkdir -p $(COSMOS_PROTO_TYPES)
|
||||
@cp -f $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
|
||||
|
@ -1,76 +0,0 @@
|
||||
package chaincfg
|
||||
|
||||
import (
|
||||
"github.com/shopspring/decimal"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
Xmax, _ = sdk.NewDecFromStr("1.0") // upper limit on staked supply (as % of circ supply)
|
||||
Ymin, _ = sdk.NewDecFromStr("0.05") // target APY at upper limit
|
||||
|
||||
Xmin, _ = sdk.NewDecFromStr("0.2") // lower limit on staked supply (as % of circ supply)
|
||||
Ymax, _ = sdk.NewDecFromStr("0.15") // target APY at lower limit
|
||||
|
||||
decayRate, _ = sdk.NewDecFromStr("10")
|
||||
)
|
||||
|
||||
func decExp(x sdk.Dec) sdk.Dec {
|
||||
xDec := decimal.NewFromBigInt(x.BigInt(), -18)
|
||||
expDec, _ := xDec.ExpTaylor(18)
|
||||
expInt := expDec.Shift(18).BigInt()
|
||||
return sdk.NewDecFromBigIntWithPrec(expInt, 18)
|
||||
}
|
||||
|
||||
func NextInflationRate(ctx sdk.Context, minter minttypes.Minter, params minttypes.Params, bondedRatio sdk.Dec, circulatingRatio sdk.Dec) sdk.Dec {
|
||||
X := bondedRatio.Quo(circulatingRatio)
|
||||
|
||||
var apy sdk.Dec
|
||||
if X.LT(Xmin) {
|
||||
apy = Ymax
|
||||
} else {
|
||||
exp := decayRate.Neg().Mul(Xmax.Sub(Xmin))
|
||||
c := decExp(exp)
|
||||
d := Ymin.Sub(Ymax.Mul(c)).Quo(sdk.OneDec().Sub(c))
|
||||
expBonded := decayRate.Neg().Mul(X.Sub(Xmin))
|
||||
cBonded := decExp(expBonded)
|
||||
e := Ymax.Sub(d).Mul(cBonded)
|
||||
apy = d.Add(e)
|
||||
}
|
||||
|
||||
inflation := apy.Mul(bondedRatio)
|
||||
|
||||
// // The target annual inflation rate is recalculated for each previsions cycle. The
|
||||
// // inflation is also subject to a rate change (positive or negative) depending on
|
||||
// // the distance from the desired ratio (67%). The maximum rate change possible is
|
||||
// // defined to be 13% per year, however the annual inflation is capped as between
|
||||
// // 7% and 20%.
|
||||
|
||||
// // (1 - bondedRatio/GoalBonded) * InflationRateChange
|
||||
// inflationRateChangePerYear := sdk.OneDec().
|
||||
// Sub(bondedRatio.Quo(params.GoalBonded)).
|
||||
// Mul(params.InflationRateChange)
|
||||
// inflationRateChange := inflationRateChangePerYear.Quo(sdk.NewDec(int64(params.BlocksPerYear)))
|
||||
|
||||
// // adjust the new annual inflation for this next cycle
|
||||
// inflation := minter.Inflation.Add(inflationRateChange) // note inflationRateChange may be negative
|
||||
// if inflation.GT(params.InflationMax) {
|
||||
// inflation = params.InflationMax
|
||||
// }
|
||||
// if inflation.LT(params.InflationMin) {
|
||||
// inflation = params.InflationMin
|
||||
// }
|
||||
|
||||
ctx.Logger().Info(
|
||||
"nextInflationRate",
|
||||
"bondedRatio", bondedRatio,
|
||||
"circulatingRatio", circulatingRatio,
|
||||
"apy", apy,
|
||||
"inflation", inflation,
|
||||
"params", params,
|
||||
"minter", minter,
|
||||
)
|
||||
return inflation
|
||||
}
|
2
ci/env/kava-internal-testnet/KAVA.VERSION
vendored
2
ci/env/kava-internal-testnet/KAVA.VERSION
vendored
@ -1 +1 @@
|
||||
a967d2fdda299ec8e1e3b99fb55bd06ecfdb0469
|
||||
6862cde560c70cb82f7908e6cef22ca223465bd2
|
||||
|
124
ci/env/kava-internal-testnet/genesis.json
vendored
124
ci/env/kava-internal-testnet/genesis.json
vendored
@ -22,6 +22,8 @@
|
||||
},
|
||||
"app_hash": "",
|
||||
"app_state": {
|
||||
"06-solomachine": null,
|
||||
"07-tendermint": null,
|
||||
"auction": {
|
||||
"next_auction_id": "1",
|
||||
"params": {
|
||||
@ -505,6 +507,10 @@
|
||||
{
|
||||
"address": "kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq",
|
||||
"coins": [
|
||||
{
|
||||
"denom": "bnb",
|
||||
"amount": "500000000"
|
||||
},
|
||||
{
|
||||
"denom": "btcb",
|
||||
"amount": "200000000"
|
||||
@ -525,6 +531,10 @@
|
||||
"denom": "erc20/axelar/wbtc",
|
||||
"amount": "1000000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/bitgo/wbtc",
|
||||
"amount": "200000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/multichain/usdc",
|
||||
"amount": "1000000000000000000"
|
||||
@ -556,12 +566,20 @@
|
||||
{
|
||||
"denom": "usdx",
|
||||
"amount": "103000000000"
|
||||
},
|
||||
{
|
||||
"denom": "xrpb",
|
||||
"amount": "1000000000000000"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "kava1krh7k30pc9rteejpl2zycj0vau58y8c69xkzws",
|
||||
"coins": [
|
||||
{
|
||||
"denom": "bnb",
|
||||
"amount": "100000000000000000"
|
||||
},
|
||||
{
|
||||
"denom": "btcb",
|
||||
"amount": "200000000"
|
||||
@ -582,6 +600,10 @@
|
||||
"denom": "erc20/axelar/wbtc",
|
||||
"amount": "1000000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/bitgo/wbtc",
|
||||
"amount": "200000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/tether/usdt",
|
||||
"amount": "100000000000"
|
||||
@ -601,6 +623,10 @@
|
||||
{
|
||||
"denom": "usdx",
|
||||
"amount": "103000000000"
|
||||
},
|
||||
{
|
||||
"denom": "xrpb",
|
||||
"amount": "103000000000"
|
||||
}
|
||||
]
|
||||
},
|
||||
@ -822,6 +848,7 @@
|
||||
"gov_denom": "ukava",
|
||||
"params": {
|
||||
"circuit_breaker": false,
|
||||
"liquidation_block_interval": 500,
|
||||
"collateral_params": [
|
||||
{
|
||||
"denom": "bnb",
|
||||
@ -993,8 +1020,7 @@
|
||||
"check_collateralization_index_count": "10",
|
||||
"conversion_factor": "6"
|
||||
}
|
||||
]
|
||||
,
|
||||
],
|
||||
"debt_auction_lot": "10000000000",
|
||||
"debt_auction_threshold": "100000000000",
|
||||
"debt_param": {
|
||||
@ -1241,7 +1267,15 @@
|
||||
"votes": []
|
||||
},
|
||||
"community": {
|
||||
"params": {}
|
||||
"params": {
|
||||
"upgrade_time_disable_inflation": "2023-11-01T00:00:00Z",
|
||||
"upgrade_time_set_staking_rewards_per_second": "744191",
|
||||
"staking_rewards_per_second": "0"
|
||||
},
|
||||
"staking_rewards_state": {
|
||||
"last_accumulation_time": "0001-01-01T00:00:00Z",
|
||||
"last_truncation_error": "0"
|
||||
}
|
||||
},
|
||||
"crisis": {
|
||||
"constant_fee": {
|
||||
@ -2067,6 +2101,25 @@
|
||||
}
|
||||
],
|
||||
"nested_types": []
|
||||
},
|
||||
{
|
||||
"msg_type_url": "/kava.committee.v1beta1.MsgVote",
|
||||
"msg_value_type_name": "MsgValueCommitteeVote",
|
||||
"value_types": [
|
||||
{
|
||||
"name": "proposal_id",
|
||||
"type": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "voter",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "vote_type",
|
||||
"type": "int32"
|
||||
}
|
||||
],
|
||||
"nested_types": []
|
||||
}
|
||||
],
|
||||
"allow_unprotected_txs": false
|
||||
@ -2229,22 +2282,27 @@
|
||||
"deposits": [],
|
||||
"votes": [],
|
||||
"proposals": [],
|
||||
"deposit_params": {
|
||||
"deposit_params": null,
|
||||
"voting_params": {
|
||||
"voting_period": "604800s"
|
||||
},
|
||||
"tally_params": null,
|
||||
"params": {
|
||||
"min_deposit": [
|
||||
{
|
||||
"denom": "ukava",
|
||||
"amount": "10000000"
|
||||
}
|
||||
],
|
||||
"max_deposit_period": "172800s"
|
||||
},
|
||||
"voting_params": {
|
||||
"voting_period": "600s"
|
||||
},
|
||||
"tally_params": {
|
||||
"max_deposit_period": "172800s",
|
||||
"voting_period": "604800s",
|
||||
"quorum": "0.334000000000000000",
|
||||
"threshold": "0.500000000000000000",
|
||||
"veto_threshold": "0.334000000000000000"
|
||||
"veto_threshold": "0.334000000000000000",
|
||||
"min_initial_deposit_ratio": "0.000000000000000000",
|
||||
"burn_vote_quorum": false,
|
||||
"burn_proposal_deposit_prevote": false,
|
||||
"burn_vote_veto": true
|
||||
}
|
||||
},
|
||||
"hard": {
|
||||
@ -2519,6 +2577,24 @@
|
||||
},
|
||||
"reserve_factor": "0.025000000000000000",
|
||||
"keeper_reward_percentage": "0.020000000000000000"
|
||||
},
|
||||
{
|
||||
"denom": "erc20/bitgo/wbtc",
|
||||
"borrow_limit": {
|
||||
"has_max_limit": true,
|
||||
"maximum_limit": "0.000000000000000000",
|
||||
"loan_to_value": "0.000000000000000000"
|
||||
},
|
||||
"spot_market_id": "btc:usd:30",
|
||||
"conversion_factor": "100000000",
|
||||
"interest_rate_model": {
|
||||
"base_rate_apy": "0.000000000000000000",
|
||||
"base_multiplier": "0.050000000000000000",
|
||||
"kink": "0.800000000000000000",
|
||||
"jump_multiplier": "5.000000000000000000"
|
||||
},
|
||||
"reserve_factor": "0.025000000000000000",
|
||||
"keeper_reward_percentage": "0.020000000000000000"
|
||||
}
|
||||
],
|
||||
"minimum_borrow_usd_value": "10.000000000000000000"
|
||||
@ -2734,6 +2810,18 @@
|
||||
"amount": "787"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"collateral_type": "erc20/bitgo/wbtc",
|
||||
"start": "2022-11-11T15:00:00Z",
|
||||
"end": "2025-11-11T15:00:00Z",
|
||||
"rewards_per_second": [
|
||||
{
|
||||
"denom": "ukava",
|
||||
"amount": "787"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"hard_borrow_reward_periods": [],
|
||||
@ -3170,6 +3258,16 @@
|
||||
}
|
||||
},
|
||||
"params": null,
|
||||
"packetfowardmiddleware": {
|
||||
"params": {
|
||||
"fee_percentage": "0.000000000000000000"
|
||||
},
|
||||
"in_flight_packets": {}
|
||||
},
|
||||
"precisebank": {
|
||||
"balances": [],
|
||||
"remainder": "0"
|
||||
},
|
||||
"pricefeed": {
|
||||
"params": {
|
||||
"markets": [
|
||||
@ -3643,6 +3741,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"router": {},
|
||||
"savings": {
|
||||
"params": {
|
||||
"supported_denoms": [
|
||||
@ -3814,7 +3913,8 @@
|
||||
"params": {
|
||||
"send_enabled": true,
|
||||
"receive_enabled": true
|
||||
}
|
||||
},
|
||||
"total_escrowed": []
|
||||
},
|
||||
"upgrade": {},
|
||||
"validatorvesting": null,
|
||||
|
3
ci/env/kava-protonet/genesis.json
vendored
3
ci/env/kava-protonet/genesis.json
vendored
@ -3006,6 +3006,9 @@
|
||||
},
|
||||
"in_flight_packets": {}
|
||||
},
|
||||
"precisebank": {
|
||||
"remainder": "0"
|
||||
},
|
||||
"pricefeed": {
|
||||
"params": {
|
||||
"markets": [
|
||||
|
@ -182,6 +182,23 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./out/swagger/kava/precisebank/v1/query.swagger.json",
|
||||
"tags": {
|
||||
"rename": {
|
||||
"Query": "Precisebank"
|
||||
}
|
||||
},
|
||||
"operationIds": {
|
||||
"rename": [
|
||||
{
|
||||
"type": "regex",
|
||||
"from": "(.*)",
|
||||
"to": "Precisebank$1"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./out/swagger/kava/pricefeed/v1beta1/query.swagger.json",
|
||||
"tags": {
|
||||
@ -295,6 +312,30 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./client/docs/ethermint-swagger.yml",
|
||||
"dereference": {
|
||||
"circular": "ignore"
|
||||
},
|
||||
"tags": {
|
||||
"rename": {
|
||||
"Query": "Ethermint"
|
||||
}
|
||||
},
|
||||
"operationIds": {
|
||||
"rename": [
|
||||
{
|
||||
"type": "regex",
|
||||
"from": "(.*)",
|
||||
"to": "Ethermint$1"
|
||||
}
|
||||
]
|
||||
},
|
||||
"paths": {
|
||||
"exclude": [
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"url": "./client/docs/legacy-swagger.yml",
|
||||
"dereference": {
|
||||
|
4458
client/docs/ethermint-swagger.yml
Normal file
4458
client/docs/ethermint-swagger.yml
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
381
client/erc20/MintableBurnableERC20.abi
Normal file
381
client/erc20/MintableBurnableERC20.abi
Normal file
@ -0,0 +1,381 @@
|
||||
[
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "name",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "symbol",
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "decimals_",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "constructor"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "Approval",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "previousOwner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "newOwner",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "OwnershipTransferred",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"anonymous": false,
|
||||
"inputs": [
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": true,
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"indexed": false,
|
||||
"internalType": "uint256",
|
||||
"name": "value",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "Transfer",
|
||||
"type": "event"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "owner",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "allowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "approve",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "account",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "balanceOf",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "burn",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "decimals",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint8",
|
||||
"name": "",
|
||||
"type": "uint8"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "subtractedValue",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "decreaseAllowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "spender",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "addedValue",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "increaseAllowance",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "mint",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "name",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "owner",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "renounceOwnership",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "symbol",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "string",
|
||||
"name": "",
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "totalSupply",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transfer",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "from",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "to",
|
||||
"type": "address"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "amount",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"name": "transferFrom",
|
||||
"outputs": [
|
||||
{
|
||||
"internalType": "bool",
|
||||
"name": "",
|
||||
"type": "bool"
|
||||
}
|
||||
],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
"internalType": "address",
|
||||
"name": "newOwner",
|
||||
"type": "address"
|
||||
}
|
||||
],
|
||||
"name": "transferOwnership",
|
||||
"outputs": [],
|
||||
"stateMutability": "nonpayable",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
1
client/erc20/MintableBurnableERC20.bin
Normal file
1
client/erc20/MintableBurnableERC20.bin
Normal file
File diff suppressed because one or more lines are too long
1069
client/erc20/main.go
Normal file
1069
client/erc20/main.go
Normal file
File diff suppressed because one or more lines are too long
@ -7,9 +7,9 @@ import (
|
||||
"github.com/0glabs/0g-chain/client/grpc/util"
|
||||
)
|
||||
|
||||
// KavaGrpcClient enables the usage of kava grpc query clients and query utils
|
||||
type KavaGrpcClient struct {
|
||||
config KavaGrpcClientConfig
|
||||
// ZgChainGrpcClient enables the usage of kava grpc query clients and query utils
|
||||
type ZgChainGrpcClient struct {
|
||||
config ZgChainGrpcClientConfig
|
||||
|
||||
// Query clients for cosmos and kava modules
|
||||
Query *query.QueryClient
|
||||
@ -18,18 +18,18 @@ type KavaGrpcClient struct {
|
||||
*util.Util
|
||||
}
|
||||
|
||||
// KavaGrpcClientConfig is a configuration struct for a KavaGrpcClient
|
||||
type KavaGrpcClientConfig struct {
|
||||
// ZgChainGrpcClientConfig is a configuration struct for a ZgChainGrpcClient
|
||||
type ZgChainGrpcClientConfig struct {
|
||||
// note: add future config options here
|
||||
}
|
||||
|
||||
// NewClient creates a new KavaGrpcClient via a grpc url
|
||||
func NewClient(grpcUrl string) (*KavaGrpcClient, error) {
|
||||
// NewClient creates a new ZgChainGrpcClient via a grpc url
|
||||
func NewClient(grpcUrl string) (*ZgChainGrpcClient, error) {
|
||||
return NewClientWithConfig(grpcUrl, NewDefaultConfig())
|
||||
}
|
||||
|
||||
// NewClientWithConfig creates a new KavaGrpcClient via a grpc url and config
|
||||
func NewClientWithConfig(grpcUrl string, config KavaGrpcClientConfig) (*KavaGrpcClient, error) {
|
||||
// NewClientWithConfig creates a new ZgChainGrpcClient via a grpc url and config
|
||||
func NewClientWithConfig(grpcUrl string, config ZgChainGrpcClientConfig) (*ZgChainGrpcClient, error) {
|
||||
if grpcUrl == "" {
|
||||
return nil, errors.New("grpc url cannot be empty")
|
||||
}
|
||||
@ -37,7 +37,7 @@ func NewClientWithConfig(grpcUrl string, config KavaGrpcClientConfig) (*KavaGrpc
|
||||
if error != nil {
|
||||
return nil, error
|
||||
}
|
||||
client := &KavaGrpcClient{
|
||||
client := &ZgChainGrpcClient{
|
||||
Query: query,
|
||||
Util: util.NewUtil(query),
|
||||
config: config,
|
||||
@ -45,6 +45,6 @@ func NewClientWithConfig(grpcUrl string, config KavaGrpcClientConfig) (*KavaGrpc
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func NewDefaultConfig() KavaGrpcClientConfig {
|
||||
return KavaGrpcClientConfig{}
|
||||
func NewDefaultConfig() ZgChainGrpcClientConfig {
|
||||
return ZgChainGrpcClientConfig{}
|
||||
}
|
||||
|
@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
@ -28,8 +30,20 @@ func newGrpcConnection(ctx context.Context, endpoint string) (*grpc.ClientConn,
|
||||
return nil, fmt.Errorf("unknown grpc url scheme: %s", grpcUrl.Scheme)
|
||||
}
|
||||
|
||||
// Ensure the encoding config is set up correctly with the query client
|
||||
// otherwise it will produce panics like:
|
||||
// invalid Go type math.Int for field ...
|
||||
encodingConfig := app.MakeEncodingConfig()
|
||||
protoCodec := codec.NewProtoCodec(encodingConfig.InterfaceRegistry)
|
||||
grpcCodec := protoCodec.GRPCCodec()
|
||||
|
||||
secureOpt := grpc.WithTransportCredentials(creds)
|
||||
grpcConn, err := grpc.DialContext(ctx, grpcUrl.Host, secureOpt)
|
||||
grpcConn, err := grpc.DialContext(
|
||||
ctx,
|
||||
grpcUrl.Host,
|
||||
secureOpt,
|
||||
grpc.WithDefaultCallOptions(grpc.ForceCodec(grpcCodec)),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
committeetypes "github.com/0glabs/0g-chain/x/committee/types"
|
||||
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
|
||||
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
|
||||
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
|
||||
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
|
||||
)
|
||||
|
||||
@ -60,21 +61,12 @@ type QueryClient struct {
|
||||
|
||||
// kava module query clients
|
||||
|
||||
Auction auctiontypes.QueryClient
|
||||
Bep3 bep3types.QueryClient
|
||||
Cdp cdptypes.QueryClient
|
||||
Committee committeetypes.QueryClient
|
||||
Community communitytypes.QueryClient
|
||||
Earn earntypes.QueryClient
|
||||
Evmutil evmutiltypes.QueryClient
|
||||
Hard hardtypes.QueryClient
|
||||
Incentive incentivetypes.QueryClient
|
||||
Issuance issuancetypes.QueryClient
|
||||
Kavadist kavadisttypes.QueryClient
|
||||
Liquid liquidtypes.QueryClient
|
||||
Pricefeed pricefeedtypes.QueryClient
|
||||
Savings savingstypes.QueryClient
|
||||
Swap swaptypes.QueryClient
|
||||
Bep3 bep3types.QueryClient
|
||||
Committee committeetypes.QueryClient
|
||||
Evmutil evmutiltypes.QueryClient
|
||||
Issuance issuancetypes.QueryClient
|
||||
Pricefeed pricefeedtypes.QueryClient
|
||||
Precisebank precisebanktypes.QueryClient
|
||||
}
|
||||
|
||||
// NewQueryClient creates a new QueryClient and initializes all the module query clients
|
||||
@ -105,21 +97,12 @@ func NewQueryClient(grpcEndpoint string) (*QueryClient, error) {
|
||||
IbcClient: ibcclienttypes.NewQueryClient(conn),
|
||||
IbcTransfer: ibctransfertypes.NewQueryClient(conn),
|
||||
|
||||
Auction: auctiontypes.NewQueryClient(conn),
|
||||
Bep3: bep3types.NewQueryClient(conn),
|
||||
Cdp: cdptypes.NewQueryClient(conn),
|
||||
Committee: committeetypes.NewQueryClient(conn),
|
||||
Community: communitytypes.NewQueryClient(conn),
|
||||
Earn: earntypes.NewQueryClient(conn),
|
||||
Evmutil: evmutiltypes.NewQueryClient(conn),
|
||||
Hard: hardtypes.NewQueryClient(conn),
|
||||
Incentive: incentivetypes.NewQueryClient(conn),
|
||||
Issuance: issuancetypes.NewQueryClient(conn),
|
||||
Kavadist: kavadisttypes.NewQueryClient(conn),
|
||||
Liquid: liquidtypes.NewQueryClient(conn),
|
||||
Pricefeed: pricefeedtypes.NewQueryClient(conn),
|
||||
Savings: savingstypes.NewQueryClient(conn),
|
||||
Swap: swaptypes.NewQueryClient(conn),
|
||||
Bep3: bep3types.NewQueryClient(conn),
|
||||
Committee: committeetypes.NewQueryClient(conn),
|
||||
Evmutil: evmutiltypes.NewQueryClient(conn),
|
||||
Issuance: issuancetypes.NewQueryClient(conn),
|
||||
Pricefeed: pricefeedtypes.NewQueryClient(conn),
|
||||
Precisebank: precisebanktypes.NewQueryClient(conn),
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
@ -55,20 +55,10 @@ func TestNewQueryClient_ValidClient(t *testing.T) {
|
||||
require.NotNil(t, client.IbcTransfer)
|
||||
|
||||
// validate kava clients
|
||||
require.NotNil(t, client.Auction)
|
||||
require.NotNil(t, client.Bep3)
|
||||
require.NotNil(t, client.Cdp)
|
||||
require.NotNil(t, client.Committee)
|
||||
require.NotNil(t, client.Community)
|
||||
require.NotNil(t, client.Earn)
|
||||
require.NotNil(t, client.Evmutil)
|
||||
require.NotNil(t, client.Hard)
|
||||
require.NotNil(t, client.Incentive)
|
||||
require.NotNil(t, client.Issuance)
|
||||
require.NotNil(t, client.Kavadist)
|
||||
require.NotNil(t, client.Liquid)
|
||||
require.NotNil(t, client.Pricefeed)
|
||||
require.NotNil(t, client.Savings)
|
||||
require.NotNil(t, client.Swap)
|
||||
})
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
simappparams "cosmossdk.io/simapp/params"
|
||||
"github.com/0glabs/0g-chain/client/rest"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
|
@ -1,12 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Kava-Labs/opendb"
|
||||
cometbftdb "github.com/cometbft/cometbft-db"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
@ -18,6 +20,7 @@ import (
|
||||
snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
|
||||
"github.com/cosmos/cosmos-sdk/store"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/signing"
|
||||
"github.com/cosmos/cosmos-sdk/x/crisis"
|
||||
ethermintflags "github.com/evmos/ethermint/server/flags"
|
||||
"github.com/spf13/cast"
|
||||
@ -25,6 +28,8 @@ import (
|
||||
|
||||
"github.com/0glabs/0g-chain/app"
|
||||
"github.com/0glabs/0g-chain/app/params"
|
||||
gethtypes "github.com/ethereum/go-ethereum/core/types"
|
||||
evmtypes "github.com/evmos/ethermint/x/evm/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -33,6 +38,8 @@ const (
|
||||
flagSkipLoadLatest = "skip-load-latest"
|
||||
)
|
||||
|
||||
var accountNonceOp app.AccountNonceOp
|
||||
|
||||
// appCreator holds functions used by the sdk server to control the 0g-chain app.
|
||||
// The methods implement types in cosmos-sdk/server/types
|
||||
type appCreator struct {
|
||||
@ -63,7 +70,7 @@ func (ac appCreator) newApp(
|
||||
|
||||
homeDir := cast.ToString(appOpts.Get(flags.FlagHome))
|
||||
snapshotDir := filepath.Join(homeDir, "data", "snapshots") // TODO can these directory names be imported from somewhere?
|
||||
snapshotDB, err := cometbftdb.NewDB("metadata", server.GetAppDBBackend(appOpts), snapshotDir)
|
||||
snapshotDB, err := opendb.OpenDB(appOpts, snapshotDir, "metadata", server.GetAppDBBackend(appOpts))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -106,18 +113,7 @@ func (ac appCreator) newApp(
|
||||
skipLoadLatest = cast.ToBool(appOpts.Get(flagSkipLoadLatest))
|
||||
}
|
||||
|
||||
return app.NewApp(
|
||||
logger, db, homeDir, traceStore, ac.encodingConfig,
|
||||
app.Options{
|
||||
SkipLoadLatest: skipLoadLatest,
|
||||
SkipUpgradeHeights: skipUpgradeHeights,
|
||||
SkipGenesisInvariants: cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)),
|
||||
InvariantCheckPeriod: cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
|
||||
MempoolEnableAuth: mempoolEnableAuth,
|
||||
MempoolAuthAddresses: mempoolAuthAddresses,
|
||||
EVMTrace: cast.ToString(appOpts.Get(ethermintflags.EVMTracer)),
|
||||
EVMMaxGasWanted: cast.ToUint64(appOpts.Get(ethermintflags.EVMMaxTxGasWanted)),
|
||||
},
|
||||
bApp := app.NewBaseApp(logger, db, ac.encodingConfig,
|
||||
baseapp.SetPruning(pruningOpts),
|
||||
baseapp.SetMinGasPrices(strings.Replace(cast.ToString(appOpts.Get(server.FlagMinGasPrices)), ";", ",", -1)),
|
||||
baseapp.SetHaltHeight(cast.ToUint64(appOpts.Get(server.FlagHaltHeight))),
|
||||
@ -131,7 +127,54 @@ func (ac appCreator) newApp(
|
||||
baseapp.SetIAVLDisableFastNode(cast.ToBool(iavlDisableFastNode)),
|
||||
baseapp.SetIAVLLazyLoading(cast.ToBool(appOpts.Get(server.FlagIAVLLazyLoading))),
|
||||
baseapp.SetChainID(chainID),
|
||||
baseapp.SetTxInfoExtracter(extractTxInfo),
|
||||
)
|
||||
|
||||
mempool := app.NewPriorityMempool(
|
||||
app.PriorityNonceWithMaxTx(fixMempoolSize(appOpts)),
|
||||
app.PriorityNonceWithTxReplacedCallback(func(ctx context.Context, oldTx, newTx *app.TxInfo) {
|
||||
if oldTx.Sender != newTx.Sender {
|
||||
sdkContext := sdk.UnwrapSDKContext(ctx)
|
||||
if accountNonceOp != nil {
|
||||
nonce := accountNonceOp.GetAccountNonce(sdkContext, oldTx.Sender)
|
||||
if nonce > 0 {
|
||||
accountNonceOp.SetAccountNonce(sdkContext, oldTx.Sender, nonce-1)
|
||||
sdkContext.Logger().Debug("rewind the nonce of the account", "account", oldTx.Sender, "from", nonce, "to", nonce-1)
|
||||
} else {
|
||||
sdkContext.Logger().Info("First meeting account", "account", oldTx.Sender)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
sdkContext := sdk.UnwrapSDKContext(ctx)
|
||||
sdkContext.Logger().Info("tx replace", "account", oldTx.Sender, "nonce", oldTx.Nonce)
|
||||
}
|
||||
bApp.RegisterMempoolTxReplacedEvent(ctx, oldTx.Tx, newTx.Tx)
|
||||
}),
|
||||
)
|
||||
bApp.SetMempool(mempool)
|
||||
|
||||
bApp.SetTxEncoder(ac.encodingConfig.TxConfig.TxEncoder())
|
||||
abciProposalHandler := app.NewDefaultProposalHandler(mempool, bApp)
|
||||
bApp.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
|
||||
|
||||
newApp := app.NewApp(
|
||||
homeDir, traceStore, ac.encodingConfig,
|
||||
app.Options{
|
||||
SkipLoadLatest: skipLoadLatest,
|
||||
SkipUpgradeHeights: skipUpgradeHeights,
|
||||
SkipGenesisInvariants: cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)),
|
||||
InvariantCheckPeriod: cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
|
||||
MempoolEnableAuth: mempoolEnableAuth,
|
||||
MempoolAuthAddresses: mempoolAuthAddresses,
|
||||
EVMTrace: cast.ToString(appOpts.Get(ethermintflags.EVMTracer)),
|
||||
EVMMaxGasWanted: cast.ToUint64(appOpts.Get(ethermintflags.EVMMaxTxGasWanted)),
|
||||
},
|
||||
bApp,
|
||||
)
|
||||
|
||||
accountNonceOp = app.NewAccountNonceOp(newApp)
|
||||
|
||||
return newApp
|
||||
}
|
||||
|
||||
// appExport writes out an app's state to json.
|
||||
@ -156,13 +199,15 @@ func (ac appCreator) appExport(
|
||||
|
||||
var tempApp *app.App
|
||||
if height != -1 {
|
||||
tempApp = app.NewApp(logger, db, homePath, traceStore, ac.encodingConfig, options)
|
||||
bApp := app.NewBaseApp(logger, db, ac.encodingConfig)
|
||||
tempApp = app.NewApp(homePath, traceStore, ac.encodingConfig, options, bApp)
|
||||
|
||||
if err := tempApp.LoadHeight(height); err != nil {
|
||||
return servertypes.ExportedApp{}, err
|
||||
}
|
||||
} else {
|
||||
tempApp = app.NewApp(logger, db, homePath, traceStore, ac.encodingConfig, options)
|
||||
bApp := app.NewBaseApp(logger, db, ac.encodingConfig)
|
||||
tempApp = app.NewApp(homePath, traceStore, ac.encodingConfig, options, bApp)
|
||||
}
|
||||
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport)
|
||||
}
|
||||
@ -184,3 +229,72 @@ func accAddressesFromBech32(addresses ...string) ([]sdk.AccAddress, error) {
|
||||
}
|
||||
return decodedAddresses, nil
|
||||
}
|
||||
|
||||
var ErrMustHaveSigner error = errors.New("tx must have at least one signer")
|
||||
|
||||
func extractTxInfo(ctx sdk.Context, tx sdk.Tx) (*sdk.TxInfo, error) {
|
||||
sigs, err := tx.(signing.SigVerifiableTx).GetSignaturesV2()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var sender string
|
||||
var nonce uint64
|
||||
var gasPrice uint64
|
||||
var gasLimit uint64
|
||||
var txType int32
|
||||
|
||||
if len(sigs) == 0 {
|
||||
txType = 1
|
||||
msgs := tx.GetMsgs()
|
||||
if len(msgs) != 1 {
|
||||
return nil, ErrMustHaveSigner
|
||||
}
|
||||
msgEthTx, ok := msgs[0].(*evmtypes.MsgEthereumTx)
|
||||
if !ok {
|
||||
return nil, ErrMustHaveSigner
|
||||
}
|
||||
ethTx := msgEthTx.AsTransaction()
|
||||
signer := gethtypes.NewEIP2930Signer(ethTx.ChainId())
|
||||
ethSender, err := signer.Sender(ethTx)
|
||||
if err != nil {
|
||||
return nil, ErrMustHaveSigner
|
||||
}
|
||||
sender = sdk.AccAddress(ethSender.Bytes()).String()
|
||||
nonce = ethTx.Nonce()
|
||||
gasPrice = ethTx.GasPrice().Uint64()
|
||||
gasLimit = ethTx.Gas()
|
||||
} else {
|
||||
sig := sigs[0]
|
||||
sender = sdk.AccAddress(sig.PubKey.Address()).String()
|
||||
nonce = sig.Sequence
|
||||
}
|
||||
|
||||
return &sdk.TxInfo{
|
||||
SignerAddress: sender,
|
||||
Nonce: nonce,
|
||||
GasLimit: gasLimit,
|
||||
GasPrice: gasPrice,
|
||||
TxType: txType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fixMempoolSize(appOpts servertypes.AppOptions) int {
|
||||
val1 := appOpts.Get("mempool.size")
|
||||
val2 := appOpts.Get(server.FlagMempoolMaxTxs)
|
||||
|
||||
if val1 != nil && val2 != nil {
|
||||
size1 := cast.ToInt(val1)
|
||||
size2 := cast.ToInt(val2)
|
||||
if size1 != size2 {
|
||||
panic("the value of mempool.size and mempool.max-txs are different")
|
||||
}
|
||||
return size1
|
||||
} else if val1 == nil && val2 == nil {
|
||||
panic("not found mempool size in config")
|
||||
} else if val1 == nil {
|
||||
return cast.ToInt(val2)
|
||||
} else { //if val2 == nil {
|
||||
return cast.ToInt(val1)
|
||||
}
|
||||
}
|
||||
|
@ -31,10 +31,7 @@ func newDataCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
}
|
||||
|
||||
printKeys(tree)
|
||||
hash, err := tree.Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hash := tree.Hash()
|
||||
fmt.Printf("Hash: %X\n", hash)
|
||||
fmt.Printf("Size: %X\n", tree.Size())
|
||||
|
||||
|
@ -28,11 +28,7 @@ func newHashCmd(opts ethermintserver.StartOptions) *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
hash, err := tree.Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("Hash: %X\n", hash)
|
||||
fmt.Printf("Hash: %X\n", tree.Hash())
|
||||
|
||||
return nil
|
||||
},
|
||||
|
@ -2,15 +2,19 @@ package iavlviewer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
"cosmossdk.io/log"
|
||||
dbm "github.com/cosmos/cosmos-db"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
"github.com/cosmos/cosmos-sdk/store/wrapper"
|
||||
ethermintserver "github.com/evmos/ethermint/server"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/cosmos/iavl"
|
||||
iavldb "github.com/cosmos/iavl/db"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -54,7 +58,9 @@ func openPrefixTree(opts ethermintserver.StartOptions, cmd *cobra.Command, prefi
|
||||
}
|
||||
}()
|
||||
|
||||
tree, err := readTree(db, version, []byte(prefix))
|
||||
cosmosdb := wrapper.NewCosmosDB(db)
|
||||
|
||||
tree, err := readTree(cosmosdb, version, []byte(prefix))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read tree with prefix %s: %s", prefix, err)
|
||||
}
|
||||
@ -69,10 +75,7 @@ func readTree(db dbm.DB, version int, prefix []byte) (*iavl.MutableTree, error)
|
||||
db = dbm.NewPrefixDB(db, prefix)
|
||||
}
|
||||
|
||||
tree, err := iavl.NewMutableTree(db, DefaultCacheSize, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tree := iavl.NewMutableTree(iavldb.NewWrapper(db), DefaultCacheSize, false, log.NewLogger(os.Stdout))
|
||||
ver, err := tree.LoadVersion(int64(version))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -14,14 +14,14 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/0glabs/0g-chain/cmd/opendb"
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
"github.com/linxGnu/grocksdb"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/cometbft/cometbft/libs/log"
|
||||
"github.com/Kava-Labs/opendb"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -3,16 +3,18 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
tmcfg "github.com/cometbft/cometbft/config"
|
||||
tmcli "github.com/cometbft/cometbft/libs/cli"
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/client/config"
|
||||
"github.com/cosmos/cosmos-sdk/client/debug"
|
||||
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keyring"
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
|
||||
tmcfg "github.com/cometbft/cometbft/config"
|
||||
tmcli "github.com/cometbft/cometbft/libs/cli"
|
||||
servertypes "github.com/cosmos/cosmos-sdk/server/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/genutil"
|
||||
@ -29,8 +31,8 @@ import (
|
||||
"github.com/0glabs/0g-chain/chaincfg"
|
||||
"github.com/0glabs/0g-chain/cmd/0gchaind/iavlviewer"
|
||||
"github.com/0glabs/0g-chain/cmd/0gchaind/rocksdb"
|
||||
"github.com/0glabs/0g-chain/cmd/opendb"
|
||||
"github.com/0glabs/0g-chain/crypto/vrf"
|
||||
"github.com/Kava-Labs/opendb"
|
||||
)
|
||||
|
||||
func customKeyringOptions() keyring.Option {
|
||||
@ -52,7 +54,7 @@ func NewRootCmd() *cobra.Command {
|
||||
WithAccountRetriever(types.AccountRetriever{}).
|
||||
WithBroadcastMode(flags.FlagBroadcastMode).
|
||||
WithHomeDir(chaincfg.DefaultNodeHome).
|
||||
WithKeyringOptions(customKeyringOptions()).
|
||||
WithKeyringOptions(hd.EthSecp256k1Option()).
|
||||
WithViper(chaincfg.EnvPrefix)
|
||||
rootCmd := &cobra.Command{
|
||||
Use: chaincfg.AppName,
|
||||
@ -90,7 +92,14 @@ func NewRootCmd() *cobra.Command {
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
// addSubCmds registers all the sub commands used by 0g-chain.
|
||||
// dbOpener is a function to open `application.db`, potentially with customized options.
|
||||
// dbOpener sets dataDir to "data", dbName to "application" and calls generic OpenDB function.
|
||||
func dbOpener(opts servertypes.AppOptions, rootDir string, backend dbm.BackendType) (dbm.DB, error) {
|
||||
dataDir := filepath.Join(rootDir, "data")
|
||||
return opendb.OpenDB(opts, dataDir, "application", backend)
|
||||
}
|
||||
|
||||
// addSubCmds registers all the sub commands used by kava.
|
||||
func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, defaultNodeHome string) {
|
||||
gentxModule, ok := app.ModuleBasics[genutiltypes.ModuleName].(genutil.AppModuleBasic)
|
||||
if !ok {
|
||||
@ -120,7 +129,7 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
|
||||
opts := ethermintserver.StartOptions{
|
||||
AppCreator: ac.newApp,
|
||||
DefaultNodeHome: chaincfg.DefaultNodeHome,
|
||||
DBOpener: opendb.OpenDB,
|
||||
DBOpener: dbOpener,
|
||||
}
|
||||
// ethermintserver adds additional flags to start the JSON-RPC server for evm support
|
||||
ethermintserver.AddCommands(
|
||||
|
@ -219,9 +219,12 @@ func shardApplicationDb(multistore *rootmulti.Store, startBlock, endBlock int64)
|
||||
}
|
||||
|
||||
if len(pruneHeights) > 0 {
|
||||
// prune application state
|
||||
fmt.Printf("pruning application state to height %d\n", startBlock)
|
||||
if err := multistore.PruneStores(true, pruneHeights); err != nil {
|
||||
return fmt.Errorf("failed to prune application state: %s", err)
|
||||
for _, pruneHeight := range pruneHeights {
|
||||
if err := multistore.PruneStores(pruneHeight); err != nil {
|
||||
return fmt.Errorf("failed to prune application state: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,499 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
"github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// rocksdbMetrics will be initialized in registerMetrics() if enableRocksdbMetrics flag set to true
|
||||
var rocksdbMetrics *Metrics
|
||||
|
||||
// Metrics contains all rocksdb metrics which will be reported to prometheus
|
||||
type Metrics struct {
|
||||
// Keys
|
||||
NumberKeysWritten metrics.Gauge
|
||||
NumberKeysRead metrics.Gauge
|
||||
NumberKeysUpdated metrics.Gauge
|
||||
EstimateNumKeys metrics.Gauge
|
||||
|
||||
// Files
|
||||
NumberFileOpens metrics.Gauge
|
||||
NumberFileErrors metrics.Gauge
|
||||
|
||||
// Memory
|
||||
BlockCacheUsage metrics.Gauge
|
||||
EstimateTableReadersMem metrics.Gauge
|
||||
CurSizeAllMemTables metrics.Gauge
|
||||
BlockCachePinnedUsage metrics.Gauge
|
||||
|
||||
// Cache
|
||||
BlockCacheMiss metrics.Gauge
|
||||
BlockCacheHit metrics.Gauge
|
||||
BlockCacheAdd metrics.Gauge
|
||||
BlockCacheAddFailures metrics.Gauge
|
||||
|
||||
// Detailed Cache
|
||||
BlockCacheIndexMiss metrics.Gauge
|
||||
BlockCacheIndexHit metrics.Gauge
|
||||
BlockCacheIndexBytesInsert metrics.Gauge
|
||||
|
||||
BlockCacheFilterMiss metrics.Gauge
|
||||
BlockCacheFilterHit metrics.Gauge
|
||||
BlockCacheFilterBytesInsert metrics.Gauge
|
||||
|
||||
BlockCacheDataMiss metrics.Gauge
|
||||
BlockCacheDataHit metrics.Gauge
|
||||
BlockCacheDataBytesInsert metrics.Gauge
|
||||
|
||||
// Latency
|
||||
DBGetMicrosP50 metrics.Gauge
|
||||
DBGetMicrosP95 metrics.Gauge
|
||||
DBGetMicrosP99 metrics.Gauge
|
||||
DBGetMicrosP100 metrics.Gauge
|
||||
DBGetMicrosCount metrics.Gauge
|
||||
|
||||
DBWriteMicrosP50 metrics.Gauge
|
||||
DBWriteMicrosP95 metrics.Gauge
|
||||
DBWriteMicrosP99 metrics.Gauge
|
||||
DBWriteMicrosP100 metrics.Gauge
|
||||
DBWriteMicrosCount metrics.Gauge
|
||||
|
||||
// Write Stall
|
||||
StallMicros metrics.Gauge
|
||||
|
||||
DBWriteStallP50 metrics.Gauge
|
||||
DBWriteStallP95 metrics.Gauge
|
||||
DBWriteStallP99 metrics.Gauge
|
||||
DBWriteStallP100 metrics.Gauge
|
||||
DBWriteStallCount metrics.Gauge
|
||||
DBWriteStallSum metrics.Gauge
|
||||
|
||||
// Bloom Filter
|
||||
BloomFilterUseful metrics.Gauge
|
||||
BloomFilterFullPositive metrics.Gauge
|
||||
BloomFilterFullTruePositive metrics.Gauge
|
||||
|
||||
// LSM Tree Stats
|
||||
LastLevelReadBytes metrics.Gauge
|
||||
LastLevelReadCount metrics.Gauge
|
||||
NonLastLevelReadBytes metrics.Gauge
|
||||
NonLastLevelReadCount metrics.Gauge
|
||||
|
||||
GetHitL0 metrics.Gauge
|
||||
GetHitL1 metrics.Gauge
|
||||
GetHitL2AndUp metrics.Gauge
|
||||
}
|
||||
|
||||
// registerMetrics registers metrics in prometheus and initializes rocksdbMetrics variable
|
||||
func registerMetrics() {
|
||||
if rocksdbMetrics != nil {
|
||||
// metrics already registered
|
||||
return
|
||||
}
|
||||
|
||||
labels := make([]string, 0)
|
||||
rocksdbMetrics = &Metrics{
|
||||
// Keys
|
||||
NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "number_keys_written",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "number_keys_read",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "number_keys_updated",
|
||||
Help: "",
|
||||
}, labels),
|
||||
EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "key",
|
||||
Name: "estimate_num_keys",
|
||||
Help: "estimated number of total keys in the active and unflushed immutable memtables and storage",
|
||||
}, labels),
|
||||
|
||||
// Files
|
||||
NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "file",
|
||||
Name: "number_file_opens",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "file",
|
||||
Name: "number_file_errors",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Memory
|
||||
BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "block_cache_usage",
|
||||
Help: "memory size for the entries residing in block cache",
|
||||
}, labels),
|
||||
EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "estimate_table_readers_mem",
|
||||
Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
|
||||
}, labels),
|
||||
CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "cur_size_all_mem_tables",
|
||||
Help: "approximate size of active and unflushed immutable memtables (bytes)",
|
||||
}, labels),
|
||||
BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "memory",
|
||||
Name: "block_cache_pinned_usage",
|
||||
Help: "returns the memory size for the entries being pinned",
|
||||
}, labels),
|
||||
|
||||
// Cache
|
||||
BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_miss",
|
||||
Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
|
||||
}, labels),
|
||||
BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_hit",
|
||||
Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
|
||||
}, labels),
|
||||
BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_add",
|
||||
Help: "number of blocks added to block cache",
|
||||
}, labels),
|
||||
BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "cache",
|
||||
Name: "block_cache_add_failures",
|
||||
Help: "number of failures when adding blocks to block cache",
|
||||
}, labels),
|
||||
|
||||
// Detailed Cache
|
||||
BlockCacheIndexMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_index_miss",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheIndexHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_index_hit",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheIndexBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_index_bytes_insert",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
BlockCacheFilterMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_filter_miss",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheFilterHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_filter_hit",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheFilterBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_filter_bytes_insert",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
BlockCacheDataMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_data_miss",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheDataHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_data_hit",
|
||||
Help: "",
|
||||
}, labels),
|
||||
BlockCacheDataBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "detailed_cache",
|
||||
Name: "block_cache_data_bytes_insert",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Latency
|
||||
DBGetMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p50",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p95",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p99",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_p100",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBGetMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_get_micros_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
DBWriteMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p50",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p95",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p99",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_p100",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "latency",
|
||||
Name: "db_write_micros_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Write Stall
|
||||
StallMicros: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "stall_micros",
|
||||
Help: "Writer has to wait for compaction or flush to finish.",
|
||||
}, labels),
|
||||
|
||||
DBWriteStallP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p50",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p95",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p99",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_p100",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
DBWriteStallSum: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "stall",
|
||||
Name: "db_write_stall_sum",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
// Bloom Filter
|
||||
BloomFilterUseful: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "filter",
|
||||
Name: "bloom_filter_useful",
|
||||
Help: "number of times bloom filter has avoided file reads, i.e., negatives.",
|
||||
}, labels),
|
||||
BloomFilterFullPositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "filter",
|
||||
Name: "bloom_filter_full_positive",
|
||||
Help: "number of times bloom FullFilter has not avoided the reads.",
|
||||
}, labels),
|
||||
BloomFilterFullTruePositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "filter",
|
||||
Name: "bloom_filter_full_true_positive",
|
||||
Help: "number of times bloom FullFilter has not avoided the reads and data actually exist.",
|
||||
}, labels),
|
||||
|
||||
// LSM Tree Stats
|
||||
LastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "last_level_read_bytes",
|
||||
Help: "",
|
||||
}, labels),
|
||||
LastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "last_level_read_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NonLastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "non_last_level_read_bytes",
|
||||
Help: "",
|
||||
}, labels),
|
||||
NonLastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "non_last_level_read_count",
|
||||
Help: "",
|
||||
}, labels),
|
||||
|
||||
GetHitL0: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "get_hit_l0",
|
||||
Help: "number of Get() queries served by L0",
|
||||
}, labels),
|
||||
GetHitL1: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "get_hit_l1",
|
||||
Help: "number of Get() queries served by L1",
|
||||
}, labels),
|
||||
GetHitL2AndUp: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
||||
Namespace: "rocksdb",
|
||||
Subsystem: "lsm",
|
||||
Name: "get_hit_l2_and_up",
|
||||
Help: "number of Get() queries served by L2 and up",
|
||||
}, labels),
|
||||
}
|
||||
}
|
||||
|
||||
// report reports metrics to prometheus based on rocksdb props and stats
|
||||
func (m *Metrics) report(props *properties, stats *stats) {
|
||||
// Keys
|
||||
m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
|
||||
m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
|
||||
m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
|
||||
m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))
|
||||
|
||||
// Files
|
||||
m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
|
||||
m.NumberFileErrors.Set(float64(stats.NumberFileErrors))
|
||||
|
||||
// Memory
|
||||
m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
|
||||
m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
|
||||
m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
|
||||
m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))
|
||||
|
||||
// Cache
|
||||
m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
|
||||
m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
|
||||
m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
|
||||
m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
|
||||
|
||||
// Detailed Cache
|
||||
m.BlockCacheIndexMiss.Set(float64(stats.BlockCacheIndexMiss))
|
||||
m.BlockCacheIndexHit.Set(float64(stats.BlockCacheIndexHit))
|
||||
m.BlockCacheIndexBytesInsert.Set(float64(stats.BlockCacheIndexBytesInsert))
|
||||
|
||||
m.BlockCacheFilterMiss.Set(float64(stats.BlockCacheFilterMiss))
|
||||
m.BlockCacheFilterHit.Set(float64(stats.BlockCacheFilterHit))
|
||||
m.BlockCacheFilterBytesInsert.Set(float64(stats.BlockCacheFilterBytesInsert))
|
||||
|
||||
m.BlockCacheDataMiss.Set(float64(stats.BlockCacheDataMiss))
|
||||
m.BlockCacheDataHit.Set(float64(stats.BlockCacheDataHit))
|
||||
m.BlockCacheDataBytesInsert.Set(float64(stats.BlockCacheDataBytesInsert))
|
||||
|
||||
// Latency
|
||||
m.DBGetMicrosP50.Set(stats.DBGetMicros.P50)
|
||||
m.DBGetMicrosP95.Set(stats.DBGetMicros.P95)
|
||||
m.DBGetMicrosP99.Set(stats.DBGetMicros.P99)
|
||||
m.DBGetMicrosP100.Set(stats.DBGetMicros.P100)
|
||||
m.DBGetMicrosCount.Set(stats.DBGetMicros.Count)
|
||||
|
||||
m.DBWriteMicrosP50.Set(stats.DBWriteMicros.P50)
|
||||
m.DBWriteMicrosP95.Set(stats.DBWriteMicros.P95)
|
||||
m.DBWriteMicrosP99.Set(stats.DBWriteMicros.P99)
|
||||
m.DBWriteMicrosP100.Set(stats.DBWriteMicros.P100)
|
||||
m.DBWriteMicrosCount.Set(stats.DBWriteMicros.Count)
|
||||
|
||||
// Write Stall
|
||||
m.StallMicros.Set(float64(stats.StallMicros))
|
||||
|
||||
m.DBWriteStallP50.Set(stats.DBWriteStallHistogram.P50)
|
||||
m.DBWriteStallP95.Set(stats.DBWriteStallHistogram.P95)
|
||||
m.DBWriteStallP99.Set(stats.DBWriteStallHistogram.P99)
|
||||
m.DBWriteStallP100.Set(stats.DBWriteStallHistogram.P100)
|
||||
m.DBWriteStallCount.Set(stats.DBWriteStallHistogram.Count)
|
||||
m.DBWriteStallSum.Set(stats.DBWriteStallHistogram.Sum)
|
||||
|
||||
// Bloom Filter
|
||||
m.BloomFilterUseful.Set(float64(stats.BloomFilterUseful))
|
||||
m.BloomFilterFullPositive.Set(float64(stats.BloomFilterFullPositive))
|
||||
m.BloomFilterFullTruePositive.Set(float64(stats.BloomFilterFullTruePositive))
|
||||
|
||||
// LSM Tree Stats
|
||||
m.LastLevelReadBytes.Set(float64(stats.LastLevelReadBytes))
|
||||
m.LastLevelReadCount.Set(float64(stats.LastLevelReadCount))
|
||||
m.NonLastLevelReadBytes.Set(float64(stats.NonLastLevelReadBytes))
|
||||
m.NonLastLevelReadCount.Set(float64(stats.NonLastLevelReadCount))
|
||||
|
||||
m.GetHitL0.Set(float64(stats.GetHitL0))
|
||||
m.GetHitL1.Set(float64(stats.GetHitL1))
|
||||
m.GetHitL2AndUp.Set(float64(stats.GetHitL2AndUp))
|
||||
}
|
@ -1,18 +0,0 @@
|
||||
//go:build !rocksdb
|
||||
// +build !rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
"github.com/cosmos/cosmos-sdk/server/types"
|
||||
)
|
||||
|
||||
// OpenDB is a copy of default DBOpener function used by ethermint, see for details:
|
||||
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647
|
||||
func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
|
||||
dataDir := filepath.Join(home, "data")
|
||||
return dbm.NewDB("application", backendType, dataDir)
|
||||
}
|
@ -1,398 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
// Copyright 2023 Kava Labs, Inc.
|
||||
// Copyright 2023 Cronos Labs, Inc.
|
||||
//
|
||||
// Derived from https://github.com/crypto-org-chain/cronos@496ce7e
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dbm "github.com/cometbft/cometbft-db"
|
||||
"github.com/cosmos/cosmos-sdk/server/types"
|
||||
"github.com/linxGnu/grocksdb"
|
||||
"github.com/spf13/cast"
|
||||
)
|
||||
|
||||
var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default")
|
||||
|
||||
const (
|
||||
// default tm-db block cache size for RocksDB
|
||||
defaultBlockCacheSize = 1 << 30
|
||||
|
||||
DefaultColumnFamilyName = "default"
|
||||
|
||||
enableMetricsOptName = "rocksdb.enable-metrics"
|
||||
reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
|
||||
defaultReportMetricsIntervalSecs = 15
|
||||
|
||||
maxOpenFilesDBOptName = "rocksdb.max-open-files"
|
||||
maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads"
|
||||
tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
|
||||
allowMMAPWritesDBOptName = "rocksdb.allow_mmap_writes"
|
||||
allowMMAPReadsDBOptName = "rocksdb.allow_mmap_reads"
|
||||
useFsyncDBOptName = "rocksdb.use_fsync"
|
||||
useAdaptiveMutexDBOptName = "rocksdb.use_adaptive_mutex"
|
||||
bytesPerSyncDBOptName = "rocksdb.bytes_per_sync"
|
||||
maxBackgroundJobsDBOptName = "rocksdb.max-background-jobs"
|
||||
|
||||
writeBufferSizeCFOptName = "rocksdb.write-buffer-size"
|
||||
numLevelsCFOptName = "rocksdb.num-levels"
|
||||
maxWriteBufferNumberCFOptName = "rocksdb.max_write_buffer_number"
|
||||
minWriteBufferNumberToMergeCFOptName = "rocksdb.min_write_buffer_number_to_merge"
|
||||
maxBytesForLevelBaseCFOptName = "rocksdb.max_bytes_for_level_base"
|
||||
maxBytesForLevelMultiplierCFOptName = "rocksdb.max_bytes_for_level_multiplier"
|
||||
targetFileSizeBaseCFOptName = "rocksdb.target_file_size_base"
|
||||
targetFileSizeMultiplierCFOptName = "rocksdb.target_file_size_multiplier"
|
||||
level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
|
||||
level0SlowdownWritesTriggerCFOptName = "rocksdb.level0_slowdown_writes_trigger"
|
||||
|
||||
blockCacheSizeBBTOOptName = "rocksdb.block_cache_size"
|
||||
bitsPerKeyBBTOOptName = "rocksdb.bits_per_key"
|
||||
blockSizeBBTOOptName = "rocksdb.block_size"
|
||||
cacheIndexAndFilterBlocksBBTOOptName = "rocksdb.cache_index_and_filter_blocks"
|
||||
pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
|
||||
formatVersionBBTOOptName = "rocksdb.format_version"
|
||||
|
||||
asyncIOReadOptName = "rocksdb.read-async-io"
|
||||
)
|
||||
|
||||
func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
|
||||
dataDir := filepath.Join(home, "data")
|
||||
if backendType == dbm.RocksDBBackend {
|
||||
return openRocksdb(dataDir, appOpts)
|
||||
}
|
||||
|
||||
return dbm.NewDB("application", backendType, dataDir)
|
||||
}
|
||||
|
||||
// openRocksdb loads existing options, overrides some of them with appOpts and opens database
|
||||
// option will be overridden only in case if it explicitly specified in appOpts
|
||||
func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
|
||||
optionsPath := filepath.Join(dir, "application.db")
|
||||
dbOpts, cfOpts, err := LoadLatestOptions(optionsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// customize rocksdb options
|
||||
bbtoOpts := bbtoFromAppOpts(appOpts)
|
||||
dbOpts.SetBlockBasedTableFactory(bbtoOpts)
|
||||
cfOpts.SetBlockBasedTableFactory(bbtoOpts)
|
||||
dbOpts = overrideDBOpts(dbOpts, appOpts)
|
||||
cfOpts = overrideCFOpts(cfOpts, appOpts)
|
||||
readOpts := readOptsFromAppOpts(appOpts)
|
||||
|
||||
enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
|
||||
reportMetricsIntervalSecs := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
|
||||
if reportMetricsIntervalSecs == 0 {
|
||||
reportMetricsIntervalSecs = defaultReportMetricsIntervalSecs
|
||||
}
|
||||
|
||||
return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, readOpts, enableMetrics, reportMetricsIntervalSecs)
|
||||
}
|
||||
|
||||
// LoadLatestOptions loads and returns database and column family options
|
||||
// if options file not found, it means database isn't created yet, in such case default tm-db options will be returned
|
||||
// if database exists it should have only one column family named default
|
||||
func LoadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
|
||||
latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
|
||||
if err != nil && strings.HasPrefix(err.Error(), "NotFound: ") {
|
||||
return newDefaultOptions(), newDefaultOptions(), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cfNames := latestOpts.ColumnFamilyNames()
|
||||
cfOpts := latestOpts.ColumnFamilyOpts()
|
||||
// db should have only one column family named default
|
||||
ok := len(cfNames) == 1 && cfNames[0] == DefaultColumnFamilyName
|
||||
if !ok {
|
||||
return nil, nil, ErrUnexpectedConfiguration
|
||||
}
|
||||
|
||||
// return db and cf opts
|
||||
return latestOpts.Options(), &cfOpts[0], nil
|
||||
}
|
||||
|
||||
// overrideDBOpts merges dbOpts and appOpts, appOpts takes precedence
|
||||
func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
|
||||
maxOpenFiles := appOpts.Get(maxOpenFilesDBOptName)
|
||||
if maxOpenFiles != nil {
|
||||
dbOpts.SetMaxOpenFiles(cast.ToInt(maxOpenFiles))
|
||||
}
|
||||
|
||||
maxFileOpeningThreads := appOpts.Get(maxFileOpeningThreadsDBOptName)
|
||||
if maxFileOpeningThreads != nil {
|
||||
dbOpts.SetMaxFileOpeningThreads(cast.ToInt(maxFileOpeningThreads))
|
||||
}
|
||||
|
||||
tableCacheNumshardbits := appOpts.Get(tableCacheNumshardbitsDBOptName)
|
||||
if tableCacheNumshardbits != nil {
|
||||
dbOpts.SetTableCacheNumshardbits(cast.ToInt(tableCacheNumshardbits))
|
||||
}
|
||||
|
||||
allowMMAPWrites := appOpts.Get(allowMMAPWritesDBOptName)
|
||||
if allowMMAPWrites != nil {
|
||||
dbOpts.SetAllowMmapWrites(cast.ToBool(allowMMAPWrites))
|
||||
}
|
||||
|
||||
allowMMAPReads := appOpts.Get(allowMMAPReadsDBOptName)
|
||||
if allowMMAPReads != nil {
|
||||
dbOpts.SetAllowMmapReads(cast.ToBool(allowMMAPReads))
|
||||
}
|
||||
|
||||
useFsync := appOpts.Get(useFsyncDBOptName)
|
||||
if useFsync != nil {
|
||||
dbOpts.SetUseFsync(cast.ToBool(useFsync))
|
||||
}
|
||||
|
||||
useAdaptiveMutex := appOpts.Get(useAdaptiveMutexDBOptName)
|
||||
if useAdaptiveMutex != nil {
|
||||
dbOpts.SetUseAdaptiveMutex(cast.ToBool(useAdaptiveMutex))
|
||||
}
|
||||
|
||||
bytesPerSync := appOpts.Get(bytesPerSyncDBOptName)
|
||||
if bytesPerSync != nil {
|
||||
dbOpts.SetBytesPerSync(cast.ToUint64(bytesPerSync))
|
||||
}
|
||||
|
||||
maxBackgroundJobs := appOpts.Get(maxBackgroundJobsDBOptName)
|
||||
if maxBackgroundJobs != nil {
|
||||
dbOpts.SetMaxBackgroundJobs(cast.ToInt(maxBackgroundJobs))
|
||||
}
|
||||
|
||||
return dbOpts
|
||||
}
|
||||
|
||||
// overrideCFOpts merges cfOpts and appOpts, appOpts takes precedence
|
||||
func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
|
||||
writeBufferSize := appOpts.Get(writeBufferSizeCFOptName)
|
||||
if writeBufferSize != nil {
|
||||
cfOpts.SetWriteBufferSize(cast.ToUint64(writeBufferSize))
|
||||
}
|
||||
|
||||
numLevels := appOpts.Get(numLevelsCFOptName)
|
||||
if numLevels != nil {
|
||||
cfOpts.SetNumLevels(cast.ToInt(numLevels))
|
||||
}
|
||||
|
||||
maxWriteBufferNumber := appOpts.Get(maxWriteBufferNumberCFOptName)
|
||||
if maxWriteBufferNumber != nil {
|
||||
cfOpts.SetMaxWriteBufferNumber(cast.ToInt(maxWriteBufferNumber))
|
||||
}
|
||||
|
||||
minWriteBufferNumberToMerge := appOpts.Get(minWriteBufferNumberToMergeCFOptName)
|
||||
if minWriteBufferNumberToMerge != nil {
|
||||
cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(minWriteBufferNumberToMerge))
|
||||
}
|
||||
|
||||
maxBytesForLevelBase := appOpts.Get(maxBytesForLevelBaseCFOptName)
|
||||
if maxBytesForLevelBase != nil {
|
||||
cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(maxBytesForLevelBase))
|
||||
}
|
||||
|
||||
maxBytesForLevelMultiplier := appOpts.Get(maxBytesForLevelMultiplierCFOptName)
|
||||
if maxBytesForLevelMultiplier != nil {
|
||||
cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(maxBytesForLevelMultiplier))
|
||||
}
|
||||
|
||||
targetFileSizeBase := appOpts.Get(targetFileSizeBaseCFOptName)
|
||||
if targetFileSizeBase != nil {
|
||||
cfOpts.SetTargetFileSizeBase(cast.ToUint64(targetFileSizeBase))
|
||||
}
|
||||
|
||||
targetFileSizeMultiplier := appOpts.Get(targetFileSizeMultiplierCFOptName)
|
||||
if targetFileSizeMultiplier != nil {
|
||||
cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(targetFileSizeMultiplier))
|
||||
}
|
||||
|
||||
level0FileNumCompactionTrigger := appOpts.Get(level0FileNumCompactionTriggerCFOptName)
|
||||
if level0FileNumCompactionTrigger != nil {
|
||||
cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(level0FileNumCompactionTrigger))
|
||||
}
|
||||
|
||||
level0SlowdownWritesTrigger := appOpts.Get(level0SlowdownWritesTriggerCFOptName)
|
||||
if level0SlowdownWritesTrigger != nil {
|
||||
cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(level0SlowdownWritesTrigger))
|
||||
}
|
||||
|
||||
return cfOpts
|
||||
}
|
||||
|
||||
func readOptsFromAppOpts(appOpts types.AppOptions) *grocksdb.ReadOptions {
|
||||
ro := grocksdb.NewDefaultReadOptions()
|
||||
asyncIO := appOpts.Get(asyncIOReadOptName)
|
||||
if asyncIO != nil {
|
||||
ro.SetAsyncIO(cast.ToBool(asyncIO))
|
||||
}
|
||||
|
||||
return ro
|
||||
}
|
||||
|
||||
func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions {
|
||||
bbto := defaultBBTO()
|
||||
|
||||
blockCacheSize := appOpts.Get(blockCacheSizeBBTOOptName)
|
||||
if blockCacheSize != nil {
|
||||
cache := grocksdb.NewLRUCache(cast.ToUint64(blockCacheSize))
|
||||
bbto.SetBlockCache(cache)
|
||||
}
|
||||
|
||||
bitsPerKey := appOpts.Get(bitsPerKeyBBTOOptName)
|
||||
if bitsPerKey != nil {
|
||||
filter := grocksdb.NewBloomFilter(cast.ToFloat64(bitsPerKey))
|
||||
bbto.SetFilterPolicy(filter)
|
||||
}
|
||||
|
||||
blockSize := appOpts.Get(blockSizeBBTOOptName)
|
||||
if blockSize != nil {
|
||||
bbto.SetBlockSize(cast.ToInt(blockSize))
|
||||
}
|
||||
|
||||
cacheIndexAndFilterBlocks := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName)
|
||||
if cacheIndexAndFilterBlocks != nil {
|
||||
bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(cacheIndexAndFilterBlocks))
|
||||
}
|
||||
|
||||
pinL0FilterAndIndexBlocksInCache := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName)
|
||||
if pinL0FilterAndIndexBlocksInCache != nil {
|
||||
bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(pinL0FilterAndIndexBlocksInCache))
|
||||
}
|
||||
|
||||
formatVersion := appOpts.Get(formatVersionBBTOOptName)
|
||||
if formatVersion != nil {
|
||||
bbto.SetFormatVersion(cast.ToInt(formatVersion))
|
||||
}
|
||||
|
||||
return bbto
|
||||
}
|
||||
|
||||
// newRocksDBWithOptions opens rocksdb with provided database and column family options
|
||||
// newRocksDBWithOptions expects that db has only one column family named default
|
||||
func newRocksDBWithOptions(
|
||||
name string,
|
||||
dir string,
|
||||
dbOpts *grocksdb.Options,
|
||||
cfOpts *grocksdb.Options,
|
||||
readOpts *grocksdb.ReadOptions,
|
||||
enableMetrics bool,
|
||||
reportMetricsIntervalSecs int64,
|
||||
) (*dbm.RocksDB, error) {
|
||||
dbPath := filepath.Join(dir, name+".db")
|
||||
|
||||
// Ensure path exists
|
||||
if err := os.MkdirAll(dbPath, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create db path: %w", err)
|
||||
}
|
||||
|
||||
// EnableStatistics adds overhead so shouldn't be enabled in production
|
||||
if enableMetrics {
|
||||
dbOpts.EnableStatistics()
|
||||
}
|
||||
|
||||
db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{DefaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if enableMetrics {
|
||||
registerMetrics()
|
||||
go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs))
|
||||
}
|
||||
|
||||
wo := grocksdb.NewDefaultWriteOptions()
|
||||
woSync := grocksdb.NewDefaultWriteOptions()
|
||||
woSync.SetSync(true)
|
||||
return dbm.NewRocksDBWithRawDB(db, readOpts, wo, woSync), nil
|
||||
}
|
||||
|
||||
// newDefaultOptions returns default tm-db options for RocksDB, see for details:
|
||||
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
|
||||
func newDefaultOptions() *grocksdb.Options {
|
||||
// default rocksdb option, good enough for most cases, including heavy workloads.
|
||||
// 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads).
|
||||
// compression: snappy as default, need to -lsnappy to enable.
|
||||
bbto := defaultBBTO()
|
||||
|
||||
opts := grocksdb.NewDefaultOptions()
|
||||
opts.SetBlockBasedTableFactory(bbto)
|
||||
// SetMaxOpenFiles to 4096 seems to provide a reliable performance boost
|
||||
opts.SetMaxOpenFiles(4096)
|
||||
opts.SetCreateIfMissing(true)
|
||||
opts.IncreaseParallelism(runtime.NumCPU())
|
||||
// 1.5GB maximum memory use for writebuffer.
|
||||
opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// defaultBBTO returns default tm-db bbto options for RocksDB, see for details:
|
||||
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
|
||||
func defaultBBTO() *grocksdb.BlockBasedTableOptions {
|
||||
bbto := grocksdb.NewDefaultBlockBasedTableOptions()
|
||||
bbto.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize))
|
||||
bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10))
|
||||
|
||||
return bbto
|
||||
}
|
||||
|
||||
// reportMetrics periodically requests stats from rocksdb and reports to prometheus
|
||||
// NOTE: should be launched as a goroutine
|
||||
func reportMetrics(db *grocksdb.DB, interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
props, stats, err := getPropsAndStats(db)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
rocksdbMetrics.report(props, stats)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getPropsAndStats gets statistics from rocksdb
|
||||
func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
|
||||
propsLoader := newPropsLoader(db)
|
||||
props, err := propsLoader.load()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
statMap, err := parseSerializedStats(props.OptionsStatistics)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
statLoader := newStatLoader(statMap)
|
||||
stats, err := statLoader.load()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return props, stats, nil
|
||||
}
|
@ -1,384 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/linxGnu/grocksdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type mockAppOptions struct {
|
||||
opts map[string]interface{}
|
||||
}
|
||||
|
||||
func newMockAppOptions(opts map[string]interface{}) *mockAppOptions {
|
||||
return &mockAppOptions{
|
||||
opts: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockAppOptions) Get(key string) interface{} {
|
||||
return m.opts[key]
|
||||
}
|
||||
|
||||
func TestOpenRocksdb(t *testing.T) {
|
||||
t.Run("db already exists", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
maxOpenFiles int
|
||||
maxFileOpeningThreads int
|
||||
writeBufferSize uint64
|
||||
numLevels int
|
||||
}{
|
||||
{
|
||||
desc: "default options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 2 options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 4 options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
maxFileOpeningThreadsDBOptName: 9,
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
numLevelsCFOptName: 9,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: 9,
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
db, err := openRocksdb(dir, tc.mockAppOptions)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("db doesn't exist yet", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
mockAppOpts := newMockAppOptions(map[string]interface{}{})
|
||||
db, err := openRocksdb(dir, mockAppOpts)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadLatestOptions(t *testing.T) {
|
||||
t.Run("db already exists", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
const testCasesNum = 3
|
||||
dbOptsList := make([]*grocksdb.Options, testCasesNum)
|
||||
cfOptsList := make([]*grocksdb.Options, testCasesNum)
|
||||
|
||||
dbOptsList[0] = newDefaultOptions()
|
||||
cfOptsList[0] = newDefaultOptions()
|
||||
|
||||
dbOptsList[1] = newDefaultOptions()
|
||||
dbOptsList[1].SetMaxOpenFiles(999)
|
||||
cfOptsList[1] = newDefaultOptions()
|
||||
cfOptsList[1].SetWriteBufferSize(999_999)
|
||||
|
||||
dbOptsList[2] = newDefaultOptions()
|
||||
dbOptsList[2].SetMaxOpenFiles(999)
|
||||
dbOptsList[2].SetMaxFileOpeningThreads(9)
|
||||
cfOptsList[2] = newDefaultOptions()
|
||||
cfOptsList[2].SetWriteBufferSize(999_999)
|
||||
cfOptsList[2].SetNumLevels(9)
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
dbOpts *grocksdb.Options
|
||||
cfOpts *grocksdb.Options
|
||||
maxOpenFiles int
|
||||
maxFileOpeningThreads int
|
||||
writeBufferSize uint64
|
||||
numLevels int
|
||||
}{
|
||||
{
|
||||
desc: "default options",
|
||||
dbOpts: dbOptsList[0],
|
||||
cfOpts: cfOptsList[0],
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 2 options",
|
||||
dbOpts: dbOptsList[1],
|
||||
cfOpts: cfOptsList[1],
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "change 4 options",
|
||||
dbOpts: dbOptsList[2],
|
||||
cfOpts: cfOptsList[2],
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: 9,
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
name := "application"
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("db doesn't exist yet", func(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
dbOpts, cfOpts, err := LoadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
|
||||
func TestOverrideDBOpts(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
maxOpenFiles int
|
||||
maxFileOpeningThreads int
|
||||
}{
|
||||
{
|
||||
desc: "override nothing",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
},
|
||||
{
|
||||
desc: "override max-open-files",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
|
||||
},
|
||||
{
|
||||
desc: "override max-file-opening-threads",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxFileOpeningThreadsDBOptName: 9,
|
||||
}),
|
||||
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
|
||||
maxFileOpeningThreads: 9,
|
||||
},
|
||||
{
|
||||
desc: "override max-open-files and max-file-opening-threads",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
maxOpenFilesDBOptName: 999,
|
||||
maxFileOpeningThreadsDBOptName: 9,
|
||||
}),
|
||||
maxOpenFiles: 999,
|
||||
maxFileOpeningThreads: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
dbOpts := newDefaultOptions()
|
||||
dbOpts = overrideDBOpts(dbOpts, tc.mockAppOptions)
|
||||
|
||||
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOverrideCFOpts(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
writeBufferSize uint64
|
||||
numLevels int
|
||||
}{
|
||||
{
|
||||
desc: "override nothing",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "override write-buffer-size",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
}),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: defaultOpts.GetNumLevels(),
|
||||
},
|
||||
{
|
||||
desc: "override num-levels",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
numLevelsCFOptName: 9,
|
||||
}),
|
||||
writeBufferSize: defaultOpts.GetWriteBufferSize(),
|
||||
numLevels: 9,
|
||||
},
|
||||
{
|
||||
desc: "override write-buffer-size and num-levels",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
writeBufferSizeCFOptName: 999_999,
|
||||
numLevelsCFOptName: 9,
|
||||
}),
|
||||
writeBufferSize: 999_999,
|
||||
numLevels: 9,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
cfOpts := newDefaultOptions()
|
||||
cfOpts = overrideCFOpts(cfOpts, tc.mockAppOptions)
|
||||
|
||||
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadOptsFromAppOpts(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
mockAppOptions *mockAppOptions
|
||||
asyncIO bool
|
||||
}{
|
||||
{
|
||||
desc: "default options",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
|
||||
asyncIO: false,
|
||||
},
|
||||
{
|
||||
desc: "set asyncIO option to true",
|
||||
mockAppOptions: newMockAppOptions(map[string]interface{}{
|
||||
asyncIOReadOptName: true,
|
||||
}),
|
||||
asyncIO: true,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
readOpts := readOptsFromAppOpts(tc.mockAppOptions)
|
||||
|
||||
require.Equal(t, tc.asyncIO, readOpts.IsAsyncIO())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRocksDBWithOptions(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
name := "application"
|
||||
dir, err := os.MkdirTemp("", "rocksdb")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := os.RemoveAll(dir)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
dbOpts := newDefaultOptions()
|
||||
dbOpts.SetMaxOpenFiles(999)
|
||||
cfOpts := newDefaultOptions()
|
||||
cfOpts.SetWriteBufferSize(999_999)
|
||||
|
||||
db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.Close())
|
||||
|
||||
dbOpts, cfOpts, err = LoadLatestOptions(filepath.Join(dir, "application.db"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 999, dbOpts.GetMaxOpenFiles())
|
||||
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
|
||||
require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize())
|
||||
require.Equal(t, defaultOpts.GetNumLevels(), dbOpts.GetNumLevels())
|
||||
}
|
||||
|
||||
func TestNewDefaultOptions(t *testing.T) {
|
||||
defaultOpts := newDefaultOptions()
|
||||
|
||||
maxOpenFiles := defaultOpts.GetMaxOpenFiles()
|
||||
require.Equal(t, 4096, maxOpenFiles)
|
||||
}
|
@ -1,87 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
)
|
||||
|
||||
type propsGetter interface {
|
||||
GetProperty(propName string) (value string)
|
||||
GetIntProperty(propName string) (value uint64, success bool)
|
||||
}
|
||||
|
||||
type propsLoader struct {
|
||||
db propsGetter
|
||||
errorMsgs []string
|
||||
}
|
||||
|
||||
func newPropsLoader(db propsGetter) *propsLoader {
|
||||
return &propsLoader{
|
||||
db: db,
|
||||
errorMsgs: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (l *propsLoader) load() (*properties, error) {
|
||||
props := &properties{
|
||||
BaseLevel: l.getIntProperty("rocksdb.base-level"),
|
||||
BlockCacheCapacity: l.getIntProperty("rocksdb.block-cache-capacity"),
|
||||
BlockCachePinnedUsage: l.getIntProperty("rocksdb.block-cache-pinned-usage"),
|
||||
BlockCacheUsage: l.getIntProperty("rocksdb.block-cache-usage"),
|
||||
CurSizeActiveMemTable: l.getIntProperty("rocksdb.cur-size-active-mem-table"),
|
||||
CurSizeAllMemTables: l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
|
||||
EstimateLiveDataSize: l.getIntProperty("rocksdb.estimate-live-data-size"),
|
||||
EstimateNumKeys: l.getIntProperty("rocksdb.estimate-num-keys"),
|
||||
EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
|
||||
LiveSSTFilesSize: l.getIntProperty("rocksdb.live-sst-files-size"),
|
||||
SizeAllMemTables: l.getIntProperty("rocksdb.size-all-mem-tables"),
|
||||
OptionsStatistics: l.getProperty("rocksdb.options-statistics"),
|
||||
}
|
||||
|
||||
if len(l.errorMsgs) != 0 {
|
||||
errorMsg := strings.Join(l.errorMsgs, ";")
|
||||
return nil, errors.New(errorMsg)
|
||||
}
|
||||
|
||||
return props, nil
|
||||
}
|
||||
|
||||
func (l *propsLoader) getProperty(propName string) string {
|
||||
value := l.db.GetProperty(propName)
|
||||
if value == "" {
|
||||
l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
|
||||
return ""
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func (l *propsLoader) getIntProperty(propName string) uint64 {
|
||||
value, ok := l.db.GetIntProperty(propName)
|
||||
if !ok {
|
||||
l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
|
||||
return 0
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
type properties struct {
|
||||
BaseLevel uint64
|
||||
BlockCacheCapacity uint64
|
||||
BlockCachePinnedUsage uint64
|
||||
BlockCacheUsage uint64
|
||||
CurSizeActiveMemTable uint64
|
||||
CurSizeAllMemTables uint64
|
||||
EstimateLiveDataSize uint64
|
||||
EstimateNumKeys uint64
|
||||
EstimateTableReadersMem uint64
|
||||
LiveSSTFilesSize uint64
|
||||
SizeAllMemTables uint64
|
||||
OptionsStatistics string
|
||||
}
|
@ -1,112 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type mockPropsGetter struct {
|
||||
props map[string]string
|
||||
intProps map[string]uint64
|
||||
}
|
||||
|
||||
func newMockPropsGetter(
|
||||
props map[string]string,
|
||||
intProps map[string]uint64,
|
||||
) *mockPropsGetter {
|
||||
return &mockPropsGetter{
|
||||
props: props,
|
||||
intProps: intProps,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockPropsGetter) GetProperty(propName string) string {
|
||||
return m.props[propName]
|
||||
}
|
||||
|
||||
func (m *mockPropsGetter) GetIntProperty(propName string) (uint64, bool) {
|
||||
prop, ok := m.intProps[propName]
|
||||
return prop, ok
|
||||
}
|
||||
|
||||
func TestPropsLoader(t *testing.T) {
|
||||
defaultProps := map[string]string{
|
||||
"rocksdb.options-statistics": "1",
|
||||
}
|
||||
defaultIntProps := map[string]uint64{
|
||||
"rocksdb.base-level": 1,
|
||||
"rocksdb.block-cache-capacity": 2,
|
||||
"rocksdb.block-cache-pinned-usage": 3,
|
||||
"rocksdb.block-cache-usage": 4,
|
||||
"rocksdb.cur-size-active-mem-table": 5,
|
||||
"rocksdb.cur-size-all-mem-tables": 6,
|
||||
"rocksdb.estimate-live-data-size": 7,
|
||||
"rocksdb.estimate-num-keys": 8,
|
||||
"rocksdb.estimate-table-readers-mem": 9,
|
||||
"rocksdb.live-sst-files-size": 10,
|
||||
"rocksdb.size-all-mem-tables": 11,
|
||||
}
|
||||
missingProps := make(map[string]string)
|
||||
missingIntProps := make(map[string]uint64)
|
||||
defaultExpectedProps := properties{
|
||||
BaseLevel: 1,
|
||||
BlockCacheCapacity: 2,
|
||||
BlockCachePinnedUsage: 3,
|
||||
BlockCacheUsage: 4,
|
||||
CurSizeActiveMemTable: 5,
|
||||
CurSizeAllMemTables: 6,
|
||||
EstimateLiveDataSize: 7,
|
||||
EstimateNumKeys: 8,
|
||||
EstimateTableReadersMem: 9,
|
||||
LiveSSTFilesSize: 10,
|
||||
SizeAllMemTables: 11,
|
||||
OptionsStatistics: "1",
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
props map[string]string
|
||||
intProps map[string]uint64
|
||||
expectedProps *properties
|
||||
success bool
|
||||
}{
|
||||
{
|
||||
desc: "success case",
|
||||
props: defaultProps,
|
||||
intProps: defaultIntProps,
|
||||
expectedProps: &defaultExpectedProps,
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
desc: "missing props",
|
||||
props: missingProps,
|
||||
intProps: defaultIntProps,
|
||||
expectedProps: nil,
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
desc: "missing integer props",
|
||||
props: defaultProps,
|
||||
intProps: missingIntProps,
|
||||
expectedProps: nil,
|
||||
success: false,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
mockPropsGetter := newMockPropsGetter(tc.props, tc.intProps)
|
||||
|
||||
propsLoader := newPropsLoader(mockPropsGetter)
|
||||
actualProps, err := propsLoader.load()
|
||||
if tc.success {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
require.Equal(t, tc.expectedProps, actualProps)
|
||||
})
|
||||
}
|
||||
}
|
@ -1,111 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"errors"
|
||||
)
|
||||
|
||||
// stat represents one line from rocksdb statistics data, stat may have one or more properties
|
||||
// examples:
|
||||
// - rocksdb.block.cache.miss COUNT : 5
|
||||
// - rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
|
||||
// `rocksdb.compaction.times.micros` is name of stat, P50, COUNT, SUM, etc... are props of stat
|
||||
type stat struct {
|
||||
name string
|
||||
props map[string]string
|
||||
}
|
||||
|
||||
// parseSerializedStats parses serialisedStats into map of stat objects
|
||||
// example of serializedStats:
|
||||
// rocksdb.block.cache.miss COUNT : 5
|
||||
// rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
|
||||
func parseSerializedStats(serializedStats string) (map[string]*stat, error) {
|
||||
stats := make(map[string]*stat, 0)
|
||||
|
||||
serializedStatList := strings.Split(serializedStats, "\n")
|
||||
if len(serializedStatList) == 0 {
|
||||
return nil, errors.New("serializedStats is empty")
|
||||
}
|
||||
serializedStatList = serializedStatList[:len(serializedStatList)-1]
|
||||
// iterate over stats line by line
|
||||
for _, serializedStat := range serializedStatList {
|
||||
stat, err := parseSerializedStat(serializedStat)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats[stat.name] = stat
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// parseSerializedStat parses serialisedStat into stat object
|
||||
// example of serializedStat:
|
||||
// rocksdb.block.cache.miss COUNT : 5
|
||||
func parseSerializedStat(serializedStat string) (*stat, error) {
|
||||
tokens := strings.Split(serializedStat, " ")
|
||||
tokensNum := len(tokens)
|
||||
if err := validateTokens(tokens); err != nil {
|
||||
return nil, fmt.Errorf("tokens are invalid: %v", err)
|
||||
}
|
||||
|
||||
props := make(map[string]string)
|
||||
for idx := 1; idx < tokensNum; idx += 3 {
|
||||
// never should happen, but double check to avoid unexpected panic
|
||||
if idx+2 >= tokensNum {
|
||||
break
|
||||
}
|
||||
|
||||
key := tokens[idx]
|
||||
sep := tokens[idx+1]
|
||||
value := tokens[idx+2]
|
||||
|
||||
if err := validateStatProperty(key, value, sep); err != nil {
|
||||
return nil, fmt.Errorf("invalid stat property: %v", err)
|
||||
}
|
||||
|
||||
props[key] = value
|
||||
}
|
||||
|
||||
return &stat{
|
||||
name: tokens[0],
|
||||
props: props,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// validateTokens validates that tokens contains name + N triples (key, sep, value)
|
||||
func validateTokens(tokens []string) error {
|
||||
tokensNum := len(tokens)
|
||||
if tokensNum < 4 {
|
||||
return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens)
|
||||
}
|
||||
if (tokensNum-1)%3 != 0 {
|
||||
return fmt.Errorf("invalid number of tokens: %v, tokens: %v", tokensNum, tokens)
|
||||
}
|
||||
if tokens[0] == "" {
|
||||
return fmt.Errorf("stat name shouldn't be empty")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateStatProperty validates that key and value are divided by separator and aren't empty
|
||||
func validateStatProperty(key, value, sep string) error {
|
||||
if key == "" {
|
||||
return fmt.Errorf("key shouldn't be empty")
|
||||
}
|
||||
if sep != ":" {
|
||||
return fmt.Errorf("separator should be :")
|
||||
}
|
||||
if value == "" {
|
||||
return fmt.Errorf("value shouldn't be empty")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -1,208 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseSerializedStats(t *testing.T) {
|
||||
defaultSerializedStats := `rocksdb.block.cache.miss COUNT : 1
|
||||
rocksdb.block.cache.hit COUNT : 2
|
||||
rocksdb.block.cache.add COUNT : 3
|
||||
rocksdb.block.cache.add.failures COUNT : 4
|
||||
rocksdb.compaction.times.micros P50 : 1 P95 : 2 P99 : 3 P100 : 4 COUNT : 5 SUM : 6
|
||||
rocksdb.compaction.times.cpu_micros P50 : 7 P95 : 8 P99 : 9 P100 : 10 COUNT : 11 SUM : 12
|
||||
`
|
||||
defaultExpectedStatMap := map[string]*stat{
|
||||
"rocksdb.block.cache.miss": {
|
||||
name: "rocksdb.block.cache.miss",
|
||||
props: map[string]string{
|
||||
"COUNT": "1",
|
||||
},
|
||||
},
|
||||
"rocksdb.block.cache.hit": {
|
||||
name: "rocksdb.block.cache.hit",
|
||||
props: map[string]string{
|
||||
"COUNT": "2",
|
||||
},
|
||||
},
|
||||
"rocksdb.block.cache.add": {
|
||||
name: "rocksdb.block.cache.add",
|
||||
props: map[string]string{
|
||||
"COUNT": "3",
|
||||
},
|
||||
},
|
||||
"rocksdb.block.cache.add.failures": {
|
||||
name: "rocksdb.block.cache.add.failures",
|
||||
props: map[string]string{
|
||||
"COUNT": "4",
|
||||
},
|
||||
},
|
||||
"rocksdb.compaction.times.micros": {
|
||||
name: "rocksdb.compaction.times.micros",
|
||||
props: map[string]string{
|
||||
"P50": "1",
|
||||
"P95": "2",
|
||||
"P99": "3",
|
||||
"P100": "4",
|
||||
"COUNT": "5",
|
||||
"SUM": "6",
|
||||
},
|
||||
},
|
||||
"rocksdb.compaction.times.cpu_micros": {
|
||||
name: "rocksdb.compaction.times.cpu_micros",
|
||||
props: map[string]string{
|
||||
"P50": "7",
|
||||
"P95": "8",
|
||||
"P99": "9",
|
||||
"P100": "10",
|
||||
"COUNT": "11",
|
||||
"SUM": "12",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
serializedStats string
|
||||
expectedStatMap map[string]*stat
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
desc: "success case",
|
||||
serializedStats: defaultSerializedStats,
|
||||
expectedStatMap: defaultExpectedStatMap,
|
||||
errMsg: "",
|
||||
},
|
||||
{
|
||||
desc: "missing value #1",
|
||||
serializedStats: `rocksdb.block.cache.miss COUNT :
|
||||
`,
|
||||
expectedStatMap: nil,
|
||||
errMsg: "invalid number of tokens",
|
||||
},
|
||||
{
|
||||
desc: "missing value #2",
|
||||
serializedStats: `rocksdb.compaction.times.micros P50 : 1 P95 :
|
||||
`,
|
||||
expectedStatMap: nil,
|
||||
errMsg: "invalid number of tokens",
|
||||
},
|
||||
{
|
||||
desc: "missing stat name",
|
||||
serializedStats: ` COUNT : 1
|
||||
`,
|
||||
expectedStatMap: nil,
|
||||
errMsg: "stat name shouldn't be empty",
|
||||
},
|
||||
{
|
||||
desc: "empty stat",
|
||||
serializedStats: ``,
|
||||
expectedStatMap: make(map[string]*stat),
|
||||
errMsg: "",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
actualStatMap, err := parseSerializedStats(tc.serializedStats)
|
||||
if tc.errMsg == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.errMsg)
|
||||
}
|
||||
require.Equal(t, tc.expectedStatMap, actualStatMap)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateTokens(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
tokens []string
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
desc: "success case",
|
||||
tokens: []string{"name", "key", ":", "value"},
|
||||
errMsg: "",
|
||||
},
|
||||
{
|
||||
desc: "missing value #1",
|
||||
tokens: []string{"name", "key", ":"},
|
||||
errMsg: "invalid number of tokens",
|
||||
},
|
||||
{
|
||||
desc: "missing value #2",
|
||||
tokens: []string{"name", "key", ":", "value", "key2", ":"},
|
||||
errMsg: "invalid number of tokens",
|
||||
},
|
||||
{
|
||||
desc: "empty stat name",
|
||||
tokens: []string{"", "key", ":", "value"},
|
||||
errMsg: "stat name shouldn't be empty",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
err := validateTokens(tc.tokens)
|
||||
if tc.errMsg == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.errMsg)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateStatProperty(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
key string
|
||||
value string
|
||||
sep string
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
desc: "success case",
|
||||
key: "key",
|
||||
value: "value",
|
||||
sep: ":",
|
||||
errMsg: "",
|
||||
},
|
||||
{
|
||||
desc: "missing key",
|
||||
key: "",
|
||||
value: "value",
|
||||
sep: ":",
|
||||
errMsg: "key shouldn't be empty",
|
||||
},
|
||||
{
|
||||
desc: "missing value",
|
||||
key: "key",
|
||||
value: "",
|
||||
sep: ":",
|
||||
errMsg: "value shouldn't be empty",
|
||||
},
|
||||
{
|
||||
desc: "invalid separator",
|
||||
key: "key",
|
||||
value: "value",
|
||||
sep: "#",
|
||||
errMsg: "separator should be :",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
err := validateStatProperty(tc.key, tc.value, tc.sep)
|
||||
if tc.errMsg == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.errMsg)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,284 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
sum = "SUM"
|
||||
count = "COUNT"
|
||||
p50 = "P50"
|
||||
p95 = "P95"
|
||||
p99 = "P99"
|
||||
p100 = "P100"
|
||||
)
|
||||
|
||||
type statLoader struct {
|
||||
// statMap contains map of stat objects returned by parseSerializedStats function
|
||||
// example of stats:
|
||||
// #1: rocksdb.block.cache.miss COUNT : 5
|
||||
// #2: rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
|
||||
// #1 case will be cast into int64
|
||||
// #2 case will be cast into float64Histogram
|
||||
statMap map[string]*stat
|
||||
|
||||
// NOTE: some methods accumulate errors instead of returning them, these methods are private and not intended to use outside
|
||||
errors []error
|
||||
}
|
||||
|
||||
func newStatLoader(statMap map[string]*stat) *statLoader {
|
||||
return &statLoader{
|
||||
statMap: statMap,
|
||||
errors: make([]error, 0),
|
||||
}
|
||||
}
|
||||
|
||||
type stats struct {
|
||||
NumberKeysWritten int64
|
||||
NumberKeysRead int64
|
||||
NumberKeysUpdated int64
|
||||
|
||||
// total block cache misses
|
||||
// BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
|
||||
// BLOCK_CACHE_FILTER_MISS +
|
||||
// BLOCK_CACHE_DATA_MISS;
|
||||
// BLOCK_CACHE_INDEX_MISS: # of times cache miss when accessing index block from block cache.
|
||||
// BLOCK_CACHE_FILTER_MISS: # of times cache miss when accessing filter block from block cache.
|
||||
// BLOCK_CACHE_DATA_MISS: # of times cache miss when accessing data block from block cache.
|
||||
BlockCacheMiss int64
|
||||
|
||||
// total block cache hit
|
||||
// BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
|
||||
// BLOCK_CACHE_FILTER_HIT +
|
||||
// BLOCK_CACHE_DATA_HIT;
|
||||
// BLOCK_CACHE_INDEX_HIT: # of times cache hit when accessing index block from block cache.
|
||||
// BLOCK_CACHE_FILTER_HIT: # of times cache hit when accessing filter block from block cache.
|
||||
// BLOCK_CACHE_DATA_HIT: # of times cache hit when accessing data block from block cache.
|
||||
BlockCacheHit int64
|
||||
|
||||
// # of blocks added to block cache.
|
||||
BlockCacheAdd int64
|
||||
// # of failures when adding blocks to block cache.
|
||||
BlockCacheAddFailures int64
|
||||
|
||||
BlockCacheIndexMiss int64
|
||||
BlockCacheIndexHit int64
|
||||
BlockCacheIndexBytesInsert int64
|
||||
BlockCacheFilterMiss int64
|
||||
BlockCacheFilterHit int64
|
||||
BlockCacheFilterBytesInsert int64
|
||||
BlockCacheDataMiss int64
|
||||
BlockCacheDataHit int64
|
||||
BlockCacheDataBytesInsert int64
|
||||
|
||||
CompactReadBytes int64 // Bytes read during compaction
|
||||
CompactWriteBytes int64 // Bytes written during compaction
|
||||
|
||||
CompactionTimesMicros *float64Histogram
|
||||
CompactionTimesCPUMicros *float64Histogram
|
||||
NumFilesInSingleCompaction *float64Histogram
|
||||
|
||||
// Read amplification statistics.
|
||||
// Read amplification can be calculated using this formula
|
||||
// (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
|
||||
//
|
||||
// REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
|
||||
// TODO(yevhenii): seems not working?
|
||||
ReadAmpEstimateUsefulBytes int64 // Estimate of total bytes actually used.
|
||||
ReadAmpTotalReadBytes int64 // Total size of loaded data blocks.
|
||||
|
||||
NumberFileOpens int64
|
||||
NumberFileErrors int64
|
||||
|
||||
// # of times bloom filter has avoided file reads, i.e., negatives.
|
||||
BloomFilterUseful int64
|
||||
// # of times bloom FullFilter has not avoided the reads.
|
||||
BloomFilterFullPositive int64
|
||||
// # of times bloom FullFilter has not avoided the reads and data actually
|
||||
// exist.
|
||||
BloomFilterFullTruePositive int64
|
||||
|
||||
// # of memtable hits.
|
||||
MemtableHit int64
|
||||
// # of memtable misses.
|
||||
MemtableMiss int64
|
||||
|
||||
// # of Get() queries served by L0
|
||||
GetHitL0 int64
|
||||
// # of Get() queries served by L1
|
||||
GetHitL1 int64
|
||||
// # of Get() queries served by L2 and up
|
||||
GetHitL2AndUp int64
|
||||
|
||||
// The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
|
||||
// DB::Merge(), and DB::Write().
|
||||
BytesWritten int64
|
||||
// The number of uncompressed bytes read from DB::Get(). It could be
|
||||
// either from memtables, cache, or table files.
|
||||
// For the number of logical bytes read from DB::MultiGet(),
|
||||
// please use NUMBER_MULTIGET_BYTES_READ.
|
||||
BytesRead int64
|
||||
|
||||
// Writer has to wait for compaction or flush to finish.
|
||||
StallMicros int64
|
||||
DBWriteStallHistogram *float64Histogram
|
||||
|
||||
// Last level and non-last level read statistics
|
||||
LastLevelReadBytes int64
|
||||
LastLevelReadCount int64
|
||||
NonLastLevelReadBytes int64
|
||||
NonLastLevelReadCount int64
|
||||
|
||||
DBGetMicros *float64Histogram
|
||||
DBWriteMicros *float64Histogram
|
||||
|
||||
// Value size distribution in each operation
|
||||
BytesPerRead *float64Histogram
|
||||
BytesPerWrite *float64Histogram
|
||||
BytesPerMultiget *float64Histogram
|
||||
|
||||
// Time spent flushing memtable to disk
|
||||
FlushMicros *float64Histogram
|
||||
}
|
||||
|
||||
type float64Histogram struct {
|
||||
Sum float64
|
||||
Count float64
|
||||
P50 float64
|
||||
P95 float64
|
||||
P99 float64
|
||||
P100 float64
|
||||
}
|
||||
|
||||
func (l *statLoader) error() error {
|
||||
if len(l.errors) != 0 {
|
||||
return fmt.Errorf("%v", l.errors)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *statLoader) load() (*stats, error) {
|
||||
stats := &stats{
|
||||
NumberKeysWritten: l.getInt64StatValue("rocksdb.number.keys.written", count),
|
||||
NumberKeysRead: l.getInt64StatValue("rocksdb.number.keys.read", count),
|
||||
NumberKeysUpdated: l.getInt64StatValue("rocksdb.number.keys.updated", count),
|
||||
BlockCacheMiss: l.getInt64StatValue("rocksdb.block.cache.miss", count),
|
||||
BlockCacheHit: l.getInt64StatValue("rocksdb.block.cache.hit", count),
|
||||
BlockCacheAdd: l.getInt64StatValue("rocksdb.block.cache.add", count),
|
||||
BlockCacheAddFailures: l.getInt64StatValue("rocksdb.block.cache.add.failures", count),
|
||||
BlockCacheIndexMiss: l.getInt64StatValue("rocksdb.block.cache.index.miss", count),
|
||||
BlockCacheIndexHit: l.getInt64StatValue("rocksdb.block.cache.index.hit", count),
|
||||
BlockCacheIndexBytesInsert: l.getInt64StatValue("rocksdb.block.cache.index.bytes.insert", count),
|
||||
BlockCacheFilterMiss: l.getInt64StatValue("rocksdb.block.cache.filter.miss", count),
|
||||
BlockCacheFilterHit: l.getInt64StatValue("rocksdb.block.cache.filter.hit", count),
|
||||
BlockCacheFilterBytesInsert: l.getInt64StatValue("rocksdb.block.cache.filter.bytes.insert", count),
|
||||
BlockCacheDataMiss: l.getInt64StatValue("rocksdb.block.cache.data.miss", count),
|
||||
BlockCacheDataHit: l.getInt64StatValue("rocksdb.block.cache.data.hit", count),
|
||||
BlockCacheDataBytesInsert: l.getInt64StatValue("rocksdb.block.cache.data.bytes.insert", count),
|
||||
CompactReadBytes: l.getInt64StatValue("rocksdb.compact.read.bytes", count),
|
||||
CompactWriteBytes: l.getInt64StatValue("rocksdb.compact.write.bytes", count),
|
||||
CompactionTimesMicros: l.getFloat64HistogramStatValue("rocksdb.compaction.times.micros"),
|
||||
CompactionTimesCPUMicros: l.getFloat64HistogramStatValue("rocksdb.compaction.times.cpu_micros"),
|
||||
NumFilesInSingleCompaction: l.getFloat64HistogramStatValue("rocksdb.numfiles.in.singlecompaction"),
|
||||
ReadAmpEstimateUsefulBytes: l.getInt64StatValue("rocksdb.read.amp.estimate.useful.bytes", count),
|
||||
ReadAmpTotalReadBytes: l.getInt64StatValue("rocksdb.read.amp.total.read.bytes", count),
|
||||
NumberFileOpens: l.getInt64StatValue("rocksdb.no.file.opens", count),
|
||||
NumberFileErrors: l.getInt64StatValue("rocksdb.no.file.errors", count),
|
||||
BloomFilterUseful: l.getInt64StatValue("rocksdb.bloom.filter.useful", count),
|
||||
BloomFilterFullPositive: l.getInt64StatValue("rocksdb.bloom.filter.full.positive", count),
|
||||
BloomFilterFullTruePositive: l.getInt64StatValue("rocksdb.bloom.filter.full.true.positive", count),
|
||||
MemtableHit: l.getInt64StatValue("rocksdb.memtable.hit", count),
|
||||
MemtableMiss: l.getInt64StatValue("rocksdb.memtable.miss", count),
|
||||
GetHitL0: l.getInt64StatValue("rocksdb.l0.hit", count),
|
||||
GetHitL1: l.getInt64StatValue("rocksdb.l1.hit", count),
|
||||
GetHitL2AndUp: l.getInt64StatValue("rocksdb.l2andup.hit", count),
|
||||
BytesWritten: l.getInt64StatValue("rocksdb.bytes.written", count),
|
||||
BytesRead: l.getInt64StatValue("rocksdb.bytes.read", count),
|
||||
StallMicros: l.getInt64StatValue("rocksdb.stall.micros", count),
|
||||
DBWriteStallHistogram: l.getFloat64HistogramStatValue("rocksdb.db.write.stall"),
|
||||
LastLevelReadBytes: l.getInt64StatValue("rocksdb.last.level.read.bytes", count),
|
||||
LastLevelReadCount: l.getInt64StatValue("rocksdb.last.level.read.count", count),
|
||||
NonLastLevelReadBytes: l.getInt64StatValue("rocksdb.non.last.level.read.bytes", count),
|
||||
NonLastLevelReadCount: l.getInt64StatValue("rocksdb.non.last.level.read.count", count),
|
||||
DBGetMicros: l.getFloat64HistogramStatValue("rocksdb.db.get.micros"),
|
||||
DBWriteMicros: l.getFloat64HistogramStatValue("rocksdb.db.write.micros"),
|
||||
BytesPerRead: l.getFloat64HistogramStatValue("rocksdb.bytes.per.read"),
|
||||
BytesPerWrite: l.getFloat64HistogramStatValue("rocksdb.bytes.per.write"),
|
||||
BytesPerMultiget: l.getFloat64HistogramStatValue("rocksdb.bytes.per.multiget"),
|
||||
FlushMicros: l.getFloat64HistogramStatValue("rocksdb.db.flush.micros"),
|
||||
}
|
||||
|
||||
err := l.error()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// getFloat64HistogramStatValue converts stat object into float64Histogram
|
||||
func (l *statLoader) getFloat64HistogramStatValue(statName string) *float64Histogram {
|
||||
return &float64Histogram{
|
||||
Sum: l.getFloat64StatValue(statName, sum),
|
||||
Count: l.getFloat64StatValue(statName, count),
|
||||
P50: l.getFloat64StatValue(statName, p50),
|
||||
P95: l.getFloat64StatValue(statName, p95),
|
||||
P99: l.getFloat64StatValue(statName, p99),
|
||||
P100: l.getFloat64StatValue(statName, p100),
|
||||
}
|
||||
}
|
||||
|
||||
// getInt64StatValue converts property of stat object into int64
|
||||
func (l *statLoader) getInt64StatValue(statName, propName string) int64 {
|
||||
stringVal := l.getStatValue(statName, propName)
|
||||
if stringVal == "" {
|
||||
l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
|
||||
return 0
|
||||
}
|
||||
|
||||
intVal, err := strconv.ParseInt(stringVal, 10, 64)
|
||||
if err != nil {
|
||||
l.errors = append(l.errors, fmt.Errorf("can't parse int: %v", err))
|
||||
return 0
|
||||
}
|
||||
|
||||
return intVal
|
||||
}
|
||||
|
||||
// getFloat64StatValue converts property of stat object into float64
|
||||
func (l *statLoader) getFloat64StatValue(statName, propName string) float64 {
|
||||
stringVal := l.getStatValue(statName, propName)
|
||||
if stringVal == "" {
|
||||
l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
|
||||
return 0
|
||||
}
|
||||
|
||||
floatVal, err := strconv.ParseFloat(stringVal, 64)
|
||||
if err != nil {
|
||||
l.errors = append(l.errors, fmt.Errorf("can't parse float: %v", err))
|
||||
return 0
|
||||
}
|
||||
|
||||
return floatVal
|
||||
}
|
||||
|
||||
// getStatValue gets property of stat object
|
||||
func (l *statLoader) getStatValue(statName, propName string) string {
|
||||
stat, ok := l.statMap[statName]
|
||||
if !ok {
|
||||
l.errors = append(l.errors, fmt.Errorf("stat %v doesn't exist", statName))
|
||||
return ""
|
||||
}
|
||||
prop, ok := stat.props[propName]
|
||||
if !ok {
|
||||
l.errors = append(l.errors, fmt.Errorf("stat %v doesn't have %v property", statName, propName))
|
||||
return ""
|
||||
}
|
||||
|
||||
return prop
|
||||
}
|
@ -1,90 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package opendb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestStatsLoader(t *testing.T) {
|
||||
defaultStat := stat{
|
||||
props: map[string]string{
|
||||
"COUNT": "1",
|
||||
},
|
||||
}
|
||||
defaultHistogramStat := stat{
|
||||
props: map[string]string{
|
||||
"P50": "1",
|
||||
"P95": "2",
|
||||
"P99": "3",
|
||||
"P100": "4",
|
||||
"COUNT": "5",
|
||||
"SUM": "6",
|
||||
},
|
||||
}
|
||||
defaultStatMap := map[string]*stat{
|
||||
"rocksdb.number.keys.written": &defaultStat,
|
||||
"rocksdb.number.keys.read": &defaultStat,
|
||||
"rocksdb.number.keys.updated": &defaultStat,
|
||||
"rocksdb.block.cache.miss": &defaultStat,
|
||||
"rocksdb.block.cache.hit": &defaultStat,
|
||||
"rocksdb.block.cache.add": &defaultStat,
|
||||
"rocksdb.block.cache.add.failures": &defaultStat,
|
||||
"rocksdb.block.cache.index.miss": &defaultStat,
|
||||
"rocksdb.block.cache.index.hit": &defaultStat,
|
||||
"rocksdb.block.cache.index.bytes.insert": &defaultStat,
|
||||
"rocksdb.block.cache.filter.miss": &defaultStat,
|
||||
"rocksdb.block.cache.filter.hit": &defaultStat,
|
||||
"rocksdb.block.cache.filter.bytes.insert": &defaultStat,
|
||||
"rocksdb.block.cache.data.miss": &defaultStat,
|
||||
"rocksdb.block.cache.data.hit": &defaultStat,
|
||||
"rocksdb.block.cache.data.bytes.insert": &defaultStat,
|
||||
"rocksdb.compact.read.bytes": &defaultStat,
|
||||
"rocksdb.compact.write.bytes": &defaultStat,
|
||||
"rocksdb.compaction.times.micros": &defaultHistogramStat,
|
||||
"rocksdb.compaction.times.cpu_micros": &defaultHistogramStat,
|
||||
"rocksdb.numfiles.in.singlecompaction": &defaultHistogramStat,
|
||||
"rocksdb.read.amp.estimate.useful.bytes": &defaultStat,
|
||||
"rocksdb.read.amp.total.read.bytes": &defaultStat,
|
||||
"rocksdb.no.file.opens": &defaultStat,
|
||||
"rocksdb.no.file.errors": &defaultStat,
|
||||
"rocksdb.bloom.filter.useful": &defaultStat,
|
||||
"rocksdb.bloom.filter.full.positive": &defaultStat,
|
||||
"rocksdb.bloom.filter.full.true.positive": &defaultStat,
|
||||
"rocksdb.memtable.hit": &defaultStat,
|
||||
"rocksdb.memtable.miss": &defaultStat,
|
||||
"rocksdb.l0.hit": &defaultStat,
|
||||
"rocksdb.l1.hit": &defaultStat,
|
||||
"rocksdb.l2andup.hit": &defaultStat,
|
||||
"rocksdb.bytes.written": &defaultStat,
|
||||
"rocksdb.bytes.read": &defaultStat,
|
||||
"rocksdb.stall.micros": &defaultStat,
|
||||
"rocksdb.db.write.stall": &defaultHistogramStat,
|
||||
"rocksdb.last.level.read.bytes": &defaultStat,
|
||||
"rocksdb.last.level.read.count": &defaultStat,
|
||||
"rocksdb.non.last.level.read.bytes": &defaultStat,
|
||||
"rocksdb.non.last.level.read.count": &defaultStat,
|
||||
"rocksdb.db.get.micros": &defaultHistogramStat,
|
||||
"rocksdb.db.write.micros": &defaultHistogramStat,
|
||||
"rocksdb.bytes.per.read": &defaultHistogramStat,
|
||||
"rocksdb.bytes.per.write": &defaultHistogramStat,
|
||||
"rocksdb.bytes.per.multiget": &defaultHistogramStat,
|
||||
"rocksdb.db.flush.micros": &defaultHistogramStat,
|
||||
}
|
||||
|
||||
statLoader := newStatLoader(defaultStatMap)
|
||||
stats, err := statLoader.load()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, stats.NumberKeysWritten, int64(1))
|
||||
require.Equal(t, stats.NumberKeysRead, int64(1))
|
||||
require.Equal(t, stats.CompactionTimesMicros.P50, float64(1))
|
||||
require.Equal(t, stats.CompactionTimesMicros.P95, float64(2))
|
||||
require.Equal(t, stats.CompactionTimesMicros.P99, float64(3))
|
||||
require.Equal(t, stats.CompactionTimesMicros.P100, float64(4))
|
||||
require.Equal(t, stats.CompactionTimesMicros.Count, float64(5))
|
||||
require.Equal(t, stats.CompactionTimesMicros.Sum, float64(6))
|
||||
}
|
2252
contracts/package-lock.json
generated
2252
contracts/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@ -5,7 +5,7 @@
|
||||
"private": true,
|
||||
"description": "Solidity contracts for 0g Blockchain",
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
"node": ">=20.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "npm run clean && npm run compile && npm run ethermint-json",
|
||||
@ -23,14 +23,14 @@
|
||||
"test": "hardhat test"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@nomicfoundation/hardhat-toolbox": "^2.0.2",
|
||||
"@nomicfoundation/hardhat-toolbox": "^5.0.0",
|
||||
"@openzeppelin/contracts": "4.8.3",
|
||||
"@typescript-eslint/eslint-plugin": "^5.59.6",
|
||||
"@typescript-eslint/parser": "^5.59.6",
|
||||
"eslint": "^8.40.0",
|
||||
"eslint-config-prettier": "8.8.0",
|
||||
"eslint-plugin-prettier": "^4.2.1",
|
||||
"hardhat": "^2.14.0",
|
||||
"hardhat": "^2.22.8",
|
||||
"prettier": "2.8.8",
|
||||
"prettier-plugin-solidity": "^1.1.3",
|
||||
"solhint": "^3.4.1",
|
||||
|
@ -26,7 +26,7 @@ rm -rf $DATA
|
||||
|
||||
BINARY=kava
|
||||
|
||||
# Create new data directory, overwriting any that alread existed
|
||||
# Create new data directory, overwriting any that already existed
|
||||
chainID="kavalocalnet_8888-1"
|
||||
$BINARY init validator --chain-id $chainID
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
129
go.mod
129
go.mod
@ -4,61 +4,64 @@ go 1.21
|
||||
|
||||
require (
|
||||
cosmossdk.io/errors v1.0.1
|
||||
cosmossdk.io/log v1.3.1
|
||||
cosmossdk.io/math v1.3.0
|
||||
cosmossdk.io/simapp v0.0.0-20231127212628-044ff4d8c015
|
||||
github.com/Kava-Labs/opendb v0.0.0-20240719173129-a2f11f6d7e51
|
||||
github.com/cenkalti/backoff/v4 v4.1.3
|
||||
github.com/cometbft/cometbft v0.37.4
|
||||
github.com/cometbft/cometbft v0.37.9
|
||||
github.com/cometbft/cometbft-db v0.9.1
|
||||
github.com/coniks-sys/coniks-go v0.0.0-20180722014011-11acf4819b71
|
||||
github.com/consensys/gnark-crypto v0.12.1
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.4
|
||||
github.com/cosmos/cosmos-db v1.0.2
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.5
|
||||
github.com/cosmos/cosmos-sdk v0.47.10
|
||||
github.com/cosmos/go-bip39 v1.0.0
|
||||
github.com/cosmos/gogoproto v1.4.10
|
||||
github.com/cosmos/iavl v0.20.1
|
||||
github.com/cosmos/iavl v1.2.0
|
||||
github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.1.3
|
||||
github.com/cosmos/ibc-go/modules/light-clients/08-wasm v0.1.1-ibc-go-v7.3-wasmvm-v1.5
|
||||
github.com/cosmos/ibc-go/v7 v7.4.0
|
||||
github.com/ethereum/go-ethereum v1.10.26
|
||||
github.com/evmos/ethermint v0.21.0
|
||||
github.com/go-kit/kit v0.12.0
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/protobuf v1.5.3
|
||||
github.com/golang/protobuf v1.5.4
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
github.com/linxGnu/grocksdb v1.8.6
|
||||
github.com/huandu/skiplist v1.2.0
|
||||
github.com/linxGnu/grocksdb v1.8.13
|
||||
github.com/pelletier/go-toml/v2 v2.1.0
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/shopspring/decimal v1.4.0
|
||||
github.com/spf13/cast v1.6.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/viper v1.16.0
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/viper v1.18.2
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/subosito/gotenv v1.6.0
|
||||
golang.org/x/crypto v0.24.0
|
||||
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0
|
||||
google.golang.org/grpc v1.60.1
|
||||
google.golang.org/protobuf v1.32.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de
|
||||
google.golang.org/grpc v1.63.2
|
||||
google.golang.org/protobuf v1.33.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.111.0 // indirect
|
||||
cloud.google.com/go/compute v1.23.3 // indirect
|
||||
cloud.google.com/go v0.112.0 // indirect
|
||||
cloud.google.com/go/compute v1.24.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/iam v1.1.5 // indirect
|
||||
cloud.google.com/go/storage v1.35.1 // indirect
|
||||
cloud.google.com/go/iam v1.1.6 // indirect
|
||||
cloud.google.com/go/storage v1.36.0 // indirect
|
||||
cosmossdk.io/api v0.3.1 // indirect
|
||||
cosmossdk.io/core v0.6.1 // indirect
|
||||
cosmossdk.io/depinject v1.0.0-alpha.4 // indirect
|
||||
cosmossdk.io/log v1.3.1 // indirect
|
||||
cosmossdk.io/simapp v0.0.0-20231127212628-044ff4d8c015 // indirect
|
||||
cosmossdk.io/tools/rosetta v0.2.1 // indirect
|
||||
filippo.io/edwards25519 v1.0.0 // indirect
|
||||
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
|
||||
github.com/99designs/keyring v1.2.1 // indirect
|
||||
github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect
|
||||
github.com/CosmWasm/wasmvm v1.5.2 // indirect
|
||||
github.com/DataDog/zstd v1.5.5 // indirect
|
||||
github.com/StackExchange/wmi v1.2.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
@ -67,30 +70,32 @@ require (
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
|
||||
github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.7.0 // indirect
|
||||
github.com/btcsuite/btcd v0.23.4 // indirect
|
||||
github.com/btcsuite/btcd v0.24.0 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chzyer/readline v1.5.1 // indirect
|
||||
github.com/cockroachdb/apd/v2 v2.0.2 // indirect
|
||||
github.com/cockroachdb/errors v1.10.0 // indirect
|
||||
github.com/cockroachdb/errors v1.11.1 // indirect
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
|
||||
github.com/cockroachdb/pebble v1.1.0 // indirect
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
|
||||
github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect
|
||||
github.com/confio/ics23/go v0.9.0 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/cosmos/btcutil v1.0.5 // indirect
|
||||
github.com/cosmos/gogogateway v1.2.0 // indirect
|
||||
github.com/cosmos/ics23/go v0.10.0 // indirect
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.1 // indirect
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect
|
||||
github.com/cosmos/rosetta-sdk-go v0.10.0 // indirect
|
||||
github.com/creachadair/taskgroup v0.4.2 // indirect
|
||||
github.com/danieljoos/wincred v1.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
|
||||
github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.1 // indirect
|
||||
@ -100,20 +105,22 @@ require (
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.2 // indirect
|
||||
github.com/emicklei/dot v1.6.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/getsentry/sentry-go v0.23.0 // indirect
|
||||
github.com/getsentry/sentry-go v0.27.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
|
||||
github.com/gogo/googleapis v1.4.1 // indirect
|
||||
github.com/golang/glog v1.1.2 // indirect
|
||||
github.com/golang/glog v1.2.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/mock v1.6.0 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
@ -121,7 +128,7 @@ require (
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/orderedcode v0.0.1 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
|
||||
github.com/gorilla/handlers v1.5.1 // indirect
|
||||
@ -131,16 +138,16 @@ require (
|
||||
github.com/gtank/merlin v0.1.1 // indirect
|
||||
github.com/gtank/ristretto255 v0.1.2 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.7.1 // indirect
|
||||
github.com/hashicorp/go-getter v1.7.5 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-safetemp v1.0.0 // indirect
|
||||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.1 // indirect
|
||||
github.com/huandu/skiplist v1.2.0 // indirect
|
||||
github.com/huin/goupnp v1.0.3 // indirect
|
||||
github.com/iancoleman/orderedmap v0.2.0 // indirect
|
||||
github.com/improbable-eng/grpc-web v0.15.0 // indirect
|
||||
@ -148,11 +155,10 @@ require (
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jmhodges/levigo v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.17.0 // indirect
|
||||
github.com/klauspost/compress v1.17.7 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.7 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/manifoldco/promptui v0.9.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
@ -167,28 +173,32 @@ require (
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/mtibben/percent v0.2.1 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
|
||||
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/prometheus/procfs v0.13.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rakyll/statik v0.1.7 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.11.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
github.com/rs/cors v1.8.3 // indirect
|
||||
github.com/rs/zerolog v1.32.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sasha-s/go-deadlock v0.3.1 // indirect
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tendermint/go-amino v0.16.0 // indirect
|
||||
github.com/tidwall/btree v1.6.0 // indirect
|
||||
github.com/tidwall/btree v1.7.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
@ -197,20 +207,24 @@ require (
|
||||
github.com/zondax/ledger-go v0.14.3 // indirect
|
||||
go.etcd.io/bbolt v1.3.8 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.19.0 // indirect
|
||||
golang.org/x/net v0.21.0 // indirect
|
||||
golang.org/x/oauth2 v0.15.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
|
||||
go.opentelemetry.io/otel v1.22.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.22.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.22.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/net v0.24.0 // indirect
|
||||
golang.org/x/oauth2 v0.17.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
google.golang.org/api v0.153.0 // indirect
|
||||
google.golang.org/api v0.162.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
@ -224,20 +238,23 @@ replace (
|
||||
// Use the cosmos keyring code
|
||||
github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0
|
||||
// Use cometbft fork of tendermint
|
||||
github.com/cometbft/cometbft => github.com/kava-labs/cometbft v0.37.4-kava.1
|
||||
github.com/cometbft/cometbft-db => github.com/kava-labs/cometbft-db v0.9.1-kava.1
|
||||
github.com/cometbft/cometbft => github.com/0glabs/cometbft v0.37.9-0glabs.3
|
||||
github.com/cometbft/cometbft-db => github.com/kava-labs/cometbft-db v0.9.1-kava.2
|
||||
// Use cosmos-sdk fork with backported fix for unsafe-reset-all, staking transfer events, and custom tally handler support
|
||||
// github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.46.11-kava.3
|
||||
github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.47.10-0glabs.3
|
||||
github.com/cosmos/cosmos-sdk => github.com/0glabs/cosmos-sdk v0.47.10-0glabs.12
|
||||
github.com/cosmos/iavl => github.com/kava-labs/iavl v1.2.0-kava.1
|
||||
// See https://github.com/cosmos/cosmos-sdk/pull/13093
|
||||
github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.2
|
||||
// Use go-ethereum fork with precompiles
|
||||
// Tracking kava-labs/go-ethereum kava/release/v1.10 branch
|
||||
// TODO: Tag before release
|
||||
github.com/ethereum/go-ethereum => github.com/evmos/go-ethereum v1.10.26-evmos-rc2
|
||||
// Use ethermint fork that respects min-gas-price with NoBaseFee true and london enabled, and includes eip712 support
|
||||
github.com/evmos/ethermint => github.com/0glabs/ethermint v0.21.0-0g.v3.0.2
|
||||
github.com/evmos/ethermint => github.com/0glabs/ethermint v0.21.0-0g.v3.1.15
|
||||
// See https://github.com/cosmos/cosmos-sdk/pull/10401, https://github.com/cosmos/cosmos-sdk/commit/0592ba6158cd0bf49d894be1cef4faeec59e8320
|
||||
github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.9.0
|
||||
// Downgraded to avoid bugs in following commits which causes "version does not exist" errors
|
||||
github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
|
||||
// Avoid change in slices.SortFunc, see https://github.com/cosmos/cosmos-sdk/issues/20159
|
||||
golang.org/x/exp => golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb
|
||||
)
|
||||
|
244
go.sum
244
go.sum
@ -32,8 +32,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9
|
||||
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
|
||||
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
|
||||
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
|
||||
cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
|
||||
cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
|
||||
cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
|
||||
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
|
||||
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
|
||||
@ -71,8 +71,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz
|
||||
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
|
||||
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
|
||||
cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
|
||||
cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
|
||||
cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
|
||||
@ -112,8 +112,8 @@ cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y97
|
||||
cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
|
||||
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
|
||||
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
|
||||
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
|
||||
cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
|
||||
cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
|
||||
cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
|
||||
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
|
||||
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
|
||||
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
|
||||
@ -174,8 +174,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
|
||||
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
|
||||
cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
|
||||
cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
|
||||
cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
|
||||
cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
|
||||
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
|
||||
cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
|
||||
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
|
||||
@ -209,10 +209,12 @@ filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
|
||||
filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
|
||||
git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw=
|
||||
git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA=
|
||||
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.3 h1:Wx3tVMTuFaaHDeJT/OzT7QLfAIpeaZsG9R6XoTOyKCw=
|
||||
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.3/go.mod h1:BWo24B8cApWcO2/widWYIdt3CPxbh+HCSypCPpjTjog=
|
||||
github.com/0glabs/ethermint v0.21.0-0g.v3.0.2 h1:4YI5wzzRdAvZ27PMLityxooICEE1bkG+7HgNQUm6JyM=
|
||||
github.com/0glabs/ethermint v0.21.0-0g.v3.0.2/go.mod h1:HYQUhvcZBIG71H3xlxQSk0XyQEjeaHsduOj6O2QImrE=
|
||||
github.com/0glabs/cometbft v0.37.9-0glabs.3 h1:sobMz3C+OdFYNRQ3degfCZUHUzyuSPUIZqVMYgDtJs4=
|
||||
github.com/0glabs/cometbft v0.37.9-0glabs.3/go.mod h1:j0Q3RqrCd+cztWCugs3obbzC4NyHGBPZZjtm/fWV00I=
|
||||
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.12 h1:mVUhlaGUPn8izK6TfdXD13xakN8+HGl3Y349YF6Kgqc=
|
||||
github.com/0glabs/cosmos-sdk v0.47.10-0glabs.12/go.mod h1:KskIVnhXTFqrw7CDccMvx7To5KzUsOomIsQV7sPGOog=
|
||||
github.com/0glabs/ethermint v0.21.0-0g.v3.1.15 h1:j3GwMVy1bjOb7TNyH7v7qOUu5LRl6oruZECIx9W77J0=
|
||||
github.com/0glabs/ethermint v0.21.0-0g.v3.1.15/go.mod h1:6e/gOcDLhvlDWK3JLJVBgki0gD6H4E1eG7l9byocgWA=
|
||||
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs=
|
||||
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
|
||||
@ -229,9 +231,13 @@ github.com/CosmWasm/wasmvm v1.5.2/go.mod h1:Q0bSEtlktzh7W2hhEaifrFp1Erx11ckQZmjq
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
|
||||
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
|
||||
github.com/Kava-Labs/opendb v0.0.0-20240719173129-a2f11f6d7e51 h1:tMTENCeSPIJO8yCpEQbT15XYXt4EFNQUx3s334uxVts=
|
||||
github.com/Kava-Labs/opendb v0.0.0-20240719173129-a2f11f6d7e51/go.mod h1:LbPsJiWvj90NT3Y9YV8EFPkWfvp8A15Tp88qqKa3LxA=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
|
||||
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
@ -303,9 +309,9 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P
|
||||
github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
|
||||
github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y=
|
||||
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd v0.23.4 h1:IzV6qqkfwbItOS/sg/aDfPDsjPP8twrCOE2R93hxMlQ=
|
||||
github.com/btcsuite/btcd v0.23.4/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
|
||||
github.com/btcsuite/btcd v0.24.0 h1:gL3uHE/IaFj6fcZSu03SvqPMSx7s/dPzfpG/atRwWdo=
|
||||
github.com/btcsuite/btcd v0.24.0/go.mod h1:K4IDc1593s8jKXIF7yS7yCTSxrknB9z0STzc2j6XgE4=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
@ -313,11 +319,12 @@ github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
@ -349,8 +356,8 @@ github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
@ -378,15 +385,23 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E=
|
||||
github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU=
|
||||
github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
|
||||
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4=
|
||||
github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E=
|
||||
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI=
|
||||
github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA=
|
||||
@ -413,8 +428,10 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk=
|
||||
github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis=
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.4 h1:aEL7tU/rLOmxZQ9z4i7mzxcLbSCY48OdY7lIWTLG7oU=
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.4/go.mod h1:oeB+FyVzG3XrQJbJng0EnV8Vljfk9XvTIpGILNU/9Co=
|
||||
github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs=
|
||||
github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA=
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA=
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec=
|
||||
github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y=
|
||||
github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY=
|
||||
github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw=
|
||||
@ -423,8 +440,6 @@ github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ
|
||||
github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU=
|
||||
github.com/cosmos/gogoproto v1.4.10 h1:QH/yT8X+c0F4ZDacDv3z+xE3WU1P1Z3wQoLMBRJoKuI=
|
||||
github.com/cosmos/gogoproto v1.4.10/go.mod h1:3aAZzeRWpAwr+SS/LLkICX2/kDFyaYVzckBDzygIxek=
|
||||
github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg=
|
||||
github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A=
|
||||
github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.1.3 h1:MZGDMETv72suFpTAD6VPGqSIm1FJcChtk2HmVh9D+Bo=
|
||||
github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7 v7.1.3/go.mod h1:UvDmcGIWJPIytq+Q78/ff5NTOsuX/7IrNgEugTW5i0s=
|
||||
github.com/cosmos/ibc-go/modules/light-clients/08-wasm v0.1.1-ibc-go-v7.3-wasmvm-v1.5 h1:sMoHjep+KInjMrppNCEutMVm1p8nI9WhKCuMQ+EcUHw=
|
||||
@ -435,15 +450,16 @@ github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZD
|
||||
github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0=
|
||||
github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo=
|
||||
github.com/cosmos/keyring v1.2.0/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA=
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.1 h1:12ac9+GwBb9BjP7X5ygpFk09Itwzjzfmg6A2CWFjoVs=
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.1/go.mod h1:5tv2RVJEd2+Y38TIQN4CRjJeQGyqOEiKJDfqhk5UjqE=
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5nvue4rK+yM=
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8=
|
||||
github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM=
|
||||
github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creachadair/taskgroup v0.4.2 h1:jsBLdAJE42asreGss2xZGZ8fJra7WtwnHWeJFxv2Li8=
|
||||
github.com/creachadair/taskgroup v0.4.2/go.mod h1:qiXUOSrbwAY3u0JPGTzObbE3yf9hcXHDKBZ2ZjpCbgM=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
@ -459,11 +475,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||
@ -504,6 +521,8 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
|
||||
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI=
|
||||
github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@ -515,13 +534,15 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/evmos/go-ethereum v1.10.26-evmos-rc2 h1:tYghk1ZZ8X4/OQ4YI9hvtm8aSN8OSqO0g9vo/sCMdBo=
|
||||
github.com/evmos/go-ethereum v1.10.26-evmos-rc2/go.mod h1:/6CsT5Ceen2WPLI/oCA3xMcZ5sWMF/D46SjM/ayY0Oo=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
|
||||
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
@ -541,8 +562,8 @@ github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqG
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE=
|
||||
github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
|
||||
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
|
||||
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
@ -556,8 +577,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
|
||||
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
|
||||
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
|
||||
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
@ -566,8 +587,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
|
||||
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
|
||||
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
@ -620,8 +641,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@ -656,8 +677,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
@ -723,8 +744,8 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
|
||||
@ -780,8 +801,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY=
|
||||
github.com/hashicorp/go-getter v1.7.1/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
|
||||
github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4=
|
||||
github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
@ -802,8 +823,11 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
|
||||
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
@ -880,10 +904,10 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
|
||||
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kava-labs/cometbft v0.37.4-kava.1 h1:QRuyBieWdUBpe4pcXgzu1SdMH2lkTaqXr/JPIeqdiHE=
|
||||
github.com/kava-labs/cometbft v0.37.4-kava.1/go.mod h1:Cmg5Hp4sNpapm7j+x0xRyt2g0juQfmB752ous+pA0G8=
|
||||
github.com/kava-labs/cometbft-db v0.9.1-kava.1 h1:0KmSPdXYdRp6TsgKuMxRnMZCMEGC5ysIVjuJddYr4tw=
|
||||
github.com/kava-labs/cometbft-db v0.9.1-kava.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U=
|
||||
github.com/kava-labs/cometbft-db v0.9.1-kava.2 h1:ZQaio886ifvml9XtJB4IYHhlArgA3+/a5Zwidg7H2J8=
|
||||
github.com/kava-labs/cometbft-db v0.9.1-kava.2/go.mod h1:PvUZbx7zeR7I4CAvtKBoii/5ia5gXskKjDjIVpt7gDw=
|
||||
github.com/kava-labs/iavl v1.2.0-kava.1 h1:HPme3nVrR25XshEFDckMg6fp0tVfpAjTi32/5Iiyuzk=
|
||||
github.com/kava-labs/iavl v1.2.0-kava.1/go.mod h1:HidWWLVAtODJqFD6Hbne2Y0q3SdxByJepHUOeoH4LiI=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
@ -894,8 +918,8 @@ github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
|
||||
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
|
||||
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
|
||||
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
|
||||
@ -925,12 +949,10 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
|
||||
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/linxGnu/grocksdb v1.8.6 h1:O7I6SIGPrypf3f/gmrrLUBQDKfO8uOoYdWf4gLS06tc=
|
||||
github.com/linxGnu/grocksdb v1.8.6/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY=
|
||||
github.com/linxGnu/grocksdb v1.8.13 h1:X3Id7Obhf8qLY9WPc4LmmtIyabmdDf810XSFDnLlW7E=
|
||||
github.com/linxGnu/grocksdb v1.8.13/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
|
||||
github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
|
||||
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
@ -1067,6 +1089,8 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
@ -1077,8 +1101,8 @@ github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9
|
||||
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
|
||||
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
|
||||
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU=
|
||||
github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67 h1:jik8PHtAIsPlCRJjJzl4udgEf7hawInF9texMeO2jrU=
|
||||
github.com/petermattis/goid v0.0.0-20231207134359-e60b3f734c67/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
@ -1109,8 +1133,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
@ -1127,8 +1151,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
|
||||
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
|
||||
@ -1145,8 +1169,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo=
|
||||
github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
@ -1159,6 +1183,10 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
|
||||
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
|
||||
@ -1182,6 +1210,8 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
@ -1193,18 +1223,16 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
|
||||
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
|
||||
github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
|
||||
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
|
||||
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
@ -1214,8 +1242,9 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
@ -1227,8 +1256,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
@ -1236,8 +1266,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
|
||||
github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME=
|
||||
github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
|
||||
github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
|
||||
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
|
||||
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
|
||||
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
@ -1308,22 +1338,30 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
|
||||
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
|
||||
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
|
||||
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
|
||||
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
|
||||
go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
|
||||
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
|
||||
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
|
||||
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
|
||||
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
|
||||
go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
|
||||
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
|
||||
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU=
|
||||
go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
@ -1452,8 +1490,8 @@ golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfS
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -1479,8 +1517,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri
|
||||
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
|
||||
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
|
||||
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
|
||||
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
|
||||
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -1765,8 +1803,8 @@ google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ
|
||||
google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
|
||||
google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
|
||||
google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
|
||||
google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
|
||||
google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
|
||||
google.golang.org/api v0.162.0 h1:Vhs54HkaEpkMBdgGdOT2P6F0csGG/vxDS0hWHJzmmps=
|
||||
google.golang.org/api v0.162.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@ -1885,12 +1923,12 @@ google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqw
|
||||
google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
|
||||
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
|
||||
google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
|
||||
google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg=
|
||||
google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
|
||||
@ -1932,8 +1970,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
|
||||
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
|
||||
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
|
||||
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
|
||||
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
@ -1950,8 +1988,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
|
||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
52
golangci.yml
52
golangci.yml
@ -1,52 +0,0 @@
|
||||
run:
|
||||
tests: false
|
||||
# # timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
# timeout: 5m
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- bodyclose
|
||||
- deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
# - errcheck
|
||||
- goconst
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- maligned
|
||||
- misspell
|
||||
- nakedret
|
||||
- prealloc
|
||||
- scopelint
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- misspell
|
||||
- wsl
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- text: "Use of weak random number generator"
|
||||
linters:
|
||||
- gosec
|
||||
- text: "comment on exported var"
|
||||
linters:
|
||||
- golint
|
||||
|
||||
linters-settings:
|
||||
dogsled:
|
||||
max-blank-identifiers: 3
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
@ -36,11 +36,14 @@ $BINARY init validator --chain-id $chainID
|
||||
sed -in-place='' 's/enable = false/enable = true/g' $DATA/config/app.toml
|
||||
|
||||
# Set evm tracer to json
|
||||
sed -in-place='' 's/tracer = ""/tracer = "json"/g' $DATA/config/app.toml
|
||||
sed -in-place='' 's/tracer = ""/tracer = ""/g' $DATA/config/app.toml
|
||||
|
||||
# Enable full error trace to be returned on tx failure
|
||||
# Disable full error trace
|
||||
sed -in-place='' '/iavl-cache-size/a\
|
||||
trace = true' $DATA/config/app.toml
|
||||
trace = false' $DATA/config/app.toml
|
||||
|
||||
# Set min gas prices
|
||||
sed -in-place='' 's/minimum-gas-prices = "0ua0gi"/minimum-gas-prices = "0.01ua0gi,100000neuron"/g' $DATA/config/app.toml
|
||||
|
||||
# Set client chain id
|
||||
sed -in-place='' 's/chain-id = ""/chain-id = "zgchain_8888-1"/g' $DATA/config/client.toml
|
||||
|
62
precompiles/common/common.go
Normal file
62
precompiles/common/common.go
Normal file
@ -0,0 +1,62 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/evmos/ethermint/x/evm/statedb"
|
||||
)
|
||||
|
||||
type PrecompileCommon interface {
|
||||
Abi() *abi.ABI
|
||||
IsTx(string) bool
|
||||
KVGasConfig() storetypes.GasConfig
|
||||
}
|
||||
|
||||
func InitializePrecompileCall(
|
||||
p PrecompileCommon,
|
||||
evm *vm.EVM,
|
||||
contract *vm.Contract,
|
||||
readonly bool,
|
||||
) (
|
||||
ctx sdk.Context,
|
||||
stateDB *statedb.StateDB,
|
||||
method *abi.Method,
|
||||
initialGas storetypes.Gas,
|
||||
args []interface{},
|
||||
err error,
|
||||
) {
|
||||
// parse input
|
||||
if len(contract.Input) < 4 {
|
||||
return sdk.Context{}, nil, nil, uint64(0), nil, vm.ErrExecutionReverted
|
||||
}
|
||||
method, err = p.Abi().MethodById(contract.Input[:4])
|
||||
if err != nil {
|
||||
return sdk.Context{}, nil, nil, uint64(0), nil, vm.ErrExecutionReverted
|
||||
}
|
||||
args, err = method.Inputs.Unpack(contract.Input[4:])
|
||||
if err != nil {
|
||||
return sdk.Context{}, nil, nil, uint64(0), nil, err
|
||||
}
|
||||
// readonly check
|
||||
if readonly && p.IsTx(method.Name) {
|
||||
return sdk.Context{}, nil, nil, uint64(0), nil, errors.New(ErrWriteOnReadOnly)
|
||||
}
|
||||
// get state db and context
|
||||
stateDB, ok := evm.StateDB.(*statedb.StateDB)
|
||||
if !ok {
|
||||
return sdk.Context{}, nil, nil, uint64(0), nil, errors.New(ErrGetStateDB)
|
||||
}
|
||||
ctx, err = stateDB.GetCachedContextForPrecompile()
|
||||
if err != nil {
|
||||
return sdk.Context{}, nil, nil, uint64(0), nil, err
|
||||
}
|
||||
// initial gas
|
||||
initialGas = ctx.GasMeter().GasConsumed()
|
||||
ctx = ctx.WithKVGasConfig(p.KVGasConfig())
|
||||
|
||||
return ctx, stateDB, method, initialGas, args, nil
|
||||
}
|
@ -3,4 +3,6 @@ package common
|
||||
const (
|
||||
ErrGetStateDB = "get EVM StateDB failed"
|
||||
ErrInvalidNumberOfArgs = "invalid number of arguments; expected %d; got: %d"
|
||||
ErrSenderNotOrigin = "msg.sender is not from tx origin"
|
||||
ErrWriteOnReadOnly = "read only call to write functions"
|
||||
)
|
||||
|
18
precompiles/common/utils.go
Normal file
18
precompiles/common/utils.go
Normal file
@ -0,0 +1,18 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"cosmossdk.io/math"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
func ToLowerHexWithoutPrefix(addr common.Address) string {
|
||||
return strings.ToLower(addr.Hex()[2:])
|
||||
}
|
||||
|
||||
// BigIntToLegacyDec converts a uint number (18 decimals) to math.LegacyDec (18 decimals)
|
||||
func BigIntToLegacyDec(x *big.Int) math.LegacyDec {
|
||||
return math.LegacyNewDecFromBigIntWithPrec(x, math.LegacyPrecision)
|
||||
}
|
@ -268,6 +268,46 @@
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [],
|
||||
"name": "params",
|
||||
"outputs": [
|
||||
{
|
||||
"components": [
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "tokensPerVote",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "maxVotesPerSigner",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "maxQuorums",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "epochBlocks",
|
||||
"type": "uint256"
|
||||
},
|
||||
{
|
||||
"internalType": "uint256",
|
||||
"name": "encodedSlices",
|
||||
"type": "uint256"
|
||||
}
|
||||
],
|
||||
"internalType": "struct IDASigners.Params",
|
||||
"name": "",
|
||||
"type": "tuple"
|
||||
}
|
||||
],
|
||||
"stateMutability": "view",
|
||||
"type": "function"
|
||||
},
|
||||
{
|
||||
"inputs": [
|
||||
{
|
||||
|
File diff suppressed because one or more lines are too long
@ -1,16 +1,14 @@
|
||||
package dasigners
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
precompiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
dasignerskeeper "github.com/0glabs/0g-chain/x/dasigners/v1/keeper"
|
||||
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/evmos/ethermint/x/evm/statedb"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -18,6 +16,7 @@ const (
|
||||
|
||||
RequiredGasMax uint64 = 1000_000_000
|
||||
|
||||
DASignersFunctionParams = "params"
|
||||
DASignersFunctionEpochNumber = "epochNumber"
|
||||
DASignersFunctionQuorumCount = "quorumCount"
|
||||
DASignersFunctionGetSigner = "getSigner"
|
||||
@ -32,6 +31,7 @@ const (
|
||||
)
|
||||
|
||||
var RequiredGasBasic = map[string]uint64{
|
||||
DASignersFunctionParams: 1000,
|
||||
DASignersFunctionEpochNumber: 1000,
|
||||
DASignersFunctionQuorumCount: 1000,
|
||||
DASignersFunctionGetSigner: 100000,
|
||||
@ -45,17 +45,8 @@ var RequiredGasBasic = map[string]uint64{
|
||||
DASignersFunctionRegisteredEpoch: 10000,
|
||||
}
|
||||
|
||||
var KVGasConfig storetypes.GasConfig = storetypes.GasConfig{
|
||||
HasCost: 0,
|
||||
DeleteCost: 0,
|
||||
ReadCostFlat: 0,
|
||||
ReadCostPerByte: 0,
|
||||
WriteCostFlat: 0,
|
||||
WriteCostPerByte: 0,
|
||||
IterNextCostFlat: 0,
|
||||
}
|
||||
|
||||
var _ vm.PrecompiledContract = &DASignersPrecompile{}
|
||||
var _ precompiles_common.PrecompileCommon = &DASignersPrecompile{}
|
||||
|
||||
type DASignersPrecompile struct {
|
||||
abi abi.ABI
|
||||
@ -90,33 +81,37 @@ func (d *DASignersPrecompile) RequiredGas(input []byte) uint64 {
|
||||
return RequiredGasMax
|
||||
}
|
||||
|
||||
func (d *DASignersPrecompile) IsTx(method string) bool {
|
||||
switch method {
|
||||
case DASignersFunctionUpdateSocket,
|
||||
DASignersFunctionRegisterSigner,
|
||||
DASignersFunctionRegisterNextEpoch:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DASignersPrecompile) Abi() *abi.ABI {
|
||||
return &d.abi
|
||||
}
|
||||
|
||||
func (d *DASignersPrecompile) KVGasConfig() storetypes.GasConfig {
|
||||
return storetypes.KVGasConfig()
|
||||
}
|
||||
|
||||
// Run implements vm.PrecompiledContract.
|
||||
func (d *DASignersPrecompile) Run(evm *vm.EVM, contract *vm.Contract, readonly bool) ([]byte, error) {
|
||||
// parse input
|
||||
if len(contract.Input) < 4 {
|
||||
return nil, vm.ErrExecutionReverted
|
||||
}
|
||||
method, err := d.abi.MethodById(contract.Input[:4])
|
||||
if err != nil {
|
||||
return nil, vm.ErrExecutionReverted
|
||||
}
|
||||
args, err := method.Inputs.Unpack(contract.Input[4:])
|
||||
ctx, stateDB, method, initialGas, args, err := precompiles_common.InitializePrecompileCall(d, evm, contract, readonly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// get state db and context
|
||||
stateDB, ok := evm.StateDB.(*statedb.StateDB)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrGetStateDB)
|
||||
}
|
||||
ctx := stateDB.GetContext()
|
||||
// reset gas config
|
||||
ctx = ctx.WithKVGasConfig(KVGasConfig)
|
||||
initialGas := ctx.GasMeter().GasConsumed()
|
||||
|
||||
var bz []byte
|
||||
switch method.Name {
|
||||
// queries
|
||||
case DASignersFunctionParams:
|
||||
bz, err = d.Params(ctx, evm, method, args)
|
||||
case DASignersFunctionEpochNumber:
|
||||
bz, err = d.EpochNumber(ctx, evm, method, args)
|
||||
case DASignersFunctionQuorumCount:
|
||||
@ -135,11 +130,11 @@ func (d *DASignersPrecompile) Run(evm *vm.EVM, contract *vm.Contract, readonly b
|
||||
bz, err = d.RegisteredEpoch(ctx, evm, method, args)
|
||||
// txs
|
||||
case DASignersFunctionRegisterSigner:
|
||||
bz, err = d.RegisterSigner(ctx, evm, stateDB, method, args)
|
||||
bz, err = d.RegisterSigner(ctx, evm, stateDB, contract, method, args)
|
||||
case DASignersFunctionRegisterNextEpoch:
|
||||
bz, err = d.RegisterNextEpoch(ctx, evm, stateDB, method, args)
|
||||
bz, err = d.RegisterNextEpoch(ctx, evm, stateDB, contract, method, args)
|
||||
case DASignersFunctionUpdateSocket:
|
||||
bz, err = d.UpdateSocket(ctx, evm, stateDB, method, args)
|
||||
bz, err = d.UpdateSocket(ctx, evm, stateDB, contract, method, args)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -1,6 +1,7 @@
|
||||
package dasigners_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -14,6 +15,7 @@ import (
|
||||
"github.com/0glabs/0g-chain/x/dasigners/v1/types"
|
||||
abci "github.com/cometbft/cometbft/abci/types"
|
||||
"github.com/consensys/gnark-crypto/ecc/bn254"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -23,7 +25,7 @@ import (
|
||||
"cosmossdk.io/math"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/evmos/ethermint/crypto/ethsecp256k1"
|
||||
"github.com/evmos/ethermint/x/evm/statedb"
|
||||
)
|
||||
|
||||
type DASignersTestSuite struct {
|
||||
@ -44,8 +46,7 @@ func (suite *DASignersTestSuite) AddDelegation(from string, to string, amount ma
|
||||
suite.Require().NoError(err)
|
||||
validator, found := suite.StakingKeeper.GetValidator(suite.Ctx, valAddr)
|
||||
if !found {
|
||||
consPriv, err := ethsecp256k1.GenerateKey()
|
||||
suite.Require().NoError(err)
|
||||
consPriv := ed25519.GenPrivKey()
|
||||
newValidator, err := stakingtypes.NewValidator(valAddr, consPriv.PubKey(), stakingtypes.Description{})
|
||||
suite.Require().NoError(err)
|
||||
validator = newValidator
|
||||
@ -73,8 +74,8 @@ func (suite *DASignersTestSuite) SetupTest() {
|
||||
suite.Assert().EqualValues(ok, true)
|
||||
suite.dasigners = precompile.(*dasignersprecompile.DASignersPrecompile)
|
||||
|
||||
suite.signerOne = testutil.GenSigner()
|
||||
suite.signerTwo = testutil.GenSigner()
|
||||
suite.signerOne = suite.GenSigner()
|
||||
suite.signerTwo = suite.GenSigner()
|
||||
abi, err := abi.JSON(strings.NewReader(dasignersprecompile.DASignersABI))
|
||||
suite.Assert().NoError(err)
|
||||
suite.abi = abi
|
||||
@ -100,7 +101,11 @@ func (suite *DASignersTestSuite) runTx(input []byte, signer *testutil.TestSigner
|
||||
precompiles := suite.EvmKeeper.GetPrecompiles()
|
||||
evm.WithPrecompiles(precompiles, []common.Address{suite.addr})
|
||||
|
||||
return suite.dasigners.Run(evm, contract, false)
|
||||
bz, err := suite.dasigners.Run(evm, contract, false)
|
||||
if err == nil {
|
||||
evm.StateDB.(*statedb.StateDB).Commit()
|
||||
}
|
||||
return bz, err
|
||||
}
|
||||
|
||||
func (suite *DASignersTestSuite) registerSigner(testSigner *testutil.TestSigner, sk *big.Int) *types.Signer {
|
||||
@ -309,8 +314,11 @@ func (suite *DASignersTestSuite) Test_DASigners() {
|
||||
suite.AddDelegation(suite.signerOne.HexAddr, suite.signerOne.HexAddr, keeper.BondedConversionRate.Mul(sdk.NewIntFromUint64(params.TokensPerVote)))
|
||||
suite.AddDelegation(suite.signerTwo.HexAddr, suite.signerOne.HexAddr, keeper.BondedConversionRate.Mul(sdk.NewIntFromUint64(params.TokensPerVote)).Mul(sdk.NewIntFromUint64(2)))
|
||||
// tx test
|
||||
fmt.Println("registering signer 1..")
|
||||
signer1 := suite.registerSigner(suite.signerOne, big.NewInt(1))
|
||||
fmt.Println("registering signer 2..")
|
||||
signer2 := suite.registerSigner(suite.signerTwo, big.NewInt(11))
|
||||
fmt.Println("signers registered..")
|
||||
suite.updateSocket(suite.signerOne, signer1)
|
||||
suite.updateSocket(suite.signerTwo, signer2)
|
||||
suite.registerEpoch(suite.signerOne, big.NewInt(1))
|
||||
@ -370,6 +378,25 @@ func (suite *DASignersTestSuite) Test_DASigners() {
|
||||
|
||||
}
|
||||
|
||||
func (suite *DASignersTestSuite) Test_Params() {
|
||||
input, err := suite.abi.Pack(
|
||||
"params",
|
||||
)
|
||||
suite.Assert().NoError(err)
|
||||
|
||||
bz, err := suite.runTx(input, suite.signerOne, 10000000)
|
||||
suite.Assert().NoError(err)
|
||||
out, err := suite.abi.Methods["params"].Outputs.Unpack(bz)
|
||||
suite.Assert().NoError(err)
|
||||
params := out[0].(dasignersprecompile.IDASignersParams)
|
||||
expected := types.DefaultGenesisState().Params
|
||||
suite.Assert().EqualValues(expected.TokensPerVote, params.TokensPerVote.Uint64())
|
||||
suite.Assert().EqualValues(expected.MaxVotesPerSigner, params.MaxVotesPerSigner.Uint64())
|
||||
suite.Assert().EqualValues(expected.MaxQuorums, params.MaxQuorums.Uint64())
|
||||
suite.Assert().EqualValues(expected.EpochBlocks, params.EpochBlocks.Uint64())
|
||||
suite.Assert().EqualValues(expected.EncodedSlices, params.EncodedSlices.Uint64())
|
||||
}
|
||||
|
||||
func TestKeeperSuite(t *testing.T) {
|
||||
suite.Run(t, new(DASignersTestSuite))
|
||||
}
|
||||
|
@ -4,13 +4,24 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
precompiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
)
|
||||
|
||||
func (d *DASignersPrecompile) Params(ctx sdk.Context, _ *vm.EVM, method *abi.Method, _ []interface{}) ([]byte, error) {
|
||||
params := d.dasignersKeeper.GetParams(ctx)
|
||||
return method.Outputs.Pack(IDASignersParams{
|
||||
TokensPerVote: big.NewInt(int64(params.TokensPerVote)),
|
||||
MaxVotesPerSigner: big.NewInt(int64(params.MaxVotesPerSigner)),
|
||||
MaxQuorums: big.NewInt(int64(params.MaxQuorums)),
|
||||
EpochBlocks: big.NewInt(int64(params.EpochBlocks)),
|
||||
EncodedSlices: big.NewInt(int64(params.EncodedSlices)),
|
||||
})
|
||||
}
|
||||
|
||||
func (d *DASignersPrecompile) EpochNumber(ctx sdk.Context, _ *vm.EVM, method *abi.Method, _ []interface{}) ([]byte, error) {
|
||||
epochNumber, err := d.dasignersKeeper.GetEpochNumber(ctx)
|
||||
if err != nil {
|
||||
@ -49,9 +60,9 @@ func (d *DASignersPrecompile) GetSigner(ctx sdk.Context, _ *vm.EVM, method *abi.
|
||||
|
||||
func (d *DASignersPrecompile) IsSigner(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
}
|
||||
account := ToLowerHexWithoutPrefix(args[0].(common.Address))
|
||||
account := precompiles_common.ToLowerHexWithoutPrefix(args[0].(common.Address))
|
||||
_, found, err := d.dasignersKeeper.GetSigner(ctx, account)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -61,9 +72,9 @@ func (d *DASignersPrecompile) IsSigner(ctx sdk.Context, _ *vm.EVM, method *abi.M
|
||||
|
||||
func (d *DASignersPrecompile) RegisteredEpoch(ctx sdk.Context, _ *vm.EVM, method *abi.Method, args []interface{}) ([]byte, error) {
|
||||
if len(args) != 2 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 2, len(args))
|
||||
}
|
||||
account := ToLowerHexWithoutPrefix(args[0].(common.Address))
|
||||
account := precompiles_common.ToLowerHexWithoutPrefix(args[0].(common.Address))
|
||||
epoch := args[1].(*big.Int).Uint64()
|
||||
_, found, err := d.dasignersKeeper.GetRegistration(ctx, epoch, account)
|
||||
if err != nil {
|
||||
|
@ -1,24 +1,37 @@
|
||||
package dasigners
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/evmos/ethermint/x/evm/statedb"
|
||||
|
||||
precompiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
)
|
||||
|
||||
func (d *DASignersPrecompile) RegisterSigner(ctx sdk.Context, evm *vm.EVM, stateDB *statedb.StateDB, method *abi.Method, args []interface{}) ([]byte, error) {
|
||||
func (d *DASignersPrecompile) RegisterSigner(
|
||||
ctx sdk.Context,
|
||||
evm *vm.EVM,
|
||||
stateDB *statedb.StateDB,
|
||||
contract *vm.Contract,
|
||||
method *abi.Method,
|
||||
args []interface{},
|
||||
) ([]byte, error) {
|
||||
msg, err := NewMsgRegisterSigner(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// validation
|
||||
sender := ToLowerHexWithoutPrefix(evm.Origin)
|
||||
sender := precompiles_common.ToLowerHexWithoutPrefix(evm.Origin)
|
||||
if sender != msg.Signer.Account {
|
||||
return nil, fmt.Errorf(ErrInvalidSender, sender, msg.Signer.Account)
|
||||
}
|
||||
if contract.CallerAddress != evm.Origin {
|
||||
return nil, errors.New(precompiles_common.ErrSenderNotOrigin)
|
||||
}
|
||||
// execute
|
||||
_, err = d.dasignersKeeper.RegisterSigner(sdk.WrapSDKContext(ctx), msg)
|
||||
if err != nil {
|
||||
@ -32,11 +45,22 @@ func (d *DASignersPrecompile) RegisterSigner(ctx sdk.Context, evm *vm.EVM, state
|
||||
return method.Outputs.Pack()
|
||||
}
|
||||
|
||||
func (d *DASignersPrecompile) RegisterNextEpoch(ctx sdk.Context, evm *vm.EVM, stateDB *statedb.StateDB, method *abi.Method, args []interface{}) ([]byte, error) {
|
||||
msg, err := NewMsgRegisterNextEpoch(args, ToLowerHexWithoutPrefix(evm.Origin))
|
||||
func (d *DASignersPrecompile) RegisterNextEpoch(
|
||||
ctx sdk.Context,
|
||||
evm *vm.EVM,
|
||||
stateDB *statedb.StateDB,
|
||||
contract *vm.Contract,
|
||||
method *abi.Method,
|
||||
args []interface{},
|
||||
) ([]byte, error) {
|
||||
msg, err := NewMsgRegisterNextEpoch(args, precompiles_common.ToLowerHexWithoutPrefix(evm.Origin))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// validation
|
||||
if contract.CallerAddress != evm.Origin {
|
||||
return nil, errors.New(precompiles_common.ErrSenderNotOrigin)
|
||||
}
|
||||
// execute
|
||||
_, err = d.dasignersKeeper.RegisterNextEpoch(sdk.WrapSDKContext(ctx), msg)
|
||||
if err != nil {
|
||||
@ -45,11 +69,22 @@ func (d *DASignersPrecompile) RegisterNextEpoch(ctx sdk.Context, evm *vm.EVM, st
|
||||
return method.Outputs.Pack()
|
||||
}
|
||||
|
||||
func (d *DASignersPrecompile) UpdateSocket(ctx sdk.Context, evm *vm.EVM, stateDB *statedb.StateDB, method *abi.Method, args []interface{}) ([]byte, error) {
|
||||
msg, err := NewMsgUpdateSocket(args, ToLowerHexWithoutPrefix(evm.Origin))
|
||||
func (d *DASignersPrecompile) UpdateSocket(
|
||||
ctx sdk.Context,
|
||||
evm *vm.EVM,
|
||||
stateDB *statedb.StateDB,
|
||||
contract *vm.Contract,
|
||||
method *abi.Method,
|
||||
args []interface{},
|
||||
) ([]byte, error) {
|
||||
msg, err := NewMsgUpdateSocket(args, precompiles_common.ToLowerHexWithoutPrefix(evm.Origin))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// validation
|
||||
if contract.CallerAddress != evm.Origin {
|
||||
return nil, errors.New(precompiles_common.ErrSenderNotOrigin)
|
||||
}
|
||||
// execute
|
||||
_, err = d.dasignersKeeper.UpdateSocket(sdk.WrapSDKContext(ctx), msg)
|
||||
if err != nil {
|
||||
|
@ -3,9 +3,8 @@ package dasigners
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
precopmiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
precompiles_common "github.com/0glabs/0g-chain/precompiles/common"
|
||||
dasignerstypes "github.com/0glabs/0g-chain/x/dasigners/v1/types"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
@ -27,6 +26,14 @@ type IDASignersSignerDetail = struct {
|
||||
PkG2 BN254G2Point "json:\"pkG2\""
|
||||
}
|
||||
|
||||
type IDASignersParams = struct {
|
||||
TokensPerVote *big.Int "json:\"tokensPerVote\""
|
||||
MaxVotesPerSigner *big.Int "json:\"maxVotesPerSigner\""
|
||||
MaxQuorums *big.Int "json:\"maxQuorums\""
|
||||
EpochBlocks *big.Int "json:\"epochBlocks\""
|
||||
EncodedSlices *big.Int "json:\"encodedSlices\""
|
||||
}
|
||||
|
||||
func NewBN254G1Point(b []byte) BN254G1Point {
|
||||
return BN254G1Point{
|
||||
X: new(big.Int).SetBytes(b[:32]),
|
||||
@ -65,7 +72,7 @@ func SerializeG2(p BN254G2Point) []byte {
|
||||
|
||||
func NewQueryQuorumCountRequest(args []interface{}) (*dasignerstypes.QueryQuorumCountRequest, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
}
|
||||
|
||||
return &dasignerstypes.QueryQuorumCountRequest{
|
||||
@ -75,21 +82,21 @@ func NewQueryQuorumCountRequest(args []interface{}) (*dasignerstypes.QueryQuorum
|
||||
|
||||
func NewQuerySignerRequest(args []interface{}) (*dasignerstypes.QuerySignerRequest, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
}
|
||||
accounts := args[0].([]common.Address)
|
||||
req := dasignerstypes.QuerySignerRequest{
|
||||
Accounts: make([]string, len(accounts)),
|
||||
}
|
||||
for i, account := range accounts {
|
||||
req.Accounts[i] = ToLowerHexWithoutPrefix(account)
|
||||
req.Accounts[i] = precompiles_common.ToLowerHexWithoutPrefix(account)
|
||||
}
|
||||
return &req, nil
|
||||
}
|
||||
|
||||
func NewQueryEpochQuorumRequest(args []interface{}) (*dasignerstypes.QueryEpochQuorumRequest, error) {
|
||||
if len(args) != 2 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 2, len(args))
|
||||
}
|
||||
|
||||
return &dasignerstypes.QueryEpochQuorumRequest{
|
||||
@ -100,7 +107,7 @@ func NewQueryEpochQuorumRequest(args []interface{}) (*dasignerstypes.QueryEpochQ
|
||||
|
||||
func NewQueryEpochQuorumRowRequest(args []interface{}) (*dasignerstypes.QueryEpochQuorumRowRequest, error) {
|
||||
if len(args) != 3 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 3, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 3, len(args))
|
||||
}
|
||||
|
||||
return &dasignerstypes.QueryEpochQuorumRowRequest{
|
||||
@ -112,7 +119,7 @@ func NewQueryEpochQuorumRowRequest(args []interface{}) (*dasignerstypes.QueryEpo
|
||||
|
||||
func NewQueryAggregatePubkeyG1Request(args []interface{}) (*dasignerstypes.QueryAggregatePubkeyG1Request, error) {
|
||||
if len(args) != 3 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 3, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 3, len(args))
|
||||
}
|
||||
|
||||
return &dasignerstypes.QueryAggregatePubkeyG1Request{
|
||||
@ -131,19 +138,15 @@ func NewIDASignersSignerDetail(signer *dasignerstypes.Signer) IDASignersSignerDe
|
||||
}
|
||||
}
|
||||
|
||||
func ToLowerHexWithoutPrefix(addr common.Address) string {
|
||||
return strings.ToLower(addr.Hex()[2:])
|
||||
}
|
||||
|
||||
func NewMsgRegisterSigner(args []interface{}) (*dasignerstypes.MsgRegisterSigner, error) {
|
||||
if len(args) != 2 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 2, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 2, len(args))
|
||||
}
|
||||
|
||||
signer := args[0].(IDASignersSignerDetail)
|
||||
return &dasignerstypes.MsgRegisterSigner{
|
||||
Signer: &dasignerstypes.Signer{
|
||||
Account: ToLowerHexWithoutPrefix(signer.Signer),
|
||||
Account: precompiles_common.ToLowerHexWithoutPrefix(signer.Signer),
|
||||
Socket: signer.Socket,
|
||||
PubkeyG1: SerializeG1(signer.PkG1),
|
||||
PubkeyG2: SerializeG2(signer.PkG2),
|
||||
@ -154,7 +157,7 @@ func NewMsgRegisterSigner(args []interface{}) (*dasignerstypes.MsgRegisterSigner
|
||||
|
||||
func NewMsgRegisterNextEpoch(args []interface{}, account string) (*dasignerstypes.MsgRegisterNextEpoch, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
}
|
||||
|
||||
return &dasignerstypes.MsgRegisterNextEpoch{
|
||||
@ -165,7 +168,7 @@ func NewMsgRegisterNextEpoch(args []interface{}, account string) (*dasignerstype
|
||||
|
||||
func NewMsgUpdateSocket(args []interface{}, account string) (*dasignerstypes.MsgUpdateSocket, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf(precopmiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
return nil, fmt.Errorf(precompiles_common.ErrInvalidNumberOfArgs, 1, len(args))
|
||||
}
|
||||
|
||||
return &dasignerstypes.MsgUpdateSocket{
|
||||
|
18
precompiles/interfaces/.solhint.json
Normal file
18
precompiles/interfaces/.solhint.json
Normal file
@ -0,0 +1,18 @@
|
||||
{
|
||||
"extends": "solhint:recommended",
|
||||
"plugins": ["prettier"],
|
||||
"rules": {
|
||||
"avoid-low-level-calls": "off",
|
||||
"compiler-version": "off",
|
||||
"gas-custom-errors": "off",
|
||||
"explicit-types": ["warn", "implicit"],
|
||||
"func-visibility": ["warn", { "ignoreConstructors": true }],
|
||||
"max-states-count": "off",
|
||||
"no-empty-blocks": "off",
|
||||
"no-global-import": "off",
|
||||
"no-inline-assembly": "off",
|
||||
"not-rely-on-time": "off",
|
||||
"prettier/prettier": "error",
|
||||
"reason-string": "off"
|
||||
}
|
||||
}
|
88
precompiles/interfaces/contracts/IDASigners.sol
Normal file
88
precompiles/interfaces/contracts/IDASigners.sol
Normal file
@ -0,0 +1,88 @@
|
||||
// SPDX-License-Identifier: LGPL-3.0-only
|
||||
pragma solidity >=0.8.0;
|
||||
|
||||
library BN254 {
|
||||
struct G1Point {
|
||||
uint X;
|
||||
uint Y;
|
||||
}
|
||||
|
||||
// Encoding of field elements is: X[1] * i + X[0]
|
||||
struct G2Point {
|
||||
uint[2] X;
|
||||
uint[2] Y;
|
||||
}
|
||||
}
|
||||
|
||||
interface IDASigners {
|
||||
/*=== struct ===*/
|
||||
struct SignerDetail {
|
||||
address signer;
|
||||
string socket;
|
||||
BN254.G1Point pkG1;
|
||||
BN254.G2Point pkG2;
|
||||
}
|
||||
|
||||
struct Params {
|
||||
uint tokensPerVote;
|
||||
uint maxVotesPerSigner;
|
||||
uint maxQuorums;
|
||||
uint epochBlocks;
|
||||
uint encodedSlices;
|
||||
}
|
||||
|
||||
/*=== event ===*/
|
||||
event NewSigner(
|
||||
address indexed signer,
|
||||
BN254.G1Point pkG1,
|
||||
BN254.G2Point pkG2
|
||||
);
|
||||
event SocketUpdated(address indexed signer, string socket);
|
||||
|
||||
/*=== function ===*/
|
||||
function params() external view returns (Params memory);
|
||||
|
||||
function epochNumber() external view returns (uint);
|
||||
|
||||
function quorumCount(uint _epoch) external view returns (uint);
|
||||
|
||||
function isSigner(address _account) external view returns (bool);
|
||||
|
||||
function getSigner(
|
||||
address[] memory _account
|
||||
) external view returns (SignerDetail[] memory);
|
||||
|
||||
function getQuorum(
|
||||
uint _epoch,
|
||||
uint _quorumId
|
||||
) external view returns (address[] memory);
|
||||
|
||||
function getQuorumRow(
|
||||
uint _epoch,
|
||||
uint _quorumId,
|
||||
uint32 _rowIndex
|
||||
) external view returns (address);
|
||||
|
||||
function registerSigner(
|
||||
SignerDetail memory _signer,
|
||||
BN254.G1Point memory _signature
|
||||
) external;
|
||||
|
||||
function updateSocket(string memory _socket) external;
|
||||
|
||||
function registeredEpoch(
|
||||
address _account,
|
||||
uint _epoch
|
||||
) external view returns (bool);
|
||||
|
||||
function registerNextEpoch(BN254.G1Point memory _signature) external;
|
||||
|
||||
function getAggPkG1(
|
||||
uint _epoch,
|
||||
uint _quorumId,
|
||||
bytes memory _quorumBitmap
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (BN254.G1Point memory aggPkG1, uint total, uint hit);
|
||||
}
|
415
precompiles/interfaces/contracts/IStaking.sol
Normal file
415
precompiles/interfaces/contracts/IStaking.sol
Normal file
@ -0,0 +1,415 @@
|
||||
// SPDX-License-Identifier: LGPL-3.0-only
|
||||
pragma solidity >=0.8.0;
|
||||
|
||||
/**
|
||||
* @dev Description defines a validator description
|
||||
*/
|
||||
struct Description {
|
||||
string moniker;
|
||||
string identity;
|
||||
string website;
|
||||
string securityContact;
|
||||
string details;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev CommissionRates defines the initial commission rates to be used for creating
|
||||
* a validator.
|
||||
*/
|
||||
struct CommissionRates {
|
||||
uint rate; // 18 decimals
|
||||
uint maxRate; // 18 decimals
|
||||
uint maxChangeRate; // 18 decimals
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev Commission defines the commission parameters.
|
||||
*/
|
||||
struct Commission {
|
||||
CommissionRates commissionRates;
|
||||
uint updateTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev Validator defines a validator.
|
||||
*/
|
||||
struct Validator {
|
||||
address operatorAddress;
|
||||
string consensusPubkey;
|
||||
bool jailed;
|
||||
BondStatus status;
|
||||
uint tokens;
|
||||
uint delegatorShares; // 18 decimals
|
||||
Description description;
|
||||
int64 unbondingHeight;
|
||||
int64 unbondingTime;
|
||||
Commission commission;
|
||||
uint minSelfDelegation;
|
||||
int64 unbondingOnHoldRefCount;
|
||||
uint64[] unbondingIds;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev Delegation represents the bond with tokens held by an account.
|
||||
*/
|
||||
struct Delegation {
|
||||
address delegatorAddress;
|
||||
address validatorAddress;
|
||||
uint shares; // 18 decimals
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev RedelegationResponse is equivalent to a Redelegation except that its entries
|
||||
* contain a balance in addition to shares which is more suitable for client
|
||||
* responses.
|
||||
*/
|
||||
struct DelegationResponse {
|
||||
Delegation delegation;
|
||||
uint balance;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev UnbondingDelegationEntry defines an unbonding object with relevant metadata.
|
||||
*/
|
||||
struct UnbondingDelegationEntry {
|
||||
int64 creationHeight;
|
||||
int64 completionTime;
|
||||
uint initialBalance;
|
||||
uint balance;
|
||||
uint64 unbondingId;
|
||||
int64 unbondingOnHoldRefCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev UnbondingDelegation stores all of a single delegator's unbonding bonds
|
||||
* for a single validator in an time-ordered list.
|
||||
*/
|
||||
struct UnbondingDelegation {
|
||||
address delegatorAddress;
|
||||
address validatorAddress;
|
||||
UnbondingDelegationEntry[] entries;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev RedelegationResponse is equivalent to a Redelegation except that its entries
|
||||
* contain a balance in addition to shares which is more suitable for client
|
||||
* responses.
|
||||
*/
|
||||
struct RedelegationResponse {
|
||||
Redelegation redelegation;
|
||||
RedelegationEntryResponse[] entries;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev Redelegation contains the list of a particular delegator's redelegating bonds
|
||||
* from a particular source validator to a particular destination validator.
|
||||
*/
|
||||
struct Redelegation {
|
||||
address delegatorAddress;
|
||||
address validatorSrcAddress;
|
||||
address validatorDstAddress;
|
||||
RedelegationEntry[] entries;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev RedelegationEntry defines a redelegation object with relevant metadata.
|
||||
*/
|
||||
struct RedelegationEntry {
|
||||
int64 creationHeight;
|
||||
int64 completionTime;
|
||||
uint initialBalance;
|
||||
uint sharesDst; // 18 decimals
|
||||
uint64 unbondingId;
|
||||
int64 unbondingOnHoldRefCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev RedelegationEntryResponse is equivalent to a RedelegationEntry except that it
|
||||
* contains a balance in addition to shares which is more suitable for client
|
||||
* responses.
|
||||
*/
|
||||
struct RedelegationEntryResponse {
|
||||
RedelegationEntry redelegationEntry;
|
||||
uint balance;
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev Params defines the parameters for the x/staking module.
|
||||
*/
|
||||
struct Params {
|
||||
int64 unbondingTime;
|
||||
uint32 maxValidators;
|
||||
uint32 maxEntries;
|
||||
uint32 historicalEntries;
|
||||
string bondDenom;
|
||||
uint minCommissionRate; // 18 decimals
|
||||
}
|
||||
|
||||
/**
|
||||
* @dev BondStatus is the status of a validator.
|
||||
*/
|
||||
enum BondStatus {
|
||||
Unspecified,
|
||||
Unbonded,
|
||||
Unbonding,
|
||||
Bonded
|
||||
}
|
||||
|
||||
struct NullableUint {
|
||||
bool isNull;
|
||||
uint value;
|
||||
}
|
||||
|
||||
struct PageRequest {
|
||||
bytes key;
|
||||
uint64 offset;
|
||||
uint64 limit;
|
||||
bool countTotal;
|
||||
bool reverse;
|
||||
}
|
||||
|
||||
struct PageResponse {
|
||||
bytes nextKey;
|
||||
uint64 total;
|
||||
}
|
||||
|
||||
interface IStaking {
|
||||
/*=== cosmos tx ===*/
|
||||
|
||||
/**
|
||||
* @dev CreateValidator defines a method for creating a new validator for tx sender.
|
||||
* cosmos grpc: rpc CreateValidator(MsgCreateValidator) returns (MsgCreateValidatorResponse);
|
||||
*/
|
||||
function createValidator(
|
||||
Description memory description,
|
||||
CommissionRates memory commission,
|
||||
uint minSelfDelegation,
|
||||
string memory pubkey, // 0gchaind tendermint show-validator
|
||||
uint value
|
||||
) external;
|
||||
|
||||
/**
|
||||
* @dev EditValidator defines a method for editing an existing validator (tx sender).
|
||||
* cosmos grpc: rpc EditValidator(MsgEditValidator) returns (MsgEditValidatorResponse);
|
||||
*/
|
||||
function editValidator(
|
||||
Description memory description,
|
||||
NullableUint memory commissionRate,
|
||||
NullableUint memory minSelfDelegation
|
||||
) external;
|
||||
|
||||
/**
|
||||
* @dev Delegate defines a method for performing a delegation of coins from a delegator to a validator.abi
|
||||
* The delegator is tx sender.
|
||||
* cosmos grpc: rpc Delegate(MsgDelegate) returns (MsgDelegateResponse);
|
||||
*/
|
||||
function delegate(
|
||||
address validatorAddress,
|
||||
uint amount // in bond denom
|
||||
) external;
|
||||
|
||||
/**
|
||||
* @dev BeginRedelegate defines a method for performing a redelegationA
|
||||
* of coins from a delegator and source validator to a destination validator.
|
||||
* The delegator is tx sender.
|
||||
* cosmos grpc: rpc BeginRedelegate(MsgBeginRedelegate) returns (MsgBeginRedelegateResponse);
|
||||
*/
|
||||
function beginRedelegate(
|
||||
address validatorSrcAddress,
|
||||
address validatorDstAddress,
|
||||
uint amount // in bond denom
|
||||
) external returns (uint completionTime);
|
||||
|
||||
/**
|
||||
* @dev Undelegate defines a method for performing an undelegation from a
|
||||
* delegate and a validator.
|
||||
* The delegator is tx sender.
|
||||
* cosmos grpc: rpc Undelegate(MsgUndelegate) returns (MsgUndelegateResponse);
|
||||
*/
|
||||
function undelegate(
|
||||
address validatorAddress,
|
||||
uint amount // in bond denom
|
||||
) external returns (uint completionTime);
|
||||
|
||||
/**
|
||||
* @dev CancelUnbondingDelegation defines a method for performing canceling the unbonding delegation
|
||||
* and delegate back to previous validator.
|
||||
* The delegator is tx sender.
|
||||
* Since: cosmos-sdk 0.46
|
||||
* cosmos grpc: rpc CancelUnbondingDelegation(MsgCancelUnbondingDelegation) returns (MsgCancelUnbondingDelegationResponse);
|
||||
*/
|
||||
function cancelUnbondingDelegation(
|
||||
address validatorAddress,
|
||||
uint amount, // in bond denom
|
||||
uint creationHeight
|
||||
) external;
|
||||
|
||||
/**
|
||||
* @dev UpdateParams defines an operation for updating the x/staking module parameters.
|
||||
* Since: cosmos-sdk 0.47
|
||||
* grpc: rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse);
|
||||
*/
|
||||
// Skipped. This function is controlled by governance module.
|
||||
|
||||
/*=== cosmos query ===*/
|
||||
|
||||
/**
|
||||
* @dev Validators queries all validators that match the given status.
|
||||
* cosmos grpc: rpc Validators(QueryValidatorsRequest) returns (QueryValidatorsResponse);
|
||||
*/
|
||||
function validators(
|
||||
string memory status,
|
||||
PageRequest memory pagination
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
Validator[] memory validators,
|
||||
PageResponse memory paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev Validator queries validator info for given validator address.
|
||||
* cosmos grpc: rpc Validator(QueryValidatorRequest) returns (QueryValidatorResponse);
|
||||
*/
|
||||
function validator(
|
||||
address validatorAddress
|
||||
) external view returns (Validator memory validator);
|
||||
|
||||
/**
|
||||
* @dev ValidatorDelegations queries delegate info for given validator.
|
||||
* cosmos grpc: rpc ValidatorDelegations(QueryValidatorDelegationsRequest) returns (QueryValidatorDelegationsResponse);
|
||||
*/
|
||||
function validatorDelegations(
|
||||
address validatorAddr,
|
||||
PageRequest memory pagination
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
DelegationResponse[] memory delegationResponses,
|
||||
PageResponse memory paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev ValidatorUnbondingDelegations queries unbonding delegations of a validator.
|
||||
* cosmos grpc: rpc ValidatorUnbondingDelegations(QueryValidatorUnbondingDelegationsRequest) returns (QueryValidatorUnbondingDelegationsResponse);
|
||||
*/
|
||||
//
|
||||
function validatorUnbondingDelegations(
|
||||
address validatorAddr,
|
||||
PageRequest memory pagination
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
UnbondingDelegation[] memory unbondingResponses,
|
||||
PageResponse memory paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev Delegation queries delegate info for given validator delegator pair.
|
||||
* cosmos grpc: rpc Delegation(QueryDelegationRequest) returns (QueryDelegationResponse);
|
||||
*/
|
||||
function delegation(
|
||||
address delegatorAddr,
|
||||
address validatorAddr
|
||||
) external view returns (Delegation memory delegation, uint balance);
|
||||
|
||||
/**
|
||||
* @dev UnbondingDelegation queries unbonding info for given validator delegator pair.
|
||||
* cosmos grpc: rpc UnbondingDelegation(QueryUnbondingDelegationRequest) returns (QueryUnbondingDelegationResponse);
|
||||
*/
|
||||
function unbondingDelegation(
|
||||
address delegatorAddr,
|
||||
address validatorAddr
|
||||
) external view returns (UnbondingDelegation memory unbond);
|
||||
|
||||
/**
|
||||
* @dev DelegatorDelegations queries all delegations of a given delegator address.
|
||||
*
|
||||
* cosmos grpc: rpc DelegatorDelegations(QueryDelegatorDelegationsRequest) returns (QueryDelegatorDelegationsResponse);
|
||||
*/
|
||||
function delegatorDelegations(
|
||||
address delegatorAddr,
|
||||
PageRequest memory pagination
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
DelegationResponse[] memory delegationResponses,
|
||||
PageResponse memory paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev DelegatorUnbondingDelegations queries all unbonding delegations of a given delegator address.
|
||||
* cosmos grpc: rpc DelegatorUnbondingDelegations(QueryDelegatorUnbondingDelegationsRequest)
|
||||
*/
|
||||
function delegatorUnbondingDelegations(
|
||||
address delegatorAddr,
|
||||
PageRequest memory pagination
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
UnbondingDelegation[] memory unbondingResponses,
|
||||
PageResponse memory paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev Redelegations queries redelegations of given address.
|
||||
*
|
||||
* grpc: rpc Redelegations(QueryRedelegationsRequest) returns (QueryRedelegationsResponse);
|
||||
*/
|
||||
function redelegations(
|
||||
address delegatorAddress,
|
||||
address srcValidatorAddress,
|
||||
address dstValidatorAddress,
|
||||
PageRequest calldata pageRequest
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
RedelegationResponse[] calldata redelegationResponses,
|
||||
PageResponse calldata paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev DelegatorValidators queries all validators info for given delegator address.
|
||||
* cosmos grpc: rpc DelegatorValidators(QueryDelegatorValidatorsRequest) returns (QueryDelegatorValidatorsResponse);
|
||||
*/
|
||||
function delegatorValidators(
|
||||
address delegatorAddr,
|
||||
PageRequest memory pagination
|
||||
)
|
||||
external
|
||||
view
|
||||
returns (
|
||||
Validator[] memory validators,
|
||||
PageResponse memory paginationResult
|
||||
);
|
||||
|
||||
/**
|
||||
* @dev DelegatorValidator queries validator info for given delegator validator pair.
|
||||
* cosmos grpc: rpc DelegatorValidator(QueryDelegatorValidatorRequest) returns (QueryDelegatorValidatorResponse);
|
||||
*/
|
||||
function delegatorValidator(
|
||||
address delegatorAddr,
|
||||
address validatorAddr
|
||||
) external view returns (Validator memory validator);
|
||||
|
||||
/**
|
||||
* @dev Pool queries the pool info.
|
||||
* cosmos grpc: rpc Pool(QueryPoolRequest) returns (QueryPoolResponse);
|
||||
*/
|
||||
function pool()
|
||||
external
|
||||
view
|
||||
returns (uint notBondedTokens, uint bondedTokens);
|
||||
|
||||
/**
|
||||
* @dev Parameters queries the staking parameters.
|
||||
* cosmos grpc: rpc Params(QueryParamsRequest) returns (QueryParamsResponse);
|
||||
*/
|
||||
function params() external view returns (Params memory params);
|
||||
}
|
57
precompiles/interfaces/contracts/IWrappedA0GIBase.sol
Normal file
57
precompiles/interfaces/contracts/IWrappedA0GIBase.sol
Normal file
@ -0,0 +1,57 @@
|
||||
// SPDX-License-Identifier: LGPL-3.0-only
|
||||
pragma solidity >=0.8.0;
|
||||
|
||||
// Per-minter mint accounting, as returned by IWrappedA0GIBase.minterSupply.
struct Supply {
    // maximum amount this minter is allowed to mint (its mint cap)
    uint256 cap;
    // supply credited to the minter when its cap was configured
    uint256 initialSupply;
    // current outstanding mint supply attributed to the minter
    uint256 supply;
}
|
||||
|
||||
/**
 * @title WrappedA0GIBase is a precompile for wrapped a0gi(wA0GI), it enables wA0GI mint/burn native 0g token directly.
 */
interface IWrappedA0GIBase {
    /**
     * @dev set the wA0GI address.
     * It is designed to be called by governance module only so it's not implemented at EVM precompile side.
     * @param addr address of wA0GI
     */
    // function setWA0GI(address addr) external;

    /**
     * @dev get the wA0GI address.
     * @return the address currently registered as wA0GI
     */
    function getWA0GI() external view returns (address);

    /**
     * @dev set the cap and initial supply for a minter.
     * It is designed to be called by governance module only so it's not implemented at EVM precompile side.
     * @param minter minter address
     * @param cap mint cap
     * @param initialSupply initial mint supply
     */
    // function setMinterCap(address minter, uint256 cap, uint256 initialSupply) external;

    /**
     * @dev get the mint supply of given address
     * @param minter minter address
     * @return the minter's cap, initial supply and current supply
     */
    function minterSupply(address minter) external view returns (Supply memory);

    /**
     * @dev mint a0gi to this precompile, add corresponding amount to minter's mint supply.
     * If sender's final mint supply exceeds its mint cap, the transaction will revert.
     * Can only be called by WA0GI.
     * @param minter minter address
     * @param amount amount to mint
     */
    function mint(address minter, uint256 amount) external;

    /**
     * @dev burn given amount of a0gi on behalf of minter, reduce corresponding amount from sender's mint supply.
     * Can only be called by WA0GI.
     * @param minter minter address
     * @param amount amount to burn
     */
    function burn(address minter, uint256 amount) external;
}
|
33
precompiles/interfaces/hardhat.config.ts
Normal file
33
precompiles/interfaces/hardhat.config.ts
Normal file
@ -0,0 +1,33 @@
|
||||
import "hardhat-abi-exporter";
|
||||
import { HardhatUserConfig } from "hardhat/types";
|
||||
|
||||
const config: HardhatUserConfig = {
|
||||
paths: {
|
||||
artifacts: "build/artifacts",
|
||||
cache: "build/cache",
|
||||
sources: "contracts",
|
||||
},
|
||||
solidity: {
|
||||
compilers: [
|
||||
{
|
||||
version: "0.8.20",
|
||||
settings: {
|
||||
evmVersion: "istanbul",
|
||||
optimizer: {
|
||||
enabled: true,
|
||||
runs: 200,
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
abiExporter: {
|
||||
path: "./abis",
|
||||
runOnCompile: true,
|
||||
clear: true,
|
||||
flat: true,
|
||||
format: "json",
|
||||
},
|
||||
};
|
||||
|
||||
export default config;
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user