Compare commits


339 Commits

Author SHA1 Message Date
0g-wh
a1dc4a4a77
Merge pull request #74 from 0g-wh/dev
rebase to kava 0.27
2024-10-14 15:27:58 +08:00
0g-wh
840deea660 bump ethermint
2024-10-14 07:23:48 +00:00
0g-wh
6c3360f102 fix tests 2024-09-27 14:26:07 +00:00
0g-wh
80b2dacbc2 fix evm denom 2024-09-27 07:54:20 +00:00
0g-wh
1152537679 rename kava to 0g 2024-09-27 03:04:45 +00:00
0g-wh
5bd6ac39ee fix 2024-09-26 09:42:10 +00:00
0g-wh
e0fcd07a08 Merge branch 'dev' of github.com:0glabs/0g-chain into dev 2024-09-25 15:43:41 +00:00
0g-wh
70ac592012 fix 2024-09-25 15:35:37 +00:00
0g-wh
de22587a5b fix 2024-09-25 15:31:20 +00:00
MiniFrenchBread
0c02c27a9d feat: DASigners change params (#67)
* feat: use gov to manage dasigners params

* feat: evm precompile query func

* test: unit test

* feat: remove epoch and block height hard check

* feat: add params event
2024-09-25 15:23:46 +00:00
0g-wh
4409bfc996 upgrade to ethermint v3.0.3 2024-09-25 15:23:46 +00:00
0g-wh
723241f484 Dockerfile for node 2024-09-25 15:23:35 +00:00
Solovyov1796
84d1a89bec fix wrong path of proto 2024-09-25 15:23:35 +00:00
Solovyov1796
94ddf20305 fix test build break 2024-09-25 15:23:35 +00:00
Solovyov1796
4ebbb886bf add conuncil back 2024-09-25 15:22:54 +00:00
MiniFrenchBread
57943ec0e0 fix: missing designers (#62)
* fix: add dasigners back

* test: remove manually initialize genesis

* feat: generate all missing epochs on begin block; only panic on smaller block height

* chore: add logs, fix EpochBlocks
2024-09-25 15:22:42 +00:00
MiniFrenchBread
04ce67f6a9 fix: designers; test: designers, precompile (#59)
* test: dasigners test

* test: genesis

* fix: abci; test: abci

* test: types

* test: keeper test

* test: util

* test: dasigners precompile

* chore: remove log
2024-09-25 15:22:10 +00:00
0g-wh
500e66733d fix wasm static link (#57) 2024-09-25 15:21:53 +00:00
0g-wh
8b691e61f8 Update upload-release-assets.yml 2024-09-25 15:21:53 +00:00
0g-wh
a0bdd2a142 add ibcwasmtypes to upgrades.go 2024-09-25 15:21:53 +00:00
0g-wh
53dcea2867 clean code 2024-09-25 15:20:46 +00:00
aeryz
d31a599c60 feat: add 08-wasm module
Signed-off-by: aeryz <abdullaheryz@protonmail.com>
2024-09-25 15:20:46 +00:00
0g-wh
07cf4ad258 fix cmd/keys 2024-09-25 15:19:44 +00:00
0g-wh
cb4e6e006e fix review issues 2024-09-25 15:19:44 +00:00
0g-wh
0e37d518ec prepare upgrade 2024-09-25 15:18:37 +00:00
0g-wh
822e374be6 rebase to kava cosmos 0.47 upgrade
rename

rename

tidy

clean code
2024-09-25 15:18:12 +00:00
0g-wh
9ca8359202 add Upload Release Assets workflow (#49)
* Create upload-release-assets.yml
2024-09-25 15:15:00 +00:00
Solovyov1796
32bcc7f4e3 update gitignore 2024-09-25 15:15:00 +00:00
Solovyov1796
f50a429527 merge testnet script 2024-09-25 15:15:00 +00:00
0g-wh
8ff2277450 add cosmovisor init script 2024-09-25 15:14:49 +00:00
Solovyov1796
cdf029c87a use 0glabs' cometbft 2024-09-25 15:14:48 +00:00
0xsatoshi
5f9325c2a0 enable vesting msgs 2024-09-25 15:14:33 +00:00
0xsatoshi
5f4f1851cb fix 2024-09-25 15:14:33 +00:00
0xsatoshi
4c28427089 fix 2024-09-25 15:14:33 +00:00
MiniFrenchBread
0f40b721ee refactor: epoch quorum storage 2024-09-25 15:14:33 +00:00
MiniFrenchBread
ec3733a2c6 feat: getQuorumRow 2024-09-25 15:14:33 +00:00
Solovyov1796
8df7625ac1 keep the EthSecp256k1 from cosmos for compatible 2024-09-25 15:14:33 +00:00
Solovyov1796
31c96eeb93 recover "rename denoms" in 3 files 2024-09-25 15:14:33 +00:00
Solovyov1796
ac1af4ae92 use chaincfg.MakeCoinForGasDenom 2024-09-25 15:14:33 +00:00
Solovyov1796
0d54bb9202 custom inflation calculation function 2024-09-25 15:14:33 +00:00
MiniFrenchBread
73158cd738 chore: remove tmp output 2024-09-25 15:14:33 +00:00
MiniFrenchBread
73b7d800a3 fix: decimals 2024-09-25 15:14:33 +00:00
MiniFrenchBread
27ddc91956 refactor: delegator 2024-09-25 15:14:33 +00:00
MiniFrenchBread
9962b7b0db fix: localtestnet.sh 2024-09-25 15:14:33 +00:00
MiniFrenchBread
f415fb1332 tidy 2024-09-25 15:14:33 +00:00
MiniFrenchBread
28b9c07e02 merge testnet/v0.1.x 2024-09-25 15:14:13 +00:00
Solovyov1796
45b7920181 remove the EthSecp256k1 from cosmos 2024-09-25 15:12:33 +00:00
Solovyov1796
56d337df16 rename denoms 2024-09-25 15:12:33 +00:00
Solovyov1796
85059d734e fix unit test 2024-09-25 15:12:16 +00:00
Solovyov1796
6b4e8415da remove module's legacy code 2024-09-25 15:11:45 +00:00
Solovyov1796
77b817f9b8 recover go mod file 2024-09-25 15:11:45 +00:00
Solovyov1796
46378d6157 remove das module 2024-09-25 15:11:28 +00:00
MiniFrenchBread
d0721fd172 feat: add get functions 2024-09-25 15:11:03 +00:00
MiniFrenchBread
5e34f5b289 fix: da signers begin block 2024-09-25 15:11:03 +00:00
MiniFrenchBread
d6bca1b221 feat: max quorum num 2024-09-25 15:11:03 +00:00
MiniFrenchBread
8dc89ad08d fix: quorum 2024-09-25 15:11:03 +00:00
MiniFrenchBread
e4989f10cd feat: quorum 2024-09-25 15:11:03 +00:00
MiniFrenchBread
9839a244bf fix: defaultGenesis 2024-09-25 15:11:03 +00:00
MiniFrenchBread
c9043ca158 feat: update dasigners proto api 2024-09-25 15:11:03 +00:00
MiniFrenchBread
8d48dadb02 fix: dasigners module 2024-09-25 15:11:03 +00:00
MiniFrenchBread
c80be7bbf7 chore: dependency 2024-09-25 15:11:03 +00:00
MiniFrenchBread
17fa02b554 feat: precompile 2024-09-25 15:10:47 +00:00
Solovyov1796
e348bd3748 rename the app name showed in usage (#10) 2024-09-25 15:08:30 +00:00
Peter Zhang
f44d7cc94d update max validator count 2024-09-25 15:08:30 +00:00
Peter Zhang
0bfbd114c9 update checkout branch 2024-09-25 15:08:30 +00:00
Solovyov1796
483a939724 update init-genesis.sh for devnet and testnet 2024-09-25 15:08:30 +00:00
Solovyov1796
547b0057c7 fix unit test 2024-09-25 15:08:30 +00:00
0xsatoshi
1da9745903 fix 2024-09-25 15:08:11 +00:00
0xsatoshi
e952a4a705 fix 2024-09-25 15:08:11 +00:00
Solovyov1796
69a4a6298e update scripts 2024-09-25 15:08:01 +00:00
Solovyov1796
d05c2f9563 update env vars 2024-09-25 15:08:01 +00:00
Peter Zhang
82f54a1974 modify deploy script 2024-09-25 15:08:01 +00:00
Solovyov1796
3f1140dcd4 update 2024-09-25 15:08:01 +00:00
Solovyov1796
849c95d93e fix unit test for x 2024-09-25 15:08:01 +00:00
Solovyov1796
eee50a3f75 add scripts for devnet 2024-09-25 15:07:54 +00:00
Solovyov1796
1d2820a3b6 fix panic 2024-09-25 15:07:54 +00:00
Solovyov1796
950e4766d2 merge script from branch v0.1.0 2024-09-25 15:07:54 +00:00
Solovyov1796
91698d388f fix test 2024-09-25 15:07:54 +00:00
Solovyov1796
4cf57457a7 add 0g code 2024-09-25 15:07:54 +00:00
Solovyov1796
337f1c5cc8 rename kava 2024-09-25 15:07:33 +00:00
Solovyov1796
a437523ea2 add vrf 2024-09-25 15:06:53 +00:00
Solovyov1796
77ec52e16b revise file structure in cmd 2024-09-25 15:04:55 +00:00
Solovyov1796
b1365fb792 add chaincfg to save all configration of chain 2024-09-25 15:04:10 +00:00
Solovyov1796
d61f4e94fd update build file 2024-09-25 15:04:10 +00:00
Solovyov1796
8bc3b15c46 revise proto files 2024-09-25 15:03:21 +00:00
Solovyov1796
e8008c9a3a remove useless modules 2024-09-25 15:03:21 +00:00
Solovyov1796
28fa4b7993 rename go mod path 2024-09-25 15:00:59 +00:00
Peter Zhang
bd0acdbd4b add deploy scripts 2024-09-25 14:57:06 +00:00
Peter Zhang
7f62518464 add deploy scripts 2024-09-25 14:57:06 +00:00
Draco
0b4c5da294 Add v26 migrate docs (#1863)
* add migrate docs

* change date to TDB
2024-09-25 14:57:00 +00:00
Nick DeLuca
ad93042155 Use IAVL 0.20.x for v0.26.x release, update deps (#1862)
* revert back to iavl v1 to avoid hash changes on new modules like
we are seeing on the v0.24.x to v0.25.x upgrade block.  Also, add
replace statements for exp and rapid to match upstream cosmos-sdk

* fix sharding prune store logging and error return.

* add comment to clarify WithKeyTable usage
2024-09-25 14:56:40 +00:00
Draco
a7dd451e44 Add packet-forwarding store to upgrade (#1856) 2024-09-25 14:56:13 +00:00
Kevin Davis
c99879e9f7 add legacy rest removal notice (#1857) 2024-09-25 14:55:15 +00:00
Levi Schoen
820a676709 upgrade to iavl v1 (#1845) 2024-09-25 14:55:15 +00:00
MiniFrenchBread
27feb30bb9
feat: DASigners change params (#67)
* feat: use gov to manage dasigners params

* feat: evm precompile query func

* test: unit test

* feat: remove epoch and block height hard check

* feat: add params event
2024-09-24 09:39:16 +08:00
0g-wh
17bd9a6c71
Merge pull request #72 from 0g-wh/dev
upgrade to ethermint v3.0.3
2024-08-23 11:39:57 +08:00
0g-wh
c172fb3c55 upgrade to ethermint v3.0.3 2024-08-23 11:36:18 +08:00
0g-wh
0eb947b594
Merge pull request #71 from 0g-wh/docker-node
Dockerfile for node
2024-08-22 13:17:11 +08:00
0g-wh
a2746657a1 Dockerfile for node 2024-08-22 13:14:58 +08:00
drklee3
493ce0516f
feat: Add upgrade handler, fractional balances & reserve transfer (#1966)
Add upgrade handler
Migrates from x/evmutil to x/precisebank:
- Fractional balances
- Reserve funds
  - Mints or burns coins to ensure fractional balances are fully backed.

Initialize remainder if necessary to ensure valid state.
E2E test with fixed kvtool
2024-08-21 18:01:29 -07:00
drklee3
65d091d458
fix(x/precisebank): Avoid blocked addr error on SendCoinsFromAccountToModule (#2012) 2024-08-21 17:29:04 -07:00
Nick DeLuca
8023be0067
chore(nodejs): Update to active LTS v20 (#2011)
* chore(nodejs): Use active LTS v20 for nodejs

This updates nodejs to use the active LTS v20 from the maintenance LTS
v18.  This expands compatibility with packages, adds native support for
more features, etc.

In addition, the ci-seed-chain workflow was updated to use the
.tool-version file instead of hardcoded version.

* chore(hardhat): Update hardhat for nodejs 20 support
2024-08-20 11:50:03 -07:00
Draco
eaacd83de5
chore: update internal testnet kava version (#2010) 2024-08-19 14:11:55 -04:00
Draco
6862cde560
fix: revert protonet voting period to 10min and change internal testnet period to 7d (#2009) 2024-08-19 12:51:21 -04:00
Draco
b8e6e584b8
chore(ci): update internal testnet genesis and seed to support committee voting (#2008)
* chore(ci): update internal testnet genesis and seed to support committee voting

* chore(ci): update gov proposal voting period to 7 days

* chore: use auto gas calculation
2024-08-19 12:23:51 -04:00
Evgeniy Scherbina
27d63f157c
ci: dispatch run-rosetta-tests event to rosetta-kava (#2007) 2024-08-15 12:00:24 -04:00
Robert Pirtle
7aede3390d
ci: add semantic pull request title linting (#2006)
enforces following conventional commit standard in all PR titles
2024-08-14 10:22:46 -07:00
Robert Pirtle
49f7be8486
docs: update latest mainnet kava version (#2005) 2024-08-13 12:49:12 -07:00
Nick DeLuca
fbce24abef
chore(precisebank): Add queries to swagger (#2004)
This adds the precisebank protobuf generated swagger documentation to
the swagger combine configuration in order to be rendered in the
swagger.yaml file.
2024-08-13 12:32:18 -07:00
Nick DeLuca
7e50ce8142
chore: Add ethermint to swagger (#2002)
This adds the upstream ethermint swagger file to the proto-deps and adds
the swagger combine config to include it in the kava generated swagger.

Run `make proto-all` to update.
2024-08-13 07:34:59 -07:00
Solovyov1796
43dd1a7c41
Merge pull request #66 from Solovyov1796/local-dev
fix wrong path of proto
2024-08-12 15:29:46 +08:00
Solovyov1796
72d30dde8a fix wrong path of proto 2024-08-12 14:55:53 +08:00
Solovyov1796
c18ca45188
Merge pull request #65 from Solovyov1796/local-dev
fix test build break
2024-08-12 11:51:46 +08:00
Solovyov1796
f50d847c4f fix test build break 2024-08-12 11:45:01 +08:00
0g-wh
568ff70ad7
Merge pull request #63 from Solovyov1796/local-dev
add conuncil back
2024-08-10 10:25:04 +08:00
MiniFrenchBread
1355bd6ab1
fix: missing designers (#62)
* fix: add dasigners back

* test: remove manually initialize genesis

* feat: generate all missing epochs on begin block; only panic on smaller block height

* chore: add logs, fix EpochBlocks
2024-08-10 10:24:11 +08:00
Solovyov1796
ceb4d774ff add conuncil back 2024-08-10 10:18:21 +08:00
Nick DeLuca
ab3cf7c994
feat!(precompile): Add registry and genesis tests (#1999)
* feat!(precompile): Add registry and genesis tests

Based on evgeniy-scherbina's work, this adds a new precompile module
which defines a contract moudule with an example noop contract that
will be will be used for implementing test functions.  In addition,
it defines a registry module that instantiates stateful precompile
contracts and associates them with an address in a global registry
defined in kava-labs/go-ethereum. See precompile/README.md for more
information.

The kava-labs/go-ethereum and kava-labs/etheremint replace statements
are updated to support these changes as well as an update to kvtool
which includes genesis state for the registry.NoopContractAddress and
initializes the contract's EthAccount with a non-zero sequence and
codehash set to keccak256(0x01), and sets the contract code to 0x01.
See tests/e2e/e2e_precompile_genesis_test.go for an overview of the
expected genesis state for an enabled precompile.

Co-authored-by: evgeniy-scherbina <evgeniy.shcherbina.es@gmail.com>

* chore: Precompile readme improvements

This fixes a typo (import -> important) and uses package terminology
instead of unclear module terminology.  This aligns best with golang
terminology were modules and packages are distinctly different and
modules are defined using go.mod.

* chore: Improve noop contract godoc

Add a more meaningful godoc where the noop contract is constructed.

* chore(e2e): Improve comments around query checks

Improve the clarity of comments around where the error is checked for
accounts and why it is not checked directly.

In addition, improve comment on why both grpc and rpc code is fetched
and where they are used.

---------

Co-authored-by: evgeniy-scherbina <evgeniy.shcherbina.es@gmail.com>
2024-08-09 09:55:31 -07:00
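The commit above sets an enabled precompile's contract code to `0x01` and its code hash to `keccak256(0x01)`. The standalone snippet below only shows how that expected hash would be computed for a genesis assertion; it is an illustrative sketch, not part of the test suite.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Contract code is 0x01; the account's code hash is expected to be keccak256(0x01).
	code := []byte{0x01}
	h := sha3.NewLegacyKeccak256() // legacy Keccak-256, as used by Ethereum
	h.Write(code)
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```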
0g-wh
8bd14a6c00
Merge pull request #60 from 0g-wh/dev
fix mint denom in upgrades.go
2024-08-09 14:17:29 +08:00
0g-wh
78caabebe1 fix mint denom in upgrades.go 2024-08-09 14:11:35 +08:00
MiniFrenchBread
f4408080e6
fix: designers; test: designers, precompile (#59)
* test: dasigners test

* test: genesis

* fix: abci; test: abci

* test: types

* test: keeper test

* test: util

* test: dasigners precompile

* chore: remove log
2024-08-09 13:34:37 +08:00
0g-wh
1d2504b085
Merge pull request #58 from 0glabs/testnet/v0.3.x
merge wasm
2024-08-09 09:47:25 +08:00
cuiweiyuan
33932e8ad6
chore: fix some function names (#1998)
Signed-off-by: cuiweiyuan <cuiweiyuan@aliyun.com.>
2024-08-08 06:38:35 -07:00
Nick DeLuca
ab10ce628c
chore(lint): Disable funlen for test functions (#1993)
This adds a regular expression that matches `func Test...` or
`func (suite *Suite) Test...` style functions and disables the length
check. An example from e2e tests that failed lint:

`func (suite *IntegrationTestSuite) TestEip712BasicMessageAuthorization()`
2024-08-07 13:25:18 -07:00
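The pattern the commit above describes can be exercised in isolation. The regex below is only an approximation of the idea (match plain and suite-receiver test declarations), not necessarily the exact expression used in `.golangci.yml`.

```go
package main

import (
	"fmt"
	"regexp"
)

// Approximation of the exclusion pattern: `func Test...` or
// `func (suite *Suite) Test...` style declarations.
var testFuncRe = regexp.MustCompile(`^func (\([^)]+\) )?Test`)

func main() {
	for _, line := range []string{
		"func TestFoo(t *testing.T) {",
		"func (suite *IntegrationTestSuite) TestEip712BasicMessageAuthorization() {",
		"func helper() {", // not a test; the length check would still apply
	} {
		fmt.Println(testFuncRe.MatchString(line), line)
	}
}
```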
sesheffield
edf2935f31
chore(prs): add codeowners (#1995) 2024-08-07 15:50:24 -04:00
Nick DeLuca
a4583be44b
fix(docker): Ignore local build and lint cache (#1994)
These should not be replicated to docker contexts as they are local to
the build host.  In addition, the golangci-lint currently doesn't assume
the host user nor add other group read permissions when writing files,
so this causes permission errors when other docker processes attempt to
copy the files.
2024-08-07 11:20:17 -07:00
sesheffield
3c4d91a443
chore(linter): enable gosec on golangci linter and reformat config file (#1983)
add in gosec to the golangci.yml config file and reorder the linters-settings to be in alphabetical order
2024-08-07 12:24:27 -04:00
Nick DeLuca
774e2efce8
chore(lint): Update local make lint to match CI (#1991)
* chore(lint): Update local make lint to match CI

This updates the `make lint` behavior to match the command being
run in CI.

In addition, we refactor the make lint command to use docker in order to
to ease cross platform install, use a local build cache that integrates
with make clean, use the same version file, and encapsulate the logic in
its own make include.

We also remove the old lint logic as to not introduce a duplicate target
and avoid confusion from a difference in behavior.

While solutions like act for running github actions locally work, it is
not as straightfoward, is slower, and uses the local git repository
instead of a clone (though I am not sure how the checkout step works
within act).

* fix(lint): Use shared timeout with .golangci.yml

Instead of using a local and different timeout in the lint makefile
target we can rely on golangci to load this configuration from
.golangci.yml instead and share this setting with CI.

* fix(lint): Fix golangci-lint cache mount path

This uses the correct cache dir default of ~/.cache enabling use
of cache between lint calls.

* fix(lint): Fix lint caching

This includes a couple fixes - 1) It adds support for full caching of go
mod and go build, speeding up the lint process quite a bit.  And 2) does
not mix lint cache with make clean files -- the docker container creates
root owned files that cause make clean to error and we choose not to
require make clean to run with higher permissions.  The cache must be
deleted manually.
2024-08-05 10:13:17 -07:00
0g-wh
30728b75e9
fix wasm static link (#57)
2024-08-05 23:11:18 +08:00
0g-wh
e822edfe8d
Merge pull request #56 from 0glabs/dev-047
update go version of workflow to 1.21
2024-08-05 09:58:29 +08:00
Solovyov1796
d2406d4efd
Merge pull request #55 from 0glabs/update-workflow-go-version-to-1.21
Update go version of workflow to 1.21
2024-08-05 09:54:58 +08:00
0g-wh
e8e2e3abd3
Update upload-release-assets.yml 2024-08-05 09:53:17 +08:00
0g-wh
2da75122b3
Merge pull request #54 from 0g-wh/wasm
add ibcwasmtypes to upgrades.go
2024-08-05 09:10:39 +08:00
0g-wh
ed5646b71a add ibcwasmtypes to upgrades.go 2024-08-04 19:39:00 +08:00
0g-wh
4aa67c9efb
Merge pull request #53 from 0g-wh/wasm
feat: add 08-wasm module
2024-08-04 19:17:20 +08:00
0g-wh
bd3d947e9b clean code 2024-08-04 15:25:53 +08:00
aeryz
a8c86f7f8b feat: add 08-wasm module
Signed-off-by: aeryz <abdullaheryz@protonmail.com>
2024-08-04 15:23:35 +08:00
0g-wh
058d9f2285
Merge pull request #52 from 0glabs/dev-047
upgrade to kava 26 / cosmos v0.47
2024-08-04 14:20:12 +08:00
0g-wh
3709a23632 fix cmd/keys
2024-08-04 14:13:11 +08:00
0g-wh
04dfd2a2e9 fix review issues 2024-08-04 13:56:43 +08:00
0g-wh
932664efac prepare upgrade
2024-08-03 15:05:14 +08:00
0g-wh
c949c06fce rebase to kava cosmos 0.47 upgrade
rename

rename

tidy

clean code
2024-08-03 15:05:14 +08:00
0g-wh
14ca626365 add Upload Release Assets workflow (#49)
* Create upload-release-assets.yml
2024-08-03 15:05:14 +08:00
Solovyov1796
008b421fd2 update gitignore 2024-08-03 15:05:14 +08:00
Solovyov1796
ac1fd4360d merge testnet script 2024-08-03 15:05:14 +08:00
0g-wh
d35b277cab add cosmovisor init script 2024-08-03 15:05:14 +08:00
Solovyov1796
2c248aff18 use 0glabs' cometbft 2024-08-03 15:05:14 +08:00
0xsatoshi
4ab0d3ee27 enable vesting msgs 2024-08-03 15:05:14 +08:00
0xsatoshi
8d761147a2 fix 2024-08-03 15:05:14 +08:00
0xsatoshi
4917eb5976 fix 2024-08-03 15:05:14 +08:00
MiniFrenchBread
f8d5f29078 refactor: epoch quorum storage 2024-08-03 15:05:14 +08:00
MiniFrenchBread
efee71e2e6 feat: getQuorumRow 2024-08-03 15:05:14 +08:00
Solovyov1796
6190839ddc keep the EthSecp256k1 from cosmos for compatible 2024-08-03 15:05:14 +08:00
Solovyov1796
c1efdaa507 recover "rename denoms" in 3 files 2024-08-03 15:05:14 +08:00
Solovyov1796
62c5eaf515 use chaincfg.MakeCoinForGasDenom 2024-08-03 15:05:14 +08:00
Solovyov1796
48c349c127 custom inflation calculation function 2024-08-03 15:05:14 +08:00
MiniFrenchBread
4b09c6cd37 chore: remove tmp output 2024-08-03 15:05:14 +08:00
MiniFrenchBread
02e96e6424 fix: decimals 2024-08-03 15:05:14 +08:00
MiniFrenchBread
b3a8343a19 refactor: delegator 2024-08-03 15:05:14 +08:00
MiniFrenchBread
c6e4563cac fix: localtestnet.sh 2024-08-03 15:05:14 +08:00
MiniFrenchBread
7d4828f415 tidy 2024-08-03 15:05:14 +08:00
MiniFrenchBread
1fbf607360 merge testnet/v0.1.x 2024-08-03 15:05:14 +08:00
Solovyov1796
154dd509ee remove the EthSecp256k1 from cosmos 2024-08-03 15:05:14 +08:00
Solovyov1796
986172d3a7 rename denoms 2024-08-03 15:05:13 +08:00
Solovyov1796
4fabd4d011 fix unit test 2024-08-03 15:05:03 +08:00
Solovyov1796
e25cc5f531 remove module's legacy code 2024-08-03 15:04:55 +08:00
Solovyov1796
6202424c27 recover go mod file 2024-08-03 15:04:55 +08:00
Solovyov1796
c7ed82b4f4 remove das module 2024-08-03 15:04:55 +08:00
MiniFrenchBread
a3f3aaaecc feat: add get functions 2024-08-03 15:04:55 +08:00
MiniFrenchBread
701a0ba97e fix: da signers begin block 2024-08-03 15:04:55 +08:00
MiniFrenchBread
1e0194262d feat: max quorum num 2024-08-03 15:04:55 +08:00
MiniFrenchBread
e3e47e5e2f fix: quorum 2024-08-03 15:04:55 +08:00
MiniFrenchBread
93cceff23c feat: quorum 2024-08-03 15:04:55 +08:00
MiniFrenchBread
1680cd6b32 fix: defaultGenesis 2024-08-03 15:04:55 +08:00
MiniFrenchBread
284181edc9 feat: update dasigners proto api 2024-08-03 15:04:55 +08:00
MiniFrenchBread
422e940c28 fix: dasigners module 2024-08-03 15:04:55 +08:00
MiniFrenchBread
bb5d5130cf chore: dependency 2024-08-03 15:04:55 +08:00
MiniFrenchBread
b53783447b feat: precompile 2024-08-03 15:04:55 +08:00
Solovyov1796
6f2b402294 rename the app name showed in usage (#10) 2024-08-03 15:04:55 +08:00
Peter Zhang
82139161be update max validator count 2024-08-03 15:04:55 +08:00
Peter Zhang
4798eea3ff update checkout branch 2024-08-03 15:04:55 +08:00
Solovyov1796
ee01ac7a7b update init-genesis.sh for devnet and testnet 2024-08-03 15:04:55 +08:00
Solovyov1796
47cee39c64 fix unit test 2024-08-03 15:04:53 +08:00
Solovyov1796
521f558f5d recv both cosmos denom and evm denom from bank keeper 2024-08-03 15:04:48 +08:00
0xsatoshi
adb09a7c82 fix 2024-08-03 15:04:48 +08:00
0xsatoshi
3da66a87e6 fix 2024-08-03 15:04:48 +08:00
Solovyov1796
19a202669a update scripts 2024-08-03 15:04:48 +08:00
Solovyov1796
eaf81e9465 update env vars 2024-08-03 15:04:48 +08:00
Peter Zhang
817a8a151a modify deploy script 2024-08-03 15:04:48 +08:00
Solovyov1796
78114aed73 update 2024-08-03 15:04:48 +08:00
Solovyov1796
34a76200f0 fix unit test for x 2024-08-03 15:04:48 +08:00
Solovyov1796
fe8c36f891 add scripts for devnet 2024-08-03 15:04:48 +08:00
Solovyov1796
8357cc2191 fix panic 2024-08-03 15:04:48 +08:00
Solovyov1796
f8e102fbd5 merge script from branch v0.1.0 2024-08-03 15:04:48 +08:00
Solovyov1796
d1c3f36bbe fix test 2024-08-03 15:04:48 +08:00
Solovyov1796
89d3829646 add 0g code 2024-08-03 15:04:48 +08:00
Solovyov1796
ffad9dbdd5 rename kava 2024-08-03 15:04:46 +08:00
Solovyov1796
0bbaeb0393 add vrf 2024-08-03 15:04:33 +08:00
Solovyov1796
cc4f72b165 revise file structure in cmd 2024-08-03 15:04:33 +08:00
Solovyov1796
6a197a5db5 add chaincfg to save all configration of chain 2024-08-03 15:04:33 +08:00
Solovyov1796
e787cd052e update build file 2024-08-03 15:04:33 +08:00
Solovyov1796
2454c94596 revise proto files 2024-08-03 15:04:33 +08:00
Solovyov1796
454733f55b remove useless modules 2024-08-03 15:04:32 +08:00
Solovyov1796
14e1e3a7d4 rename go mod path 2024-08-03 15:03:23 +08:00
Peter Zhang
ca3ab93657 add deploy scripts 2024-08-03 15:03:01 +08:00
Peter Zhang
c8bf4644c1 add deploy scripts 2024-08-03 15:03:01 +08:00
Nick DeLuca
272f82ec99
chore(lint): Enable localmodule for import linter (#1989)
We use three sections through-out the codebase -- standard, default, and
localmodule.  This change updates the linter to enforce this pattern as
files are added or modified.
2024-08-02 12:08:38 -07:00
Nick DeLuca
e198eeb3b4
fix(e2e): Use docker compose V2 for kvtool and Makefile (#1990)
* chore(Makefile): Migrate to docker compose v2

Use V2 `docker compose` instead of V1 `docker-compose`

* chore(kvtool): Update to latest master commit
2024-08-02 10:45:57 -07:00
drklee3
bbfaa54ddf
chore(deps): Bump cometbft to v0.37.9-kava.1 (#1988)
This resolves ASA-2024-008. Patched in 0.37.7 but that version has a breaking change which was reverted in 0.37.8.

The replace for golang.org/x/exp prevents breaking change in slices package causing compile error with gogoproto
2024-08-02 09:27:28 -07:00
dependabot[bot]
4e66a56208
chore(deps-dev): bump undici from 5.22.1 to 5.28.4 in /contracts (#1925)
Bumps [undici](https://github.com/nodejs/undici) from 5.22.1 to 5.28.4.
- [Release notes](https://github.com/nodejs/undici/releases)
- [Commits](https://github.com/nodejs/undici/compare/v5.22.1...v5.28.4)

---
updated-dependencies:
- dependency-name: undici
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-31 17:24:20 -07:00
dependabot[bot]
9c629ad113
chore(deps-dev): bump braces from 3.0.2 to 3.0.3 in /contracts (#1941)
Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3.
- [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md)
- [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3)

---
updated-dependencies:
- dependency-name: braces
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-31 17:17:25 -07:00
sesheffield
b0d737d354
chore(linter): update Golang linter configuration (#1977)
* update golinter + add go sec

* add golangci.yml
Co-authored-by: @faddat jacobgadikian@gmail.com

* update

* update

* fix release version

* remove sec, update from pr comments, cleanup golangci.yml to not break on master

* remove @faddat, not valid codeowner

* remove unnecessary make command

* remove incorrectly named golangci.yml file

* add --new-from-rev

* use master instead of main

* remove extra echo

* set the exports properly

* add setup go to work with act

* add some docs to golangci linter

* test new-from-rev

* enable more linters, but app.go back

* verify issues-exit-code being gone

* put it back

* enable more linters

* remove exclusions
2024-07-31 16:23:44 -04:00
dependabot[bot]
a8df31b31a
chore(deps): bump github.com/btcsuite/btcd from 0.23.4 to 0.24.0 (#1900)
* chore(deps): bump github.com/btcsuite/btcd from 0.23.4 to 0.24.0

Bumps [github.com/btcsuite/btcd](https://github.com/btcsuite/btcd) from 0.23.4 to 0.24.0.
- [Release notes](https://github.com/btcsuite/btcd/releases)
- [Changelog](https://github.com/btcsuite/btcd/blob/master/CHANGES)
- [Commits](https://github.com/btcsuite/btcd/compare/v0.23.4...v0.24.0)

---
updated-dependencies:
- dependency-name: github.com/btcsuite/btcd
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore: bump github.com/btcsuite/btcd in e2e-ibc

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: drklee3 <derrick@dlee.dev>
2024-07-31 12:52:55 -07:00
dependabot[bot]
6243944db6
chore(deps): bump github.com/hashicorp/go-getter from 1.7.1 to 1.7.5 (#1953)
* chore(deps): bump github.com/hashicorp/go-getter from 1.7.1 to 1.7.5

Bumps [github.com/hashicorp/go-getter](https://github.com/hashicorp/go-getter) from 1.7.1 to 1.7.5.
- [Release notes](https://github.com/hashicorp/go-getter/releases)
- [Changelog](https://github.com/hashicorp/go-getter/blob/main/.goreleaser.yml)
- [Commits](https://github.com/hashicorp/go-getter/compare/v1.7.1...v1.7.5)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/go-getter
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

* chore: bump github.com/hashicorp/go-getter from 1.7.1 to 1.7.5 in e2e-ibc

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: drklee3 <derrick@dlee.dev>
2024-07-31 10:27:46 -07:00
sesheffield
7f339d20ca
fix(insolvency) kava lend insolvency check bug implementation (#1982)
* add additional tests that attempt to borrow funds from the insolvent market(should fail), and attempt to borrow funds from the not insolvent market (it will fail, but shouldn't). The not insolvent market should continue to processs borrows

* remove unused code

* make tests less specific for string contains

* add new get total reserves for denoms functionality

* start utilizing GetTotalReservesForDenoms in ValidateBorrow

* update tests for Borrow to not fail when borrowing from an insolvent market

* use get total reseves in GetTotalReservesForDenoms for reusability

* refactor GetTotalReservesForDenoms to GetTotalReservesByCoinDenoms for more clarity

* change the structure for new and old tests and add more verbosity for other tests

* remove print

* remove unneeded code

* add paren

* adjust structure again after initial PR

* remove duplicate test case with invalid test name, and update to use error contains in places where it was validating if true for strings contains

* no need for keeper method
2024-07-30 13:08:48 -04:00
sesheffield
916ec6d30c
test(insolvency): add tests for Kava lend insolvency check (#1981)
* add additional tests that attempt to borrow funds from the insolvent market(should fail), and attempt to borrow funds from the not insolvent market (it will fail, but shouldn't). The not insolvent market should continue to processs borrows

* remove unused code

* make tests less specific for string contains

* change the structure for new and old tests and add more verbosity for other tests

* remove print

* remove unneeded code

* add paren

* remove duplicate test case with invalid test name, and update to use error contains in places where it was validating if true for strings contains

---------

Co-authored-by: Sam Sheffield <sam.sheffield@kavalabs.io>
2024-07-29 20:51:08 -04:00
Nick DeLuca
b4c04656ab
docs(x/precisebank): Add spec for logic (#1969) 2024-07-29 09:42:17 -07:00
drklee3
837e57ec2e
docs(x/evmutil): Remove akava and evmbankkeeper from spec (#1968) 2024-07-26 14:01:53 -07:00
drklee3
5f802fcfbd
feat(x/precisebank): Emit coin_spent and coin_received events (#1978) 2024-07-26 13:05:49 -07:00
riyueguang
f229afce1a
chore: fix some comments (#1980)
Signed-off-by: riyueguang <rustruby@outlook.com>
2024-07-26 12:38:06 -07:00
drklee3
608f70b20a
feat: Add gRPC query for remainder and account fractional balance (#1971) 2024-07-25 13:36:36 -07:00
Evgeniy Scherbina
74f76d125c
Upgrade opendb (#1972) 2024-07-19 15:44:34 -04:00
drklee3
3853e276a6
feat(x/precisebank): Add query service with TotalFractionalBalances (#1970)
Add query service to precisebank, mostly for e2e test purposes in #1966
Also fix client grpc codec
2024-07-19 10:24:23 -07:00
Evgeniy Scherbina
7aef2f09e9
Upgrade ethermint and opendb (#1965) 2024-07-15 17:45:49 -04:00
Evgeniy Scherbina
58d7c89f8e
Replace opendb package from kava with generic opendb repo (#1959)
* Upgrade ethermint

* Remove opendb package from kava and add custom dbOpener function

* Open metadata.db with custom opendb function
2024-07-11 09:23:31 -04:00
drklee3
d2d661276e
feat: Use x/precisebank for x/evm keeper (#1960)
Replace x/evmutil EvmBankKeeper usage for x/evm
2024-07-10 14:20:12 -07:00
drklee3
9de9de671e
feat(x/precisebank): Display 0 reserve balance to module consumers (#1958)
Module reserve represents fractional balances, so it should be hidden to consumers to not have a misleading total balance that doubles the fractional balances. This modifies GetBalance() and SpendableCoin() to always return zero coins when fetching the reserve address balance for fractional amounts.
2024-07-10 11:14:17 -07:00
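A toy illustration of the behavior described above: the reserve's extended-denom balance is reported as zero so its fractional backing is not double counted. Names and types here are illustrative only, not the actual x/precisebank keeper API.

```go
package main

import "fmt"

type bankView struct {
	balances map[string]int64 // address -> akava balance
	reserve  string           // reserve module address holding the backing funds
}

// GetBalance hides the reserve's akava balance from consumers, mirroring the
// commit above; all other lookups pass through unchanged.
func (b bankView) GetBalance(addr, denom string) int64 {
	if denom == "akava" && addr == b.reserve {
		return 0
	}
	return b.balances[addr]
}

func main() {
	b := bankView{
		balances: map[string]int64{"reserve": 900_000, "alice": 750_000},
		reserve:  "reserve",
	}
	fmt.Println(b.GetBalance("reserve", "akava")) // 0: backing funds are hidden
	fmt.Println(b.GetBalance("alice", "akava"))   // 750000
}
```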
drklee3
ce6aac3a72
refactor(x/precisebank): Replace FractionalAmount wrapper with func (#1961)
Removal of unnecessary wrapper type, along with using conversionFactor-1 instead of maxFractionalAmount
2024-07-09 15:33:31 -07:00
drklee3
23ce7d8169
feat(x/precisebank): Return full balances in GetBalance(), add SpendableCoin method (#1957)
Change GetBalance() to return full balances instead of spendable to align behavior with x/bank. Add SpendableCoin() method with support of akava for use in x/evm.
2024-06-28 18:06:48 -07:00
drklee3
60a8073574
feat(x/precisebank): Emit events for send/mint/burn (#1955)
Emits the **total** akava amount for both ukava and akava send/mint/burns. If both akava,ukava are sent (not possible via x/evm nor cosmos messages but still an edge case), then the sum is emitted. No other denoms are emitted by x/precisebank as they will be emitted by the underlying x/bank
2024-06-27 19:40:17 -07:00
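As a rough illustration of the event amount described above — a single total in akava covering both the ukava and akava legs of a transfer — assuming a 10^12 akava-per-ukava conversion factor:

```go
package main

import (
	"fmt"
	"math/big"
)

// Assumed conversion factor: 1 ukava = 10^12 akava.
var conversionFactor = new(big.Int).Exp(big.NewInt(10), big.NewInt(12), nil)

// totalAkava folds a ukava amount and an akava amount into the single akava
// total that the commit says is emitted in events.
func totalAkava(ukava, akava *big.Int) *big.Int {
	total := new(big.Int).Mul(ukava, conversionFactor)
	return total.Add(total, akava)
}

func main() {
	// Sending 2ukava plus 500akava emits one event for 2_000_000_000_500 akava.
	fmt.Println(totalAkava(big.NewInt(2), big.NewInt(500)))
}
```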
Robert Pirtle
2d07988994
e2e-ibc: add ERC20 convert to coin & IBC test (#1950)
* generate erc20 golang interface

* write interchain test that deploys ERC20

* enable deployed erc20 as a conversion pair

* convert erc20 to sdk coin!

* refactor: move RandomMnemonic() to util

* erc20 -> cosmos coin -> ibc e2e test

* add NewEvmSignerFromMnemonic to util

* ci: update ibc-test cache dependency list

* fix ci dependencies
2024-06-24 14:55:40 -07:00
Paul Downing
6a9eda8634
bump deploy version for internal-testnet (#1952) 2024-06-24 11:33:37 -06:00
Paul Downing
4788c064bf
ci: add native wbtc to internal-testnet evm params proposal (#1951)
* add native wbtc to internal-testnet evm params proposal

* Update seed-internal-testnet.sh

remove leading whitespace
2024-06-24 11:12:18 -06:00
drklee3
1743cf5275
fix(x/precisebank): Ensure exact reserve balance on integer carry when minting (#1932)
Fix reserve minting an extra coin when the recipient module both carries fractional over to integer balance AND remainder is insufficient. Adjusts fractional carry to simply send from reserve, instead of doing an additional mint. Add invariant to ensure reserve matches exactly with fractional balances + remainder, failing on both insufficient and excess funds.
2024-06-20 15:20:13 -07:00
rustco
9aef8e4971
chore: fix mismatched method comments (#1949)
Signed-off-by: rustco <ruster@111.com>
2024-06-20 15:09:21 -07:00
drklee3
38230d35e3
feat(x/precisebank): Implement BurnCoins (#1934)
Implement & test BurnCoins method
2024-06-20 15:02:23 -07:00
Paul Downing
af5eea690b
update internal-testnet verison to latest commit (#1948) 2024-06-18 13:57:22 -06:00
Paul Downing
1c1db357f5
update internal testnet wbtc contract config (#1947) 2024-06-18 13:33:33 -06:00
drklee3
409841c79c
feat(x/precisebank): Implement SendCoins (#1923)
Implements methods SendCoins, SendCoinsFromModuleToAccount, SendCoinsFromAccountToModule
2024-06-17 10:53:41 -07:00
Robert Pirtle
4c3f6533a0 ci: bump internal testnet version 2024-06-14 14:04:38 -07:00
Robert Pirtle
e1bd6ffa2f
ci: prefer checkout action to manual pull (#1945)
for internal testnet deployment, record the desired deployment version
as an action variable that can be used by the checkout action instead of
using manual pull & checkout commands
2024-06-14 13:15:45 -07:00
Paul Downing
5b0e7c8c58
bump internal testnet version (#1944) 2024-06-13 18:58:59 -06:00
Paul Downing
8d85c1ae1e
Update genesis.json (#1943)
* Update genesis.json

* align native wbtc naming conventions

* fitx testnet native wbtc naming in genesis

* alphabetically order denoms for internal-testnet genesis
2024-06-13 17:54:39 -06:00
todaymoon
80f2370d68
chore: make function comments match function names (#1935)
Signed-off-by: todaymoon <csgcgl@foxmail.com>
2024-06-13 12:35:22 -07:00
Paul Downing
16233d6031
Update KAVA.VERSION internal-testnet (#1942) 2024-06-12 14:54:31 -06:00
Nick DeLuca
828f17897e
use step output directly instead of fetching more than once (#1940)
Fix issue in finding ref
2024-06-12 12:45:29 -07:00
Paul Downing
a79d852d1c
Update KAVA.VERSION on internal-testnet (#1938)
- use most recent commit to deploy to internal-testnet
2024-06-12 12:24:02 -06:00
Paul Downing
0306bec0ae
bump internal-testnet VERSION and genesis file for wbtc config (#1937)
* bump internal-testnet VERSION and genesis file for wbtc config

* Fix EOF on validate genesis by add missing modules; Fix validation by fixing gov params; update total escrow to default

* bump version for latest genesis

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2024-06-12 10:43:15 -06:00
Paul Downing
5c51530b8e
add new native wbtc contract and seeds to internal-testnet (#1933)
## What Changes
- add a native `wbtc` contract to internal-testnet for testing
- seed the dev wallet and some e2e test wallets with funds for this new contract
2024-06-07 13:26:24 -06:00
Robert Pirtle
21dc0e21b3
ci: don't lint on release tag push (#1930)
the release tag CI is run when semantic versioned tags are pushed.
it is presumed that the commit and/or PR to the release branch being
tagged has already passed the lints.

this gets around Github Actions CI running check-proto-breaking-remote
which compares the pushed commit against _master_ (not the previous release)
2024-06-04 13:02:13 -07:00
zoupingshi
8d07d9cb3b
chore: fix some function names (#1929)
Signed-off-by: zoupingshi <hellocatty@tom.com>
2024-05-31 07:15:15 -07:00
Robert Pirtle
e7cc89a642
deps: use cosmos-sdk v0.47.10-iavl-v1-kava.1 (#1926)
previously, v0.47.10-kava.2 used iavl v1, but this version will be
retracted because that branch & tag should only be used for iavl v0.

this sdk version is the same as v0.47.10-kava.2, but also includes a bug
fix to the initial iavl version used when adding new modules
see https://github.com/Kava-Labs/cosmos-sdk/pull/545
2024-05-28 16:03:47 -07:00
Robert Pirtle
2e8c7ce337
feat(cli): add iavlviewer command (#1922)
* port iavlviewer to kava v0.26.x to debug app hash

* add hash subcommand to iavlviewer

additionally, use better error handling

* update changelog

* separate iavlviewer command into subcommands

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2024-05-28 11:00:28 -07:00
drklee3
110adcab2c
feat(x/precisebank): Implement MintCoins (#1920)
Implement MintCoins method that matches x/bank MintCoins validation behavior
2024-05-24 12:03:09 -07:00
Ruaridh
3d5f5902b8
chore(docs): update security contact (#1921)
* update readme

* update comment in ERC20 contract

* Revert "update comment in ERC20 contract"

This reverts commit c50a80d83d936ade7df2c82482717432d6c83db8.
2024-05-24 09:38:36 -07:00
drklee3
4cf41d18c2
feat(x/precisebank): Implement GetBalance (#1916)
Implement GetBalance for extended balances which passes through to `x/bank` for non-extended denoms. This diverges from `x/evmutil` behavior which will panic on non-"akava" calls.

Add bank / account keeper mocks for testing, with mockery config for [mockery package setup](https://vektra.github.io/mockery/latest/migrating_to_packages/)
2024-05-21 14:11:13 -07:00
drklee3
dbc3ad7fd2
feat(x/precisebank): Implement ExportGenesis (#1915) 2024-05-20 09:50:31 -07:00
Uditya Kumar
7990021431
Update README.md (#1919) 2024-05-17 11:54:09 -07:00
Uditya Kumar
fa33947496
docs: Fix README link to Run Validator Node docs 2024-05-17 09:46:36 -07:00
drklee3
4ff43eb270
feat(x/precisebank): Add keeper methods for store (#1912)
- Add store methods to get/set/delete/etc account fractional balances & remainder amount
- Add invariants to ensure stored state is correct
2024-05-16 15:30:31 -07:00
largemouth
d66b7d2705
chore: fix some typos (#1913)
Signed-off-by: largemouth <largemouth@aliyun.com>
2024-05-16 13:27:48 -07:00
drklee3
025b7b2cdb
feat(x/precisebank): Add remainder amount to genesis (#1911)
- Validate total fractional amounts in genesis type
- Validate against fractional balances such that `(sum(balances) + remainder) % conversionFactor == 0`
- Add new utility type `SplitBalance` for splitting up full balances into each
2024-05-15 14:07:24 -07:00
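The genesis condition quoted above, `(sum(balances) + remainder) % conversionFactor == 0`, reduces to plain integer arithmetic. This sketch also assumes a 10^12 akava-per-ukava conversion factor and is not the module's actual validation code.

```go
package main

import (
	"fmt"
	"math/big"
)

var conversionFactor = new(big.Int).Exp(big.NewInt(10), big.NewInt(12), nil)

// validRemainder checks (sum(balances) + remainder) % conversionFactor == 0.
func validRemainder(fractionalBalances []*big.Int, remainder *big.Int) bool {
	sum := new(big.Int).Set(remainder)
	for _, b := range fractionalBalances {
		sum.Add(sum, b)
	}
	return new(big.Int).Mod(sum, conversionFactor).Sign() == 0
}

func main() {
	half := new(big.Int).Div(conversionFactor, big.NewInt(2)) // half a ukava worth of akava
	fmt.Println(validRemainder([]*big.Int{half, half}, big.NewInt(0))) // true: fractions sum to a whole unit
	fmt.Println(validRemainder([]*big.Int{half}, big.NewInt(1)))       // false: remainder does not top it up
}
```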
drklee3
94914d4ca1
feat(x/precisebank): Add FractionalBalance types (#1907)
- Add necessary types to track account fractional balances.
- Add FractionalBalance type to genesis
2024-05-13 14:16:05 -07:00
drklee3
3c53e72220
feat: Add x/precisebank module basic setup (#1906)
- Add initial setup and empty genesis type for x/precisebank
- Basic tests with mostly empty values, to be filled out with additional implementation
2024-05-10 09:30:28 -07:00
Robert Pirtle
871e26670c
chore(rocksdb): bump deps for rocksdb v8.10.0 (#1903) 2024-05-09 09:56:18 -07:00
Robert Pirtle
da2f835bf7
docs: update README for v0.26.0 (#1904) 2024-05-08 12:59:10 -07:00
Robert Pirtle
6a7fd4c8bd
test(e2e-ibc): downgrade to ibc v7 for ibc tests (#1901)
* downgrade to ibc v7 for ibc tests

* add conformance test (does not pass consistently)

* limit number of nodes for more consistent passing

* update to upstream v7 branch of interchaintest

also, remove unnecessary go.mod replace statements

* better names for int pointers
2024-05-07 13:15:38 -07:00
Robert Pirtle
f72b628b71
ci: extract separate rocksdb base image (#1898)
* docker: separate rocksdb base image from build

* ci: inject go build cache for docker img builds
2024-05-02 10:36:49 -07:00
Robert Pirtle
3e877aca88
test: expose evm and add query to EVM (#1892) 2024-04-29 10:13:28 -07:00
forcedebug
360f21f9f8
Fix mismatched method names in comments (#1890)
Signed-off-by: forcedebug <forcedebug@outlook.com>
2024-04-23 14:44:04 -07:00
Robert Pirtle
d981070ede
test: add packet-forwarding middleware e2e test (#1883)
* setup interchaintest IBC test

* e2e test of packet forwarding middleware

* rename interchain -> e2e-ibc & add make cmd

* add CI step that runs e2e-ibc tests

* use current branch for docker image in e2e-ibc
2024-04-19 12:35:13 -07:00
careworry
346f4be683
chore: fix some typos in comments (#1881)
Signed-off-by: careworry <worrycare@outlook.com>
2024-04-18 06:56:52 -07:00
CoolCu
1b6f1468ec
Fix some typos in comments (#1878)
Signed-off-by: CoolCu <coolcui@qq.com>
2024-04-16 11:54:09 -07:00
Robert Pirtle
72e8641c8d
build: inject brew deps for MacOS rocksdb build (#1812)
* build: inject brew deps for MacOS rocksdb build

* configure make build-rocksdb-brew
2024-04-08 17:00:07 -07:00
dependabot[bot]
ac2e46f91e
chore(deps-dev): bump follow-redirects in /contracts (#1850)
Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.15.2 to 1.15.6.
- [Release notes](https://github.com/follow-redirects/follow-redirects/releases)
- [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.15.2...v1.15.6)

---
updated-dependencies:
- dependency-name: follow-redirects
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-08 16:59:09 -07:00
Evgeniy Scherbina
4686a2a3e9
Fix typo in seed-internal-testnet script (#1875) 2024-04-05 13:30:56 -04:00
mergify[bot]
667e532aaa
chore: remove repetitive words (#1869) (#1873)
Signed-off-by: frameflare <yangzenghua@outlook.com>
(cherry picked from commit 41b79e44af)

Co-authored-by: frameflare <166090836+frameflare@users.noreply.github.com>
2024-04-05 07:27:10 -07:00
Nick DeLuca
32a0193c45
bump release/v0.26.x deps (#1871) 2024-04-05 07:15:29 -07:00
Nick DeLuca
543417c01f
bump deps (#1870) 2024-04-05 07:15:13 -07:00
mergify[bot]
be5bf62ab8
fix:paramaters->paramaters (#1796) (#1872)
Fix misspellings across docs & comments

(cherry picked from commit 0ea92335de)

Co-authored-by: alex <152680487+bodhi-crypo@users.noreply.github.com>
2024-04-05 07:14:49 -07:00
frameflare
41b79e44af
chore: remove repetitive words (#1869)
Signed-off-by: frameflare <yangzenghua@outlook.com>
2024-04-05 07:13:40 -07:00
alex
0ea92335de
fix:paramaters->paramaters (#1796)
Fix misspellings across docs & comments
2024-04-05 07:02:52 -07:00
Adam Robert Turman
2a93c41fcc
Internal testnet: include EVM contracts & funds for remaining bep3 denoms (#1868)
* include remaining bep3 denoms

* typos
2024-04-04 12:21:05 -05:00
Robert Pirtle
3033529d9f
ci: start all internal testnet regardless of state (#1866)
even if the nodes are not in standby, target them for the start job in
the internal testnet deployment CI
2024-03-29 11:38:06 -07:00
Draco
7ca43024e4
Add v26 migrate docs (#1863)
* add migrate docs

* change date to TDB
2024-03-29 11:05:52 -07:00
Nick DeLuca
c9d900be2c
Set the CDP Block Interval to 100 during v0.26.x upgrade (#1865)
* set CDP block interval to 100 to only run interest synchronization
for risky cdps every 100 blocks instead of every block

* refactor and use constant for setting to improve clarity; update
block interval to 50 instead of 100.  This will decrease risk by
running around every 6 minutes instead of 12 mintues for current
mainnet block times.
2024-03-29 11:05:26 -07:00
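The gating described above — run the risky-CDP interest synchronization only every N blocks, with N settling at 50 — amounts to a modulus check on the block height. The helper below is illustrative, not the actual x/cdp begin blocker.

```go
package main

import "fmt"

// Interval chosen in the commit above: roughly every 6 minutes at current
// mainnet block times, instead of every block.
const cdpBeginBlockerInterval = 50

// shouldSyncRiskyCDPs reports whether the expensive interest synchronization
// should run at this height.
func shouldSyncRiskyCDPs(height int64) bool {
	return height%cdpBeginBlockerInterval == 0
}

func main() {
	for _, h := range []int64{49, 50, 99, 100} {
		fmt.Println(h, shouldSyncRiskyCDPs(h))
	}
}
```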
Nick DeLuca
8f93ca2048
Use IAVL 0.20.x for v0.26.x release, update deps (#1862)
* revert back to iavl v1 to avoid hash changes on new modules like
we are seeing on the v0.24.x to v0.25.x upgrade block.  Also, add
replace statements for exp and rapid to match upstream cosmos-sdk

* fix sharding prune store logging and error return.

* add comment to clarify WithKeyTable usage
2024-03-28 14:38:07 -07:00
Adam Robert Turman
198b620cb4
Add "axlBNB" to internal testnet (#1860)
* - add contract address for axlBNB
- seed EVM wallets with axlBNB

* update proposal to include new contract address

* Feedback

Co-authored-by: Ruaridh <rhuairahrighairidh@users.noreply.github.com>

* reorder coins in alphabetical order

* clean up

---------

Co-authored-by: Ruaridh <rhuairahrighairidh@users.noreply.github.com>
2024-03-28 10:56:38 -05:00
mergify[bot]
e34c94aa62
bep3 conversion msg server tests (#1859) (#1861)
(cherry picked from commit d3233d65d5)

Co-authored-by: Draco <draco@dracoli.com>
2024-03-28 07:16:15 -07:00
Draco
d3233d65d5
bep3 conversion msg server tests (#1859) 2024-03-27 16:52:15 -07:00
Nick DeLuca
6ea518960a
Optimize CDP Begin Blocker (#1822)
* optimize cdp begin blocker by removing unnecessary checks, reusing data
and prefix stores in loops, and reducing number of repeated calculations

* fix panic for new cdp types if both previous accural time and global
interest factor are not set

* do not touch global interest factor if no CDP's exist; revert to panic
if global interest factor is not found since this is an unreachable
state by normal keeper operation -- it can only be reached if store
is modified outside of public interface and normal operation
2024-03-26 13:06:26 -07:00
Nick DeLuca
673790465d
Optimize Pricefeed EndBlocker (#1851)
* optimize pricefeed endblocker to iterate all markets only once to remove
overhead of opening and closing iterator for each market individually.
In addition, extend tests to cover 100% of abci and price updating
behavior.

* use test cases that can't be confused with mean to ensure median is
always used
2024-03-26 13:05:52 -07:00
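The distinction the tests above pin down — the posted price is the median, never the mean — is easiest to see with values where the two differ. This standalone helper is illustrative only and works on floats rather than the keeper's decimal type.

```go
package main

import (
	"fmt"
	"sort"
)

// median returns the middle posted price (average of the two middle values
// for an even count).
func median(prices []float64) float64 {
	s := append([]float64(nil), prices...)
	sort.Float64s(s)
	n := len(s)
	if n%2 == 1 {
		return s[n/2]
	}
	return (s[n/2-1] + s[n/2]) / 2
}

func main() {
	// The mean would be ~34.3; the median is 2 -- a case that cannot be confused with the mean.
	fmt.Println(median([]float64{1, 2, 100}))
}
```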
mergify[bot]
a548de05c2
Implement bep3 evm native conversion logic (#1848) (#1858)
* Implement bep3 evm native conversion logic

* Update changelog

* Fix indentation

* Add bep3 conversion keeper tests

* make DefaultBEP3ConversionDenoms private

* refactor bep3 conversion

* update bep3 tests to cover all bep3 assets

* minor refactor

(cherry picked from commit 3afb656d1f)

Co-authored-by: Draco <draco@dracoli.com>
2024-03-25 14:46:52 -04:00
Draco
3afb656d1f
Implement bep3 evm native conversion logic (#1848)
* Implement bep3 evm native conversion logic

* Update changelog

* Fix indentation

* Add bep3 conversion keeper tests

* make DefaultBEP3ConversionDenoms private

* refactor bep3 conversion

* update bep3 tests to cover all bep3 assets

* minor refactor
2024-03-25 13:43:31 -04:00
Draco
7f73061c13
Add packet-forwarding store to upgrade (#1856) 2024-03-23 10:01:39 -04:00
mergify[bot]
da8a4f8787
Bump cosmos-sdk to v0.47.10-kava.2 with iavl v1 support (#1846) (#1855)
* Bump to cosmos-sdk to v0.47.10-kava.2

* Update go version in dockerfile to 1.21

* Fix shard logic for iavl v1

* Update changelog

(cherry picked from commit 969614d555)

Co-authored-by: Draco <draco@dracoli.com>
2024-03-22 18:27:21 -07:00
mergify[bot]
e96fd1735d
update deps and add mergify config for v0.26.x release branch (backport #1853) (#1854)
* update deps and add mergify config for v0.26.x release branch (#1853)

(cherry picked from commit 7866ee2f74)

# Conflicts:
#	go.sum

* fix go.sum conflicts

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2024-03-22 15:47:47 -07:00
Kevin Davis
eb9d3a3ed8
add legacy rest removal notice (#1857) 2024-03-22 14:36:24 -07:00
Draco
969614d555
Bump cosmos-sdk to v0.47.10-kava.2 with iavl v1 support (#1846)
* Bump to cosmos-sdk to v0.47.10-kava.2

* Update go version in dockerfile to 1.21

* Fix shard logic for iavl v1

* Update changelog
2024-03-22 09:40:18 -04:00
Nick DeLuca
7866ee2f74
update deps and add mergify config for v0.26.x release branch (#1853) 2024-03-21 08:45:10 -07:00
Robert Pirtle
66e41733e7
target all internal testnet instances on update (#1844)
(not just those in standby)
2024-03-12 13:31:33 -07:00
Levi Schoen
a681a5e631
upgrade to iavl v1 (#1845) 2024-03-12 15:04:10 -04:00
Draco
6b41ed84b8
Add packet forward store to upgrade (#1843) 2024-03-08 16:34:31 -05:00
Draco
17691e95a8
Merge branch 'master' into release/v0.26.x 2024-03-08 09:10:32 -05:00
Levi Schoen
cf2d83b020
consolidate conditional for internal testnet cd 2024-03-07 13:56:34 -08:00
Draco
5c0f27b952
Add ibc packet forward middleware for ibc transfer (#1839)
* Add ibc packet forward middleware for ibc transfer

* Update changelog for ibc packet forwarding
2024-03-07 16:47:46 -05:00
Levi Schoen
322f2ac056
only deploy internal testnet if deploy version or genesis state changes (#1842) 2024-03-07 15:44:42 -05:00
Adam Robert Turman
9bfd1ffd7d
fund internal testnet e2e account with USDt (#1841) 2024-03-06 14:07:26 -06:00
Robert Pirtle
f5384a1f11
cli: refactor shard command & add recovery options (#1837)
* expose SkipLoadLatest override via AppOpts

* add --force-app-version option to shard command

* refactor sharding of application.db

* refactor sharding of blockstore & state.db

* add --only-cometbft-state flag

* add comment divisions

* update usage doc

* prevent infinite loop during cometbft rollback
2024-02-29 16:22:09 -08:00
Draco
2a1e9a6631
Update x/incentive cli to use grpc (#1836)
* Update x/incentive cli to use grpc

* Update changelog
2024-02-29 15:09:52 -05:00
Robert Pirtle
069be95dde
fix: update vesting tests to use fixed block time (#1838)
* fix: update SWP circulating supply test constant

* use fixed blocktime for vesting tests
2024-02-29 12:09:07 -08:00
Draco
1d944d5219
Validator Vesting gRPC Endpoints (#1832)
* Add validator-vesting grpc

* Update validator REST API endpoints to use grpc

* Update validator-vesting cli to use grpc

* Update changelog for added grpc changes

* Add grpc query tests
2024-02-27 14:40:52 -05:00
Draco
11d3ba3466
Remove used legacy querier types (#1835)
These were left out by accident when the legacy queriers are removed from the modules in this commit 3ba4078ec1
2024-02-27 14:25:56 -05:00
Levi Schoen
2bc0c62570
register solomachine module for v6 to v7 ibc-go migration (#1831) 2024-02-16 12:49:58 -05:00
Levi Schoen
58d04e2996 register solomachine module for v6 to v7 ibc-go migration (#1830) 2024-02-15 15:24:55 -08:00
Draco
ad387e6a42
Add upgrade handler for Kava v0.26.0 (#1827)
* Add upgrade handler for Kava v0.26.0

* Update tests e2e to run upgrade tests
2024-02-13 13:27:27 -05:00
Draco
550ecc8027
Change e2e kvtool config template back to master (#1829) 2024-02-12 15:02:31 -05:00
Draco
5914f1db85
Update Kava grpc client with consensus client (#1828)
* Add consensus client to kava grpc client

* Update kvtool to latest version
This version includes the v25 kava template
2024-02-12 12:49:49 -05:00
Levi Schoen
ad03a75679
emulate block broadcast mode in seed scripts (#1824) 2024-02-08 18:32:44 -05:00
Levi Schoen
c0df29333b
use broadcast mode sync vs block in cli scripts (#1823) 2024-02-08 14:52:36 -05:00
Levi Schoen
45aa631f18
allow for running manual cd for protonet ad hoc (#1821) 2024-02-07 14:18:53 -05:00
Draco
614d4e40fe
Update cosmos-sdk to v0.47.7 (#1811)
* Update cometbft, cosmos, ethermint, and ibc-go

* Replace github.com/tendermint/tendermint by github.com/cometbft/cometbft

* Replace github.com/tendermint/tm-db by github.com/cometbft/cometbft-db

* Replace gogo/protobuf with cosmos/gogoproto & simapp replacement

* Replace cosmos-sdk/simapp/helpers with cosmos-sdk/testutil/sims

* Remove no longer used simulations

* Replace ibchost with ibcexported
See https://github.com/cosmos/ibc-go/blob/v7.2.2/docs/migrations/v6-to-v7.md#ibc-module-constants

* Add new consensus params keeper

* Add consensus keeper to blockers

* Fix keeper and module issues in app.go

* Add IsSendEnabledCoins and update SetParams interface changes

* Fix protobuf build for cosmos 47 (#1800)

* fix cp errors by using -f; fix lint by only linting our proto dir;
and use proofs.proto directly from ics23 for ibc-go v7

* run proto-all; commit updated third party deps and swagger changes

* regenerate proto files

* use correct gocosmos build plugin for buf

* re-gen all protobuf files to update paths for new gocosmos plugin

* update protoc and buf to latest versions

* fix staking keeper issues in app.go

* update tally handler for gov changes

* chain id fix and flag fixes

* update deps for cometbft 47.7 upgrade

* remove all module legacy queriers

* update stakingKeeper to pointer

* Replace ModuleCdc from govv1beta1 to govcodec

* remove simulations

* abci.LastCommitInfo → abci.CommitInfo

* Remove unused code in keys.go

* simapp.MakeTestEncodingConfig -> moduletestutil.MakeTestEncodingConfi

* Fix chain id issues in tests

* Fix remaining unit test issues

* Update changelog for upgrade

* Fix e2e tests using updated kvtool

* Update protonet to v47 compatible genesis

* Bump cometbft-db to v0.9.1-kava.1

* Update kvtool

* Remove extra changelog

* Fix merged rocksdb issues

* go mod cleanup

* Bump cometbft-db to v9 and go to 1.21

* Bump rocksdb version to v8.10.0

* Update kvtool to latest version

* Update gin to v1.9.0

* Use ibctm.ModuleName in app_test

* Fallback to genesis chain id instead of client toml

* Remove all simulations

* Fix cdp migrations issue with v47

* Update dependencies to correct tags

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2024-02-06 17:54:10 -05:00
Levi Schoen
7efee6d536
ensure pricefeed is updated every x/cdp begin blocker (#1819) 2024-02-02 16:16:35 -05:00
Levi Schoen
c59a491788
feat(x/cdp): update begin bloocker to run liquidations every X blocks based off params (#1818)
* (feat) update x/cdp to run every X blocks based off params (#1814)

* add new cdp module param to protonet genesis

* update cdp / cdp related tests for new module param

* update telemetry docs and setup for collecting against local node

* update kvool commit for new cdp param

(cherry picked from commit 4d62f47773)

* add tests for configurable x/cdp begin blocker interval param
add migration for default value of param

* make adjustments based off pr feedback

* fix proto back compat check
2024-02-02 13:21:11 -05:00
Nick DeLuca
2bccb6deaf
Revert "(feat) update x/cdp to run every X blocks based off params" (#1816) 2024-01-25 09:35:56 -07:00
Levi Schoen
7b5de0a12a
add github action for manually triggering protonet cd (#1815) 2024-01-24 18:12:34 -05:00
Levi Schoen
4d62f47773
(feat) update x/cdp to run every X blocks based off params (#1814)
* add new cdp module param to protonet genesis

* update cdp / cdp related tests for new module param

* update telemetry docs and setup for collecting against local node

* update kvool commit for new cdp param
2024-01-24 17:10:07 -05:00
drklee3
3767030005
feat(cli): Add rocksdb compact command (#1804)
* Add rocksdb compact command

* Increase compaction log output to 1 min

* Use GetClient/ServerContextFromCmd

* Update cmd info

* Add doc to logColumnFamilyMetadata

* Update RocksDBCmd docs

* Add changelog entry

* Load latest options from rocksdb

* Allow application.db to be compacted

* Rename more store -> db

* Ensure compaction stats output does not run when db is closed

* Add flag for custom stat output interval, return error
2024-01-19 10:17:13 -08:00
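
The `rocksdb compact` command triggers a manual full-range compaction of a chosen database (state, blockstore, or application.db). A rough sketch of the core operation using the grocksdb bindings; the real command also loads the database's stored options and logs column family stats on an interval, which this omits, and the path here is only an example:

```go
package main

import (
	"log"
	"os"

	"github.com/linxGnu/grocksdb"
)

func main() {
	opts := grocksdb.NewDefaultOptions()
	defer opts.Destroy()

	// example path; the CLI resolves this from the node's home directory
	db, err := grocksdb.OpenDb(opts, os.ExpandEnv("$HOME/.kava/data/application.db"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// nil Start/Limit compacts the entire key range
	db.CompactRange(grocksdb.Range{Start: nil, Limit: nil})
}
```
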
Robert Pirtle
58621577ae
feat(cli): add shard CLI command (#1785)
* stub out shard CLI command

* prune blocks before and after desired range

* update rollback to use patched cometbft

* temp override for local patched versions

* handle pruning cometbft & block store state

* include docs & support -1 for "latest"

* update changelog

* add --only-app-state flag to match cosmos-sdk prune cmd

* give -1 magic number a name & reuse home from ctx

* refactor to only open state.db & blockstore.db once

* write rollback progress to one line

* prevent attempting rollback of future blocks

* make shard inclusive of endblock

* use tagged cosmo-sdk & cometbft versions
2024-01-05 11:25:53 -08:00
Evgeniy Scherbina
f00727fe85
Add release/v0.25.x branch to backporting flow (#1799) 2023-12-21 15:23:59 -05:00
Evgeniy Scherbina
b170f3bdd5
Update ethermint (#1798) 2023-12-21 15:23:44 -05:00
Levi Schoen
1aa1348944 add docs and docker config for local and remote app telemetry collection and display 2023-12-14 09:56:44 -08:00
Draco
ffd306ef52
Kava gRPC Client (#1784)
* add grpc client

* add e2e tests for grpc client

* add grpc client readme

* doc update

* fix more doc issues

* remove util namespace & move grpc client to chain

* rename GrpcClient to Grpc

* add 3rd party query clients

* fix invalid url in readme

* update e2e tests to use grpc client (#1787)
2023-12-13 12:17:37 -05:00
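
The client added in this change wraps the chain's standard gRPC query services for use in e2e tests. A stand-alone sketch of the same idea using the stock bank query client directly; the endpoint, address, and denom are placeholders rather than values from the repo's own Grpc wrapper:

```go
package main

import (
	"context"
	"fmt"
	"log"

	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// standard cosmos-sdk bank query service exposed over gRPC
	bankClient := banktypes.NewQueryClient(conn)
	res, err := bankClient.Balance(context.Background(), &banktypes.QueryBalanceRequest{
		Address: "kava1...", // placeholder bech32 address
		Denom:   "ukava",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Balance)
}
```
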
Robert Pirtle
c63fab1317
release: update docs & changelog for v0.25.0 (#1794) 2023-12-07 10:21:02 -08:00
Levi Schoen
f70349ab02 update ethermint with patches for traceTx/traceBlock 2023-12-06 11:19:14 -08:00
Draco
1d4ccf6657
formatting fixes (#1783) 2023-11-21 14:08:07 -05:00
Robert Pirtle
aca738fbc6
fix(rocksdb): correctly resolve rocksdb path (#1767) (#1776)
* fix(rocksdb): correctly resolve rocksdb path (#1767)

ensure we use KAVA_HOME/data/application.db and not a nested
application.db within that path

* update changelog

---------

Co-authored-by: Nick DeLuca <nickdeluca08@gmail.com>
2023-11-20 15:27:16 -08:00
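
The bug fixed here joined `application.db` onto a path that already ended in `application.db`, producing a nested directory. A tiny illustration of the difference (the home path is an example):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	home := "/home/user/.kava" // example KAVA_HOME

	// correct: the database lives directly under <home>/data
	correct := filepath.Join(home, "data", "application.db")

	// buggy: joining the db name onto a path that already points at the db
	nested := filepath.Join(correct, "application.db")

	fmt.Println(correct) // /home/user/.kava/data/application.db
	fmt.Println(nested)  // /home/user/.kava/data/application.db/application.db
}
```
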
drklee3
39146747ac
fix: update ledger-cosmos-go v0.13.1 to resolve signing error with cosmos ledger app 2.34.12 (#1770)
* Update ledger-cosmos-go v0.13.1 with cosmos fork update

* Bump cosmos-sdk v0.46.11-kava.2

* Update changelog

* Update cosmos-sdk tag v0.46.11-kava.3

Incorrect kava.2 tag
2023-11-16 12:28:38 -08:00
Robert Pirtle
967c6857e8
ci: update CI workflows and pipelines (#1768)
* build & publish rocksdb docker images merge to master
* publish docker images on push of release version tags

NOTE: New docker image tag pattern. ALL tags now include database suffix
ex. <githash>-goleveldb, v0.25.0-alpha.1-rocksdb, master-rocksdb, etc

* update dockerfiles for better caching
* update all github action workflow versions
* improve caching of go packages
* cache docker image layers for reuse between runs
* update dockerignore to remove non-essential files
2023-11-08 14:06:03 -08:00
578 changed files with 52421 additions and 19252 deletions

View File

@ -11,5 +11,10 @@ docs/
networks/
scratch/
# Ignore build cache directories to avoid
# errors when adding these to docker images
build/.cache
build/.golangci-lint
go.work
go.work.sum

3
.github/CODEOWNERS vendored Normal file
View File

@ -0,0 +1,3 @@
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# Global rule:
* @rhuairahrighairidh @karzak @pirtleshell @drklee3 @nddeluca @DracoLi @evgeniy-scherbina @sesheffield @boodyvo @lbayas

20
.github/mergify.yml vendored
View File

@ -25,6 +25,8 @@ pull_request_rules:
- release/v0.21.x
- release/v0.23.x
- release/v0.24.x
- release/v0.25.x
- release/v0.26.x
- name: Backport patches to the release/v0.17.x branch
conditions:
@ -79,3 +81,21 @@ pull_request_rules:
backport:
branches:
- release/v0.24.x
- name: Backport patches to the release/v0.25.x branch
conditions:
- base=master
- label=A:backport/v0.25.x
actions:
backport:
branches:
- release/v0.25.x
- name: Backport patches to the release/v0.26.x branch
conditions:
- base=master
- label=A:backport/v0.26.x
actions:
backport:
branches:
- release/v0.26.x

View File

@ -33,7 +33,7 @@ kava config chain-id "${CHAIN_ID}"
kava config keyring-backend test
# wait for transactions to be committed per CLI command
kava config broadcast-mode block
kava config broadcast-mode sync
# setup god's wallet
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god

View File

@ -1,6 +1,14 @@
#!/bin/bash
set -ex
# by sleeping 1 block in between tx's
# we can emulate the behavior of
# the deprecated and now removed (as of Kava 16)
# broadcast mode of `block` in order to
# minimize the chance tx's fail due to an
# account sequence number mismatch
AVG_SECONDS_BETWEEN_BLOCKS=6.5
# configure kava binary to talk to the desired chain endpoint
kava config node "${CHAIN_API_URL}"
kava config chain-id "${CHAIN_ID}"
@ -9,7 +17,7 @@ kava config chain-id "${CHAIN_ID}"
kava config keyring-backend test
# wait for transactions to be committed per CLI command
kava config broadcast-mode block
kava config broadcast-mode sync
# setup dev wallet
echo "${DEV_WALLET_MNEMONIC}" | kava keys add --recover dev-wallet
@ -23,6 +31,8 @@ echo "sweet ocean blush coil mobile ten floor sample nuclear power legend where
# fund evm-contract-deployer account (using issuance)
kava tx issuance issue 200000000ukava kava1van3znl6597xgwwh46jgquutnqkwvwszjg04fz --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# deploy and fund USDC ERC20 contract
MULTICHAIN_USDC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "USD Coin" USDC 6)
MULTICHAIN_USDC_CONTRACT_ADDRESS=${MULTICHAIN_USDC_CONTRACT_DEPLOY: -42}
@ -73,6 +83,31 @@ TETHER_USDT_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NA
TETHER_USDT_CONTRACT_ADDRESS=${TETHER_USDT_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000
# deploy and fund axlBNB ERC20 contract
AXL_BNB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBNB" axlBNB 18)
AXL_BNB_CONTRACT_ADDRESS=${AXL_BNB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlBUSD ERC20 contract
AXL_BUSD_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBUSD" axlBUSD 18)
AXL_BUSD_CONTRACT_ADDRESS=${AXL_BUSD_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlXRPB ERC20 contract
AXL_XRPB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlXRPB" axlXRPB 18)
AXL_XRPB_CONTRACT_ADDRESS=${AXL_XRPB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund axlBTC ERC20 contract
AXL_BTCB_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "axlBTCB" axlBTCB 18)
AXL_BTCB_CONTRACT_ADDRESS=${AXL_BTCB_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 1000000000000000000000
# deploy and fund native wBTC ERC20 contract
WBTC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "wBTC" wBTC 8)
WBTC_CONTRACT_ADDRESS=${WBTC_CONTRACT_DEPLOY: -42}
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" 0x6767114FFAA17C6439D7AEA480738B982CE63A02 100000000000000000
# seed some evm wallets
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
@ -81,6 +116,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 100000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 1000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$DEV_TEST_WALLET_ADDRESS" 10000000000000
# seed webapp E2E whale account
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_WBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 100000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_wBTC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
@ -89,6 +129,11 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$wETH_CONTRAC
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$MULTICHAIN_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 1000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BNB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BUSD_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_BTCB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_XRPB_CONTRACT_ADDRESS" "$WEBAPP_E2E_WHALE_ADDRESS" 10000000000000000000000
npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$WBTC_CONTRACT_ADDRESS" "$WBTC_CONTRACT_ADDRESS" 10000000000000
# give dev-wallet enough delegation power to pass proposals by itself
@ -96,6 +141,8 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$TETHER_USDT_
kava tx issuance issue 6000000000ukava kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq \
--from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# parse space separated list of validators
# into bash array
read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<<"$GENESIS_VALIDATOR_ADDRESSES"
@ -103,11 +150,14 @@ read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<<"$GENESIS_VALIDATOR_ADDRESSES"
# delegate 300KAVA to each validator
for validator in "${GENESIS_VALIDATOR_ADDRESS_ARRAY[@]}"; do
kava tx staking delegate "${validator}" 300000000ukava --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
done
# create a text proposal
kava tx gov submit-legacy-proposal --deposit 1000000000ukava --type "Text" --title "Example Proposal" --description "This is an example proposal" --gas auto --gas-adjustment 1.2 --from dev-wallet --gas-prices 0.01ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# setup god's wallet
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
@ -123,7 +173,7 @@ PARAM_CHANGE_PROP_TEMPLATE=$(
{
"subspace": "evmutil",
"key": "EnabledConversionPairs",
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"}]"
"value": "[{\"kava_erc20_address\":\"MULTICHAIN_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdc\"},{\"kava_erc20_address\":\"MULTICHAIN_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/usdt\"},{\"kava_erc20_address\":\"MULTICHAIN_wBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/multichain/wbtc\"},{\"kava_erc20_address\":\"AXL_USDC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/usdc\"},{\"kava_erc20_address\":\"AXL_WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/wbtc\"},{\"kava_erc20_address\":\"wETH_CONTRACT_ADDRESS\",\"denom\":\"erc20/axelar/eth\"},{\"kava_erc20_address\":\"TETHER_USDT_CONTRACT_ADDRESS\",\"denom\":\"erc20/tether/usdt\"},{\"kava_erc20_address\":\"AXL_BNB_CONTRACT_ADDRESS\",\"denom\":\"bnb\"},{\"kava_erc20_address\":\"AXL_BUSD_CONTRACT_ADDRESS\",\"denom\":\"busd\"},{\"kava_erc20_address\":\"AXL_BTCB_CONTRACT_ADDRESS\",\"denom\":\"btcb\"},{\"kava_erc20_address\":\"AXL_XRPB_CONTRACT_ADDRESS\",\"denom\":\"xrpb\"},{\"kava_erc20_address\":\"WBTC_CONTRACT_ADDRESS\",\"denom\":\"erc20/bitgo/wbtc\"}]"
}
]
}
@ -140,6 +190,11 @@ finalProposal="${finalProposal/AXL_USDC_CONTRACT_ADDRESS/$AXL_USDC_CONTRACT_ADDR
finalProposal="${finalProposal/AXL_WBTC_CONTRACT_ADDRESS/$AXL_WBTC_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/wETH_CONTRACT_ADDRESS/$wETH_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/TETHER_USDT_CONTRACT_ADDRESS/$TETHER_USDT_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BNB_CONTRACT_ADDRESS/$AXL_BNB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BUSD_CONTRACT_ADDRESS/$AXL_BUSD_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_BTCB_CONTRACT_ADDRESS/$AXL_BTCB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/AXL_XRPB_CONTRACT_ADDRESS/$AXL_XRPB_CONTRACT_ADDRESS}"
finalProposal="${finalProposal/WBTC_CONTRACT_ADDRESS/$WBTC_CONTRACT_ADDRESS}"
# create unique proposal filename
proposalFileName="$(date +%s)-proposal.json"
@ -159,16 +214,37 @@ printf "original evm util module params\n %s" , "$originalEvmUtilParams"
# committee 1 is the stability committee. on internal testnet, this has only one member.
kava tx committee submit-proposal 1 "$proposalFileName" --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# vote on the proposal. this assumes no other committee proposal has ever been submitted (id=1)
kava tx committee vote 1 yes --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# fetch current module params
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"
# submit a kava token committee proposal
COMMITTEE_PROP_TEMPLATE=$(
cat <<'END_HEREDOC'
{
"@type": "/cosmos.gov.v1beta1.TextProposal",
"title": "The next big thing signaling proposal.",
"description": "The purpose of this proposal is to signal support/opposition to the next big thing"
}
END_HEREDOC
)
committeeProposalFileName="$(date +%s)-committee-proposal.json"
echo "$COMMITTEE_PROP_TEMPLATE" >$committeeProposalFileName
tokenCommitteeId=4
kava tx committee submit-proposal "$tokenCommitteeId" "$committeeProposalFileName" --gas auto --gas-adjustment 1.5 --gas-prices 0.01ukava --from god -y
# if adding more cosmos coins -> erc20s, ensure that the deployment order below remains the same.
# convert 1 HARD to an erc20. doing this ensures the contract is deployed.
kava tx evmutil convert-cosmos-coin-to-erc20 \
"$DEV_TEST_WALLET_ADDRESS" \
1000000hard \
--from dev-wallet --gas 2000000 --gas-prices 0.001ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
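
The comment block at the top of this script explains the workaround for the removed `block` broadcast mode: broadcast with `sync` and sleep roughly one block before the next tx. A more robust equivalent is to poll for inclusion instead of sleeping a fixed interval; a hedged Go sketch of that pattern against the standard tx gRPC service (endpoint and tx bytes are placeholders):

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	txtypes "github.com/cosmos/cosmos-sdk/types/tx"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	svc := txtypes.NewServiceClient(conn)

	var signedTxBytes []byte // assume a signed, encoded tx produced elsewhere

	// sync mode returns after CheckTx, before the tx lands in a block
	res, err := svc.BroadcastTx(context.Background(), &txtypes.BroadcastTxRequest{
		TxBytes: signedTxBytes,
		Mode:    txtypes.BroadcastMode_BROADCAST_MODE_SYNC,
	})
	if err != nil || res.TxResponse.Code != 0 {
		log.Fatal("broadcast failed: ", err)
	}

	// poll until the tx is found in a block before sending the next one
	for i := 0; i < 20; i++ {
		if _, err := svc.GetTx(context.Background(), &txtypes.GetTxRequest{Hash: res.TxResponse.TxHash}); err == nil {
			return // committed
		}
		time.Sleep(time.Second)
	}
	log.Fatal(errors.New("tx not committed in time"))
}
```
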

View File

@ -1,6 +1,14 @@
#!/bin/bash
set -ex
# by sleeping 1 block in between tx's
# we can emulate the behavior of
# the deprecated and now removed (as of Kava 16)
# broadcast mode of `block` in order to
# minimize the chance tx's fail due to an
# account sequence number mismatch
AVG_SECONDS_BETWEEN_BLOCKS=6.5
# configure kava binary to talk to the desired chain endpoint
kava config node "${CHAIN_API_URL}"
kava config chain-id "${CHAIN_ID}"
@ -9,7 +17,7 @@ kava config chain-id "${CHAIN_ID}"
kava config keyring-backend test
# wait for transactions to be committed per CLI command
kava config broadcast-mode block
kava config broadcast-mode sync
# setup dev wallet
echo "${DEV_WALLET_MNEMONIC}" | kava keys add --recover dev-wallet
@ -23,9 +31,13 @@ echo "sweet ocean blush coil mobile ten floor sample nuclear power legend where
# fund evm-contract-deployer account (using issuance)
kava tx issuance issue 200000000ukava kava1van3znl6597xgwwh46jgquutnqkwvwszjg04fz --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# fund 5k kava to x/community account
kava tx community fund-community-pool 5000000000ukava --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# deploy and fund USDC ERC20 contract
MULTICHAIN_USDC_CONTRACT_DEPLOY=$(npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" deploy-erc20 "USD Coin" USDC 6)
MULTICHAIN_USDC_CONTRACT_ADDRESS=${MULTICHAIN_USDC_CONTRACT_DEPLOY: -42}
@ -89,6 +101,8 @@ npx hardhat --network "${ERC20_DEPLOYER_NETWORK_NAME}" mint-erc20 "$AXL_USDC_CON
kava tx issuance issue 6000000000ukava kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq \
--from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# parse space separated list of validators
# into bash array
read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<< "$GENESIS_VALIDATOR_ADDRESSES"
@ -97,11 +111,14 @@ read -r -a GENESIS_VALIDATOR_ADDRESS_ARRAY <<< "$GENESIS_VALIDATOR_ADDRESSES"
for validator in "${GENESIS_VALIDATOR_ADDRESS_ARRAY[@]}"
do
kava tx staking delegate "${validator}" 300000000ukava --from dev-wallet --gas-prices 0.5ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
done
# create a text proposal
kava tx gov submit-legacy-proposal --deposit 1000000000ukava --type "Text" --title "Example Proposal" --description "This is an example proposal" --gas auto --gas-adjustment 1.2 --from dev-wallet --gas-prices 0.01ukava -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# setup god's wallet
echo "${KAVA_TESTNET_GOD_MNEMONIC}" | kava keys add --recover god
@ -150,9 +167,13 @@ printf "original evm util module params\n %s" , "$originalEvmUtilParams"
# https://github.com/0glabs/0g-chain/pull/1556/files#diff-0bd6043650c708661f37bbe6fa5b29b52149e0ec0069103c3954168fc9f12612R900-R903
kava tx committee submit-proposal 1 "$proposalFileName" --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# vote on the proposal. this assumes no other committee proposal has ever been submitted (id=1)
kava tx committee vote 1 yes --gas 2000000 --gas-prices 0.01ukava --from god -y
sleep $AVG_SECONDS_BETWEEN_BLOCKS
# fetch current module params
updatedEvmUtilParams=$(curl https://api.app.internal.testnet.us-east.production.kava.io/kava/evmutil/v1beta1/params)
printf "updated evm util module params\n %s" , "$updatedEvmUtilParams"

View File

@ -1,5 +1,6 @@
name: Continuous Deployment (Internal Testnet)
# run after every successful CI job of new commits to the master branch
# if deploy version or config has changed
on:
workflow_run:
workflows: [Continuous Integration (Kava Master)]
@ -7,6 +8,23 @@ on:
- completed
jobs:
changed_files:
runs-on: ubuntu-latest
# define output for first job forwarding output of changedInternalTestnetConfig job
outputs:
changedInternalTestnetConfig: ${{ steps.changed-internal-testnet-config.outputs.any_changed }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # OR "2" -> To retrieve the preceding commit.
- name: Get all changed internal testnet files
id: changed-internal-testnet-config
uses: tj-actions/changed-files@v42
with:
# Avoid using single or double quotes for multiline patterns
files: |
ci/env/kava-internal-testnet/**
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
@ -14,8 +32,9 @@ jobs:
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
needs: [changed_files]
# only start cd pipeline if last ci run was successful
if: ${{ github.event.workflow_run.conclusion == 'success' }}
if: ${{ github.event.workflow_run.conclusion == 'success' && needs.changed_files.outputs.changedInternalTestnetConfig == 'true' }}
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1

View File

@ -0,0 +1,54 @@
name: Manual Deployment (Protonet)
# allow to be triggered manually
on: workflow_dispatch
jobs:
# in order:
# enter standby (prevents autoscaling group from killing node during deploy)
# stop kava
# take ebs + zfs snapshots
# download updated binary and genesis
# reset application database state (only done on internal testnet)
reset-chain-to-zero-state:
uses: ./.github/workflows/cd-reset-internal-testnet.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: reset-protonet-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
# start kava with new binary and genesis state on api, peer and seed nodes, place nodes in service once they start and are synched to live
start-chain-api:
uses: ./.github/workflows/cd-start-chain.yml
with:
aws-region: us-east-1
chain-id: proto_2221-17000
ssm-document-name: kava-testnet-internal-node-update
playbook-name: start-chain-api-playbook.yml
playbook-infrastructure-branch: master
secrets: inherit
needs: [reset-chain-to-zero-state]
# setup test and development accounts and balances, deploy contracts by calling the chain's api
seed-chain-state:
uses: ./.github/workflows/cd-seed-chain.yml
with:
chain-api-url: https://rpc.app.protonet.us-east.production.kava.io:443
chain-id: proto_2221-17000
seed-script-filename: seed-protonet.sh
erc20-deployer-network-name: protonet
genesis_validator_addresses: "kavavaloper14w4avgdvqrlpww6l5dhgj4egfn6ln7gmtp7r2m"
kava_version_filepath: ./ci/env/kava-protonet/KAVA.VERSION
secrets: inherit
needs: [start-chain-api]
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes
with:
aws-region: us-east-1
metric-name: kava.deploys.testnet.proto
namespace: Kava/ContinuousDeployment
secrets: inherit
needs: [seed-chain-state]

View File

@ -67,7 +67,6 @@ jobs:
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--node-states=Standby \
--wait-for-node-sync-after-upgrade=false
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}

View File

@ -35,12 +35,16 @@ jobs:
uses: actions/checkout@v4
with:
ref: master
- name: checkout version of kava used by network
- name: get desired version of network
id: kava-version
run: |
git pull -p
git checkout $(cat ${KAVA_VERSION_FILEPATH})
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
env:
KAVA_VERSION_FILEPATH: ${{ inputs.kava_version_filepath }}
- name: checkout version of kava used by network
uses: actions/checkout@v4
with:
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:
@ -68,7 +72,7 @@ jobs:
uses: actions/setup-node@v3
with:
cache: npm
node-version: 18
node-version-file: .tool-versions
cache-dependency-path: kava-bridge/contract/package.json
- name: "install ERC20 contract deployment dependencies"
run: "npm install"
@ -76,8 +80,8 @@ jobs:
- name: compile default erc20 contracts
run: make compile-contracts
working-directory: kava-bridge
- name: download seed script from master
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/master/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
- name: download seed script from current commit
run: wget https://raw.githubusercontent.com/Kava-Labs/kava/${GITHUB_SHA}/.github/scripts/${SEED_SCRIPT_FILENAME} && chmod +x ${SEED_SCRIPT_FILENAME}
working-directory: kava-bridge/contract
env:
SEED_SCRIPT_FILENAME: ${{ inputs.seed-script-filename }}

View File

@ -63,7 +63,6 @@ jobs:
--update-playbook-filename=$PLAYBOOK_NAME \
--chain-id=$CHAIN_ID \
--max-upgrade-batch-size=0 \
--node-states=Standby \
--wait-for-node-sync-after-upgrade=true
env:
SSM_DOCUMENT_NAME: ${{ inputs.ssm-document-name }}

View File

@ -35,6 +35,35 @@ jobs:
run: make test
- name: run e2e tests
run: make docker-build test-e2e
fuzz:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache-dependency-path: |
go.sum
- name: run fuzz tests
run: make test-fuzz
ibc-test:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version-file: tests/e2e-ibc/go.mod
cache-dependency-path: |
tests/e2e-ibc/go.sum
go.sum
- name: run ibc e2e tests
run: make test-ibc
validate-internal-testnet-genesis:
runs-on: ubuntu-latest
steps:
@ -44,16 +73,10 @@ jobs:
id: kava-version
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
- name: checkout repo from master
- name: checkout version of kava that will be deployed if this pr is merged
uses: actions/checkout@v4
with:
ref: master
- name: checkout version of kava that will be deployed if this pr is merged
run: |
git pull -p
git checkout $KAVA_VERSION
env:
KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
ref: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v4
with:

View File

@ -50,6 +50,17 @@ jobs:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
- name: Go Build Cache for Docker
uses: actions/cache@v3
with:
path: go-build-cache
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
- name: inject go-build-cache into docker
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
with:
cache-source: go-build-cache
# publish to docker hub, tag with short git hash
- name: Build and push (goleveldb)
uses: docker/build-push-action@v5
@ -89,6 +100,17 @@ jobs:
username: ${{ inputs.dockerhub-username }}
password: ${{ secrets.CI_DOCKERHUB_TOKEN }}
- name: Go Build Cache for Docker
uses: actions/cache@v3
with:
path: go-build-cache
key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }}
- name: inject go-build-cache into docker
uses: reproducible-containers/buildkit-cache-dance@v2.1.2
with:
cache-source: go-build-cache
# publish to docker hub, tag with short git hash
- name: Build and push (rocksdb)
uses: docker/build-push-action@v5

View File

@ -7,11 +7,25 @@ jobs:
uses: ./.github/workflows/proto.yml
golangci-lint:
runs-on: ubuntu-latest
permissions:
checks: write # allow write access to checks to allow the action to annotate code in the PR.
steps:
- uses: actions/checkout@v4
- name: golangci-lint
uses: reviewdog/action-golangci-lint@v2
- name: Checkout code
uses: actions/checkout@v4
with: { fetch-depth: 0 }
- name: Set up Go
uses: actions/setup-go@v4
with:
github_token: ${{ secrets.github_token }}
reporter: github-pr-review
golangci_lint_flags: --timeout 10m
go-version-file: go.mod
- name: Load Version
id: load-version
run: |
GOLANGCI_VERSION=$(cat .golangci-version)
REV=$(git merge-base origin/master HEAD)
echo "GOLANGCI_VERSION=$GOLANGCI_VERSION" >> $GITHUB_ENV
echo "REV=$REV" >> $GITHUB_ENV
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: ${{ env.GOLANGCI_VERSION }}
args: -v -c .golangci.yml --new-from-rev ${{ env.REV }}

View File

@ -29,7 +29,7 @@ jobs:
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
env:
ROCKSDB_VERSION: v8.1.1
ROCKSDB_VERSION: v8.10.0
- name: Build and upload release artifacts
run: bash ${GITHUB_WORKSPACE}/.github/scripts/publish-internal-release-artifacts.sh
env:
@ -45,6 +45,9 @@ jobs:
dockerhub-username: kavaops
extra-image-tag: master
secrets: inherit
rosetta:
uses: ./.github/workflows/ci-rosetta.yml
secrets: inherit
post-pipeline-metrics:
uses: ./.github/workflows/metric-pipeline.yml
if: always() # always run so we record metrics for failures and successes

27
.github/workflows/ci-pr-lint.yml vendored Normal file
View File

@ -0,0 +1,27 @@
# this workflow is responsible for ensuring quality titles are given to all PRs
# for PR checks to pass, the title must follow the Conventional Commits standard
# https://www.conventionalcommits.org/en/v1.0.0/
# this workflow was adapted from a similar workflow in https://github.com/cosmos/cosmos-sdk
name: "Lint PR Title"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
contents: read
jobs:
main:
permissions:
pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs
statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR
runs-on: ubuntu-latest
steps:
# https://github.com/marketplace/actions/semantic-pull-request
- uses: amannn/action-semantic-pull-request@v5.5.3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -4,9 +4,6 @@ on:
tags:
- "v[0-9]+.[0-9]+.[0-9]+*"
jobs:
# run per commit ci checks against released version
lint-checks:
uses: ./.github/workflows/ci-lint.yml
# run default ci checks against released version
default-checks:
uses: ./.github/workflows/ci-default.yml
@ -14,7 +11,7 @@ jobs:
# get the version tag that triggered this workflow
get-version-tag:
# prep version release only if all checks pass
needs: [lint-checks, default-checks]
needs: default-checks
runs-on: ubuntu-latest
outputs:
git-tag: ${{ steps.git-tag.outputs.tag }}

View File

@ -1,7 +1,7 @@
name: Continuous Integration (Rocksdb Build)
env:
ROCKSDB_VERSION: v8.1.1
ROCKSDB_VERSION: v8.10.0
on:
workflow_call:
@ -19,25 +19,3 @@ jobs:
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
- name: build application
run: make build COSMOS_BUILD_OPTIONS=rocksdb
test:
runs-on: ubuntu-latest
steps:
- name: install RocksDB dependencies
run: sudo apt-get update
&& sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
- name: install RocksDB as shared library
run: git clone https://github.com/facebook/rocksdb.git
&& cd rocksdb
&& git checkout $ROCKSDB_VERSION
&& sudo make -j$(nproc) install-shared
&& sudo ldconfig
- name: checkout repo from current commit
uses: actions/checkout@v4
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
- name: run unit tests
run: make test-rocksdb

16
.github/workflows/ci-rosetta.yml vendored Normal file
View File

@ -0,0 +1,16 @@
name: Dispatch run-rosetta-tests event to rosetta-kava
on:
workflow_call:
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Dispatch run-rosetta-tests event to rosetta-kava
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.KAVA_PUBLIC_GITHUB_ACCESS_TOKEN }}
repository: Kava-Labs/rosetta-kava
event-type: run-rosetta-tests
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

View File

@ -12,11 +12,11 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.20'
go-version: '1.21'
- name: Build
run: make build
run: sudo LINK_STATICALLY=true make build-release
- name: Rename file
run: mv ./out/linux/0gchaind ./out/linux/0gchaind-linux-${{ github.ref_name }}
run: sudo mv ./out/linux/0gchaind ./out/linux/0gchaind-linux-${{ github.ref_name }}
- name: Upload Release Asset
uses: softprops/action-gh-release@v2
with:

3
.gitignore vendored
View File

@ -31,6 +31,9 @@ out
# Ignore build cache dir
build/.cache
# Ignore make lint cache
build/.golangci-lint
# Ignore installed binaries
build/bin

1
.golangci-version Normal file
View File

@ -0,0 +1 @@
v1.59

130
.golangci.yml Normal file
View File

@ -0,0 +1,130 @@
run:
timeout: 20m # set maximum time allowed for the linter to run. If the linting process exceeds this duration, it will be terminated
modules-download-mode: readonly # Ensures that modules are not modified during the linting process
allow-parallel-runners: true # enables parallel execution of linters to speed up linting process
linters:
disable-all: true
enable:
- asasalint
- asciicheck
- bidichk
- bodyclose
- containedctx
- contextcheck
- decorder
- dogsled
# - dupl
# - dupword
- durationcheck
- errcheck
- errchkjson
- errname
- errorlint
# - exhaustive
- exportloopref
- funlen
- gci
- ginkgolinter
- gocheckcompilerdirectives
# - gochecknoglobals
# - gochecknoinits
- goconst
- gocritic
- godox
- gofmt
# - gofumpt
- goheader
- goimports
- mnd
# - gomodguard
- goprintffuncname
- gosec
- gosimple
- govet
- grouper
- importas
- ineffassign
# - interfacebloat
- lll
- loggercheck
- makezero
- mirror
- misspell
- musttag
# - nakedret
# - nestif
- nilerr
# - nilnil
# - noctx
- nolintlint
# - nonamedreturns
- nosprintfhostport
- prealloc
- predeclared
- promlinter
# - reassign
- revive
- rowserrcheck
- staticcheck
# - stylecheck
- tagalign
# - testpackage
# - thelper
# - tparallel
- typecheck
# - unconvert
- unparam
- unused
# - usestdlibvars
- wastedassign
# - whitespace
- wrapcheck
issues:
exclude-rules:
# Disable funlen for "func Test..." or func (suite *Suite) Test..." type functions
# These functions tend to be descriptive and exceed length limits.
- source: "^func (\\(.*\\) )?Test"
linters:
- funlen
linters-settings:
errcheck:
check-blank: true # check for assignments to the blank identifier '_' when errors are returned
check-type-assertions: false # check type assertion
errorlint:
check-generated: false # disabled linting of generated files
default-signifies-exhaustive: false # exhaustive handling of error types
exhaustive:
default-signifies-exhaustive: false # exhaustive handling of error types
gci:
sections: # defines the order of import sections
- standard
- default
- localmodule
goconst:
min-len: 3 # min length for string constants to be checked
min-occurrences: 3 # min occurrences of the same constant before it's flagged
godox:
keywords: # specific keywords to flag for further action
- BUG
- FIXME
- HACK
gosec:
exclude-generated: true
lll:
line-length: 120
misspell:
locale: US
ignore-words: expect
nolintlint:
allow-leading-space: false
require-explanation: true
require-specific: true
prealloc:
simple: true # enables simple preallocation checks
range-loops: true # enabled preallocation checks in range loops
for-loops: false # disables preallocation checks in for loops
unparam:
check-exported: true # checks exported functions and methods for unused params

16
.mockery.yaml Normal file
View File

@ -0,0 +1,16 @@
# Generate EXPECT() methods, type-safe methods to generate call expectations
with-expecter: true
# Generate mocks in adjacent mocks directory to the interfaces
dir: "{{.InterfaceDir}}/mocks"
mockname: "Mock{{.InterfaceName}}"
outpkg: "mocks"
filename: "Mock{{.InterfaceName}}.go"
packages:
github.com/0glabs/0g-chain/x/precisebank/types:
# package-specific config
config:
interfaces:
AccountKeeper:
BankKeeper:
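
This config generates type-safe mocks with EXPECT() builders for the x/precisebank keeper interfaces. A hedged usage sketch; the GetBalance method and its signature are assumptions about the mocked BankKeeper, so check the generated code under x/precisebank/types/mocks before copying:

```go
package types_test

import (
	"testing"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/0glabs/0g-chain/x/precisebank/types/mocks"
)

func TestMockBankKeeperSketch(t *testing.T) {
	// constructor name follows mockname: "Mock{{.InterfaceName}}"; it registers assertions against t
	bk := mocks.NewMockBankKeeper(t)
	addr := sdk.AccAddress("test_address_bytes__")

	// with-expecter: true generates the typed EXPECT() builder used here
	// instead of the stringly-typed .On("GetBalance", ...) form
	bk.EXPECT().
		GetBalance(mock.Anything, addr, "ukava").
		Return(sdk.NewInt64Coin("ukava", 1_000_000)).
		Once()

	got := bk.GetBalance(sdk.Context{}, addr, "ukava") // hypothetical method on the mocked interface
	require.Equal(t, sdk.NewInt64Coin("ukava", 1_000_000), got)
}
```
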

View File

@ -1,2 +1,2 @@
golang 1.20
nodejs 18.16.0
golang 1.21.9
nodejs 20.16.0

View File

@ -36,6 +36,29 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
## [v0.26.0]
### Features
- (precisebank) [#1906] Add new `x/precisebank` module with bank decimal extension for EVM usage.
- (cli) [#1922] Add `iavlviewer` CLI command for low-level iavl db debugging.
### Improvements
- (rocksdb) [#1903] Bump cometbft-db dependency for use with rocksdb v8.10.0
- (deps) [#1988] Bump cometbft to v0.37.9-kava.1
## [v0.26.0]
### Features
- (cli) [#1785] Add `shard` CLI command to support creating partitions of data for standalone nodes
- (cdp) [#1818] Add module param and logic for running x/cdp begin blocker every `n` blocks
- (cli) [#1804] Add `rocksdb compact` command for manual DB compaction of state or blockstore
- (cosmos-sdk) [#1811] [#1846] Upgrades app to cosmos-sdk v0.47.10 with iavl v1 support
- (validator-vesting) [#1832] Add grpc query service to replace removed legacy querier
- (incentive) [#1836] Update x/incentive cli to use grpc query client
- (ibc) [#1839] Add ibc packet forward middleware for ibc transfers
- (evmutil) [#1848] Update evm native conversion logic to handle bep3 assets
## [v0.25.0]
### Features
@ -43,11 +66,12 @@ Ref: https://keepachangelog.com/en/1.0.0/
- (community) [#1704] Add module params
- (community) [#1706] Add disable inflation upgrade
- (community) [#1745] Enable params update via governance with `MsgUpdateParams`
- (client) [#1784] Add Kava gRPC client
### Bug Fixes
- (ethermint) [#1788] Fixes issue where tracing a transaction could show its status as successful when isolated in simulation, even if the tx failed when executed on the chain due to an error such as exhausting the block gas meter
- (evmutil) [#1655] Initialize x/evmutil module account in InitGenesis
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
cosmos ledger app 2.34.12
## State Machine Breaking
@ -60,9 +84,19 @@ Ref: https://keepachangelog.com/en/1.0.0/
- (community) [#1755] Keep funds in `x/community` in `CommunityPoolLendWithdrawProposal` handler
- (staking) [#1761] Set validator minimum commission to 5% for all validators under 5%
## [v0.24.3]
### Bug Fixes
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
- (rocksdb) [#1767] Fix resolution of rocksdb database path introduced in v0.24.2
**Note**: There was a bug released as v0.24.2. The tag has been removed and the commit should not be used.
## [v0.24.1]
### Features
- (metrics) [#1668] Adds non-state breaking x/metrics module for custom telemetry.
- (metrics) [#1669] Add performance timing metrics to all Begin/EndBlockers
- (community) [#1751] Add `AnnualizedRewards` query endpoint
@ -306,6 +340,19 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
- [#257](https://github.com/Kava-Labs/kava/pulls/257) Include scripts to run
large-scale simulations remotely using aws-batch
[#1988]: https://github.com/Kava-Labs/kava/pull/1988
[#1922]: https://github.com/Kava-Labs/kava/pull/1922
[#1906]: https://github.com/Kava-Labs/kava/pull/1906
[#1903]: https://github.com/Kava-Labs/kava/pull/1903
[#1846]: https://github.com/Kava-Labs/kava/pull/1846
[#1848]: https://github.com/Kava-Labs/kava/pull/1848
[#1839]: https://github.com/Kava-Labs/kava/pull/1839
[#1836]: https://github.com/Kava-Labs/kava/pull/1836
[#1832]: https://github.com/Kava-Labs/kava/pull/1832
[#1811]: https://github.com/Kava-Labs/kava/pull/1811
[#1804]: https://github.com/Kava-Labs/kava/pull/1804
[#1785]: https://github.com/Kava-Labs/kava/pull/1785
[#1784]: https://github.com/Kava-Labs/kava/pull/1784
[#1770]: https://github.com/Kava-Labs/kava/pull/1770
[#1755]: https://github.com/Kava-Labs/kava/pull/1755
[#1761]: https://github.com/Kava-Labs/kava/pull/1761
@ -359,14 +406,13 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
[#750]: https://github.com/Kava-Labs/kava/pull/750
[#751]: https://github.com/Kava-Labs/kava/pull/751
[#780]: https://github.com/Kava-Labs/kava/pull/780
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.25.0...HEAD
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.25.0
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.24.1
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.23.2...v0.24.0
[unreleased]: https://github.com/Kava-Labs/kava/compare/v0.26.0...HEAD
[v0.26.0]: https://github.com/Kava-Labs/kava/compare/v0.25.0...v0.26.0
[v0.25.0]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.25.0
[v0.24.3]: https://github.com/Kava-Labs/kava/compare/v0.24.3...v0.24.1
[v0.24.1]: https://github.com/Kava-Labs/kava/compare/v0.24.1...v0.24.0
[v0.24.0]: https://github.com/Kava-Labs/kava/compare/v0.24.0...v0.23.2
[v0.23.2]: https://github.com/Kava-Labs/kava/compare/v0.23.1...v0.23.2
[v0.23.1]: https://github.com/Kava-Labs/kava/compare/v0.23.0...v0.23.1
[v0.23.0]: https://github.com/Kava-Labs/kava/compare/v0.21.1...v0.23.0
[v0.16.1]: https://github.com/Kava-Labs/kava/compare/v0.16.0...v0.16.1
[v0.16.0]: https://github.com/Kava-Labs/kava/compare/v0.15.2...v0.16.0

View File

@ -1,4 +1,4 @@
FROM golang:1.20-alpine AS build-env
FROM golang:1.21-alpine AS build-env
# Set up dependencies
# bash, jq, curl for debugging
@ -19,6 +19,15 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go version && go mod download
# Cosmwasm - Download correct libwasmvm version
RUN ARCH=$(uname -m) && WASMVM_VERSION=$(go list -m github.com/CosmWasm/wasmvm | sed 's/.* //') && \
wget https://github.com/CosmWasm/wasmvm/releases/download/$WASMVM_VERSION/libwasmvm_muslc.$ARCH.a \
-O /lib/libwasmvm.$ARCH.a && \
# verify checksum
wget https://github.com/CosmWasm/wasmvm/releases/download/$WASMVM_VERSION/checksums.txt -O /tmp/checksums.txt && \
sha256sum /lib/libwasmvm.$ARCH.a | grep $(cat /tmp/checksums.txt | grep libwasmvm_muslc.$ARCH | cut -d ' ' -f 1)
# Add source files
COPY . .
@ -27,6 +36,7 @@ COPY . .
# Mount go build and mod caches as container caches, persisted between builder invocations
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
LINK_STATICALLY=true \
make install
FROM alpine:3.15

42
Dockerfile-node Normal file
View File

@ -0,0 +1,42 @@
FROM --platform=linux/amd64 ubuntu:24.04
# Install dependencies
RUN apt-get update && \
apt-get install -y \
git \
sudo \
wget \
jq \
make \
gcc \
unzip && \
rm -rf /var/lib/apt/lists/*
# Install Go
RUN wget https://golang.org/dl/go1.22.5.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go1.22.5.linux-amd64.tar.gz && \
rm go1.22.5.linux-amd64.tar.gz
# Set Go environment variables
ENV GOPATH=/root/go
ENV PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
# Create Go workspace directory
RUN mkdir -p /root/go
WORKDIR /root
# https://docs.0g.ai/0g-doc/run-a-node/validator-node
RUN git clone -b v0.2.3 https://github.com/0glabs/0g-chain.git
RUN ./0g-chain/networks/testnet/install.sh
RUN 0gchaind config chain-id zgtendermint_16600-2
RUN 0gchaind init testnetnode --chain-id zgtendermint_16600-2
RUN rm ~/.0gchain/config/genesis.json
RUN wget -P ~/.0gchain/config https://github.com/0glabs/0g-chain/releases/download/v0.2.3/genesis.json
RUN 0gchaind validate-genesis
RUN sed -i 's|seeds = ""|seeds = "81987895a11f6689ada254c6b57932ab7ed909b6@54.241.167.190:26656,010fb4de28667725a4fef26cdc7f9452cc34b16d@54.176.175.48:26656,e9b4bc203197b62cc7e6a80a64742e752f4210d5@54.193.250.204:26656,68b9145889e7576b652ca68d985826abd46ad660@18.166.164.232:26656"|' $HOME/.0gchain/config/config.toml
ENTRYPOINT ["0gchaind", "start"]

View File

@ -1,23 +1,6 @@
FROM golang:1.20-bullseye AS chain-builder
FROM kava/rocksdb:v8.10.1-go1.21 AS kava-builder
# Set up dependencies
RUN apt-get update \
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
&& rm -rf /var/lib/apt/lists/*
# Set working directory for the build
WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v8.1.1
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \
&& cd rocksdb \
&& git checkout $ROCKSDB_VERSION \
&& make -j$(nproc) install-shared \
&& ldconfig
RUN apt-get update
WORKDIR /root/0gchain
# Copy dependency files first to facilitate dependency caching

22
Dockerfile-rocksdb-base Normal file
View File

@ -0,0 +1,22 @@
# published to https://hub.docker.com/repository/docker/kava/rocksdb/tags
# docker buildx build --platform linux/amd64,linux/arm64 -t kava/rocksdb:v8.10.1-go1.21 -f Dockerfile-rocksdb-base . --push
FROM golang:1.21-bullseye
# Set up dependencies
RUN apt-get update \
&& apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev \
&& rm -rf /var/lib/apt/lists/*
# Set working directory for the build
WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v8.10.0
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \
&& cd rocksdb \
&& git checkout $ROCKSDB_VERSION \
&& make -j$(nproc) install-shared \
&& ldconfig

View File

@ -6,6 +6,8 @@ BINARY_NAME := 0gchaind
MAIN_ENTRY := ./cmd/$(BINARY_NAME)
DOCKER_IMAGE_NAME := 0glabs/$(PROJECT_NAME)
GO_BIN ?= go
ARCH := $(shell uname -m)
WASMVM_VERSION := $(shell $(GO_BIN) list -m github.com/CosmWasm/wasmvm | sed 's/.* //')
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
GIT_COMMIT := $(shell git rev-parse HEAD)
@ -30,7 +32,7 @@ VERSION := $(GIT_COMMIT_SHORT)
VERSION_NUMBER := $(VERSION)
endif
TENDERMINT_VERSION := $(shell $(GO_BIN) list -m github.com/tendermint/tendermint | sed 's:.* ::')
TENDERMINT_VERSION := $(shell $(GO_BIN) list -m github.com/cometbft/cometbft | sed 's:.* ::')
COSMOS_SDK_VERSION := $(shell $(GO_BIN) list -m github.com/cosmos/cosmos-sdk | sed 's:.* ::')
.PHONY: print-git-info
@ -103,6 +105,8 @@ include $(BUILD_DIR)/deps.mk
include $(BUILD_DIR)/proto.mk
include $(BUILD_DIR)/proto-deps.mk
include $(BUILD_DIR)/lint.mk
#export GO111MODULE = on
# process build tags
build_tags = netgo
@ -149,7 +153,7 @@ ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=$(PROJECT_NAME) \
-X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION_NUMBER) \
-X github.com/cosmos/cosmos-sdk/version.Commit=$(GIT_COMMIT) \
-X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \
-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(TENDERMINT_VERSION)
-X github.com/cometbft/cometbft/version.TMCoreSemVer=$(TENDERMINT_VERSION)
# DB backend selection
ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
@ -174,6 +178,10 @@ endif
ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS)))
ldflags += -w -s
endif
ifeq ($(LINK_STATICALLY),true)
ldflags += -linkmode=external -extldflags "-Wl,-z,muldefs -static -lm"
endif
ldflags += $(LDFLAGS)
ldflags := $(strip $(ldflags))
@ -195,9 +203,21 @@ else
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/$(BINARY_NAME) $(MAIN_ENTRY)
endif
build-release: go.sum
wget -q https://github.com/CosmWasm/wasmvm/releases/download/$(WASMVM_VERSION)/libwasmvm_muslc.$(ARCH).a -O /lib/libwasmvm.$(ARCH).a
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/$(BINARY_NAME) $(MAIN_ENTRY)
build-linux: go.sum
LEDGER_ENABLED=false GOOS=linux GOARCH=amd64 $(MAKE) build
# build on rocksdb-backed kava on macOS with shared libs from brew
# this assumes you are on macOS & these deps have been installed with brew:
# rocksdb, snappy, lz4, and zstd
# use like `make build-rocksdb-brew COSMOS_BUILD_OPTIONS=rocksdb`
build-rocksdb-brew:
export CGO_CFLAGS := -I$(shell brew --prefix rocksdb)/include
export CGO_LDFLAGS := -L$(shell brew --prefix rocksdb)/lib -lrocksdb -lstdc++ -lm -lz -L$(shell brew --prefix snappy)/lib -L$(shell brew --prefix lz4)/lib -L$(shell brew --prefix zstd)/lib
install: go.sum
$(GO_BIN) install -mod=readonly $(BUILD_FLAGS) $(MAIN_ENTRY)
@ -224,13 +244,6 @@ link-check:
# TODO: replace kava in following line with project name
liche -r . --exclude "^http://127.*|^https://riot.im/app*|^http://kava-testnet*|^https://testnet-dex*|^https://kava3.data.kava.io*|^https://ipfs.io*|^https://apps.apple.com*|^https://kava.quicksync.io*"
lint:
golangci-lint run
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" | xargs gofmt -d -s
$(GO_BIN) mod verify
.PHONY: lint
format:
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs gofmt -w -s
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -name '*.pb.go' | xargs misspell -w
@ -255,11 +268,11 @@ build-docker-local-0gchain:
# Run a 4-node testnet locally
localnet-start: build-linux localnet-stop
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z $(DOCKER_IMAGE_NAME)-node testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
docker-compose up -d
@if ! [ -f build/node0/kvd/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/kvd:Z kava/kavanode testnet --v 4 -o . --starting-ip-address 192.168.10.2 --keyring-backend=test ; fi
$(DOCKER) compose up -d
localnet-stop:
docker-compose down
$(DOCKER) compose down
# Launch a new single validator chain
start:
@ -301,12 +314,14 @@ test-basic: test
test-e2e: docker-build
$(GO_BIN) test -failfast -count=1 -v ./tests/e2e/...
# run interchaintest tests (./tests/e2e-ibc)
test-ibc: docker-build
cd tests/e2e-ibc && KAVA_TAG=local $(GO_BIN) test -timeout 10m .
.PHONY: test-ibc
test:
@$(GO_BIN) test $$($(GO_BIN) list ./... | grep -v 'contrib' | grep -v 'tests/e2e')
test-rocksdb:
@go test -tags=rocksdb $(MAIN_ENTRY)/opendb
# Run cli integration tests
# `-p 4` to use 4 cores, `-tags cli_test` to tell $(GO_BIN) not to ignore the cli package
# These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set.
@ -317,6 +332,18 @@ test-cli: build
test-migrate:
@$(GO_BIN) test -v -count=1 ./migrate/...
# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169
ifeq ($(OS_FAMILY),Darwin)
FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic
endif
test-fuzz:
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzMintCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzBurnCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzSendCoins ./x/precisebank/keeper
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_NonZeroRemainder ./x/precisebank/types
@$(GO_BIN) test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz=FuzzGenesisStateValidate_ZeroRemainder ./x/precisebank/types
# Kick start lots of sims on an AWS cluster.
# This submits an AWS Batch job to run a lot of sims, each within a docker image. Results are uploaded to S3
start-remote-sims:
@ -327,13 +354,14 @@ start-remote-sims:
# submit an array job on AWS Batch, using 1000 seeds, spot instances
aws batch submit-job \
-—job-name "master-$(VERSION)" \
-—job-queue “simulation-1-queue-spot" \
-—job-queue "simulation-1-queue-spot" \
-—array-properties size=1000 \
-—job-definition $(BINARY_NAME)-sim-master \
-—container-override environment=[{SIM_NAME=master-$(VERSION)}]
update-kvtool:
git submodule update
git submodule init || true
git submodule update --remote
cd tests/e2e/kvtool && make install
.PHONY: all build-linux install clean build test test-cli test-all test-rest test-basic start-remote-sims
.PHONY: all build-linux install build test test-cli test-all test-rest test-basic test-fuzz start-remote-sims

View File

@ -17,13 +17,13 @@ Reference implementation of 0G Chain, the first modular AI chain. Built using th
<!---
## Mainnet
The current recommended version of the software for mainnet is [v0.25.0](https://github.com/Kava-Labs/kava/releases/tag/v0.25.0) The master branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
The current recommended version of the software for mainnet is [v0.26.2](https://github.com/Kava-Labs/kava/releases/tag/v0.26.2) The `master` branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
### Installation and Setup
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/participate/validator-node).
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/nodes-and-validators/validator-node).
```bash
git checkout v0.25.0
git checkout v0.26.2
make install
```
@ -49,7 +49,7 @@ If you have technical questions or concerns, ask a developer or community member
## Security
If you find a security issue, please report it to security [at] kava.io. Depending on the verification and severity, a bug bounty may be available.
If you find a security issue, please report it to security [at] kavalabs.io. Depending on the verification and severity, a bug bounty may be available.
## License

View File

@ -11,15 +11,15 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
dbm "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
"cosmossdk.io/simapp"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/store"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
"github.com/cosmos/cosmos-sdk/x/auth"
@ -275,7 +275,7 @@ func TestAppStateDeterminism(t *testing.T) {
config.ExportParamsPath = ""
config.OnOperation = false
config.AllInvariants = false
config.ChainID = helpers.SimAppChainID
config.ChainID = sims.SimAppChainID
numTimesToRunPerSeed := 2
appHashList := make([]json.RawMessage, numTimesToRunPerSeed)

View File

@ -11,6 +11,10 @@ import (
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
abci "github.com/cometbft/cometbft/abci/types"
tmbytes "github.com/cometbft/cometbft/libs/bytes"
ctypes "github.com/cometbft/cometbft/rpc/core/types"
jsonrpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types"
"github.com/cosmos/cosmos-sdk/client/context"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/rest"
@ -19,10 +23,6 @@ import (
"github.com/gorilla/mux"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
tmbytes "github.com/tendermint/tendermint/libs/bytes"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
jsonrpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)
type SimulateRequestTestSuite struct {

View File

@ -5,16 +5,16 @@ import (
"runtime/debug"
errorsmod "cosmossdk.io/errors"
tmlog "github.com/cometbft/cometbft/libs/log"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
authante "github.com/cosmos/cosmos-sdk/x/auth/ante"
authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
ibcante "github.com/cosmos/ibc-go/v6/modules/core/ante"
ibckeeper "github.com/cosmos/ibc-go/v6/modules/core/keeper"
ibcante "github.com/cosmos/ibc-go/v7/modules/core/ante"
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
evmante "github.com/evmos/ethermint/app/ante"
evmtypes "github.com/evmos/ethermint/x/evm/types"
tmlog "github.com/tendermint/tendermint/libs/log"
)
// HandlerOptions extend the SDK's AnteHandler options by requiring the IBC

View File

@ -7,9 +7,13 @@ import (
"time"
sdkmath "cosmossdk.io/math"
tmdb "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
@ -17,9 +21,6 @@ import (
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmdb "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
@ -57,10 +58,11 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
nil,
encodingConfig,
opts,
baseapp.SetChainID(app.TestChainId),
),
}
chainID := "kavatest_1-1"
chainID := app.TestChainId
tApp = tApp.InitializeFromGenesisStatesWithTimeAndChainID(
time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC),
chainID,
@ -107,7 +109,7 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
stdTx, err := helpers.GenSignedMockTx(
stdTx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
encodingConfig.TxConfig,
[]sdk.Msg{
@ -118,7 +120,7 @@ func TestAppAnteHandler_AuthorizedMempool(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
chainID,
[]uint64{0},
[]uint64{0}, // fixed sequence numbers will cause tests to fail sig verification if the same address is used twice
@ -210,7 +212,7 @@ func TestAppAnteHandler_RejectMsgsInAuthz(t *testing.T) {
return msg
}
chainID := "kavatest_1-1"
chainID := app.TestChainId
encodingConfig := app.MakeEncodingConfig()
testcases := []struct {
@ -239,12 +241,12 @@ func TestAppAnteHandler_RejectMsgsInAuthz(t *testing.T) {
chainID,
)
stdTx, err := helpers.GenSignedMockTx(
stdTx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
encodingConfig.TxConfig,
[]sdk.Msg{tc.msg},
sdk.NewCoins(), // no fee
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
chainID,
[]uint64{0},
[]uint64{0},

View File
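For tests that still need a signed mock transaction after this migration, the drop-in replacement is testutil/sims.GenSignedMockTx, which keeps the argument order of the removed simapp/helpers version. A minimal sketch, assuming a throwaway secp256k1 key and the app's encoding config; the test name, key and message are illustrative, not part of this change set:

package ante_test

import (
	"math/rand"
	"testing"
	"time"

	"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
	"github.com/cosmos/cosmos-sdk/testutil/sims"
	sdk "github.com/cosmos/cosmos-sdk/types"
	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
	"github.com/stretchr/testify/require"

	"github.com/0glabs/0g-chain/app"
)

func TestGenSignedMockTxSketch(t *testing.T) {
	encodingConfig := app.MakeEncodingConfig()
	priv := secp256k1.GenPrivKey()
	addr := sdk.AccAddress(priv.PubKey().Address())

	// sims.GenSignedMockTx keeps the old helpers signature: rand source,
	// tx config, msgs, fee, gas, chain id, account numbers, sequences,
	// and the signing private keys.
	tx, err := sims.GenSignedMockTx(
		rand.New(rand.NewSource(time.Now().UnixNano())),
		encodingConfig.TxConfig,
		[]sdk.Msg{
			banktypes.NewMsgSend(addr, addr, sdk.NewCoins()),
		},
		sdk.NewCoins(),       // no fee
		sims.DefaultGenTxGas, // replaces helpers.DefaultGenTxGas
		app.TestChainId,
		[]uint64{0},
		[]uint64{0},
		priv,
	)
	require.NoError(t, err)
	require.NotNil(t, tx)
}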

@ -5,7 +5,7 @@ import (
"testing"
"time"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/stretchr/testify/require"
@ -39,7 +39,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_NotCheckTx(t *testing.T) {
fetcher := mockAddressFetcher(testAddresses[1:]...)
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
tx, err := helpers.GenSignedMockTx(
tx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
@ -50,7 +50,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_NotCheckTx(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},
@ -74,7 +74,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Pass(t *testing.T) {
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
tx, err := helpers.GenSignedMockTx(
tx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
@ -90,7 +90,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Pass(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0, 123},
[]uint64{0, 123},
@ -115,7 +115,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Reject(t *testing.T) {
decorator := ante.NewAuthenticatedMempoolDecorator(fetcher)
tx, err := helpers.GenSignedMockTx(
tx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
@ -126,7 +126,7 @@ func TestAuthenticatedMempoolDecorator_AnteHandle_Reject(t *testing.T) {
),
},
sdk.NewCoins(), // no fee
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},

View File

@ -5,7 +5,7 @@ import (
"testing"
"time"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/x/authz"
@ -213,12 +213,12 @@ func TestAuthzLimiterDecorator(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tx, err := helpers.GenSignedMockTx(
tx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
tc.msgs,
sdk.NewCoins(),
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},

View File

@ -9,7 +9,7 @@ import (
"github.com/cosmos/cosmos-sdk/client"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/tx/signing"
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
@ -20,6 +20,11 @@ import (
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/crypto/tmhash"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmversion "github.com/cometbft/cometbft/proto/tendermint/version"
"github.com/cometbft/cometbft/version"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/evmos/ethermint/ethereum/eip712"
"github.com/evmos/ethermint/tests"
@ -27,11 +32,6 @@ import (
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/stretchr/testify/suite"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
"github.com/tendermint/tendermint/version"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/chaincfg"
@ -42,7 +42,7 @@ import (
)
const (
ChainID = "kavatest_1-1"
ChainID = app.TestChainId
USDCCoinDenom = "erc20/usdc"
USDCCDPType = "erc20-usdc"
)
@ -137,6 +137,7 @@ func (suite *EIP712TestSuite) createTestEIP712CosmosTxBuilder(
func (suite *EIP712TestSuite) SetupTest() {
tApp := app.NewTestApp()
suite.tApp = tApp
cdc := tApp.AppCodec()
suite.evmutilKeeper = tApp.GetEvmutilKeeper()
@ -290,6 +291,11 @@ func (suite *EIP712TestSuite) SetupTest() {
)
suite.usdcEVMAddr = pair.GetAddress()
// update consensus params
cParams := tApp.GetConsensusParams(suite.ctx)
cParams.Block.MaxGas = sims.DefaultGenTxGas * 20
tApp.StoreConsensusParams(suite.ctx, cParams)
// Add a contract to evmutil conversion pair
evmutilParams := suite.evmutilKeeper.GetParams(suite.ctx)
evmutilParams.EnabledConversionPairs =
@ -399,7 +405,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
// usdxToMintAmt: 99,
// },
{
name: "fails when convertion more erc20 usdc than balance",
name: "fails when conversion more erc20 usdc than balance",
usdcDepositAmt: 51_000,
usdxToMintAmt: 100,
errMsg: "transfer amount exceeds balance",
@ -455,7 +461,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
var option *codectypes.Any
option, _ = codectypes.NewAnyWithValue(&etherminttypes.ExtensionOptionsWeb3Tx{
FeePayer: suite.testAddr.String(),
TypedDataChainID: 1,
TypedDataChainID: 2221,
FeePayerSig: []byte("sig"),
})
builder, _ := txBuilder.(authtx.ExtensionOptionsTxBuilder)
@ -484,7 +490,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
updateTx: func(txBuilder client.TxBuilder, msgs []sdk.Msg) client.TxBuilder {
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
return suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, "kavatest_12-1", uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
suite.testAddr, suite.testPrivKey, "kavatest_12-1", uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
)
},
},
@ -497,7 +503,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
updateTx: func(txBuilder client.TxBuilder, msgs []sdk.Msg) client.TxBuilder {
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
return suite.createTestEIP712CosmosTxBuilder(
suite.testAddr2, suite.testPrivKey2, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
suite.testAddr2, suite.testPrivKey2, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
)
},
},
@ -525,7 +531,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx() {
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
txBuilder := suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, msgs,
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, msgs,
)
if tc.updateTx != nil {
txBuilder = tc.updateTx(txBuilder, msgs)
@ -599,7 +605,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx_DepositAndWithdraw() {
// deliver deposit msg
gasAmt := sdk.NewCoins(chaincfg.MakeCoinForGasDenom(20))
txBuilder := suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, depositMsgs,
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, depositMsgs,
)
txBytes, err := encodingConfig.TxConfig.TxEncoder()(txBuilder.GetTx())
suite.Require().NoError(err)
@ -633,7 +639,7 @@ func (suite *EIP712TestSuite) TestEIP712Tx_DepositAndWithdraw() {
// deliver withdraw msg
txBuilder = suite.createTestEIP712CosmosTxBuilder(
suite.testAddr, suite.testPrivKey, ChainID, uint64(helpers.DefaultGenTxGas*10), gasAmt, withdrawMsgs,
suite.testAddr, suite.testPrivKey, ChainID, uint64(sims.DefaultGenTxGas*10), gasAmt, withdrawMsgs,
)
txBytes, err = encodingConfig.TxConfig.TxEncoder()(txBuilder.GetTx())
suite.Require().NoError(err)

View File
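A side note on the chain ids used in this suite: the EIP-712 typed data is signed against the EVM chain id, which ethermint derives from the Cosmos chain id string. The sketch below uses ethermint's types.ParseChainID (the same parser the EVM module relies on) against the new test chain id; it is a hedged illustration of that derivation, not code from this change set:

package app_test

import (
	"testing"

	ethermint "github.com/evmos/ethermint/types"
	"github.com/stretchr/testify/require"
)

// TestParseEVMChainIDSketch shows how the EIP-155 chain id used for EIP-712
// signing is derived from a Cosmos chain id such as zgchain_8888-1.
func TestParseEVMChainIDSketch(t *testing.T) {
	evmChainID, err := ethermint.ParseChainID("zgchain_8888-1")
	require.NoError(t, err)
	require.Equal(t, int64(8888), evmChainID.Int64())
}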

@ -4,12 +4,12 @@ import (
"strings"
"testing"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmtime "github.com/cometbft/cometbft/types/time"
sdk "github.com/cosmos/cosmos-sdk/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/ante"

View File

@ -7,7 +7,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
vesting "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
@ -73,14 +73,14 @@ func TestVestingMempoolDecorator_MsgCreateVestingAccount_Unauthorized(t *testing
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tx, err := helpers.GenSignedMockTx(
tx, err := sims.GenSignedMockTx(
rand.New(rand.NewSource(time.Now().UnixNano())),
txConfig,
[]sdk.Msg{
tt.msg,
},
sdk.NewCoins(),
helpers.DefaultGenTxGas,
sims.DefaultGenTxGas,
"testing-chain-id",
[]uint64{0},
[]uint64{0},

View File

@ -5,6 +5,10 @@ import (
"io"
"net/http"
dbm "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
tmjson "github.com/cometbft/cometbft/libs/json"
tmlog "github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
nodeservice "github.com/cosmos/cosmos-sdk/client/grpc/node"
@ -20,6 +24,7 @@ import (
"github.com/cosmos/cosmos-sdk/version"
"github.com/cosmos/cosmos-sdk/x/auth"
authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper"
authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation"
authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
"github.com/cosmos/cosmos-sdk/x/auth/vesting"
@ -34,11 +39,13 @@ import (
"github.com/cosmos/cosmos-sdk/x/capability"
capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper"
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
consensus "github.com/cosmos/cosmos-sdk/x/consensus"
consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper"
consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
"github.com/cosmos/cosmos-sdk/x/crisis"
crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper"
crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
distr "github.com/cosmos/cosmos-sdk/x/distribution"
distrclient "github.com/cosmos/cosmos-sdk/x/distribution/client"
distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper"
distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
"github.com/cosmos/cosmos-sdk/x/evidence"
@ -70,16 +77,25 @@ import (
upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client"
upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
transfer "github.com/cosmos/ibc-go/v6/modules/apps/transfer"
ibctransferkeeper "github.com/cosmos/ibc-go/v6/modules/apps/transfer/keeper"
ibctransfertypes "github.com/cosmos/ibc-go/v6/modules/apps/transfer/types"
ibc "github.com/cosmos/ibc-go/v6/modules/core"
ibcclient "github.com/cosmos/ibc-go/v6/modules/core/02-client"
ibcclientclient "github.com/cosmos/ibc-go/v6/modules/core/02-client/client"
ibcclienttypes "github.com/cosmos/ibc-go/v6/modules/core/02-client/types"
porttypes "github.com/cosmos/ibc-go/v6/modules/core/05-port/types"
ibchost "github.com/cosmos/ibc-go/v6/modules/core/24-host"
ibckeeper "github.com/cosmos/ibc-go/v6/modules/core/keeper"
"github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward"
packetforwardkeeper "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward/keeper"
packetforwardtypes "github.com/cosmos/ibc-apps/middleware/packet-forward-middleware/v7/packetforward/types"
ibcwasm "github.com/cosmos/ibc-go/modules/light-clients/08-wasm"
ibcwasmkeeper "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/keeper"
ibcwasmtypes "github.com/cosmos/ibc-go/modules/light-clients/08-wasm/types"
transfer "github.com/cosmos/ibc-go/v7/modules/apps/transfer"
ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper"
ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
ibc "github.com/cosmos/ibc-go/v7/modules/core"
ibcclient "github.com/cosmos/ibc-go/v7/modules/core/02-client"
ibcclientclient "github.com/cosmos/ibc-go/v7/modules/core/02-client/client"
ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
ibcporttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types"
ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported"
ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper"
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
"github.com/ethereum/go-ethereum/core/vm"
evmante "github.com/evmos/ethermint/app/ante"
ethermintconfig "github.com/evmos/ethermint/server/config"
"github.com/evmos/ethermint/x/evm"
@ -90,11 +106,6 @@ import (
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
"github.com/gorilla/mux"
abci "github.com/tendermint/tendermint/abci/types"
tmjson "github.com/tendermint/tendermint/libs/json"
tmlog "github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/app/ante"
chainparams "github.com/0glabs/0g-chain/app/params"
"github.com/0glabs/0g-chain/chaincfg"
@ -119,6 +130,9 @@ import (
issuance "github.com/0glabs/0g-chain/x/issuance"
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
"github.com/0glabs/0g-chain/x/precisebank"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
pricefeed "github.com/0glabs/0g-chain/x/pricefeed"
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
@ -126,14 +140,13 @@ import (
validatorvestingrest "github.com/0glabs/0g-chain/x/validator-vesting/client/rest"
validatorvestingtypes "github.com/0glabs/0g-chain/x/validator-vesting/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
)
var (
// ModuleBasics manages simple versions of full app modules.
// It's used for things such as codec registration and genesis file verification.
ModuleBasics = module.NewBasicManager(
genutil.AppModuleBasic{},
genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator),
auth.AppModuleBasic{},
bank.AppModuleBasic{},
capability.AppModuleBasic{},
@ -141,7 +154,6 @@ var (
distr.AppModuleBasic{},
gov.NewAppModuleBasic([]govclient.ProposalHandler{
paramsclient.ProposalHandler,
distrclient.ProposalHandler,
upgradeclient.LegacyProposalHandler,
upgradeclient.LegacyCancelProposalHandler,
ibcclientclient.UpdateClientProposalHandler,
@ -152,6 +164,9 @@ var (
crisis.AppModuleBasic{},
slashing.AppModuleBasic{},
ibc.AppModuleBasic{},
ibctm.AppModuleBasic{},
solomachine.AppModuleBasic{},
packetforward.AppModuleBasic{},
upgrade.AppModuleBasic{},
evidence.AppModuleBasic{},
authzmodule.AppModuleBasic{},
@ -166,8 +181,11 @@ var (
validatorvesting.AppModuleBasic{},
evmutil.AppModuleBasic{},
mint.AppModuleBasic{},
precisebank.AppModuleBasic{},
council.AppModuleBasic{},
dasigners.AppModuleBasic{},
consensus.AppModuleBasic{},
ibcwasm.AppModuleBasic{},
)
// module account permissions
@ -185,13 +203,13 @@ var (
issuancetypes.ModuleAccountName: {authtypes.Minter, authtypes.Burner},
bep3types.ModuleName: {authtypes.Burner, authtypes.Minter},
minttypes.ModuleName: {authtypes.Minter},
precisebanktypes.ModuleName: {authtypes.Minter, authtypes.Burner}, // used for reserve account to back fractional amounts
}
)
// Verify app interface at compile time
var (
_ servertypes.Application = (*App)(nil)
_ servertypes.ApplicationQueryService = (*App)(nil)
_ servertypes.Application = (*App)(nil)
)
// Options bundles several configuration params for an App.
@ -227,31 +245,35 @@ type App struct {
memKeys map[string]*storetypes.MemoryStoreKey
// keepers from all the modules
accountKeeper authkeeper.AccountKeeper
bankKeeper bankkeeper.Keeper
capabilityKeeper *capabilitykeeper.Keeper
stakingKeeper stakingkeeper.Keeper
distrKeeper distrkeeper.Keeper
govKeeper govkeeper.Keeper
paramsKeeper paramskeeper.Keeper
authzKeeper authzkeeper.Keeper
crisisKeeper crisiskeeper.Keeper
slashingKeeper slashingkeeper.Keeper
ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
evmKeeper *evmkeeper.Keeper
evmutilKeeper evmutilkeeper.Keeper
feeMarketKeeper feemarketkeeper.Keeper
upgradeKeeper upgradekeeper.Keeper
evidenceKeeper evidencekeeper.Keeper
transferKeeper ibctransferkeeper.Keeper
CouncilKeeper councilkeeper.Keeper
issuanceKeeper issuancekeeper.Keeper
bep3Keeper bep3keeper.Keeper
pricefeedKeeper pricefeedkeeper.Keeper
committeeKeeper committeekeeper.Keeper
vestingKeeper vestingkeeper.VestingKeeper
mintKeeper mintkeeper.Keeper
dasignersKeeper dasignerskeeper.Keeper
accountKeeper authkeeper.AccountKeeper
bankKeeper bankkeeper.Keeper
capabilityKeeper *capabilitykeeper.Keeper
stakingKeeper *stakingkeeper.Keeper
distrKeeper distrkeeper.Keeper
govKeeper govkeeper.Keeper
paramsKeeper paramskeeper.Keeper
authzKeeper authzkeeper.Keeper
crisisKeeper crisiskeeper.Keeper
slashingKeeper slashingkeeper.Keeper
ibcWasmClientKeeper ibcwasmkeeper.Keeper
ibcKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly
packetForwardKeeper *packetforwardkeeper.Keeper
evmKeeper *evmkeeper.Keeper
evmutilKeeper evmutilkeeper.Keeper
feeMarketKeeper feemarketkeeper.Keeper
upgradeKeeper upgradekeeper.Keeper
evidenceKeeper evidencekeeper.Keeper
transferKeeper ibctransferkeeper.Keeper
CouncilKeeper councilkeeper.Keeper
issuanceKeeper issuancekeeper.Keeper
bep3Keeper bep3keeper.Keeper
pricefeedKeeper pricefeedkeeper.Keeper
committeeKeeper committeekeeper.Keeper
vestingKeeper vestingkeeper.VestingKeeper
mintKeeper mintkeeper.Keeper
dasignersKeeper dasignerskeeper.Keeper
consensusParamsKeeper consensusparamkeeper.Keeper
precisebankKeeper precisebankkeeper.Keeper
// make scoped keepers public for test purposes
ScopedIBCKeeper capabilitykeeper.ScopedKeeper
@ -291,8 +313,8 @@ func NewApp(
keys := sdk.NewKVStoreKeys(
authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
distrtypes.StoreKey, slashingtypes.StoreKey,
govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey,
distrtypes.StoreKey, slashingtypes.StoreKey, packetforwardtypes.StoreKey,
govtypes.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey,
upgradetypes.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey,
evmtypes.StoreKey, feemarkettypes.StoreKey, authzkeeper.StoreKey,
capabilitytypes.StoreKey,
@ -302,12 +324,15 @@ func NewApp(
counciltypes.StoreKey,
dasignerstypes.StoreKey,
vestingtypes.StoreKey,
consensusparamtypes.StoreKey, crisistypes.StoreKey, precisebanktypes.StoreKey,
ibcwasmtypes.StoreKey,
)
tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey)
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
// Authority for gov proposals, using the x/gov module account address
govAuthorityAddr := authtypes.NewModuleAddress(govtypes.ModuleName)
govAuthAddr := authtypes.NewModuleAddress(govtypes.ModuleName)
govAuthAddrStr := govAuthAddr.String()
app := &App{
BaseApp: bApp,
@ -336,18 +361,20 @@ func NewApp(
issuanceSubspace := app.paramsKeeper.Subspace(issuancetypes.ModuleName)
bep3Subspace := app.paramsKeeper.Subspace(bep3types.ModuleName)
pricefeedSubspace := app.paramsKeeper.Subspace(pricefeedtypes.ModuleName)
ibcSubspace := app.paramsKeeper.Subspace(ibchost.ModuleName)
ibcSubspace := app.paramsKeeper.Subspace(ibcexported.ModuleName)
ibctransferSubspace := app.paramsKeeper.Subspace(ibctransfertypes.ModuleName)
packetforwardSubspace := app.paramsKeeper.Subspace(packetforwardtypes.ModuleName).WithKeyTable(packetforwardtypes.ParamKeyTable())
feemarketSubspace := app.paramsKeeper.Subspace(feemarkettypes.ModuleName)
evmSubspace := app.paramsKeeper.Subspace(evmtypes.ModuleName)
evmutilSubspace := app.paramsKeeper.Subspace(evmutiltypes.ModuleName)
mintSubspace := app.paramsKeeper.Subspace(minttypes.ModuleName)
bApp.SetParamStore(
app.paramsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()),
)
// set the BaseApp's parameter store
app.consensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, keys[consensusparamtypes.StoreKey], govAuthAddrStr)
bApp.SetParamStore(&app.consensusParamsKeeper)
app.capabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey])
scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibchost.ModuleName)
scopedIBCKeeper := app.capabilityKeeper.ScopeToModule(ibcexported.ModuleName)
scopedTransferKeeper := app.capabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName)
app.capabilityKeeper.Seal()
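To illustrate the param-store change above: the BaseApp parameter store is now backed by the x/consensus keeper instead of a legacy params subspace, so consensus parameters are changed post-genesis through the module's governance message rather than a param-change proposal. A minimal sketch of that message, using CometBFT defaults purely as placeholder values:

package app

import (
	tmtypes "github.com/cometbft/cometbft/types"
	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
	govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
)

// newUpdateConsensusParamsMsg sketches the x/consensus MsgUpdateParams that
// replaces direct writes to the old baseapp param store. The block, evidence
// and validator values here are CometBFT defaults, not the chain's settings.
func newUpdateConsensusParamsMsg() *consensusparamtypes.MsgUpdateParams {
	defaults := tmtypes.DefaultConsensusParams().ToProto()
	return &consensusparamtypes.MsgUpdateParams{
		Authority: authtypes.NewModuleAddress(govtypes.ModuleName).String(),
		Block:     defaults.Block,
		Evidence:  defaults.Evidence,
		Validator: defaults.Validator,
	}
}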
@ -355,17 +382,17 @@ func NewApp(
app.accountKeeper = authkeeper.NewAccountKeeper(
appCodec,
keys[authtypes.StoreKey],
authSubspace,
authtypes.ProtoBaseAccount,
mAccPerms,
sdk.GetConfig().GetBech32AccountAddrPrefix(),
govAuthAddrStr,
)
app.bankKeeper = bankkeeper.NewBaseKeeper(
appCodec,
keys[banktypes.StoreKey],
app.accountKeeper,
bankSubspace,
app.loadBlockedMaccAddrs(),
govAuthAddrStr,
)
app.vestingKeeper = vestingkeeper.NewVestingKeeper(app.accountKeeper, app.bankKeeper, keys[vestingtypes.StoreKey])
@ -375,7 +402,7 @@ func NewApp(
app.accountKeeper,
app.bankKeeper,
app.vestingKeeper,
stakingSubspace,
govAuthAddrStr,
)
app.authzKeeper = authzkeeper.NewKeeper(
keys[authzkeeper.StoreKey],
@ -386,52 +413,68 @@ func NewApp(
app.distrKeeper = distrkeeper.NewKeeper(
appCodec,
keys[distrtypes.StoreKey],
distrSubspace,
app.accountKeeper,
app.bankKeeper,
&app.stakingKeeper,
app.stakingKeeper,
authtypes.FeeCollectorName,
govAuthAddrStr,
)
app.slashingKeeper = slashingkeeper.NewKeeper(
appCodec,
app.legacyAmino,
keys[slashingtypes.StoreKey],
&app.stakingKeeper,
slashingSubspace,
app.stakingKeeper,
govAuthAddrStr,
)
app.crisisKeeper = crisiskeeper.NewKeeper(
crisisSubspace,
app.crisisKeeper = *crisiskeeper.NewKeeper(
appCodec,
keys[crisistypes.StoreKey],
options.InvariantCheckPeriod,
app.bankKeeper,
authtypes.FeeCollectorName,
govAuthAddrStr,
)
app.upgradeKeeper = upgradekeeper.NewKeeper(
app.upgradeKeeper = *upgradekeeper.NewKeeper(
options.SkipUpgradeHeights,
keys[upgradetypes.StoreKey],
appCodec,
homePath,
app.BaseApp,
govAuthorityAddr.String(),
govAuthAddrStr,
)
app.evidenceKeeper = *evidencekeeper.NewKeeper(
appCodec,
keys[evidencetypes.StoreKey],
&app.stakingKeeper,
app.stakingKeeper,
app.slashingKeeper,
)
app.ibcKeeper = ibckeeper.NewKeeper(
appCodec,
keys[ibchost.StoreKey],
keys[ibcexported.StoreKey],
ibcSubspace,
app.stakingKeeper,
app.upgradeKeeper,
scopedIBCKeeper,
)
app.ibcWasmClientKeeper = ibcwasmkeeper.NewKeeperWithConfig(
appCodec,
keys[ibcwasmtypes.StoreKey],
app.ibcKeeper.ClientKeeper,
authtypes.NewModuleAddress(govtypes.ModuleName).String(),
ibcwasmtypes.WasmConfig{
DataDir: "ibc_08-wasm",
SupportedCapabilities: "iterator,stargate",
ContractDebugMode: false,
},
app.GRPCQueryRouter(),
)
// Create Ethermint keepers
app.feeMarketKeeper = feemarketkeeper.NewKeeper(
appCodec,
govAuthorityAddr,
govAuthAddr,
keys[feemarkettypes.StoreKey],
tkeys[feemarkettypes.TransientKey],
feemarketSubspace,
@ -445,9 +488,15 @@ func NewApp(
app.accountKeeper,
)
evmBankKeeper := evmutilkeeper.NewEvmBankKeeper(app.evmutilKeeper, app.bankKeeper, app.accountKeeper)
app.precisebankKeeper = precisebankkeeper.NewKeeper(
app.appCodec,
keys[precisebanktypes.StoreKey],
app.bankKeeper,
app.accountKeeper,
)
// dasigners keeper
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper)
app.dasignersKeeper = dasignerskeeper.NewKeeper(keys[dasignerstypes.StoreKey], appCodec, app.stakingKeeper, govAuthAddrStr)
// precompiles
precompiles := make(map[common.Address]vm.PrecompiledContract)
daSignersPrecompile, err := dasignersprecompile.NewDASignersPrecompile(app.dasignersKeeper)
@ -455,35 +504,63 @@ func NewApp(
panic("initialize precompile failed")
}
precompiles[daSignersPrecompile.Address()] = daSignersPrecompile
// evm keeper
app.evmKeeper = evmkeeper.NewKeeper(
appCodec, keys[evmtypes.StoreKey], tkeys[evmtypes.TransientKey],
govAuthorityAddr,
app.accountKeeper, evmBankKeeper, app.stakingKeeper, app.feeMarketKeeper,
govAuthAddr,
app.accountKeeper,
app.precisebankKeeper, // x/precisebank in place of x/bank
app.stakingKeeper,
app.feeMarketKeeper,
options.EVMTrace,
evmSubspace,
precompiles,
)
app.evmutilKeeper.SetEvmKeeper(app.evmKeeper)
// It's important to note that the PFM Keeper must be initialized before the Transfer Keeper
app.packetForwardKeeper = packetforwardkeeper.NewKeeper(
appCodec,
keys[packetforwardtypes.StoreKey],
nil, // will be zero-value here, reference is set later on with SetTransferKeeper.
app.ibcKeeper.ChannelKeeper,
app.distrKeeper,
app.bankKeeper,
app.ibcKeeper.ChannelKeeper,
govAuthAddrStr,
)
app.transferKeeper = ibctransferkeeper.NewKeeper(
appCodec,
keys[ibctransfertypes.StoreKey],
ibctransferSubspace,
app.ibcKeeper.ChannelKeeper,
app.packetForwardKeeper,
app.ibcKeeper.ChannelKeeper,
&app.ibcKeeper.PortKeeper,
app.accountKeeper,
app.bankKeeper,
scopedTransferKeeper,
)
app.packetForwardKeeper.SetTransferKeeper(app.transferKeeper)
transferModule := transfer.NewAppModule(app.transferKeeper)
transferIBCModule := transfer.NewIBCModule(app.transferKeeper)
// allow ibc packet forwarding for ibc transfers.
// transfer stack contains (from top to bottom):
// - Packet Forward Middleware
// - Transfer
var transferStack ibcporttypes.IBCModule
transferStack = transfer.NewIBCModule(app.transferKeeper)
transferStack = packetforward.NewIBCMiddleware(
transferStack,
app.packetForwardKeeper,
0, // retries on timeout
packetforwardkeeper.DefaultForwardTransferPacketTimeoutTimestamp,
packetforwardkeeper.DefaultRefundTransferPacketTimeoutTimestamp,
)
// Create static IBC router, add transfer route, then set and seal it
ibcRouter := porttypes.NewRouter()
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferIBCModule)
ibcRouter := ibcporttypes.NewRouter()
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
app.ibcKeeper.SetRouter(ibcRouter)
app.issuanceKeeper = issuancekeeper.NewKeeper(
@ -510,11 +587,11 @@ func NewApp(
app.mintKeeper = mintkeeper.NewKeeper(
appCodec,
keys[minttypes.StoreKey],
mintSubspace,
app.stakingKeeper,
app.accountKeeper,
app.bankKeeper,
authtypes.FeeCollectorName,
govAuthAddrStr,
)
// create committee keeper with router
@ -522,8 +599,7 @@ func NewApp(
committeeGovRouter.
AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)).
AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper))
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(&app.upgradeKeeper))
// Note: the committee proposal handler is not registered on the committee router. This means committees cannot create or update other committees.
// Adding the committee proposal handler to the router is possible but awkward as the handler depends on the keeper which depends on the handler.
app.committeeKeeper = committeekeeper.NewKeeper(
@ -536,12 +612,11 @@ func NewApp(
)
// register the staking hooks
// NOTE: These keepers are passed by reference above, so they will contain these hooks.
app.stakingKeeper = *(app.stakingKeeper.SetHooks(
app.stakingKeeper.SetHooks(
stakingtypes.NewMultiStakingHooks(
app.distrKeeper.Hooks(),
app.slashingKeeper.Hooks(),
)))
))
// create gov keeper with router
// NOTE this must be done after any keepers referenced in the gov router (ie committee) are defined
@ -549,27 +624,27 @@ func NewApp(
govRouter.
AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler).
AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.paramsKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.upgradeKeeper)).
AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(&app.upgradeKeeper)).
AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.ibcKeeper.ClientKeeper)).
AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.distrKeeper)).
AddRoute(committeetypes.RouterKey, committee.NewProposalHandler(app.committeeKeeper))
govConfig := govtypes.DefaultConfig()
app.govKeeper = govkeeper.NewKeeper(
govKeeper := govkeeper.NewKeeper(
appCodec,
keys[govtypes.StoreKey],
govSubspace,
app.accountKeeper,
app.bankKeeper,
&app.stakingKeeper,
govRouter,
app.stakingKeeper,
app.MsgServiceRouter(),
govConfig,
govAuthAddrStr,
)
govKeeper.SetLegacyRouter(govRouter)
app.govKeeper = *govKeeper
// override x/gov tally handler with custom implementation
tallyHandler := NewTallyHandler(
app.govKeeper, app.stakingKeeper, app.bankKeeper,
app.govKeeper, *app.stakingKeeper, app.bankKeeper,
)
app.govKeeper.SetTallyHandler(tallyHandler)
@ -581,22 +656,24 @@ func NewApp(
// must be passed by reference here.)
app.mm = module.NewManager(
genutil.NewAppModule(app.accountKeeper, app.stakingKeeper, app.BaseApp.DeliverTx, encodingConfig.TxConfig),
auth.NewAppModule(appCodec, app.accountKeeper, nil),
bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper),
capability.NewAppModule(appCodec, *app.capabilityKeeper),
staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper),
distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper),
gov.NewAppModule(appCodec, app.govKeeper, app.accountKeeper, app.bankKeeper),
auth.NewAppModule(appCodec, app.accountKeeper, authsims.RandomGenesisAccounts, authSubspace),
bank.NewAppModule(appCodec, app.bankKeeper, app.accountKeeper, bankSubspace),
capability.NewAppModule(appCodec, *app.capabilityKeeper, false), // todo: confirm if this is okay to not be sealed
staking.NewAppModule(appCodec, app.stakingKeeper, app.accountKeeper, app.bankKeeper, stakingSubspace),
distr.NewAppModule(appCodec, app.distrKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper, distrSubspace),
gov.NewAppModule(appCodec, &app.govKeeper, app.accountKeeper, app.bankKeeper, govSubspace),
params.NewAppModule(app.paramsKeeper),
crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants),
slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper),
crisis.NewAppModule(&app.crisisKeeper, options.SkipGenesisInvariants, crisisSubspace),
slashing.NewAppModule(appCodec, app.slashingKeeper, app.accountKeeper, app.bankKeeper, app.stakingKeeper, slashingSubspace),
consensus.NewAppModule(appCodec, app.consensusParamsKeeper),
ibc.NewAppModule(app.ibcKeeper),
packetforward.NewAppModule(app.packetForwardKeeper, packetforwardSubspace),
evm.NewAppModule(app.evmKeeper, app.accountKeeper),
feemarket.NewAppModule(app.feeMarketKeeper, feemarketSubspace),
upgrade.NewAppModule(app.upgradeKeeper),
upgrade.NewAppModule(&app.upgradeKeeper),
evidence.NewAppModule(app.evidenceKeeper),
transferModule,
vesting.NewAppModule(app.accountKeeper, app.vestingKeeper),
vesting.NewAppModule(app.accountKeeper, app.bankKeeper, app.vestingKeeper),
authzmodule.NewAppModule(appCodec, app.authzKeeper, app.accountKeeper, app.bankKeeper, app.interfaceRegistry),
issuance.NewAppModule(app.issuanceKeeper, app.accountKeeper, app.bankKeeper),
bep3.NewAppModule(app.bep3Keeper, app.accountKeeper, app.bankKeeper),
@ -605,9 +682,11 @@ func NewApp(
committee.NewAppModule(app.committeeKeeper, app.accountKeeper),
evmutil.NewAppModule(app.evmutilKeeper, app.bankKeeper, app.accountKeeper),
// nil InflationCalculationFn, use SDK's default inflation function
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, chaincfg.NextInflationRate),
council.NewAppModule(app.CouncilKeeper, app.stakingKeeper),
dasigners.NewAppModule(app.dasignersKeeper, app.stakingKeeper),
mint.NewAppModule(appCodec, app.mintKeeper, app.accountKeeper, nil, mintSubspace),
precisebank.NewAppModule(app.precisebankKeeper, app.bankKeeper, app.accountKeeper),
council.NewAppModule(app.CouncilKeeper),
ibcwasm.NewAppModule(app.ibcWasmClientKeeper),
dasigners.NewAppModule(app.dasignersKeeper, *app.stakingKeeper),
)
// Warning: Some begin blockers must run before others. Ensure the dependencies are understood before modifying this list.
@ -635,7 +714,7 @@ func NewApp(
// It should be run before cdp begin blocker which cancels out debt with stable and starts more auctions.
bep3types.ModuleName,
issuancetypes.ModuleName,
ibchost.ModuleName,
ibcexported.ModuleName,
// Add all remaining modules with an empty begin blocker below since cosmos 0.45.0 requires it
vestingtypes.ModuleName,
pricefeedtypes.ModuleName,
@ -649,8 +728,11 @@ func NewApp(
paramstypes.ModuleName,
authz.ModuleName,
evmutiltypes.ModuleName,
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName,
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
@ -673,7 +755,7 @@ func NewApp(
upgradetypes.ModuleName,
evidencetypes.ModuleName,
vestingtypes.ModuleName,
ibchost.ModuleName,
ibcexported.ModuleName,
validatorvestingtypes.ModuleName,
authtypes.ModuleName,
banktypes.ModuleName,
@ -684,6 +766,10 @@ func NewApp(
evmutiltypes.ModuleName,
minttypes.ModuleName,
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName,
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
@ -697,7 +783,7 @@ func NewApp(
slashingtypes.ModuleName, // iterates over validators, run after staking
govtypes.ModuleName,
minttypes.ModuleName,
ibchost.ModuleName,
ibcexported.ModuleName,
evidencetypes.ModuleName,
authz.ModuleName,
ibctransfertypes.ModuleName,
@ -709,18 +795,21 @@ func NewApp(
committeetypes.ModuleName,
evmutiltypes.ModuleName,
genutiltypes.ModuleName, // runs arbitrary txs included in genesis state, so run after modules have been initialized
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
// Add all remaining modules with an empty InitGenesis below since cosmos 0.45.0 requires it
vestingtypes.ModuleName,
paramstypes.ModuleName,
upgradetypes.ModuleName,
validatorvestingtypes.ModuleName,
counciltypes.ModuleName,
consensusparamtypes.ModuleName,
packetforwardtypes.ModuleName,
precisebanktypes.ModuleName, // Must be run after x/bank to verify reserve balance
crisistypes.ModuleName, // runs the invariants at genesis, should run after other modules
ibcwasmtypes.ModuleName,
dasignerstypes.ModuleName,
)
app.mm.RegisterInvariants(&app.crisisKeeper)
app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino)
app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter())
app.RegisterServices(app.configurator)
@ -792,6 +881,15 @@ func NewApp(
}
}
if manager := app.SnapshotManager(); manager != nil {
err := manager.RegisterExtensions(
ibcwasmkeeper.NewWasmSnapshotter(app.CommitMultiStore(), &app.ibcWasmClientKeeper),
)
if err != nil {
panic(fmt.Errorf("failed to register snapshot extension: %s", err))
}
}
app.ScopedIBCKeeper = scopedIBCKeeper
app.ScopedTransferKeeper = scopedTransferKeeper

View File
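One practical consequence of the packet-forward middleware wired into the transfer stack above: an incoming ICS-20 transfer can carry a "forward" memo that asks this chain to relay the funds onward over another channel. A hedged sketch of building such a transfer with ibc-go v7's memo field; the ports, channels, denom and addresses are all placeholders:

package app_test

import (
	"encoding/json"
	"time"

	ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
	clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// buildForwardTransfer sketches an ICS-20 transfer whose memo asks the packet
// forward middleware on the receiving chain to forward the funds to a final
// receiver over another channel. All identifiers here are illustrative.
func buildForwardTransfer(sender, hopReceiver, finalReceiver string) (*ibctransfertypes.MsgTransfer, error) {
	memo, err := json.Marshal(map[string]interface{}{
		"forward": map[string]interface{}{
			"receiver": finalReceiver,
			"port":     "transfer",
			"channel":  "channel-1",
		},
	})
	if err != nil {
		return nil, err
	}

	return ibctransfertypes.NewMsgTransfer(
		"transfer",  // source port
		"channel-0", // source channel
		sdk.NewInt64Coin("uatom", 1_000_000), // placeholder denom and amount
		sender,
		hopReceiver,
		clienttypes.ZeroHeight(), // no timeout height
		uint64(time.Now().Add(10*time.Minute).UnixNano()), // timeout timestamp
		string(memo), // consumed by the packet forward middleware
	), nil
}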

@ -3,22 +3,26 @@ package app
import (
"encoding/json"
"fmt"
"github.com/0glabs/0g-chain/chaincfg"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmtypes "github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
"os"
"sort"
"testing"
"time"
"github.com/0glabs/0g-chain/chaincfg"
db "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/testutil/sims"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx"
vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types"
solomachine "github.com/cosmos/ibc-go/v7/modules/light-clients/06-solomachine"
ibctm "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewApp(t *testing.T) {
@ -36,7 +40,7 @@ func TestNewApp(t *testing.T) {
func TestExport(t *testing.T) {
chaincfg.SetSDKConfig()
db := db.NewMemDB()
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions)
app := NewApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, chaincfg.DefaultNodeHome, nil, MakeEncodingConfig(), DefaultOptions, baseapp.SetChainID(TestChainId))
genesisState := GenesisStateWithSingleValidator(&TestApp{App: *app}, NewDefaultGenesisState())
@ -45,21 +49,23 @@ func TestExport(t *testing.T) {
initRequest := abci.RequestInitChain{
Time: time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC),
ChainId: "kavatest_1-1",
ChainId: TestChainId,
InitialHeight: 1,
ConsensusParams: tmtypes.TM2PB.ConsensusParams(tmtypes.DefaultConsensusParams()),
ConsensusParams: sims.DefaultConsensusParams,
Validators: nil,
AppStateBytes: stateBytes,
}
app.InitChain(initRequest)
app.Commit()
exportedApp, err := app.ExportAppStateAndValidators(false, []string{})
exportedApp, err := app.ExportAppStateAndValidators(false, []string{}, []string{})
require.NoError(t, err)
// Assume each module is exported correctly, so only check modules in genesis are present in export
initialModules, err := unmarshalJSONKeys(initRequest.AppStateBytes)
require.NoError(t, err)
// note ibctm is only registered in the BasicManager and not module manager so can be ignored
initialModules = removeIbcTmModule(initialModules)
exportedModules, err := unmarshalJSONKeys(exportedApp.AppState)
require.NoError(t, err)
assert.ElementsMatch(t, initialModules, exportedModules)
@ -143,3 +149,13 @@ func unmarshalJSONKeys(jsonBytes []byte) ([]string, error) {
return keys, nil
}
func removeIbcTmModule(modules []string) []string {
var result []string
for _, str := range modules {
if str != ibctm.ModuleName && str != solomachine.ModuleName {
result = append(result, str)
}
}
return result
}

View File

@ -4,7 +4,7 @@ import (
"encoding/json"
"log"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
sdk "github.com/cosmos/cosmos-sdk/types"
@ -14,7 +14,7 @@ import (
)
// ExportAppStateAndValidators exports the state of the app for a genesis file
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string,
func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []string, modulesToExport []string,
) (servertypes.ExportedApp, error) {
// as if they could withdraw from the start of the next block
// block time is not available and defaults to Jan 1st, 0001
@ -26,7 +26,7 @@ func (app *App) ExportAppStateAndValidators(forZeroHeight bool, jailWhiteList []
app.prepForZeroHeightGenesis(ctx, jailWhiteList)
}
genState := app.mm.ExportGenesis(ctx, app.appCodec)
genState := app.mm.ExportGenesisForModules(ctx, app.appCodec, modulesToExport)
newAppState, err := json.MarshalIndent(genState, "", " ")
if err != nil {
return servertypes.ExportedApp{}, err

View File
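For callers of the export path above: the new modulesToExport argument is forwarded to ExportGenesisForModules, and an empty list keeps the previous export-everything behaviour. A small usage sketch against the test app; the test name is illustrative and the module names are just the SDK's standard auth and bank identifiers:

package app_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/0glabs/0g-chain/app"
)

func TestExportModulesSketch(t *testing.T) {
	tApp := app.NewTestApp()
	tApp.InitializeFromGenesisStates()

	// An empty modulesToExport exports every module, as before.
	exported, err := tApp.ExportAppStateAndValidators(false, []string{}, []string{})
	require.NoError(t, err)
	require.NotEmpty(t, exported.AppState)

	// A non-empty list restricts the exported genesis to the named modules.
	partial, err := tApp.ExportAppStateAndValidators(false, []string{}, []string{"auth", "bank"})
	require.NoError(t, err)
	require.NotEmpty(t, partial.AppState)
}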

@ -3,9 +3,9 @@ Package params defines the simulation parameters for the 0gChain app.
It contains the default weights used for each transaction used on the module's
simulation. These weights define the chance for a transaction to be simulated at
any gived operation.
any given operation.
You can repace the default values for the weights by providing a params.json
You can replace the default values for the weights by providing a params.json
file with the weights defined for each of the transaction operations:
{

View File

@ -143,7 +143,7 @@ func (th TallyHandler) Tally(
totalVotingPower = totalVotingPower.Add(votingPower)
}
tallyParams := th.gk.GetTallyParams(ctx)
tallyParams := th.gk.GetParams(ctx)
tallyResults = govv1.NewTallyResultFromMap(results)
// TODO: Upgrade the spec to cover all of these cases & remove pseudocode.
@ -155,7 +155,7 @@ func (th TallyHandler) Tally(
// If there is not enough quorum of votes, the proposal fails
percentVoting := totalVotingPower.Quo(sdk.NewDecFromInt(th.stk.TotalBondedTokens(ctx)))
if percentVoting.LT(sdk.MustNewDecFromStr(tallyParams.Quorum)) {
return false, true, tallyResults
return false, tallyParams.BurnVoteQuorum, tallyResults
}
// If no one votes (everyone abstains), proposal fails
@ -165,7 +165,7 @@ func (th TallyHandler) Tally(
// If more than 1/3 of voters veto, proposal fails
if results[govv1.OptionNoWithVeto].Quo(totalVotingPower).GT(sdk.MustNewDecFromStr(tallyParams.VetoThreshold)) {
return false, true, tallyResults
return false, tallyParams.BurnVoteVeto, tallyResults
}
// If more than 1/2 of non-abstaining voters vote Yes, proposal passes

View File
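The tally changes above follow the gov v1 parameter consolidation in cosmos-sdk 0.47: quorum, thresholds and the deposit-burn switches are all read from a single Params object, which is why the handler returns params.BurnVoteQuorum and params.BurnVoteVeto instead of hard-coded booleans. A short sketch of pulling the values the handler uses; the function is illustrative and gk stands in for the handler's gov keeper:

package app

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
)

// tallyCutoffs shows where the tally handler now sources its thresholds and
// burn behaviour: one v1 Params object replaces the removed GetTallyParams.
func tallyCutoffs(ctx sdk.Context, gk govkeeper.Keeper) (quorum, veto sdk.Dec, burnOnNoQuorum, burnOnVeto bool) {
	params := gk.GetParams(ctx)
	return sdk.MustNewDecFromStr(params.Quorum),
		sdk.MustNewDecFromStr(params.VetoThreshold),
		params.BurnVoteQuorum,
		params.BurnVoteVeto
}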

@ -5,6 +5,7 @@ import (
"time"
sdkmath "cosmossdk.io/math"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
sdk "github.com/cosmos/cosmos-sdk/types"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
@ -15,7 +16,6 @@ import (
stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/stretchr/testify/suite"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
// d is an alias for sdk.MustNewDecFromStr
@ -41,12 +41,13 @@ func (suite *tallyHandlerSuite) SetupTest() {
genesisTime := time.Date(1998, 1, 1, 0, 0, 0, 0, time.UTC)
suite.ctx = suite.app.NewContext(false, tmproto.Header{Height: 1, Time: genesisTime})
suite.staking = stakingHelper{suite.app.GetStakingKeeper()}
stakingKeeper := *suite.app.GetStakingKeeper()
suite.staking = stakingHelper{stakingKeeper}
suite.staking.setBondDenom(suite.ctx, "ukava")
suite.tallier = NewTallyHandler(
suite.app.GetGovKeeper(),
suite.app.GetStakingKeeper(),
stakingKeeper,
suite.app.GetBankKeeper(),
)
}
@ -129,7 +130,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
})
suite.Run("VetoedFails", func() {
suite.SetupTest()
@ -144,7 +145,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to fail, tally: %v", tally)
suite.Truef(burns, "expected desposit to be burned, tally: %v", tally)
suite.Truef(burns, "expected deposit to be burned, tally: %v", tally)
})
suite.Run("UnvetoedAndYesAboveThresholdPasses", func() {
suite.SetupTest()
@ -161,7 +162,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Truef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
suite.Run("UnvetoedAndYesBelowThresholdFails", func() {
suite.SetupTest()
@ -178,7 +179,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
suite.Run("NotEnoughStakeFails", func() {
suite.SetupTest()
@ -190,7 +191,7 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
suite.Run("UnvetoedAndAllAbstainedFails", func() {
suite.SetupTest()
@ -203,17 +204,18 @@ func (suite *tallyHandlerSuite) TestTallyOutcomes() {
passes, burns, tally := suite.tallier.Tally(suite.ctx, proposal)
suite.Falsef(passes, "expected proposal to pass, tally: %v", tally)
suite.Falsef(burns, "expected desposit to not burn, tally: %v", tally)
suite.Falsef(burns, "expected deposit to not burn, tally: %v", tally)
})
}
func (suite *tallyHandlerSuite) setTallyParams(quorum, threshold, veto sdk.Dec) {
suite.app.GetGovKeeper().SetTallyParams(suite.ctx, govv1.TallyParams{
Quorum: quorum.String(),
Threshold: threshold.String(),
VetoThreshold: veto.String(),
})
params := suite.app.GetGovKeeper().GetParams(suite.ctx)
params.Quorum = quorum.String()
params.Threshold = threshold.String()
params.VetoThreshold = veto.String()
params.BurnVoteQuorum = true
suite.app.GetGovKeeper().SetParams(suite.ctx, params)
}
func (suite *tallyHandlerSuite) voteOnProposal(
@ -234,7 +236,7 @@ func (suite *tallyHandlerSuite) voteOnProposal(
func (suite *tallyHandlerSuite) createProposal() govv1.Proposal {
gk := suite.app.GetGovKeeper()
deposit := gk.GetDepositParams(suite.ctx).MinDeposit
deposit := gk.GetParams(suite.ctx).MinDeposit
proposer := suite.createAccount(deposit...)
msg, err := govv1beta1.NewMsgSubmitProposal(
@ -244,7 +246,7 @@ func (suite *tallyHandlerSuite) createProposal() govv1.Proposal {
)
suite.Require().NoError(err)
msgServerv1 := govkeeper.NewMsgServerImpl(gk)
msgServerv1 := govkeeper.NewMsgServerImpl(&gk)
govAcct := gk.GetGovernanceAccount(suite.ctx).GetAddress()
msgServer := govkeeper.NewLegacyMsgServerImpl(govAcct.String(), msgServerv1)
@ -364,7 +366,7 @@ func (h stakingHelper) createUnbondedValidator(ctx sdk.Context, address sdk.ValA
return nil, err
}
msgServer := stakingkeeper.NewMsgServerImpl(h.keeper)
msgServer := stakingkeeper.NewMsgServerImpl(&h.keeper)
_, err = msgServer.CreateValidator(sdk.WrapSDKContext(ctx), msg)
if err != nil {
return nil, err
@ -384,7 +386,7 @@ func (h stakingHelper) delegate(ctx sdk.Context, delegator sdk.AccAddress, valid
h.newBondCoin(ctx, amount),
)
msgServer := stakingkeeper.NewMsgServerImpl(h.keeper)
msgServer := stakingkeeper.NewMsgServerImpl(&h.keeper)
_, err := msgServer.Delegate(sdk.WrapSDKContext(ctx), msg)
if err != nil {
return sdk.Dec{}, err

View File

@ -9,6 +9,12 @@ import (
"time"
sdkmath "cosmossdk.io/math"
dasignerskeeper "github.com/0glabs/0g-chain/x/dasigners/v1/keeper"
tmdb "github.com/cometbft/cometbft-db"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cometbft/cometbft/libs/log"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
@ -35,26 +41,23 @@ import (
evmkeeper "github.com/evmos/ethermint/x/evm/keeper"
feemarketkeeper "github.com/evmos/ethermint/x/feemarket/keeper"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtypes "github.com/tendermint/tendermint/types"
tmdb "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/chaincfg"
bep3keeper "github.com/0glabs/0g-chain/x/bep3/keeper"
committeekeeper "github.com/0glabs/0g-chain/x/committee/keeper"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
issuancekeeper "github.com/0glabs/0g-chain/x/issuance/keeper"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
pricefeedkeeper "github.com/0glabs/0g-chain/x/pricefeed/keeper"
)
var (
emptyTime time.Time
testChainID = "kavatest_1-1"
defaultInitialHeight int64 = 1
)
const TestChainId = "zgchain_8888-1"
// TestApp is a simple wrapper around an App. It exposes internal keepers for use in integration tests.
// This file also contains test helpers. Ideally they would be in a separate package.
// Basic Usage:
@ -89,32 +92,41 @@ func NewTestAppFromSealed() TestApp {
encCfg := MakeEncodingConfig()
app := NewApp(log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil, encCfg, DefaultOptions)
app := NewApp(
log.NewNopLogger(), db, chaincfg.DefaultNodeHome, nil,
encCfg, DefaultOptions, baseapp.SetChainID(TestChainId),
)
return TestApp{App: *app}
}
// nolint
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
func (tApp TestApp) GetStakingKeeper() stakingkeeper.Keeper { return tApp.stakingKeeper }
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
func (tApp TestApp) GetAccountKeeper() authkeeper.AccountKeeper { return tApp.accountKeeper }
func (tApp TestApp) GetBankKeeper() bankkeeper.Keeper { return tApp.bankKeeper }
func (tApp TestApp) GetMintKeeper() mintkeeper.Keeper { return tApp.mintKeeper }
func (tApp TestApp) GetStakingKeeper() *stakingkeeper.Keeper { return tApp.stakingKeeper }
func (tApp TestApp) GetSlashingKeeper() slashingkeeper.Keeper { return tApp.slashingKeeper }
func (tApp TestApp) GetDistrKeeper() distkeeper.Keeper { return tApp.distrKeeper }
func (tApp TestApp) GetGovKeeper() govkeeper.Keeper { return tApp.govKeeper }
func (tApp TestApp) GetCrisisKeeper() crisiskeeper.Keeper { return tApp.crisisKeeper }
func (tApp TestApp) GetParamsKeeper() paramskeeper.Keeper { return tApp.paramsKeeper }
func (tApp TestApp) GetIssuanceKeeper() issuancekeeper.Keeper { return tApp.issuanceKeeper }
func (tApp TestApp) GetBep3Keeper() bep3keeper.Keeper { return tApp.bep3Keeper }
func (tApp TestApp) GetPriceFeedKeeper() pricefeedkeeper.Keeper { return tApp.pricefeedKeeper }
func (tApp TestApp) GetCommitteeKeeper() committeekeeper.Keeper { return tApp.committeeKeeper }
func (tApp TestApp) GetEvmutilKeeper() evmutilkeeper.Keeper { return tApp.evmutilKeeper }
func (tApp TestApp) GetEvmKeeper() *evmkeeper.Keeper { return tApp.evmKeeper }
func (tApp TestApp) GetFeeMarketKeeper() feemarketkeeper.Keeper { return tApp.feeMarketKeeper }
func (tApp TestApp) GetDASignersKeeper() dasignerskeeper.Keeper { return tApp.dasignersKeeper }
func (tApp TestApp) GetPrecisebankKeeper() precisebankkeeper.Keeper { return tApp.precisebankKeeper }
func (tApp TestApp) GetKVStoreKey(key string) *storetypes.KVStoreKey {
return tApp.keys[key]
}
func (tApp TestApp) GetBlockedMaccAddrs() map[string]bool {
return tApp.loadBlockedMaccAddrs()
}
// LegacyAmino returns the app's amino codec.
func (app *App) LegacyAmino() *codec.LegacyAmino {
return app.legacyAmino
@ -246,6 +258,7 @@ func genesisStateWithValSet(
balances,
totalSupply,
currentBankGenesis.DenomMetadata,
currentBankGenesis.SendEnabled,
)
// set genesis state
@ -259,13 +272,13 @@ func genesisStateWithValSet(
// InitializeFromGenesisStates calls InitChain on the app using the provided genesis states.
// If any module genesis states are missing, defaults are used.
func (tApp TestApp) InitializeFromGenesisStates(genesisStates ...GenesisState) TestApp {
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(emptyTime, testChainID, defaultInitialHeight, true, genesisStates...)
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(emptyTime, TestChainId, defaultInitialHeight, true, genesisStates...)
}
// InitializeFromGenesisStatesWithTime calls InitChain on the app using the provided genesis states and time.
// If any module genesis states are missing, defaults are used.
func (tApp TestApp) InitializeFromGenesisStatesWithTime(genTime time.Time, genesisStates ...GenesisState) TestApp {
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(genTime, testChainID, defaultInitialHeight, true, genesisStates...)
return tApp.InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(genTime, TestChainId, defaultInitialHeight, true, genesisStates...)
}
// InitializeFromGenesisStatesWithTimeAndChainID calls InitChain on the app using the provided genesis states, time, and chain id.
@ -322,8 +335,8 @@ func (tApp TestApp) InitializeFromGenesisStatesWithTimeAndChainIDAndHeight(
AppStateBytes: stateBytes,
ChainId: chainID,
// Set consensus params, which is needed by x/feemarket
ConsensusParams: &abci.ConsensusParams{
Block: &abci.BlockParams{
ConsensusParams: &tmproto.ConsensusParams{
Block: &tmproto.BlockParams{
MaxBytes: 200000,
MaxGas: 20000000,
},
@ -458,7 +471,7 @@ func (tApp TestApp) SetInflation(ctx sdk.Context, value sdk.Dec) {
mk.SetParams(ctx, mintParams)
}
// GeneratePrivKeyAddressPairsFromRand generates (deterministically) a total of n private keys and addresses.
// GeneratePrivKeyAddressPairs generates (deterministically) a total of n private keys and addresses.
func GeneratePrivKeyAddressPairs(n int) (keys []cryptotypes.PrivKey, addrs []sdk.AccAddress) {
r := rand.New(rand.NewSource(12345)) // make the generation deterministic
keys = make([]cryptotypes.PrivKey, n)

View File
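Downstream tests need two adjustments from this file: the exported TestChainId constant replaces the old private kavatest id, and GetStakingKeeper now returns a pointer. A minimal sketch of both, assuming the helpers defined in this file; the test name and assertions are illustrative:

package app_test

import (
	"testing"

	tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
	"github.com/stretchr/testify/require"

	"github.com/0glabs/0g-chain/app"
)

func TestStakingKeeperPointerSketch(t *testing.T) {
	tApp := app.NewTestApp()
	tApp.InitializeFromGenesisStates()

	// Contexts are built from the cometbft proto header type now.
	ctx := tApp.NewContext(true, tmproto.Header{Height: 1, ChainID: app.TestChainId})

	// GetStakingKeeper hands back *stakingkeeper.Keeper; dereference it where
	// an API still expects a value (e.g. the tally handler's stakingHelper).
	sk := tApp.GetStakingKeeper()
	require.NotNil(t, sk)
	require.NotEmpty(t, sk.GetParams(ctx).BondDenom)
}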

@ -4,86 +4,40 @@ import (
"fmt"
sdkmath "cosmossdk.io/math"
evmutilkeeper "github.com/0glabs/0g-chain/x/evmutil/keeper"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)
const (
UpgradeName_Mainnet = "v0.25.0"
UpgradeName_Testnet = "v0.25.0-alpha.0"
UpgradeName_E2ETest = "v0.25.0-testing"
)
var (
// KAVA to ukava - 6 decimals
kavaConversionFactor = sdk.NewInt(1000_000)
secondsPerYear = sdk.NewInt(365 * 24 * 60 * 60)
// 10 Million KAVA per year in staking rewards, inflation disable time 2024-01-01T00:00:00 UTC
// CommunityParams_Mainnet = communitytypes.NewParams(
// time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
// // before switchover
// sdkmath.LegacyZeroDec(),
// // after switchover - 10M KAVA to ukava per year / seconds per year
// sdkmath.LegacyNewDec(10_000_000).
// MulInt(kavaConversionFactor).
// QuoInt(secondsPerYear),
// )
// Testnet -- 15 Trillion KAVA per year in staking rewards, inflation disable time 2023-11-16T00:00:00 UTC
// CommunityParams_Testnet = communitytypes.NewParams(
// time.Date(2023, 11, 16, 0, 0, 0, 0, time.UTC),
// // before switchover
// sdkmath.LegacyZeroDec(),
// // after switchover
// sdkmath.LegacyNewDec(15_000_000).
// MulInt64(1_000_000). // 15M * 1M = 15T
// MulInt(kavaConversionFactor).
// QuoInt(secondsPerYear),
// )
// CommunityParams_E2E = communitytypes.NewParams(
// time.Now().Add(10*time.Second).UTC(), // relative time for testing
// sdkmath.LegacyNewDec(0), // stakingRewardsPerSecond
// sdkmath.LegacyNewDec(1000), // upgradeTimeSetStakingRewardsPerSecond
// )
// ValidatorMinimumCommission is the new 5% minimum commission rate for validators
ValidatorMinimumCommission = sdk.NewDecWithPrec(5, 2)
UpgradeName_Testnet = "v0.4.0"
)
// RegisterUpgradeHandlers registers the upgrade handlers for the app.
func (app App) RegisterUpgradeHandlers() {
// app.upgradeKeeper.SetUpgradeHandler(
// UpgradeName_Mainnet,
// upgradeHandler(app, UpgradeName_Mainnet, CommunityParams_Mainnet),
// )
// app.upgradeKeeper.SetUpgradeHandler(
// UpgradeName_Testnet,
// upgradeHandler(app, UpgradeName_Testnet, CommunityParams_Testnet),
// )
// app.upgradeKeeper.SetUpgradeHandler(
// UpgradeName_E2ETest,
// upgradeHandler(app, UpgradeName_Testnet, CommunityParams_E2E),
// )
app.upgradeKeeper.SetUpgradeHandler(
UpgradeName_Testnet,
upgradeHandler(app, UpgradeName_Testnet),
)
upgradeInfo, err := app.upgradeKeeper.ReadUpgradeInfoFromDisk()
if err != nil {
panic(err)
}
doUpgrade := upgradeInfo.Name == UpgradeName_Mainnet ||
upgradeInfo.Name == UpgradeName_Testnet ||
upgradeInfo.Name == UpgradeName_E2ETest
doUpgrade := upgradeInfo.Name == UpgradeName_Testnet
if doUpgrade && !app.upgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
storeUpgrades := storetypes.StoreUpgrades{
Added: []string{
// x/community added store
// communitytypes.ModuleName,
precisebanktypes.ModuleName,
},
}
@ -96,163 +50,219 @@ func (app App) RegisterUpgradeHandlers() {
func upgradeHandler(
app App,
name string,
// communityParams communitytypes.Params,
) upgradetypes.UpgradeHandler {
return func(
ctx sdk.Context,
plan upgradetypes.Plan,
fromVM module.VersionMap,
) (module.VersionMap, error) {
app.Logger().Info(fmt.Sprintf("running %s upgrade handler", name))
logger := app.Logger()
logger.Info(fmt.Sprintf("running %s upgrade handler", name))
toVM, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
// Run migrations for all modules and return new consensus version map.
versionMap, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
if err != nil {
return toVM, err
return nil, err
}
//
// Staking validator minimum commission
//
UpdateValidatorMinimumCommission(ctx, app)
logger.Info("completed store migrations")
//
// Community Params
//
// app.communityKeeper.SetParams(ctx, communityParams)
// app.Logger().Info(
// "initialized x/community params",
// "UpgradeTimeDisableInflation", communityParams.UpgradeTimeDisableInflation,
// "StakingRewardsPerSecond", communityParams.StakingRewardsPerSecond,
// "UpgradeTimeSetStakingRewardsPerSecond", communityParams.UpgradeTimeSetStakingRewardsPerSecond,
// )
// Migration of fractional balances from x/evmutil to x/precisebank
if err := MigrateEvmutilToPrecisebank(
ctx,
app.accountKeeper,
app.bankKeeper,
app.evmutilKeeper,
app.precisebankKeeper,
); err != nil {
return nil, err
}
//
// Kavadist gov grant
//
// msgGrant, err := authz.NewMsgGrant(
// app.accountKeeper.GetModuleAddress(kavadisttypes.ModuleName), // granter
// app.accountKeeper.GetModuleAddress(govtypes.ModuleName), // grantee
// authz.NewGenericAuthorization(sdk.MsgTypeURL(&banktypes.MsgSend{})), // authorization
// nil, // expiration
// )
// if err != nil {
// return toVM, err
// }
// _, err = app.authzKeeper.Grant(ctx, msgGrant)
// if err != nil {
// return toVM, err
// }
// app.Logger().Info("created gov grant for kavadist funds")
logger.Info("completed x/evmutil to x/precisebank migration")
//
// Gov Quorum
//
govTallyParams := app.govKeeper.GetTallyParams(ctx)
oldQuorum := govTallyParams.Quorum
govTallyParams.Quorum = sdkmath.LegacyMustNewDecFromStr("0.2").String()
app.govKeeper.SetTallyParams(ctx, govTallyParams)
app.Logger().Info(fmt.Sprintf("updated tally quorum from %s to %s", oldQuorum, govTallyParams.Quorum))
//
// Incentive Params
//
UpdateIncentiveParams(ctx, app)
return toVM, nil
return versionMap, nil
}
}
// UpdateValidatorMinimumCommission updates the commission rate for all
// validators to be at least the new min commission rate, and sets the minimum
// commission rate in the staking params.
func UpdateValidatorMinimumCommission(
// MigrateEvmutilToPrecisebank migrates all required state from x/evmutil to
// x/precisebank and ensures the resulting state is correct.
// This migrates the following state:
// - Fractional balances
// - Fractional balance reserve
// Initializes the following state in x/precisebank:
// - Remainder amount
func MigrateEvmutilToPrecisebank(
ctx sdk.Context,
app App,
) {
resultCount := make(map[stakingtypes.BondStatus]int)
accountKeeper evmutiltypes.AccountKeeper,
bankKeeper bankkeeper.Keeper,
evmutilKeeper evmutilkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) error {
logger := ctx.Logger()
// Iterate over *all* validators including inactive
app.stakingKeeper.IterateValidators(
aggregateSum, err := TransferFractionalBalances(
ctx,
func(index int64, validator stakingtypes.ValidatorI) (stop bool) {
// Skip if validator commission is already >= 5%
if validator.GetCommission().GTE(ValidatorMinimumCommission) {
return false
}
val, ok := validator.(stakingtypes.Validator)
if !ok {
panic("expected stakingtypes.Validator")
}
// Set minimum commission rate to 5%, when commission is < 5%
val.Commission.Rate = ValidatorMinimumCommission
val.Commission.UpdateTime = ctx.BlockTime()
// Update MaxRate if necessary
if val.Commission.MaxRate.LT(ValidatorMinimumCommission) {
val.Commission.MaxRate = ValidatorMinimumCommission
}
if err := app.stakingKeeper.BeforeValidatorModified(ctx, val.GetOperator()); err != nil {
panic(fmt.Sprintf("failed to call BeforeValidatorModified: %s", err))
}
app.stakingKeeper.SetValidator(ctx, val)
// Keep track of counts just for logging purposes
switch val.GetStatus() {
case stakingtypes.Bonded:
resultCount[stakingtypes.Bonded]++
case stakingtypes.Unbonded:
resultCount[stakingtypes.Unbonded]++
case stakingtypes.Unbonding:
resultCount[stakingtypes.Unbonding]++
}
return false
},
evmutilKeeper,
precisebankKeeper,
)
if err != nil {
return fmt.Errorf("fractional balances transfer: %w", err)
}
logger.Info(
"fractional balances transferred from x/evmutil to x/precisebank",
"aggregate sum", aggregateSum,
)
app.Logger().Info(
"updated validator minimum commission rate for all existing validators",
stakingtypes.BondStatusBonded, resultCount[stakingtypes.Bonded],
stakingtypes.BondStatusUnbonded, resultCount[stakingtypes.Unbonded],
stakingtypes.BondStatusUnbonding, resultCount[stakingtypes.Unbonding],
)
remainder := InitializeRemainder(ctx, precisebankKeeper, aggregateSum)
logger.Info("remainder amount initialized in x/precisebank", "remainder", remainder)
stakingParams := app.stakingKeeper.GetParams(ctx)
stakingParams.MinCommissionRate = ValidatorMinimumCommission
app.stakingKeeper.SetParams(ctx, stakingParams)
// Transfer the fractional balance reserve and ensure it fully backs all
// fractional balances.
if err := TransferFractionalBalanceReserve(
ctx,
accountKeeper,
bankKeeper,
precisebankKeeper,
); err != nil {
return fmt.Errorf("reserve transfer: %w", err)
}
app.Logger().Info(
"updated x/staking params minimum commission rate",
"MinCommissionRate", stakingParams.MinCommissionRate,
)
return nil
}
// UpdateIncentiveParams modifies the earn rewards period for bkava to be 600K KAVA per year.
func UpdateIncentiveParams(
// TransferFractionalBalances migrates fractional balances from x/evmutil to
// x/precisebank. It sets the fractional balance in x/precisebank and deletes
// the account from x/evmutil. Returns the aggregate sum of all fractional
// balances.
func TransferFractionalBalances(
ctx sdk.Context,
app App,
) {
// incentiveParams := app.incentiveKeeper.GetParams(ctx)
evmutilKeeper evmutilkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) (sdkmath.Int, error) {
aggregateSum := sdkmath.ZeroInt()
// bkava annualized rewards: 600K KAVA
// newAmount := sdkmath.LegacyNewDec(600_000).
// MulInt(kavaConversionFactor).
// QuoInt(secondsPerYear).
// TruncateInt()
var iterErr error
// for i := range incentiveParams.EarnRewardPeriods {
// if incentiveParams.EarnRewardPeriods[i].CollateralType != "bkava" {
// continue
// }
evmutilKeeper.IterateAllAccounts(ctx, func(acc evmutiltypes.Account) bool {
// Set account balance in x/precisebank
precisebankKeeper.SetFractionalBalance(ctx, acc.Address, acc.Balance)
// // Update rewards per second via index
// incentiveParams.EarnRewardPeriods[i].RewardsPerSecond = sdk.NewCoins(
// sdk.NewCoin("ukava", newAmount),
// )
// }
// Delete account from x/evmutil
iterErr = evmutilKeeper.SetAccount(ctx, evmutiltypes.Account{
Address: acc.Address,
// Set balance to 0 to delete it
Balance: sdkmath.ZeroInt(),
})
// app.incentiveKeeper.SetParams(ctx, incentiveParams)
// Halt iteration if there was an error
if iterErr != nil {
return true
}
// Aggregate sum of all fractional balances
aggregateSum = aggregateSum.Add(acc.Balance)
// Continue iterating
return false
})
return aggregateSum, iterErr
}
// InitializeRemainder initializes the remainder amount in x/precisebank. It
// calculates the remainder amount that is needed to ensure that the sum of all
// fractional balances is a multiple of the conversion factor. The remainder
// amount is stored in the store and returned.
func InitializeRemainder(
ctx sdk.Context,
precisebankKeeper precisebankkeeper.Keeper,
aggregateSum sdkmath.Int,
) sdkmath.Int {
// Extra fractional coins that exceed the conversion factor.
// This extra + remainder should equal the conversion factor to ensure
// (sum(fBalances) + remainder) % conversionFactor = 0
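// Worked example (illustrative numbers, not taken from chain state): with a
// conversion factor of 1_000_000 and an aggregate sum of 2_500_000,
// extraFractionalAmount is 500_000 and the remainder is
// (1_000_000 - 500_000) % 1_000_000 = 500_000, so sum + remainder = 3_000_000
// is an exact multiple of the conversion factor.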
extraFractionalAmount := aggregateSum.Mod(precisebanktypes.ConversionFactor())
remainder := precisebanktypes.ConversionFactor().
Sub(extraFractionalAmount).
// Mod conversion factor to ensure remainder is valid.
// If extraFractionalAmount is a multiple of conversion factor, the
// remainder is 0.
Mod(precisebanktypes.ConversionFactor())
// SetRemainderAmount panics if the remainder is invalid. In a correct chain
// state, where mints and burns only occur as part of transfers, the remainder
// would be 0.
precisebankKeeper.SetRemainderAmount(ctx, remainder)
return remainder
}
// TransferFractionalBalanceReserve migrates the fractional balance reserve from
// x/evmutil to x/precisebank. It transfers the reserve balance from x/evmutil
// to x/precisebank and ensures that the reserve fully backs all fractional
// balances. It mints or burns coins to back the fractional balances exactly.
func TransferFractionalBalanceReserve(
ctx sdk.Context,
accountKeeper evmutiltypes.AccountKeeper,
bankKeeper bankkeeper.Keeper,
precisebankKeeper precisebankkeeper.Keeper,
) error {
logger := ctx.Logger()
// Transfer x/evmutil reserve to x/precisebank.
evmutilAddr := accountKeeper.GetModuleAddress(evmutiltypes.ModuleName)
reserveBalance := bankKeeper.GetBalance(ctx, evmutilAddr, precisebanktypes.IntegerCoinDenom)
if err := bankKeeper.SendCoinsFromModuleToModule(
ctx,
evmutiltypes.ModuleName, // from x/evmutil
precisebanktypes.ModuleName, // to x/precisebank
sdk.NewCoins(reserveBalance),
); err != nil {
return fmt.Errorf("failed to transfer reserve from x/evmutil to x/precisebank: %w", err)
}
logger.Info(fmt.Sprintf("transferred reserve balance: %s", reserveBalance))
// Ensure x/precisebank reserve fully backs all fractional balances.
totalFractionalBalances := precisebankKeeper.GetTotalSumFractionalBalances(ctx)
// Does NOT ensure state is correct, total fractional balances should be a
// multiple of conversion factor but is not guaranteed due to the remainder.
// Remainder initialization is handled by InitializeRemainder.
// Determine how much the reserve is off by, e.g. unbacked amount
expectedReserveBalance := totalFractionalBalances.Quo(precisebanktypes.ConversionFactor())
// If there is a remainder (totalFractionalBalances % conversionFactor != 0),
// then expectedReserveBalance is rounded up to the nearest integer.
if totalFractionalBalances.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
expectedReserveBalance = expectedReserveBalance.Add(sdkmath.OneInt())
}
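// Worked example (illustrative numbers): with a conversion factor of
// 1_000_000 and total fractional balances of 2_500_000, integer division
// gives an expected reserve of 2 and the positive remainder of the division
// bumps it to 3, so the 500_000 of fractional value beyond two whole units is
// still fully backed.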
unbackedAmount := expectedReserveBalance.Sub(reserveBalance.Amount)
logger.Info(fmt.Sprintf("total account fractional balances: %s", totalFractionalBalances))
// Three possible cases:
// 1. Reserve is not enough, mint coins to back the fractional balances
// 2. Reserve is too much, burn coins to back the fractional balances exactly
// 3. Reserve is exactly enough, no action needed
if unbackedAmount.IsPositive() {
coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount))
if err := bankKeeper.MintCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
return fmt.Errorf("failed to mint extra reserve coins: %w", err)
}
logger.Info(fmt.Sprintf("unbacked amount minted to reserve: %s", unbackedAmount))
} else if unbackedAmount.IsNegative() {
coins := sdk.NewCoins(sdk.NewCoin(precisebanktypes.IntegerCoinDenom, unbackedAmount.Neg()))
if err := bankKeeper.BurnCoins(ctx, precisebanktypes.ModuleName, coins); err != nil {
return fmt.Errorf("failed to burn extra reserve coins: %w", err)
}
logger.Info(fmt.Sprintf("extra reserve amount burned: %s", unbackedAmount.Neg()))
} else {
logger.Info("reserve exactly backs fractional balances, no mint/burn needed")
}
return nil
}

View File

@ -1,239 +1,434 @@
package app_test
import (
"strconv"
"testing"
"time"
sdkmath "cosmossdk.io/math"
"github.com/0glabs/0g-chain/app"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
precisebankkeeper "github.com/0glabs/0g-chain/x/precisebank/keeper"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
sdk "github.com/cosmos/cosmos-sdk/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/evmos/ethermint/crypto/ethsecp256k1"
"github.com/stretchr/testify/require"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// func TestUpgradeCommunityParams_Mainnet(t *testing.T) {
// require.Equal(
// t,
// sdkmath.LegacyZeroDec().String(),
// app.CommunityParams_Mainnet.StakingRewardsPerSecond.String(),
// )
// require.Equal(
// t,
// // Manually confirmed
// "317097.919837645865043125",
// app.CommunityParams_Mainnet.UpgradeTimeSetStakingRewardsPerSecond.String(),
// "mainnet kava per second should be correct",
// )
// }
// func TestUpgradeCommunityParams_Testnet(t *testing.T) {
// require.Equal(
// t,
// sdkmath.LegacyZeroDec().String(),
// app.CommunityParams_Testnet.StakingRewardsPerSecond.String(),
// )
// require.Equal(
// t,
// // Manually confirmed
// "475646879756.468797564687975646",
// app.CommunityParams_Testnet.UpgradeTimeSetStakingRewardsPerSecond.String(),
// "testnet kava per second should be correct",
// )
// }
func TestUpdateValidatorMinimumCommission(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
sk := tApp.GetStakingKeeper()
stakingParams := sk.GetParams(ctx)
stakingParams.MinCommissionRate = sdk.ZeroDec()
sk.SetParams(ctx, stakingParams)
// Set some validators with varying commission rates
vals := []struct {
name string
operatorAddr sdk.ValAddress
consPriv *ethsecp256k1.PrivKey
commissionRateMin sdk.Dec
commissionRateMax sdk.Dec
shouldBeUpdated bool
func TestMigrateEvmutilToPrecisebank(t *testing.T) {
// Full test case with all components together
tests := []struct {
name string
initialReserve sdkmath.Int
fractionalBalances []sdkmath.Int
}{
{
name: "zero commission rate",
operatorAddr: sdk.ValAddress("val0"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.ZeroDec(),
commissionRateMax: sdk.ZeroDec(),
shouldBeUpdated: true,
"no fractional balances",
sdkmath.NewInt(0),
[]sdkmath.Int{},
},
{
name: "0.01 commission rate",
operatorAddr: sdk.ValAddress("val1"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.01"),
commissionRateMax: sdk.MustNewDecFromStr("0.01"),
shouldBeUpdated: true,
"sufficient reserve, 0 remainder",
// Accounts adding up to 2 int units, same as reserve
sdkmath.NewInt(2),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
name: "0.05 commission rate",
operatorAddr: sdk.ValAddress("val2"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.05"),
commissionRateMax: sdk.MustNewDecFromStr("0.05"),
shouldBeUpdated: false,
"insufficient reserve, 0 remainder",
// Accounts adding up to 2 int units, but only 1 int unit in reserve
sdkmath.NewInt(1),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
name: "0.06 commission rate",
operatorAddr: sdk.ValAddress("val3"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.06"),
commissionRateMax: sdk.MustNewDecFromStr("0.06"),
shouldBeUpdated: false,
"excess reserve, 0 remainder",
// Accounts adding up to 2 int units, but 3 int unit in reserve
sdkmath.NewInt(3),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
name: "0.5 commission rate",
operatorAddr: sdk.ValAddress("val4"),
consPriv: generateConsKey(t),
commissionRateMin: sdk.MustNewDecFromStr("0.5"),
commissionRateMax: sdk.MustNewDecFromStr("0.5"),
shouldBeUpdated: false,
"sufficient reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, same as reserve
sdkmath.NewInt(2),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, less than reserve,
// Reserve should be 2 and remainder 0.5
sdkmath.NewInt(1),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"excess reserve, non-zero remainder",
// Accounts adding up to 1.5 int units, 3 int units in reserve
sdkmath.NewInt(3),
[]sdkmath.Int{
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
for _, v := range vals {
val, err := stakingtypes.NewValidator(
v.operatorAddr,
v.consPriv.PubKey(),
stakingtypes.Description{},
)
require.NoError(t, err)
val.Commission.Rate = v.commissionRateMin
val.Commission.MaxRate = v.commissionRateMax
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
err = sk.SetValidatorByConsAddr(ctx, val)
require.NoError(t, err)
sk.SetValidator(ctx, val)
}
ak := tApp.GetAccountKeeper()
bk := tApp.GetBankKeeper()
evmuk := tApp.GetEvmutilKeeper()
pbk := tApp.GetPrecisebankKeeper()
require.NotPanics(
t, func() {
app.UpdateValidatorMinimumCommission(ctx, tApp.App)
},
)
reserveCoin := sdk.NewCoin(precisebanktypes.IntegerCoinDenom, tt.initialReserve)
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(reserveCoin))
require.NoError(t, err)
stakingParamsAfter := sk.GetParams(ctx)
require.Equal(t, stakingParamsAfter.MinCommissionRate, app.ValidatorMinimumCommission)
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
// Check that all validators have a commission rate >= 5%
for _, val := range vals {
t.Run(val.name, func(t *testing.T) {
validator, found := sk.GetValidator(ctx, val.operatorAddr)
require.True(t, found, "validator should be found")
// Double check balances
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(
t,
validator.GetCommission().GTE(app.ValidatorMinimumCommission),
"commission rate should be >= 5%",
)
require.Equal(t, tt.initialReserve, oldReserveBalance.Amount, "initial x/evmutil reserve balance")
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
require.True(
t,
validator.Commission.MaxRate.GTE(app.ValidatorMinimumCommission),
"commission rate max should be >= 5%, got %s",
validator.Commission.MaxRate,
)
// Set accounts
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
if val.shouldBeUpdated {
require.Equal(
t,
ctx.BlockTime(),
validator.Commission.UpdateTime,
"commission update time should be set to block time",
)
} else {
require.Equal(
t,
time.Unix(0, 0).UTC(),
validator.Commission.UpdateTime,
"commission update time should not be changed -- default value is 0",
)
err := evmuk.SetBalance(ctx, addr, balance)
require.NoError(t, err)
}
// Run full x/evmutil -> x/precisebank migration
err = app.MigrateEvmutilToPrecisebank(
ctx,
ak,
bk,
evmuk,
pbk,
)
require.NoError(t, err)
// Check old reserve is empty
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
// Check new reserve fully backs fractional balances
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
remainder := pbk.GetRemainderAmount(ctx)
expectedReserveBal := fractionalBalanceTotal.Add(remainder)
require.Equal(
t,
expectedReserveBal,
newReserveBalanceAfter.Amount.Mul(precisebanktypes.ConversionFactor()),
"new reserve should equal total fractional balances",
)
// Check balances are deleted in evmutil and migrated to precisebank
for i := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
acc := evmuk.GetAccount(ctx, addr)
require.Nil(t, acc, "account should be deleted")
balance := pbk.GetFractionalBalance(ctx, addr)
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
}
// Check that balances and the remainder are valid via module invariants
res, stop := precisebankkeeper.AllInvariants(pbk)(ctx)
require.Falsef(t, stop, "invariants should pass: %s", res)
})
}
}
// func TestUpdateIncentiveParams(t *testing.T) {
// tApp := app.NewTestApp()
// tApp.InitializeFromGenesisStates()
// ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
func TestTransferFractionalBalances(t *testing.T) {
tests := []struct {
name string
fractionalBalances []sdkmath.Int
}{
{
"no fractional balances",
[]sdkmath.Int{},
},
{
"balanced fractional balances",
[]sdkmath.Int{
// 4 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"unbalanced balances",
[]sdkmath.Int{
// 3 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
// ik := tApp.GetIncentiveKeeper()
// params := ik.GetParams(ctx)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
// startPeriod := time.Date(2021, 10, 26, 15, 0, 0, 0, time.UTC)
// endPeriod := time.Date(2022, 10, 26, 15, 0, 0, 0, time.UTC)
evmutilk := tApp.GetEvmutilKeeper()
pbk := tApp.GetPrecisebankKeeper()
// params.EarnRewardPeriods = incentivetypes.MultiRewardPeriods{
// incentivetypes.NewMultiRewardPeriod(
// true,
// "bkava",
// startPeriod,
// endPeriod,
// sdk.NewCoins(
// sdk.NewCoin("ukava", sdk.NewInt(159459)),
// ),
// ),
// }
// ik.SetParams(ctx, params)
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
// beforeParams := ik.GetParams(ctx)
// require.Equal(t, params, beforeParams, "initial incentive params should be set")
err := evmutilk.SetBalance(ctx, addr, balance)
require.NoError(t, err)
}
// // -- UPGRADE
// app.UpdateIncentiveParams(ctx, tApp.App)
// Run balance transfer
aggregateSum, err := app.TransferFractionalBalances(
ctx,
evmutilk,
pbk,
)
require.NoError(t, err)
// // -- After
// afterParams := ik.GetParams(ctx)
// Check balances are deleted in evmutil and migrated to precisebank
sum := sdkmath.ZeroInt()
for i := range tt.fractionalBalances {
sum = sum.Add(tt.fractionalBalances[i])
// require.Len(
// t,
// afterParams.EarnRewardPeriods[0].RewardsPerSecond,
// 1,
// "bkava earn reward period should only contain 1 coin",
// )
// require.Equal(
// t,
// // Manual calculation of
// // 600,000 * 1000,000 / (365 * 24 * 60 * 60)
// sdk.NewCoin("ukava", sdkmath.NewInt(19025)),
// afterParams.EarnRewardPeriods[0].RewardsPerSecond[0],
// "bkava earn reward period should be updated",
// )
addr := sdk.AccAddress([]byte(strconv.Itoa(i)))
acc := evmutilk.GetAccount(ctx, addr)
require.Nil(t, acc, "account should be deleted")
// // Check that other params are not changed
// afterParams.EarnRewardPeriods[0].RewardsPerSecond[0] = beforeParams.EarnRewardPeriods[0].RewardsPerSecond[0]
// require.Equal(
// t,
// beforeParams,
// afterParams,
// "other param values should not be changed",
// )
// }
balance := pbk.GetFractionalBalance(ctx, addr)
require.Equal(t, tt.fractionalBalances[i], balance, "balance should be migrated")
}
func generateConsKey(
t *testing.T,
) *ethsecp256k1.PrivKey {
t.Helper()
key, err := ethsecp256k1.GenerateKey()
require.NoError(t, err)
return key
require.Equal(t, sum, aggregateSum, "aggregate sum should be correct")
})
}
}
func TestInitializeRemainder(t *testing.T) {
tests := []struct {
name string
giveAggregateSum sdkmath.Int
wantRemainder sdkmath.Int
}{
{
"0 remainder, 1ukava",
precisebanktypes.ConversionFactor(),
sdkmath.NewInt(0),
},
{
"0 remainder, multiple ukava",
precisebanktypes.ConversionFactor().MulRaw(5),
sdkmath.NewInt(0),
},
{
"non-zero remainder, min",
precisebanktypes.ConversionFactor().SubRaw(1),
sdkmath.NewInt(1),
},
{
"non-zero remainder, max",
sdkmath.NewInt(1),
precisebanktypes.ConversionFactor().SubRaw(1),
},
{
"non-zero remainder, half",
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
pbk := tApp.GetPrecisebankKeeper()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
remainder := app.InitializeRemainder(
ctx,
tApp.GetPrecisebankKeeper(),
tt.giveAggregateSum,
)
require.Equal(t, tt.wantRemainder, remainder)
// Check actual state
remainderAfter := pbk.GetRemainderAmount(ctx)
require.Equal(t, tt.wantRemainder, remainderAfter)
// Not checking invariants here since it requires actual balance state
aggregateSumWithRemainder := tt.giveAggregateSum.Add(remainder)
require.True(
t,
aggregateSumWithRemainder.
Mod(precisebanktypes.ConversionFactor()).
IsZero(),
"remainder + aggregate sum should be a multiple of the conversion factor",
)
})
}
}
func TestTransferFractionalBalanceReserve(t *testing.T) {
tests := []struct {
name string
initialReserve sdk.Coin
fractionalBalances []sdkmath.Int
}{
{
"balanced reserve, no remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 2 accounts
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 4 accounts, total 2 int units
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"extra reserve funds",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(2)),
[]sdkmath.Int{
// 2 accounts, total 1 int units
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"insufficient reserve, with remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(1)),
[]sdkmath.Int{
// 5 accounts, total 2.5 int units
// Expected 3 int units in reserve, 0.5 remainder
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
{
"extra reserve funds, with remainder",
sdk.NewCoin(precisebanktypes.IntegerCoinDenom, sdk.NewInt(3)),
[]sdkmath.Int{
// 3 accounts, total 1.5 int units.
// Expected 2 int units in reserve, 0.5 remainder
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
precisebanktypes.ConversionFactor().QuoRaw(2),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tApp := app.NewTestApp()
tApp.InitializeFromGenesisStates()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: time.Now()})
bk := tApp.GetBankKeeper()
pbk := tApp.GetPrecisebankKeeper()
err := bk.MintCoins(ctx, evmutiltypes.ModuleName, sdk.NewCoins(tt.initialReserve))
require.NoError(t, err)
oldReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(evmutiltypes.ModuleName)
newReserveAddr := tApp.GetAccountKeeper().GetModuleAddress(precisebanktypes.ModuleName)
// Double check balances
oldReserveBalance := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
newReserveBalance := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
require.Equal(t, tt.initialReserve, oldReserveBalance)
require.True(t, newReserveBalance.IsZero(), "empty initial new reserve")
for i, balance := range tt.fractionalBalances {
addr := sdk.AccAddress([]byte{byte(i)})
require.NotPanics(t, func() {
pbk.SetFractionalBalance(ctx, addr, balance)
}, "given fractional balances should be valid")
}
// Run reserve migration
err = app.TransferFractionalBalanceReserve(
ctx,
tApp.GetAccountKeeper(),
bk,
tApp.GetPrecisebankKeeper(),
)
require.NoError(t, err)
// Check old reserve is empty
oldReserveBalanceAfter := bk.GetBalance(ctx, oldReserveAddr, precisebanktypes.IntegerCoinDenom)
require.True(t, oldReserveBalanceAfter.IsZero(), "old reserve should be empty")
// Check new reserve fully backs fractional balances
newReserveBalanceAfter := bk.GetBalance(ctx, newReserveAddr, precisebanktypes.IntegerCoinDenom)
fractionalBalanceTotal := pbk.GetTotalSumFractionalBalances(ctx)
expectedReserveBal := fractionalBalanceTotal.
Quo(precisebanktypes.ConversionFactor())
// Check if there's a remainder
if fractionalBalanceTotal.Mod(precisebanktypes.ConversionFactor()).IsPositive() {
expectedReserveBal = expectedReserveBal.Add(sdkmath.OneInt())
}
require.Equal(
t,
expectedReserveBal,
newReserveBalanceAfter.Amount,
"new reserve should equal total fractional balances + remainder",
)
})
}
}

View File

@ -28,9 +28,9 @@ DIRS := $(BUILD_CACHE_DIR) $(BIN_DIR)
### Tool Versions ###
################################################################################
GO_BIN ?= go
PROTOC_VERSION ?= v21.9
BUF_VERSION ?= v1.9.0
PROTOC_GEN_GOCOSMOS_VERSION ?= v0.3.1
PROTOC_VERSION ?= v25.1
BUF_VERSION ?= v1.28.1
PROTOC_GEN_GOCOSMOS_VERSION ?= $(shell $(GO_BIN) list -m -f '{{.Version}}' github.com/cosmos/gogoproto)
PROTOC_GEN_GRPC_GATEWAY_VERSION ?= $(shell $(GO_BIN) list -m github.com/grpc-ecosystem/grpc-gateway| sed 's:.* ::')
PROTOC_GEN_DOC_VERSION ?= v1.5.1
SWAGGER_COMBINE_VERSION ?= v1.4.0
@ -68,7 +68,7 @@ $(PROTOC_VERSION_FILE):
mkdir -p protoc && cd protoc; \
curl -sOL $(PROTOC_DOWNLOAD_URL); \
unzip -q $(PROTOC_ARCHIVE_NAME) bin/protoc
@cp $(BUILD_CACHE_DIR)/protoc/bin/protoc $(BIN_DIR)/protoc
@cp -f $(BUILD_CACHE_DIR)/protoc/bin/protoc $(BIN_DIR)/protoc
@rm -rf $(BUILD_CACHE_DIR)/protoc
PROTOC := $(BIN_DIR)/protoc
@ -93,7 +93,7 @@ $(BUF_VERSION_FILE):
mkdir -p buf && cd buf; \
curl -sOL $(BUF_DOWNLOAD_URL); \
tar -xzf $(BUF_ARCHIVE_NAME) buf/bin/buf
@cp $(BUILD_CACHE_DIR)/buf/buf/bin/buf $(BIN_DIR)/buf
@cp -f $(BUILD_CACHE_DIR)/buf/buf/bin/buf $(BIN_DIR)/buf
@rm -rf $(BUILD_CACHE_DIR)/buf
BUF := $(BIN_DIR)/buf
@ -113,8 +113,8 @@ $(PROTOC_GEN_GOCOSMOS_VERSION_FILE):
@touch $(PROTOC_GEN_GOCOSMOS_VERSION_FILE)
@cd $(BUILD_CACHE_DIR); \
mkdir -p protoc-gen-gocosmos && cd protoc-gen-gocosmos; \
git clone -q https://github.com/regen-network/cosmos-proto.git; \
cd cosmos-proto; \
git clone -q https://github.com/cosmos/gogoproto.git; \
cd gogoproto; \
git checkout -q $(PROTOC_GEN_GOCOSMOS_VERSION); \
GOBIN=$(ROOT_DIR)/$(BIN_DIR) $(GO_BIN) install ./protoc-gen-gocosmos
@rm -rf $(BUILD_CACHE_DIR)/protoc-gen-gocosmos
@ -185,7 +185,7 @@ $(PROTOC_GEN_DOC_VERSION_FILE):
mkdir -p protoc-gen-doc && cd protoc-gen-doc; \
curl -sOL $(PROTOC_GEN_DOC_DOWNLOAD_URL); \
tar -xzf $(PROTOC_GEN_DOC_ARCHIVE_NAME) protoc-gen-doc
@cp $(BUILD_CACHE_DIR)/protoc-gen-doc/protoc-gen-doc $(BIN_DIR)/protoc-gen-doc
@cp -f $(BUILD_CACHE_DIR)/protoc-gen-doc/protoc-gen-doc $(BIN_DIR)/protoc-gen-doc
@rm -rf $(BUILD_CACHE_DIR)/protoc-gen-doc
PROTOC_GEN_DOC := $(BIN_DIR)/protoc-gen-doc

45
build/lint.mk Normal file
View File

@ -0,0 +1,45 @@
################################################################################
### Required Variables ###
################################################################################
ifndef DOCKER
$(error DOCKER not set)
endif
ifndef BUILD_DIR
$(error BUILD_DIR not set)
endif
################################################################################
### Lint Settings ###
################################################################################
LINT_FROM_REV ?= $(shell git merge-base origin/master HEAD)
GOLANGCI_VERSION ?= $(shell cat .golangci-version)
GOLANGCI_IMAGE_TAG ?= golangci/golangci-lint:$(GOLANGCI_VERSION)
GOLANGCI_DIR ?= $(CURDIR)/$(BUILD_DIR)/.golangci-lint
GOLANGCI_CACHE_DIR ?= $(GOLANGCI_DIR)/$(GOLANGCI_VERSION)-cache
GOLANGCI_MOD_CACHE_DIR ?= $(GOLANGCI_DIR)/go-mod
################################################################################
### Lint Target ###
################################################################################
.PHONY: lint
lint: $(GOLANGCI_CACHE_DIR) $(GOLANGCI_MOD_CACHE_DIR)
@echo "Running lint from rev $(LINT_FROM_REV), use LINT_FROM_REV var to override."
$(DOCKER) run -t --rm \
-v $(GOLANGCI_CACHE_DIR):/root/.cache \
-v $(GOLANGCI_MOD_CACHE_DIR):/go/pkg/mod \
-v $(CURDIR):/app \
-w /app \
$(GOLANGCI_IMAGE_TAG) \
golangci-lint run -v --new-from-rev $(LINT_FROM_REV)
$(GOLANGCI_CACHE_DIR):
@mkdir -p $@
$(GOLANGCI_MOD_CACHE_DIR):
@mkdir -p $@

View File

@ -14,13 +14,23 @@ PROTOBUF_ANY_DOWNLOAD_URL = https://raw.githubusercontent.com/protocolbuffers/pr
#
# Proto dependencies under go.mod
#
GOGO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/gogo/protobuf)
TENDERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/tendermint/tendermint)
GOGO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/gogoproto)
TENDERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cometbft/cometbft)
COSMOS_PROTO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/cosmos-proto)
COSMOS_SDK_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/cosmos-sdk)
IBC_GO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/ibc-go/v6)
IBC_GO_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/cosmos/ibc-go/v7)
ETHERMINT_PATH := $(shell $(GO_BIN) list -m -f '{{.Dir}}' github.com/evmos/ethermint)
#
# ICS23 Proof Proto
#
ICS23_VERSION := $(shell $(GO_BIN) list -m -f '{{.Version}}' github.com/cosmos/ics23/go)
ICS23_PROOFS_PROTO_PATH := cosmos/ics23/v1/proofs.proto
ICS23_PROOFS_PROTO_LOCAL_PATH := third_party/proto/$(ICS23_PROOFS_PROTO_PATH)
ICS23_PROOFS_PROTO_DOWNLOAD_URL := https://raw.githubusercontent.com/cosmos/ics23/go/$(ICS23_VERSION)/proto/$(ICS23_PROOFS_PROTO_PATH)
#
# Common target directories
#
@ -44,18 +54,21 @@ proto-update-deps: check-rsync ## Update all third party proto files
@curl -sSL $(PROTOBUF_ANY_DOWNLOAD_URL)/any.proto > $(PROTOBUF_GOOGLE_TYPES)/any.proto
@mkdir -p client/docs
@cp $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
@cp $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
@cp -f $(COSMOS_SDK_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/cosmos-swagger.yml
@cp -f $(IBC_GO_PATH)/docs/client/swagger-ui/swagger.yaml client/docs/ibc-go-swagger.yml
@cp -f $(ETHERMINT_PATH)/client/docs/swagger-ui/swagger.yaml client/docs/ethermint-swagger.yml
@mkdir -p $(COSMOS_PROTO_TYPES)
@cp $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
@cp -f $(COSMOS_PROTO_PATH)/proto/cosmos_proto/cosmos.proto $(COSMOS_PROTO_TYPES)/cosmos.proto
@mkdir -p $(dir $(ICS23_PROOFS_PROTO_LOCAL_PATH))
@curl -sSL $(ICS23_PROOFS_PROTO_DOWNLOAD_URL) > $(ICS23_PROOFS_PROTO_LOCAL_PATH)
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(GOGO_PATH)/gogoproto third_party/proto
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(TENDERMINT_PATH)/proto third_party
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(COSMOS_SDK_PATH)/proto third_party
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(IBC_GO_PATH)/proto third_party
@$(RSYNC_BIN) -r --chmod=Du=rwx,Dgo=rx,Fu=rw,Fgo=r --include "*.proto" --include='*/' --exclude='*' $(ETHERMINT_PATH)/proto third_party
@cp -f $(IBC_GO_PATH)/third_party/proto/proofs.proto third_party/proto/proofs.proto
.PHONY: check-proto-deps
check-proto-deps: proto-update-deps ## Return error code 1 if proto dependencies are not changed

View File

@ -1,7 +1,7 @@
.PHONY: proto-lint check-proto-lint
proto-lint check-proto-lint: install-build-deps
@echo "Linting proto file"
@$(BUF) lint
@$(BUF) lint proto
.PHONY: proto-gen
proto-gen: install-build-deps

View File

@ -1,6 +1,8 @@
package chaincfg
import sdk "github.com/cosmos/cosmos-sdk/types"
import (
sdk "github.com/cosmos/cosmos-sdk/types"
)
const (
AppName = "0gchaind"

View File

@ -1 +1 @@
a967d2fdda299ec8e1e3b99fb55bd06ecfdb0469
6862cde560c70cb82f7908e6cef22ca223465bd2

View File

@ -22,6 +22,8 @@
},
"app_hash": "",
"app_state": {
"06-solomachine": null,
"07-tendermint": null,
"auction": {
"next_auction_id": "1",
"params": {
@ -505,6 +507,10 @@
{
"address": "kava1vlpsrmdyuywvaqrv7rx6xga224sqfwz3fyfhwq",
"coins": [
{
"denom": "bnb",
"amount": "500000000"
},
{
"denom": "btcb",
"amount": "200000000"
@ -525,6 +531,10 @@
"denom": "erc20/axelar/wbtc",
"amount": "1000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"amount": "200000000"
},
{
"denom": "erc20/multichain/usdc",
"amount": "1000000000000000000"
@ -556,12 +566,20 @@
{
"denom": "usdx",
"amount": "103000000000"
},
{
"denom": "xrpb",
"amount": "1000000000000000"
}
]
},
{
"address": "kava1krh7k30pc9rteejpl2zycj0vau58y8c69xkzws",
"coins": [
{
"denom": "bnb",
"amount": "100000000000000000"
},
{
"denom": "btcb",
"amount": "200000000"
@ -582,6 +600,14 @@
"denom": "erc20/axelar/wbtc",
"amount": "1000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"amount": "200000000"
},
{
"denom": "erc20/tether/usdt",
"amount": "100000000000"
},
{
"denom": "hard",
"amount": "1000000000"
@ -597,6 +623,10 @@
{
"denom": "usdx",
"amount": "103000000000"
},
{
"denom": "xrpb",
"amount": "103000000000"
}
]
},
@ -818,6 +848,7 @@
"gov_denom": "ukava",
"params": {
"circuit_breaker": false,
"liquidation_block_interval": 500,
"collateral_params": [
{
"denom": "bnb",
@ -989,8 +1020,7 @@
"check_collateralization_index_count": "10",
"conversion_factor": "6"
}
]
,
],
"debt_auction_lot": "10000000000",
"debt_auction_threshold": "100000000000",
"debt_param": {
@ -1237,7 +1267,15 @@
"votes": []
},
"community": {
"params": {}
"params": {
"upgrade_time_disable_inflation": "2023-11-01T00:00:00Z",
"upgrade_time_set_staking_rewards_per_second": "744191",
"staking_rewards_per_second": "0"
},
"staking_rewards_state": {
"last_accumulation_time": "0001-01-01T00:00:00Z",
"last_truncation_error": "0"
}
},
"crisis": {
"constant_fee": {
@ -2063,6 +2101,25 @@
}
],
"nested_types": []
},
{
"msg_type_url": "/kava.committee.v1beta1.MsgVote",
"msg_value_type_name": "MsgValueCommitteeVote",
"value_types": [
{
"name": "proposal_id",
"type": "uint64"
},
{
"name": "voter",
"type": "string"
},
{
"name": "vote_type",
"type": "int32"
}
],
"nested_types": []
}
],
"allow_unprotected_txs": false
@ -2225,22 +2282,27 @@
"deposits": [],
"votes": [],
"proposals": [],
"deposit_params": {
"deposit_params": null,
"voting_params": {
"voting_period": "604800s"
},
"tally_params": null,
"params": {
"min_deposit": [
{
"denom": "ukava",
"amount": "10000000"
}
],
"max_deposit_period": "172800s"
},
"voting_params": {
"voting_period": "600s"
},
"tally_params": {
"max_deposit_period": "172800s",
"voting_period": "604800s",
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000"
"veto_threshold": "0.334000000000000000",
"min_initial_deposit_ratio": "0.000000000000000000",
"burn_vote_quorum": false,
"burn_proposal_deposit_prevote": false,
"burn_vote_veto": true
}
},
"hard": {
@ -2515,6 +2577,24 @@
},
"reserve_factor": "0.025000000000000000",
"keeper_reward_percentage": "0.020000000000000000"
},
{
"denom": "erc20/bitgo/wbtc",
"borrow_limit": {
"has_max_limit": true,
"maximum_limit": "0.000000000000000000",
"loan_to_value": "0.000000000000000000"
},
"spot_market_id": "btc:usd:30",
"conversion_factor": "100000000",
"interest_rate_model": {
"base_rate_apy": "0.000000000000000000",
"base_multiplier": "0.050000000000000000",
"kink": "0.800000000000000000",
"jump_multiplier": "5.000000000000000000"
},
"reserve_factor": "0.025000000000000000",
"keeper_reward_percentage": "0.020000000000000000"
}
],
"minimum_borrow_usd_value": "10.000000000000000000"
@ -2730,6 +2810,18 @@
"amount": "787"
}
]
},
{
"active": true,
"collateral_type": "erc20/bitgo/wbtc",
"start": "2022-11-11T15:00:00Z",
"end": "2025-11-11T15:00:00Z",
"rewards_per_second": [
{
"denom": "ukava",
"amount": "787"
}
]
}
],
"hard_borrow_reward_periods": [],
@ -3166,6 +3258,16 @@
}
},
"params": null,
"packetfowardmiddleware": {
"params": {
"fee_percentage": "0.000000000000000000"
},
"in_flight_packets": {}
},
"precisebank": {
"balances": [],
"remainder": "0"
},
"pricefeed": {
"params": {
"markets": [
@ -3639,6 +3741,7 @@
}
]
},
"router": {},
"savings": {
"params": {
"supported_denoms": [
@ -3810,7 +3913,8 @@
"params": {
"send_enabled": true,
"receive_enabled": true
}
},
"total_escrowed": []
},
"upgrade": {},
"validatorvesting": null,

View File

@ -837,6 +837,7 @@
"gov_denom": "ukava",
"params": {
"circuit_breaker": false,
"liquidation_block_interval": 500,
"collateral_params": [
{
"auction_size": "50000000000",
@ -2177,6 +2178,23 @@
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000"
},
"params": {
"min_deposit": [
{
"denom": "ukava",
"amount": "10000000"
}
],
"max_deposit_period": "172800s",
"voting_period": "600s",
"quorum": "0.334000000000000000",
"threshold": "0.500000000000000000",
"veto_threshold": "0.334000000000000000",
"min_initial_deposit_ratio": "0.000000000000000000",
"burn_vote_quorum": false,
"burn_proposal_deposit_prevote": false,
"burn_vote_veto": true
}
},
"hard": {
@ -2982,6 +3000,15 @@
}
},
"params": null,
"packetfowardmiddleware": {
"params": {
"fee_percentage": "0.000000000000000000"
},
"in_flight_packets": {}
},
"precisebank": {
"remainder": "0"
},
"pricefeed": {
"params": {
"markets": [

View File

@ -14,9 +14,9 @@ import (
"strings"
"testing"
"github.com/cometbft/cometbft/crypto/ed25519"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
tmtypes "github.com/tendermint/tendermint/types"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/tests"
@ -813,7 +813,7 @@ func TestKvCLISubmitCommunityPoolSpendProposal(t *testing.T) {
}
func TestKvCLIQueryTxPagination(t *testing.T) {
// Skip until https://github.com/tendermint/tendermint/issues/4432 has been
// Skip until https://github.com/cometbft/cometbft/issues/4432 has been
// resolved and included in a release.
t.SkipNow()

View File

@ -13,13 +13,13 @@ import (
"github.com/stretchr/testify/require"
tmtypes "github.com/tendermint/tendermint/types"
tmtypes "github.com/cometbft/cometbft/types"
"cosmossdk.io/simapp"
clientkeys "github.com/cosmos/cosmos-sdk/client/keys"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys"
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/tests"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/x/auth"

View File

@ -182,6 +182,23 @@
]
}
},
{
"url": "./out/swagger/kava/precisebank/v1/query.swagger.json",
"tags": {
"rename": {
"Query": "Precisebank"
}
},
"operationIds": {
"rename": [
{
"type": "regex",
"from": "(.*)",
"to": "Precisebank$1"
}
]
}
},
{
"url": "./out/swagger/kava/pricefeed/v1beta1/query.swagger.json",
"tags": {
@ -295,6 +312,30 @@
]
}
},
{
"url": "./client/docs/ethermint-swagger.yml",
"dereference": {
"circular": "ignore"
},
"tags": {
"rename": {
"Query": "Ethermint"
}
},
"operationIds": {
"rename": [
{
"type": "regex",
"from": "(.*)",
"to": "Ethermint$1"
}
]
},
"paths": {
"exclude": [
]
}
},
{
"url": "./client/docs/legacy-swagger.yml",
"dereference": {

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,381 @@
[
{
"inputs": [
{
"internalType": "string",
"name": "name",
"type": "string"
},
{
"internalType": "string",
"name": "symbol",
"type": "string"
},
{
"internalType": "uint8",
"name": "decimals_",
"type": "uint8"
}
],
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Approval",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "previousOwner",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnershipTransferred",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": true,
"internalType": "address",
"name": "from",
"type": "address"
},
{
"indexed": true,
"internalType": "address",
"name": "to",
"type": "address"
},
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "Transfer",
"type": "event"
},
{
"inputs": [
{
"internalType": "address",
"name": "owner",
"type": "address"
},
{
"internalType": "address",
"name": "spender",
"type": "address"
}
],
"name": "allowance",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "approve",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "account",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "burn",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "decimals",
"outputs": [
{
"internalType": "uint8",
"name": "",
"type": "uint8"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "subtractedValue",
"type": "uint256"
}
],
"name": "decreaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "spender",
"type": "address"
},
{
"internalType": "uint256",
"name": "addedValue",
"type": "uint256"
}
],
"name": "increaseAllowance",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "mint",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "name",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "renounceOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "symbol",
"outputs": [
{
"internalType": "string",
"name": "",
"type": "string"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"internalType": "uint256",
"name": "",
"type": "uint256"
}
],
"stateMutability": "view",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "from",
"type": "address"
},
{
"internalType": "address",
"name": "to",
"type": "address"
},
{
"internalType": "uint256",
"name": "amount",
"type": "uint256"
}
],
"name": "transferFrom",
"outputs": [
{
"internalType": "bool",
"name": "",
"type": "bool"
}
],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "transferOwnership",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]

File diff suppressed because one or more lines are too long

1069
client/erc20/main.go Normal file

File diff suppressed because one or more lines are too long

74
client/grpc/README.md Normal file
View File

@ -0,0 +1,74 @@
# Kava gRPC Client
The Kava gRPC client is a tool for making gRPC queries on a Kava chain.
## Features
- Easy-to-use gRPC client for the Kava chain.
- Access all query clients for Cosmos and Kava modules using `client.Query` (e.g., `client.Query.Bank.Balance`).
- Utilize utility functions for common queries (e.g., `client.BaseAccount(str)`).
## Usage
### Creating a new client
```go
package main
import (
kavaGrpc "github.com/0glabs/0g-chain/client/grpc"
)
func main() {
grpcUrl := "https://grpc.kava.io:443"
client, err := kavaGrpc.NewClient(grpcUrl)
if err != nil {
panic(err)
}
_ = client // use client.Query and the client utils from here
}
```
### Making gRPC queries
Query clients for both Cosmos and Kava modules are available via `client.Query`.
Example: Query Cosmos module `x/bank` for address balance
```go
import (
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
)
rsp, err := client.Query.Bank.Balance(context.Background(), &banktypes.QueryBalanceRequest{
Address: "kava19rjk5qmmwywnzfccwzyn02jywgpwjqf60afj92",
Denom: "ukava",
})
```
Example: Query Kava module `x/evmutil` for params
```go
import (
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
)
rsp, err := client.Query.Evmutil.Params(
context.Background(), &evmutiltypes.QueryParamsRequest{},
)
```
#### Query Utilities
Utility functions for common queries are available directly on the client.
Example: Util query to get a base account
```go
kavaAcc := "kava19rjk5qmmwywnzfccwzyn02jywgpwjqf60afj92"
rsp, err := client.BaseAccount(kavaAcc)
if err != nil {
panic(err)
}
fmt.Printf("account sequence for %s: %d\n", kavaAcc, rsp.Sequence)
```
## Query Tests
To test queries, a Kava node is required, so the e2e tests for the gRPC client queries live in the `tests/e2e` directory. Tests for new utility queries should also be added there as e2e tests.

50
client/grpc/client.go Normal file
View File

@ -0,0 +1,50 @@
package grpc
import (
"errors"
"github.com/0glabs/0g-chain/client/grpc/query"
"github.com/0glabs/0g-chain/client/grpc/util"
)
// ZgChainGrpcClient enables the usage of kava grpc query clients and query utils
type ZgChainGrpcClient struct {
config ZgChainGrpcClientConfig
// Query clients for cosmos and kava modules
Query *query.QueryClient
// Utils for common queries (ie fetch an unpacked BaseAccount)
*util.Util
}
// ZgChainGrpcClientConfig is a configuration struct for a ZgChainGrpcClient
type ZgChainGrpcClientConfig struct {
// note: add future config options here
}
// NewClient creates a new ZgChainGrpcClient via a grpc url
func NewClient(grpcUrl string) (*ZgChainGrpcClient, error) {
return NewClientWithConfig(grpcUrl, NewDefaultConfig())
}
// NewClientWithConfig creates a new ZgChainGrpcClient via a grpc url and config
func NewClientWithConfig(grpcUrl string, config ZgChainGrpcClientConfig) (*ZgChainGrpcClient, error) {
if grpcUrl == "" {
return nil, errors.New("grpc url cannot be empty")
}
queryClient, err := query.NewQueryClient(grpcUrl)
if err != nil {
return nil, err
}
client := &ZgChainGrpcClient{
Query: queryClient,
Util: util.NewUtil(queryClient),
config: config,
}
return client, nil
}
func NewDefaultConfig() ZgChainGrpcClientConfig {
return ZgChainGrpcClientConfig{}
}

View File

@ -0,0 +1,15 @@
package grpc_test
import (
"testing"
"github.com/0glabs/0g-chain/client/grpc"
"github.com/stretchr/testify/require"
)
func TestNewClient_InvalidEndpoint(t *testing.T) {
_, err := grpc.NewClient("invalid-url")
require.ErrorContains(t, err, "unknown grpc url scheme")
_, err = grpc.NewClient("")
require.ErrorContains(t, err, "grpc url cannot be empty")
}

View File

@ -0,0 +1,52 @@
package query
import (
"context"
"crypto/tls"
"fmt"
"net/url"
"github.com/0glabs/0g-chain/app"
"github.com/cosmos/cosmos-sdk/codec"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
// newGrpcConnection parses a GRPC endpoint and creates a connection to it
func newGrpcConnection(ctx context.Context, endpoint string) (*grpc.ClientConn, error) {
grpcUrl, err := url.Parse(endpoint)
if err != nil {
return nil, fmt.Errorf("failed to parse grpc connection \"%s\": %v", endpoint, err)
}
var creds credentials.TransportCredentials
switch grpcUrl.Scheme {
case "http":
creds = insecure.NewCredentials()
case "https":
creds = credentials.NewTLS(&tls.Config{})
default:
return nil, fmt.Errorf("unknown grpc url scheme: %s", grpcUrl.Scheme)
}
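// Example endpoints (illustrative): "http://localhost:9090" selects insecure
// transport credentials, while "https://grpc.kava.io:443" selects TLS.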
// Ensure the encoding config is set up correctly with the query client
// otherwise it will produce panics like:
// invalid Go type math.Int for field ...
encodingConfig := app.MakeEncodingConfig()
protoCodec := codec.NewProtoCodec(encodingConfig.InterfaceRegistry)
grpcCodec := protoCodec.GRPCCodec()
secureOpt := grpc.WithTransportCredentials(creds)
grpcConn, err := grpc.DialContext(
ctx,
grpcUrl.Host,
secureOpt,
grpc.WithDefaultCallOptions(grpc.ForceCodec(grpcCodec)),
)
if err != nil {
return nil, err
}
return grpcConn, nil
}

7
client/grpc/query/doc.go Normal file

@ -0,0 +1,7 @@
/*
The query package includes Cosmos and Kava gRPC query clients.
To keep the `QueryClient` up to date, register a query client here whenever a
new module with gRPC queries is added to the Kava app.
*/
package query

108
client/grpc/query/query.go Normal file

@ -0,0 +1,108 @@
package query
import (
"context"
"github.com/cosmos/cosmos-sdk/client/grpc/tmservice"
txtypes "github.com/cosmos/cosmos-sdk/types/tx"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
authz "github.com/cosmos/cosmos-sdk/x/authz"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
govv1types "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
govv1beta1types "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
paramstypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal"
slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
feemarkettypes "github.com/evmos/ethermint/x/feemarket/types"
bep3types "github.com/0glabs/0g-chain/x/bep3/types"
committeetypes "github.com/0glabs/0g-chain/x/committee/types"
evmutiltypes "github.com/0glabs/0g-chain/x/evmutil/types"
issuancetypes "github.com/0glabs/0g-chain/x/issuance/types"
precisebanktypes "github.com/0glabs/0g-chain/x/precisebank/types"
pricefeedtypes "github.com/0glabs/0g-chain/x/pricefeed/types"
)
// QueryClient is a wrapper with all Cosmos and Kava grpc query clients
type QueryClient struct {
// cosmos-sdk query clients
Tm tmservice.ServiceClient
Tx txtypes.ServiceClient
Auth authtypes.QueryClient
Authz authz.QueryClient
Bank banktypes.QueryClient
Distribution disttypes.QueryClient
Evidence evidencetypes.QueryClient
Gov govv1types.QueryClient
GovBeta govv1beta1types.QueryClient
Mint minttypes.QueryClient
Params paramstypes.QueryClient
Slashing slashingtypes.QueryClient
Staking stakingtypes.QueryClient
Upgrade upgradetypes.QueryClient
Consensus consensustypes.QueryClient
// 3rd party query clients
Evm evmtypes.QueryClient
Feemarket feemarkettypes.QueryClient
IbcClient ibcclienttypes.QueryClient
IbcTransfer ibctransfertypes.QueryClient
// kava module query clients
Bep3 bep3types.QueryClient
Committee committeetypes.QueryClient
Evmutil evmutiltypes.QueryClient
Issuance issuancetypes.QueryClient
Pricefeed pricefeedtypes.QueryClient
Precisebank precisebanktypes.QueryClient
}
// NewQueryClient creates a new QueryClient and initializes all the module query clients
func NewQueryClient(grpcEndpoint string) (*QueryClient, error) {
conn, err := newGrpcConnection(context.Background(), grpcEndpoint)
if err != nil {
return &QueryClient{}, err
}
client := &QueryClient{
Tm: tmservice.NewServiceClient(conn),
Tx: txtypes.NewServiceClient(conn),
Auth: authtypes.NewQueryClient(conn),
Authz: authz.NewQueryClient(conn),
Bank: banktypes.NewQueryClient(conn),
Distribution: disttypes.NewQueryClient(conn),
Evidence: evidencetypes.NewQueryClient(conn),
Gov: govv1types.NewQueryClient(conn),
GovBeta: govv1beta1types.NewQueryClient(conn),
Mint: minttypes.NewQueryClient(conn),
Params: paramstypes.NewQueryClient(conn),
Slashing: slashingtypes.NewQueryClient(conn),
Staking: stakingtypes.NewQueryClient(conn),
Upgrade: upgradetypes.NewQueryClient(conn),
Consensus: consensustypes.NewQueryClient(conn),
Evm: evmtypes.NewQueryClient(conn),
Feemarket: feemarkettypes.NewQueryClient(conn),
IbcClient: ibcclienttypes.NewQueryClient(conn),
IbcTransfer: ibctransfertypes.NewQueryClient(conn),
Bep3: bep3types.NewQueryClient(conn),
Committee: committeetypes.NewQueryClient(conn),
Evmutil: evmutiltypes.NewQueryClient(conn),
Issuance: issuancetypes.NewQueryClient(conn),
Pricefeed: pricefeedtypes.NewQueryClient(conn),
Precisebank: precisebanktypes.NewQueryClient(conn),
}
return client, nil
}

View File

@ -0,0 +1,64 @@
package query_test
import (
"testing"
"github.com/0glabs/0g-chain/client/grpc/query"
"github.com/stretchr/testify/require"
)
func TestNewQueryClient_InvalidGrpc(t *testing.T) {
t.Run("valid connection", func(t *testing.T) {
conn, err := query.NewQueryClient("http://localhost:1234")
require.NoError(t, err)
require.NotNil(t, conn)
})
t.Run("non-empty url", func(t *testing.T) {
_, err := query.NewQueryClient("")
require.ErrorContains(t, err, "unknown grpc url scheme")
})
t.Run("invalid url scheme", func(t *testing.T) {
_, err := query.NewQueryClient("ftp://localhost:1234")
require.ErrorContains(t, err, "unknown grpc url scheme")
})
}
func TestNewQueryClient_ValidClient(t *testing.T) {
t.Run("all clients are created", func(t *testing.T) {
client, err := query.NewQueryClient("http://localhost:1234")
require.NoError(t, err)
require.NotNil(t, client)
// validate cosmos clients
require.NotNil(t, client.Tm)
require.NotNil(t, client.Tx)
require.NotNil(t, client.Auth)
require.NotNil(t, client.Authz)
require.NotNil(t, client.Bank)
require.NotNil(t, client.Distribution)
require.NotNil(t, client.Evidence)
require.NotNil(t, client.Gov)
require.NotNil(t, client.GovBeta)
require.NotNil(t, client.Mint)
require.NotNil(t, client.Params)
require.NotNil(t, client.Slashing)
require.NotNil(t, client.Staking)
require.NotNil(t, client.Upgrade)
require.NotNil(t, client.Consensus)
// validate 3rd party clients
require.NotNil(t, client.Evm)
require.NotNil(t, client.Feemarket)
require.NotNil(t, client.IbcClient)
require.NotNil(t, client.IbcTransfer)
// validate kava clients
require.NotNil(t, client.Bep3)
require.NotNil(t, client.Committee)
require.NotNil(t, client.Evmutil)
require.NotNil(t, client.Issuance)
require.NotNil(t, client.Pricefeed)
})
}

View File

@ -0,0 +1,41 @@
package util
import (
"context"
"fmt"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
)
// Account fetches an account via an address and returns the unpacked account
func (u *Util) Account(addr string) (authtypes.AccountI, error) {
res, err := u.query.Auth.Account(context.Background(), &authtypes.QueryAccountRequest{
Address: addr,
})
if err != nil {
return nil, fmt.Errorf("failed to fetch account: %w", err)
}
var acc authtypes.AccountI
err = u.encodingConfig.Marshaler.UnpackAny(res.Account, &acc)
if err != nil {
return nil, fmt.Errorf("failed to unpack account: %w", err)
}
return acc, nil
}
// BaseAccount fetches a base account via an address or returns an error if
// the account is not a base account
func (u *Util) BaseAccount(addr string) (authtypes.BaseAccount, error) {
acc, err := u.Account(addr)
if err != nil {
return authtypes.BaseAccount{}, err
}
bAcc, ok := acc.(*authtypes.BaseAccount)
if !ok {
return authtypes.BaseAccount{}, fmt.Errorf("%s is not a base account", addr)
}
return *bAcc, nil
}

8
client/grpc/util/doc.go Normal file

@ -0,0 +1,8 @@
/*
The util package contains utility functions for the Kava gRPC client.
For example, `account.go` includes account-related query helpers.
There, utilities such as `client.Util.BaseAccount(addr)` are exposed to
query an account and return an unpacked `BaseAccount` instance.
*/
package util

32
client/grpc/util/util.go Normal file

@ -0,0 +1,32 @@
package util
import (
"context"
"strconv"
grpctypes "github.com/cosmos/cosmos-sdk/types/grpc"
"google.golang.org/grpc/metadata"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"
query "github.com/0glabs/0g-chain/client/grpc/query"
)
// Util contains utility functions for the Kava gRPC client
type Util struct {
query *query.QueryClient
encodingConfig params.EncodingConfig
}
// NewUtil creates a new Util instance
func NewUtil(query *query.QueryClient) *Util {
return &Util{
query: query,
encodingConfig: app.MakeEncodingConfig(),
}
}
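// CtxAtHeight returns a context with the gRPC block height header set,
// so queries made with it are executed against state at the given height.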
func (u *Util) CtxAtHeight(height int64) context.Context {
heightStr := strconv.FormatInt(height, 10)
return metadata.AppendToOutgoingContext(context.Background(), grpctypes.GRPCBlockHeightHeader, heightStr)
}

View File

@ -13,7 +13,7 @@ import (
"strconv"
"strings"
"github.com/tendermint/tendermint/types"
"github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
@ -132,7 +132,7 @@ func (br BaseReq) ValidateBasic(w http.ResponseWriter) bool {
return true
}
// ReadRESTReq reads and unmarshals a Request's body to the the BaseReq struct.
// ReadRESTReq reads and unmarshals a Request's body to the BaseReq struct.
// Writes an error response to ResponseWriter and returns false if errors occurred.
func ReadRESTReq(w http.ResponseWriter, r *http.Request, cdc *codec.LegacyAmino, req interface{}) bool {
body, err := io.ReadAll(r.Body)

View File

@ -12,6 +12,7 @@ import (
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
simappparams "cosmossdk.io/simapp/params"
"github.com/0glabs/0g-chain/client/rest"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
@ -19,7 +20,6 @@ import (
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
simappparams "github.com/cosmos/cosmos-sdk/simapp/params"
"github.com/cosmos/cosmos-sdk/types"
)

View File

@ -7,6 +7,10 @@ import (
"path/filepath"
"strings"
"github.com/Kava-Labs/opendb"
cometbftdb "github.com/cometbft/cometbft-db"
"github.com/cometbft/cometbft/libs/log"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/server"
@ -19,8 +23,6 @@ import (
ethermintflags "github.com/evmos/ethermint/server/flags"
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/log"
db "github.com/tendermint/tm-db"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"
@ -29,6 +31,7 @@ import (
const (
flagMempoolEnableAuth = "mempool.enable-authentication"
flagMempoolAuthAddresses = "mempool.authorized-addresses"
flagSkipLoadLatest = "skip-load-latest"
)
// appCreator holds functions used by the sdk server to control the 0g-chain app.
@ -40,7 +43,7 @@ type appCreator struct {
// newApp loads config from AppOptions and returns a new app.
func (ac appCreator) newApp(
logger log.Logger,
db db.DB,
db cometbftdb.DB,
traceStore io.Writer,
appOpts servertypes.AppOptions,
) servertypes.Application {
@ -61,7 +64,7 @@ func (ac appCreator) newApp(
homeDir := cast.ToString(appOpts.Get(flags.FlagHome))
snapshotDir := filepath.Join(homeDir, "data", "snapshots") // TODO can these directory names be imported from somewhere?
snapshotDB, err := sdk.NewLevelDB("metadata", snapshotDir)
snapshotDB, err := opendb.OpenDB(appOpts, snapshotDir, "metadata", server.GetAppDBBackend(appOpts))
if err != nil {
panic(err)
}
@ -88,10 +91,26 @@ func (ac appCreator) newApp(
cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent)),
)
// Setup chainId
chainID := cast.ToString(appOpts.Get(flags.FlagChainID))
if len(chainID) == 0 {
// fallback to genesis chain-id
appGenesis, err := tmtypes.GenesisDocFromFile(filepath.Join(homeDir, "config", "genesis.json"))
if err != nil {
panic(err)
}
chainID = appGenesis.ChainID
}
skipLoadLatest := false
if appOpts.Get(flagSkipLoadLatest) != nil {
skipLoadLatest = cast.ToBool(appOpts.Get(flagSkipLoadLatest))
}
return app.NewApp(
logger, db, homeDir, traceStore, ac.encodingConfig,
app.Options{
SkipLoadLatest: false,
SkipLoadLatest: skipLoadLatest,
SkipUpgradeHeights: skipUpgradeHeights,
SkipGenesisInvariants: cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)),
InvariantCheckPeriod: cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
@ -112,18 +131,20 @@ func (ac appCreator) newApp(
baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))),
baseapp.SetIAVLDisableFastNode(cast.ToBool(iavlDisableFastNode)),
baseapp.SetIAVLLazyLoading(cast.ToBool(appOpts.Get(server.FlagIAVLLazyLoading))),
baseapp.SetChainID(chainID),
)
}
// appExport writes out an app's state to json.
func (ac appCreator) appExport(
logger log.Logger,
db db.DB,
db cometbftdb.DB,
traceStore io.Writer,
height int64,
forZeroHeight bool,
jailAllowedAddrs []string,
appOpts servertypes.AppOptions,
modulesToExport []string,
) (servertypes.ExportedApp, error) {
homePath, ok := appOpts.Get(flags.FlagHome).(string)
if !ok || homePath == "" {
@ -144,7 +165,7 @@ func (ac appCreator) appExport(
} else {
tempApp = app.NewApp(logger, db, homePath, traceStore, ac.encodingConfig, options)
}
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs)
return tempApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs, modulesToExport)
}
// addStartCmdFlags adds flags to the server start command.

View File

@ -4,10 +4,10 @@ import (
"encoding/json"
"fmt"
tmtypes "github.com/cometbft/cometbft/types"
"github.com/cosmos/cosmos-sdk/version"
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
"github.com/spf13/cobra"
tmtypes "github.com/tendermint/tendermint/types"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"

View File

@ -0,0 +1,53 @@
package iavlviewer
import (
"crypto/sha256"
"fmt"
"github.com/cosmos/iavl"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
func newDataCmd(opts ethermintserver.StartOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "data <prefix> [version number]",
Short: "View all keys, hash, & size of tree.",
Args: cobra.RangeArgs(1, 2),
RunE: func(cmd *cobra.Command, args []string) error {
prefix := args[0]
version := 0
if len(args) == 2 {
var err error
version, err = parseVersion(args[1])
if err != nil {
return err
}
}
tree, err := openPrefixTree(opts, cmd, prefix, version)
if err != nil {
return err
}
printKeys(tree)
hash := tree.Hash()
fmt.Printf("Hash: %X\n", hash)
fmt.Printf("Size: %X\n", tree.Size())
return nil
},
}
return cmd
}
func printKeys(tree *iavl.MutableTree) {
fmt.Println("Printing all keys with hashed values (to detect diff)")
tree.Iterate(func(key []byte, value []byte) bool { //nolint:errcheck
printKey := parseWeaveKey(key)
digest := sha256.Sum256(value)
fmt.Printf(" %s\n %X\n", printKey, digest)
return false
})
}

View File

@ -0,0 +1,38 @@
package iavlviewer
import (
"fmt"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
func newHashCmd(opts ethermintserver.StartOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "hash <prefix> [version number]",
Short: "Print the root hash of the iavl tree.",
Args: cobra.RangeArgs(1, 2),
RunE: func(cmd *cobra.Command, args []string) error {
prefix := args[0]
version := 0
if len(args) == 2 {
var err error
version, err = parseVersion(args[1])
if err != nil {
return err
}
}
tree, err := openPrefixTree(opts, cmd, prefix, version)
if err != nil {
return err
}
fmt.Printf("Hash: %X\n", tree.Hash())
return nil
},
}
return cmd
}

View File

@ -0,0 +1,86 @@
package iavlviewer
import (
"fmt"
"os"
"strconv"
"cosmossdk.io/log"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/store/wrapper"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
"github.com/cosmos/iavl"
iavldb "github.com/cosmos/iavl/db"
)
const (
DefaultCacheSize int = 10000
)
func NewCmd(opts ethermintserver.StartOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "iavlviewer <data|hash|shape|versions> <prefix> [version number]",
Short: "Output various data, hashes, and calculations for an iavl tree",
}
cmd.AddCommand(newDataCmd(opts))
cmd.AddCommand(newHashCmd(opts))
cmd.AddCommand(newShapeCmd(opts))
cmd.AddCommand(newVersionsCmd(opts))
return cmd
}
func parseVersion(arg string) (int, error) {
version, err := strconv.Atoi(arg)
if err != nil {
return 0, fmt.Errorf("invalid version number: '%s'", arg)
}
return version, nil
}
func openPrefixTree(opts ethermintserver.StartOptions, cmd *cobra.Command, prefix string, version int) (*iavl.MutableTree, error) {
clientCtx := client.GetClientContextFromCmd(cmd)
ctx := server.GetServerContextFromCmd(cmd)
ctx.Config.SetRoot(clientCtx.HomeDir)
db, err := opts.DBOpener(ctx.Viper, clientCtx.HomeDir, server.GetAppDBBackend(ctx.Viper))
if err != nil {
return nil, fmt.Errorf("failed to open database at %s: %s", clientCtx.HomeDir, err)
}
defer func() {
if err := db.Close(); err != nil {
ctx.Logger.Error("error closing db", "error", err.Error())
}
}()
cosmosdb := wrapper.NewCosmosDB(db)
tree, err := readTree(cosmosdb, version, []byte(prefix))
if err != nil {
return nil, fmt.Errorf("failed to read tree with prefix %s: %s", prefix, err)
}
return tree, nil
}
// readTree loads an iavl tree from the given database.
// If version is 0, it loads the latest version; otherwise it loads the named version.
// The prefix selects which iavl tree to read; the iavlviewer always sets a prefix.
func readTree(db dbm.DB, version int, prefix []byte) (*iavl.MutableTree, error) {
if len(prefix) != 0 {
db = dbm.NewPrefixDB(db, prefix)
}
tree := iavl.NewMutableTree(iavldb.NewWrapper(db), DefaultCacheSize, false, log.NewLogger(os.Stdout))
ver, err := tree.LoadVersion(int64(version))
if err != nil {
return nil, err
}
fmt.Printf("Latest version: %d\n", ver)
fmt.Printf("Got version: %d\n", version)
return tree, err
}

View File

@ -0,0 +1,47 @@
package iavlviewer
import (
"fmt"
"strings"
"github.com/cosmos/iavl"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
func newShapeCmd(opts ethermintserver.StartOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "shape <prefix> [version number]",
Short: "View shape of iavl tree.",
Args: cobra.RangeArgs(1, 2),
RunE: func(cmd *cobra.Command, args []string) error {
prefix := args[0]
version := 0
if len(args) == 2 {
var err error
version, err = parseVersion(args[1])
if err != nil {
return err
}
}
tree, err := openPrefixTree(opts, cmd, prefix, version)
if err != nil {
return err
}
printShape(tree)
return nil
},
}
return cmd
}
func printShape(tree *iavl.MutableTree) {
shape, err := tree.RenderShape(" ", nodeEncoder)
if err != nil {
fmt.Printf("failed to render tree shape: %s\n", err)
return
}
fmt.Println(strings.Join(shape, "\n"))
}

View File

@ -0,0 +1,74 @@
package iavlviewer
import (
"bytes"
"encoding/hex"
"fmt"
"strings"
"github.com/cosmos/iavl"
ethermintserver "github.com/evmos/ethermint/server"
"github.com/spf13/cobra"
)
func newVersionsCmd(opts ethermintserver.StartOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "versions <prefix>",
Short: "Print all versions of iavl tree",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
prefix := args[0]
tree, err := openPrefixTree(opts, cmd, prefix, 15)
if err != nil {
return err
}
printVersions(tree)
return nil
},
}
return cmd
}
func printVersions(tree *iavl.MutableTree) {
versions := tree.AvailableVersions()
fmt.Println("Available versions:")
for _, v := range versions {
fmt.Printf(" %d\n", v)
}
}
// parseWeaveKey assumes a separating ':' where everything before it should be
// ASCII and everything after it may be ASCII or binary
func parseWeaveKey(key []byte) string {
cut := bytes.IndexRune(key, ':')
if cut == -1 {
return encodeID(key)
}
prefix := key[:cut]
id := key[cut+1:]
return fmt.Sprintf("%s:%s", encodeID(prefix), encodeID(id))
}
// casts to a string if it is printable ascii, hex-encodes otherwise
func encodeID(id []byte) string {
for _, b := range id {
if b < 0x20 || b >= 0x80 {
return strings.ToUpper(hex.EncodeToString(id))
}
}
return string(id)
}
func nodeEncoder(id []byte, depth int, isLeaf bool) string {
prefix := fmt.Sprintf("-%d ", depth)
if isLeaf {
prefix = fmt.Sprintf("*%d ", depth)
}
if len(id) == 0 {
return fmt.Sprintf("%s<nil>", prefix)
}
return fmt.Sprintf("%s%s", prefix, parseWeaveKey(id))
}

View File

@ -1,14 +1,14 @@
package client
package main
import (
"bufio"
"github.com/cometbft/cometbft/libs/cli"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/keys"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/cli"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
ethclient "github.com/evmos/ethermint/client"
@ -18,9 +18,9 @@ import (
var ethFlag = "eth"
// KeyCommands registers a sub-tree of commands to interact with
// keyCommands registers a sub-tree of commands to interact with
// local private key storage.
func KeyCommands(defaultNodeHome string) *cobra.Command {
func keyCommands(defaultNodeHome string) *cobra.Command {
cmd := &cobra.Command{
Use: "keys",
Short: "Manage your application's keys",
@ -52,7 +52,7 @@ The pass backend requires GnuPG: https://gnupg.org/
addCmd := keys.AddKeyCommand()
addCmd.Flags().Bool(ethFlag, false, "use default evm coin-type (60) and key signing algorithm (\"eth_secp256k1\")")
algoFlag := addCmd.Flag(flags.FlagKeyAlgorithm)
algoFlag := addCmd.Flag(flags.FlagKeyType)
algoFlag.DefValue = string(hd.EthSecp256k1Type)
err := algoFlag.Value.Set(string(hd.EthSecp256k1Type))
if err != nil {
@ -107,7 +107,7 @@ func runAddCmd(cmd *cobra.Command, args []string) error {
eth, _ := cmd.Flags().GetBool(ethFlag)
if eth {
cmd.Print("eth flag specified: using coin-type 60 and signing algorithm eth_secp256k1\n")
cmd.Flags().Set(flags.FlagKeyAlgorithm, string(hd.EthSecp256k1Type))
cmd.Flags().Set(flags.FlagKeyType, string(hd.EthSecp256k1Type))
cmd.Flags().Set("coin-type", "60")
}

View File

@ -11,9 +11,7 @@ import (
func main() {
chaincfg.SetSDKConfig().Seal()
rootCmd := NewRootCmd()
if err := svrcmd.Execute(rootCmd, chaincfg.EnvPrefix, chaincfg.DefaultNodeHome); err != nil {
switch e := err.(type) {
case server.ErrorCode:

View File

@ -0,0 +1,216 @@
//go:build rocksdb
// +build rocksdb
package rocksdb
import (
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/cometbft/cometbft/libs/log"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/server"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
"github.com/Kava-Labs/opendb"
)
const (
flagPrintStatsInterval = "print-stats-interval"
)
var allowedDBs = []string{"application", "blockstore", "state"}
func CompactRocksDBCmd() *cobra.Command {
cmd := &cobra.Command{
Use: fmt.Sprintf(
"compact <%s>",
strings.Join(allowedDBs, "|"),
),
Short: "force compacts RocksDB",
Long: `This is a utility command that performs a force compaction on the state or
blockstore. This should only be run once the node has stopped.`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
statsIntervalStr, err := cmd.Flags().GetString(flagPrintStatsInterval)
if err != nil {
return err
}
statsInterval, err := time.ParseDuration(statsIntervalStr)
if err != nil {
return fmt.Errorf("failed to parse duration for --%s: %w", flagPrintStatsInterval, err)
}
clientCtx := client.GetClientContextFromCmd(cmd)
ctx := server.GetServerContextFromCmd(cmd)
if server.GetAppDBBackend(ctx.Viper) != "rocksdb" {
return errors.New("compaction is currently only supported with rocksdb")
}
if !slices.Contains(allowedDBs, args[0]) {
return fmt.Errorf(
"invalid db name, must be one of the following: %s",
strings.Join(allowedDBs, ", "),
)
}
return compactRocksDBs(clientCtx.HomeDir, logger, args[0], statsInterval)
},
}
cmd.Flags().String(flagPrintStatsInterval, "1m", "duration string for how often to print compaction stats")
return cmd
}
// compactRocksDBs performs a manual compaction on the given db.
func compactRocksDBs(
rootDir string,
logger log.Logger,
dbName string,
statsInterval time.Duration,
) error {
dbPath := filepath.Join(rootDir, "data", dbName+".db")
dbOpts, cfOpts, err := opendb.LoadLatestOptions(dbPath)
if err != nil {
return err
}
logger.Info("opening db", "path", dbPath)
db, _, err := grocksdb.OpenDbColumnFamilies(
dbOpts,
dbPath,
[]string{opendb.DefaultColumnFamilyName},
[]*grocksdb.Options{cfOpts},
)
if err != nil {
logger.Error("failed to open db", "path", dbPath, "err", err)
return fmt.Errorf("failed to open db %s: %w", dbPath, err)
}
defer db.Close()
logColumnFamilyMetadata(db, logger)
logger.Info("starting compaction...", "db", dbPath)
done := make(chan bool)
registerSignalHandler(db, logger, done)
startCompactionStatsOutput(db, logger, done, statsInterval)
// Actually run the compaction
db.CompactRange(grocksdb.Range{Start: nil, Limit: nil})
logger.Info("done compaction", "db", dbPath)
done <- true
return nil
}
// bytesToMB converts bytes to megabytes.
func bytesToMB(bytes uint64) float64 {
return float64(bytes) / 1024 / 1024
}
// logColumnFamilyMetadata outputs the column family and level metadata.
func logColumnFamilyMetadata(
db *grocksdb.DB,
logger log.Logger,
) {
metadata := db.GetColumnFamilyMetadata()
logger.Info(
"column family metadata",
"name", metadata.Name(),
"sizeMB", bytesToMB(metadata.Size()),
"fileCount", metadata.FileCount(),
"levels", len(metadata.LevelMetas()),
)
for _, level := range metadata.LevelMetas() {
logger.Info(
fmt.Sprintf("level %d metadata", level.Level()),
"sstMetas", strconv.Itoa(len(level.SstMetas())),
"sizeMB", strconv.FormatFloat(bytesToMB(level.Size()), 'f', 2, 64),
)
}
}
// startCompactionStatsOutput starts a goroutine that outputs compaction stats
// at the configured interval.
func startCompactionStatsOutput(
db *grocksdb.DB,
logger log.Logger,
done chan bool,
statsInterval time.Duration,
) {
go func() {
ticker := time.NewTicker(statsInterval)
isClosed := false
for {
select {
// Make sure we don't try reading from the closed db.
// We continue the loop so that we can make sure the done channel
// does not stall indefinitely from repeated writes and no reader.
case <-done:
logger.Debug("stopping compaction stats output")
isClosed = true
case <-ticker.C:
if !isClosed {
compactionStats := db.GetProperty("rocksdb.stats")
fmt.Printf("%s\n", compactionStats)
}
}
}
}()
}
// registerSignalHandler registers a signal handler that will cancel any running
// compaction when the user presses Ctrl+C.
func registerSignalHandler(
db *grocksdb.DB,
logger log.Logger,
done chan bool,
) {
// https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ
// Q: Can I close the DB when a manual compaction is in progress?
//
// A: No, it's not safe to do that. However, you call
// CancelAllBackgroundWork(db, true) in another thread to abort the
// running compactions, so that you can close the DB sooner. Since
// 6.5, you can also speed it up using
// DB::DisableManualCompaction().
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
for sig := range c {
logger.Info(fmt.Sprintf(
"received %s signal, aborting running compaction... Do NOT kill me before compaction is cancelled. I will exit when compaction is cancelled.",
sig,
))
db.DisableManualCompaction()
logger.Info("manual compaction disabled")
// Stop the logging
done <- true
}
}()
}

View File

@ -0,0 +1,19 @@
//go:build rocksdb
// +build rocksdb
package rocksdb
import (
"github.com/spf13/cobra"
)
// RocksDBCmd defines the root command containing subcommands that assist in
// rocksdb related tasks such as manual compaction.
var RocksDBCmd = &cobra.Command{
Use: "rocksdb",
Short: "RocksDB util commands",
}
func init() {
RocksDBCmd.AddCommand(CompactRocksDBCmd())
}

View File

@ -0,0 +1,14 @@
//go:build !rocksdb
// +build !rocksdb
package rocksdb
import (
"github.com/spf13/cobra"
)
// RocksDBCmd defines the root command when the rocksdb build tag is not set.
var RocksDBCmd = &cobra.Command{
Use: "rocksdb",
Short: "RocksDB util commands, disabled because rocksdb build tag not set",
}

View File

@ -1,32 +1,38 @@
package main
import (
"fmt"
"os"
"path/filepath"
dbm "github.com/cometbft/cometbft-db"
tmcfg "github.com/cometbft/cometbft/config"
tmcli "github.com/cometbft/cometbft/libs/cli"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/config"
"github.com/cosmos/cosmos-sdk/client/debug"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
"github.com/cosmos/cosmos-sdk/server"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/cosmos/cosmos-sdk/x/genutil"
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types"
ethermintclient "github.com/evmos/ethermint/client"
"github.com/evmos/ethermint/crypto/hd"
ethermintserver "github.com/evmos/ethermint/server"
servercfg "github.com/evmos/ethermint/server/config"
"github.com/spf13/cobra"
tmcfg "github.com/tendermint/tendermint/config"
tmcli "github.com/tendermint/tendermint/libs/cli"
"github.com/0glabs/0g-chain/app"
"github.com/0glabs/0g-chain/app/params"
"github.com/0glabs/0g-chain/chaincfg"
kavaclient "github.com/0glabs/0g-chain/client"
"github.com/0glabs/0g-chain/cmd/opendb"
"github.com/0glabs/0g-chain/cmd/0gchaind/iavlviewer"
"github.com/0glabs/0g-chain/cmd/0gchaind/rocksdb"
"github.com/0glabs/0g-chain/crypto/vrf"
"github.com/Kava-Labs/opendb"
)
func customKeyringOptions() keyring.Option {
@ -46,11 +52,10 @@ func NewRootCmd() *cobra.Command {
WithLegacyAmino(encodingConfig.Amino).
WithInput(os.Stdin).
WithAccountRetriever(types.AccountRetriever{}).
WithBroadcastMode(flags.BroadcastBlock).
WithBroadcastMode(flags.FlagBroadcastMode).
WithHomeDir(chaincfg.DefaultNodeHome).
WithKeyringOptions(customKeyringOptions()).
WithKeyringOptions(hd.EthSecp256k1Option()).
WithViper(chaincfg.EnvPrefix)
rootCmd := &cobra.Command{
Use: chaincfg.AppName,
Short: "Daemon and CLI for the 0g-chain blockchain.",
@ -84,18 +89,29 @@ func NewRootCmd() *cobra.Command {
}
addSubCmds(rootCmd, encodingConfig, chaincfg.DefaultNodeHome)
return rootCmd
}
// addSubCmds registers all the sub commands used by 0g-chain.
// dbOpener is a function to open `application.db`, potentially with customized options.
// dbOpener sets dataDir to "data", dbName to "application" and calls generic OpenDB function.
func dbOpener(opts servertypes.AppOptions, rootDir string, backend dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(rootDir, "data")
return opendb.OpenDB(opts, dataDir, "application", backend)
}
// addSubCmds registers all the sub commands used by kava.
func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, defaultNodeHome string) {
gentxModule, ok := app.ModuleBasics[genutiltypes.ModuleName].(genutil.AppModuleBasic)
if !ok {
panic(fmt.Errorf("expected %s module to be an instance of type %T", genutiltypes.ModuleName, genutil.AppModuleBasic{}))
}
rootCmd.AddCommand(
StatusCommand(),
ethermintclient.ValidateChainID(
genutilcli.InitCmd(app.ModuleBasics, defaultNodeHome),
),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome),
genutilcli.CollectGenTxsCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome, gentxModule.GenTxValidator),
AssertInvariantsCmd(encodingConfig),
genutilcli.GenTxCmd(app.ModuleBasics, encodingConfig.TxConfig, banktypes.GenesisBalancesIterator{}, defaultNodeHome),
genutilcli.ValidateGenesisCmd(app.ModuleBasics),
@ -113,7 +129,7 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
opts := ethermintserver.StartOptions{
AppCreator: ac.newApp,
DefaultNodeHome: chaincfg.DefaultNodeHome,
DBOpener: opendb.OpenDB,
DBOpener: dbOpener,
}
// ethermintserver adds additional flags to start the JSON-RPC server for evm support
ethermintserver.AddCommands(
@ -123,10 +139,13 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
ac.addStartCmdFlags,
)
// add keybase, gas RPC, query, and tx child commands
// add keybase, auxiliary RPC, query, and tx child commands
rootCmd.AddCommand(
newQueryCmd(),
newTxCmd(),
kavaclient.KeyCommands(chaincfg.DefaultNodeHome),
keyCommands(chaincfg.DefaultNodeHome),
rocksdb.RocksDBCmd,
newShardCmd(opts),
iavlviewer.NewCmd(opts),
)
}

322
cmd/0gchaind/shard.go Normal file

@ -0,0 +1,322 @@
package main
import (
"fmt"
"strings"
"github.com/0glabs/0g-chain/app"
"github.com/spf13/cobra"
dbm "github.com/cometbft/cometbft-db"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/server"
pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
tmconfig "github.com/cometbft/cometbft/config"
"github.com/cometbft/cometbft/node"
tmstate "github.com/cometbft/cometbft/state"
"github.com/cometbft/cometbft/store"
ethermintserver "github.com/evmos/ethermint/server"
)
const (
flagShardStartBlock = "start"
flagShardEndBlock = "end"
flagShardOnlyAppState = "only-app-state"
flagShardForceAppVersion = "force-app-version"
flagShardOnlyCometbftState = "only-cometbft-state"
// TODO: --preserve flag for creating & operating on a copy?
// allow using -1 to mean "latest" (perform no rollbacks)
shardEndBlockLatest = -1
)
func newShardCmd(opts ethermintserver.StartOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "shard --home <path-to-home-dir> --start <start-block> --end <end-block> [--only-app-state] [--only-cometbft-state] [--force-app-version <app-version>]",
Short: "Strip all blocks from the database outside of a given range",
Long: `shard opens a local kava home directory's databases and removes all blocks outside a range defined by --start and --end. The range is inclusive of the end block.
It works by first rolling back the latest state to the block before the end block, and then by pruning all state before the start block.
Setting the end block to -1 signals to keep the latest block (no rollbacks).
The application.db can be loaded at a particular height via the --force-app-version option. This is useful if the sharding process is prematurely terminated while the application.db is being sharded.
The --only-app-state flag can be used to skip the pruning of the blockstore and cometbft state. This matches the functionality of the cosmos-sdk's "prune" command. Note that rolled back blocks will still affect all stores.
Similarly, the --only-cometbft-state flag skips pruning app state. This can be useful if the shard command is prematurely terminated during the shard process.
The shard command only flags the iavl tree nodes for deletion. Actual removal from the databases will be performed when each database is compacted.
WARNING: this is a destructive action.`,
Example: `Create a 1M block data shard (keeps blocks 1,000,000 to 2,000,000)
$ kava shard --home path/to/.kava --start 1000000 --end 2000000
Prune all blocks up to 5,000,000:
$ kava shard --home path/to/.kava --start 5000000 --end -1
Prune first 1M blocks _without_ affecting blockstore or cometBFT state:
$ kava shard --home path/to/.kava --start 1000000 --end -1 --only-app-state`,
RunE: func(cmd *cobra.Command, args []string) error {
//////////////////////////
// parse & validate flags
//////////////////////////
startBlock, err := cmd.Flags().GetInt64(flagShardStartBlock)
if err != nil {
return err
}
endBlock, err := cmd.Flags().GetInt64(flagShardEndBlock)
if err != nil {
return err
}
if (endBlock == 0 || endBlock < startBlock) && endBlock != shardEndBlockLatest {
return fmt.Errorf("end block (%d) must be greater than start block (%d)", endBlock, startBlock)
}
onlyAppState, err := cmd.Flags().GetBool(flagShardOnlyAppState)
if err != nil {
return err
}
forceAppVersion, err := cmd.Flags().GetInt64(flagShardForceAppVersion)
if err != nil {
return err
}
onlyCometbftState, err := cmd.Flags().GetBool(flagShardOnlyCometbftState)
if err != nil {
return err
}
clientCtx := client.GetClientContextFromCmd(cmd)
ctx := server.GetServerContextFromCmd(cmd)
ctx.Config.SetRoot(clientCtx.HomeDir)
////////////////////////
// manage db connection
////////////////////////
// connect to database
db, err := opts.DBOpener(ctx.Viper, clientCtx.HomeDir, server.GetAppDBBackend(ctx.Viper))
if err != nil {
return err
}
// close db connection when done
defer func() {
if err := db.Close(); err != nil {
ctx.Logger.Error("error closing db", "error", err.Error())
}
}()
///////////////////
// load multistore
///////////////////
// create app in order to load the multistore
// skip loading the latest version so the desired height can be manually loaded
ctx.Viper.Set("skip-load-latest", true)
app := opts.AppCreator(ctx.Logger, db, nil, ctx.Viper).(*app.App)
if forceAppVersion == shardEndBlockLatest {
if err := app.LoadLatestVersion(); err != nil {
return err
}
} else {
if err := app.LoadVersion(forceAppVersion); err != nil {
return err
}
}
// get the multistore
cms := app.CommitMultiStore()
multistore, ok := cms.(*rootmulti.Store)
if !ok {
return fmt.Errorf("only sharding of rootmulti.Store type is supported")
}
////////////////////////
// shard application.db
////////////////////////
if !onlyCometbftState {
if err := shardApplicationDb(multistore, startBlock, endBlock); err != nil {
return err
}
} else {
fmt.Printf("[%s] skipping sharding of application.db\n", flagShardOnlyCometbftState)
}
//////////////////////////////////
// shard blockstore.db & state.db
//////////////////////////////////
// open block store & cometbft state
blockStore, stateStore, err := openCometBftDbs(ctx.Config)
if err != nil {
return fmt.Errorf("failed to open cometbft dbs: %s", err)
}
if !onlyAppState {
if err := shardCometBftDbs(blockStore, stateStore, startBlock, endBlock); err != nil {
return err
}
} else {
fmt.Printf("[%s] skipping sharding of blockstore.db and state.db\n", flagShardOnlyAppState)
fmt.Printf("blockstore contains blocks %d - %d\n", blockStore.Base(), blockStore.Height())
}
return nil
},
}
cmd.Flags().String(flags.FlagHome, opts.DefaultNodeHome, "The application home directory")
cmd.Flags().Int64(flagShardStartBlock, 1, "Start block of data shard (inclusive)")
cmd.Flags().Int64(flagShardEndBlock, 0, "End block of data shard (inclusive)")
cmd.Flags().Bool(flagShardOnlyAppState, false, "Skip pruning of blockstore & cometbft state")
cmd.Flags().Bool(flagShardOnlyCometbftState, false, "Skip pruning of application state")
cmd.Flags().Int64(flagShardForceAppVersion, shardEndBlockLatest, "Instead of loading latest, force set the version of the multistore that is loaded")
return cmd
}
// shardApplicationDb prunes the multistore up to startBlock and rolls it back to endBlock
func shardApplicationDb(multistore *rootmulti.Store, startBlock, endBlock int64) error {
//////////////////////////////
// Rollback state to endBlock
//////////////////////////////
// handle desired endblock being latest
latest := multistore.LastCommitID().Version
if latest == 0 {
return fmt.Errorf("failed to find latest height >0")
}
fmt.Printf("latest height: %d\n", latest)
if endBlock == shardEndBlockLatest {
endBlock = latest
}
shardSize := endBlock - startBlock + 1
// error if requesting block range the database does not have
if endBlock > latest {
return fmt.Errorf("data does not contain end block (%d): latest version is %d", endBlock, latest)
}
fmt.Printf("pruning data down to heights %d - %d (%d blocks)\n", startBlock, endBlock, shardSize)
// set pruning options to prevent no-ops from `PruneStores`
multistore.SetPruning(pruningtypes.PruningOptions{KeepRecent: uint64(shardSize), Interval: 0})
// rollback application state
if err := multistore.RollbackToVersion(endBlock); err != nil {
return fmt.Errorf("failed to rollback application state: %s", err)
}
//////////////////////////////
// Prune blocks to startBlock
//////////////////////////////
// enumerate all heights to prune
pruneHeights := make([]int64, 0, latest-shardSize)
for i := int64(1); i < startBlock; i++ {
pruneHeights = append(pruneHeights, i)
}
if len(pruneHeights) > 0 {
// prune application state
fmt.Printf("pruning application state to height %d\n", startBlock)
for _, pruneHeight := range pruneHeights {
if err := multistore.PruneStores(pruneHeight); err != nil {
return fmt.Errorf("failed to prune application state: %s", err)
}
}
}
return nil
}
// shardCometBftDbs shrinks blockstore.db & state.db down to the desired block range
func shardCometBftDbs(blockStore *store.BlockStore, stateStore tmstate.Store, startBlock, endBlock int64) error {
var err error
latest := blockStore.Height()
if endBlock == shardEndBlockLatest {
endBlock = latest
}
//////////////////////////////
// Rollback state to endBlock
//////////////////////////////
// prep for outputting progress repeatedly to same line
needsRollback := endBlock < latest
progress := "rolling back blockstore & cometbft state to height %d"
numChars := len(fmt.Sprintf(progress, latest))
clearLine := fmt.Sprintf("\r%s\r", strings.Repeat(" ", numChars))
printRollbackProgress := func(h int64) {
fmt.Print(clearLine)
fmt.Printf(progress, h)
}
// rollback tendermint db
height := latest
for height > endBlock {
beforeRollbackHeight := height
printRollbackProgress(height - 1)
height, _, err = tmstate.Rollback(blockStore, stateStore, true)
if err != nil {
return fmt.Errorf("failed to rollback cometbft state: %w", err)
}
if beforeRollbackHeight == height {
return fmt.Errorf("attempting to rollback cometbft state height %d failed (no rollback performed)", height)
}
}
if needsRollback {
fmt.Println()
} else {
fmt.Printf("latest store height is already %d\n", latest)
}
//////////////////////////////
// Prune blocks to startBlock
//////////////////////////////
// get starting block of block store
baseBlock := blockStore.Base()
// only prune if data exists, otherwise blockStore.PruneBlocks will panic
if baseBlock < startBlock {
// prune block store
fmt.Printf("pruning block store from %d - %d\n", baseBlock, startBlock)
if _, err := blockStore.PruneBlocks(startBlock); err != nil {
return fmt.Errorf("failed to prune block store (retainHeight=%d): %s", startBlock, err)
}
// prune cometbft state
fmt.Printf("pruning cometbft state from %d - %d\n", baseBlock, startBlock)
if err := stateStore.PruneStates(baseBlock, startBlock); err != nil {
return fmt.Errorf("failed to prune cometbft state store (%d - %d): %s", baseBlock, startBlock, err)
}
} else {
fmt.Printf("blockstore and cometbft state begins at block %d\n", baseBlock)
}
return nil
}
// inspired by https://github.com/Kava-Labs/cometbft/blob/277b0853db3f67865a55aa1c54f59790b5f591be/node/node.go#L234
func openCometBftDbs(config *tmconfig.Config) (blockStore *store.BlockStore, stateStore tmstate.Store, err error) {
dbProvider := node.DefaultDBProvider
var blockStoreDB dbm.DB
blockStoreDB, err = dbProvider(&node.DBContext{ID: "blockstore", Config: config})
if err != nil {
return
}
blockStore = store.NewBlockStore(blockStoreDB)
stateDB, err := dbProvider(&node.DBContext{ID: "state", Config: config})
if err != nil {
return
}
stateStore = tmstate.NewStore(stateDB, tmstate.StoreOptions{
DiscardABCIResponses: config.Storage.DiscardABCIResponses,
})
return
}

View File

@ -5,9 +5,9 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/p2p"
coretypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/cometbft/cometbft/libs/bytes"
"github.com/cometbft/cometbft/p2p"
coretypes "github.com/cometbft/cometbft/rpc/core/types"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"

View File

@ -1,499 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
)
// rocksdbMetrics will be initialized in registerMetrics() if enableRocksdbMetrics flag set to true
var rocksdbMetrics *Metrics
// Metrics contains all rocksdb metrics which will be reported to prometheus
type Metrics struct {
// Keys
NumberKeysWritten metrics.Gauge
NumberKeysRead metrics.Gauge
NumberKeysUpdated metrics.Gauge
EstimateNumKeys metrics.Gauge
// Files
NumberFileOpens metrics.Gauge
NumberFileErrors metrics.Gauge
// Memory
BlockCacheUsage metrics.Gauge
EstimateTableReadersMem metrics.Gauge
CurSizeAllMemTables metrics.Gauge
BlockCachePinnedUsage metrics.Gauge
// Cache
BlockCacheMiss metrics.Gauge
BlockCacheHit metrics.Gauge
BlockCacheAdd metrics.Gauge
BlockCacheAddFailures metrics.Gauge
// Detailed Cache
BlockCacheIndexMiss metrics.Gauge
BlockCacheIndexHit metrics.Gauge
BlockCacheIndexBytesInsert metrics.Gauge
BlockCacheFilterMiss metrics.Gauge
BlockCacheFilterHit metrics.Gauge
BlockCacheFilterBytesInsert metrics.Gauge
BlockCacheDataMiss metrics.Gauge
BlockCacheDataHit metrics.Gauge
BlockCacheDataBytesInsert metrics.Gauge
// Latency
DBGetMicrosP50 metrics.Gauge
DBGetMicrosP95 metrics.Gauge
DBGetMicrosP99 metrics.Gauge
DBGetMicrosP100 metrics.Gauge
DBGetMicrosCount metrics.Gauge
DBWriteMicrosP50 metrics.Gauge
DBWriteMicrosP95 metrics.Gauge
DBWriteMicrosP99 metrics.Gauge
DBWriteMicrosP100 metrics.Gauge
DBWriteMicrosCount metrics.Gauge
// Write Stall
StallMicros metrics.Gauge
DBWriteStallP50 metrics.Gauge
DBWriteStallP95 metrics.Gauge
DBWriteStallP99 metrics.Gauge
DBWriteStallP100 metrics.Gauge
DBWriteStallCount metrics.Gauge
DBWriteStallSum metrics.Gauge
// Bloom Filter
BloomFilterUseful metrics.Gauge
BloomFilterFullPositive metrics.Gauge
BloomFilterFullTruePositive metrics.Gauge
// LSM Tree Stats
LastLevelReadBytes metrics.Gauge
LastLevelReadCount metrics.Gauge
NonLastLevelReadBytes metrics.Gauge
NonLastLevelReadCount metrics.Gauge
GetHitL0 metrics.Gauge
GetHitL1 metrics.Gauge
GetHitL2AndUp metrics.Gauge
}
// registerMetrics registers metrics in prometheus and initializes rocksdbMetrics variable
func registerMetrics() {
if rocksdbMetrics != nil {
// metrics already registered
return
}
labels := make([]string, 0)
rocksdbMetrics = &Metrics{
// Keys
NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_written",
Help: "",
}, labels),
NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_read",
Help: "",
}, labels),
NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_updated",
Help: "",
}, labels),
EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "estimate_num_keys",
Help: "estimated number of total keys in the active and unflushed immutable memtables and storage",
}, labels),
// Files
NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_opens",
Help: "",
}, labels),
NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_errors",
Help: "",
}, labels),
// Memory
BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_usage",
Help: "memory size for the entries residing in block cache",
}, labels),
EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "estimate_table_readers_mem",
Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
}, labels),
CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "cur_size_all_mem_tables",
Help: "approximate size of active and unflushed immutable memtables (bytes)",
}, labels),
BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_pinned_usage",
Help: "returns the memory size for the entries being pinned",
}, labels),
// Cache
BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_miss",
Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
}, labels),
BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_hit",
Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
}, labels),
BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add",
Help: "number of blocks added to block cache",
}, labels),
BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add_failures",
Help: "number of failures when adding blocks to block cache",
}, labels),
// Detailed Cache
BlockCacheIndexMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_miss",
Help: "",
}, labels),
BlockCacheIndexHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_hit",
Help: "",
}, labels),
BlockCacheIndexBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_bytes_insert",
Help: "",
}, labels),
BlockCacheFilterMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_miss",
Help: "",
}, labels),
BlockCacheFilterHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_hit",
Help: "",
}, labels),
BlockCacheFilterBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_bytes_insert",
Help: "",
}, labels),
BlockCacheDataMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_miss",
Help: "",
}, labels),
BlockCacheDataHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_hit",
Help: "",
}, labels),
BlockCacheDataBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_bytes_insert",
Help: "",
}, labels),
// Latency
DBGetMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p50",
Help: "",
}, labels),
DBGetMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p95",
Help: "",
}, labels),
DBGetMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p99",
Help: "",
}, labels),
DBGetMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p100",
Help: "",
}, labels),
DBGetMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_count",
Help: "",
}, labels),
DBWriteMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p50",
Help: "",
}, labels),
DBWriteMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p95",
Help: "",
}, labels),
DBWriteMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p99",
Help: "",
}, labels),
DBWriteMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p100",
Help: "",
}, labels),
DBWriteMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_count",
Help: "",
}, labels),
// Write Stall
StallMicros: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "stall_micros",
Help: "Writer has to wait for compaction or flush to finish.",
}, labels),
DBWriteStallP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p50",
Help: "",
}, labels),
DBWriteStallP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p95",
Help: "",
}, labels),
DBWriteStallP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p99",
Help: "",
}, labels),
DBWriteStallP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p100",
Help: "",
}, labels),
DBWriteStallCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_count",
Help: "",
}, labels),
DBWriteStallSum: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_sum",
Help: "",
}, labels),
// Bloom Filter
BloomFilterUseful: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_useful",
Help: "number of times bloom filter has avoided file reads, i.e., negatives.",
}, labels),
BloomFilterFullPositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_positive",
Help: "number of times bloom FullFilter has not avoided the reads.",
}, labels),
BloomFilterFullTruePositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_true_positive",
Help: "number of times bloom FullFilter has not avoided the reads and data actually exist.",
}, labels),
// LSM Tree Stats
LastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_bytes",
Help: "",
}, labels),
LastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_count",
Help: "",
}, labels),
NonLastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_bytes",
Help: "",
}, labels),
NonLastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_count",
Help: "",
}, labels),
GetHitL0: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l0",
Help: "number of Get() queries served by L0",
}, labels),
GetHitL1: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l1",
Help: "number of Get() queries served by L1",
}, labels),
GetHitL2AndUp: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l2_and_up",
Help: "number of Get() queries served by L2 and up",
}, labels),
}
}
// report reports metrics to prometheus based on rocksdb props and stats
func (m *Metrics) report(props *properties, stats *stats) {
// Keys
m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))
// Files
m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
m.NumberFileErrors.Set(float64(stats.NumberFileErrors))
// Memory
m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))
// Cache
m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
// Detailed Cache
m.BlockCacheIndexMiss.Set(float64(stats.BlockCacheIndexMiss))
m.BlockCacheIndexHit.Set(float64(stats.BlockCacheIndexHit))
m.BlockCacheIndexBytesInsert.Set(float64(stats.BlockCacheIndexBytesInsert))
m.BlockCacheFilterMiss.Set(float64(stats.BlockCacheFilterMiss))
m.BlockCacheFilterHit.Set(float64(stats.BlockCacheFilterHit))
m.BlockCacheFilterBytesInsert.Set(float64(stats.BlockCacheFilterBytesInsert))
m.BlockCacheDataMiss.Set(float64(stats.BlockCacheDataMiss))
m.BlockCacheDataHit.Set(float64(stats.BlockCacheDataHit))
m.BlockCacheDataBytesInsert.Set(float64(stats.BlockCacheDataBytesInsert))
// Latency
m.DBGetMicrosP50.Set(stats.DBGetMicros.P50)
m.DBGetMicrosP95.Set(stats.DBGetMicros.P95)
m.DBGetMicrosP99.Set(stats.DBGetMicros.P99)
m.DBGetMicrosP100.Set(stats.DBGetMicros.P100)
m.DBGetMicrosCount.Set(stats.DBGetMicros.Count)
m.DBWriteMicrosP50.Set(stats.DBWriteMicros.P50)
m.DBWriteMicrosP95.Set(stats.DBWriteMicros.P95)
m.DBWriteMicrosP99.Set(stats.DBWriteMicros.P99)
m.DBWriteMicrosP100.Set(stats.DBWriteMicros.P100)
m.DBWriteMicrosCount.Set(stats.DBWriteMicros.Count)
// Write Stall
m.StallMicros.Set(float64(stats.StallMicros))
m.DBWriteStallP50.Set(stats.DBWriteStallHistogram.P50)
m.DBWriteStallP95.Set(stats.DBWriteStallHistogram.P95)
m.DBWriteStallP99.Set(stats.DBWriteStallHistogram.P99)
m.DBWriteStallP100.Set(stats.DBWriteStallHistogram.P100)
m.DBWriteStallCount.Set(stats.DBWriteStallHistogram.Count)
m.DBWriteStallSum.Set(stats.DBWriteStallHistogram.Sum)
// Bloom Filter
m.BloomFilterUseful.Set(float64(stats.BloomFilterUseful))
m.BloomFilterFullPositive.Set(float64(stats.BloomFilterFullPositive))
m.BloomFilterFullTruePositive.Set(float64(stats.BloomFilterFullTruePositive))
// LSM Tree Stats
m.LastLevelReadBytes.Set(float64(stats.LastLevelReadBytes))
m.LastLevelReadCount.Set(float64(stats.LastLevelReadCount))
m.NonLastLevelReadBytes.Set(float64(stats.NonLastLevelReadBytes))
m.NonLastLevelReadCount.Set(float64(stats.NonLastLevelReadCount))
m.GetHitL0.Set(float64(stats.GetHitL0))
m.GetHitL1.Set(float64(stats.GetHitL1))
m.GetHitL2AndUp.Set(float64(stats.GetHitL2AndUp))
}
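The gauges above come from go-kit's Prometheus wrappers, which register the underlying collectors with the default Prometheus registry, so once registerMetrics() has run they are picked up by anything serving the default handler (typically the node's existing Prometheus endpoint). A minimal standalone sketch, purely for illustration; the main package and listen address are assumptions, not part of this code:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp.Handler() serves every collector registered with the default
	// registry, including the rocksdb gauges reported above.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":26661", nil) // example address only
}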

View File

@ -1,18 +0,0 @@
//go:build !rocksdb
// +build !rocksdb
package opendb
import (
"path/filepath"
"github.com/cosmos/cosmos-sdk/server/types"
dbm "github.com/tendermint/tm-db"
)
// OpenDB is a copy of the default DBOpener function used by ethermint; see for details:
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647
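// Note: this fallback is compiled only when the binary is built without the rocksdb
// build tag; with -tags rocksdb the rocksdb-aware OpenDB in the next file is used instead.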
func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(home, "data")
return dbm.NewDB("application", backendType, dataDir)
}

View File

@ -1,398 +0,0 @@
//go:build rocksdb
// +build rocksdb
// Copyright 2023 Kava Labs, Inc.
// Copyright 2023 Cronos Labs, Inc.
//
// Derived from https://github.com/crypto-org-chain/cronos@496ce7e
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opendb
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/cosmos/cosmos-sdk/server/types"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cast"
dbm "github.com/tendermint/tm-db"
)
var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default")
const (
// default tm-db block cache size for RocksDB
defaultBlockCacheSize = 1 << 30
defaultColumnFamilyName = "default"
enableMetricsOptName = "rocksdb.enable-metrics"
reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
defaultReportMetricsIntervalSecs = 15
maxOpenFilesDBOptName = "rocksdb.max-open-files"
maxFileOpeningThreadsDBOptName = "rocksdb.max-file-opening-threads"
tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
allowMMAPWritesDBOptName = "rocksdb.allow_mmap_writes"
allowMMAPReadsDBOptName = "rocksdb.allow_mmap_reads"
useFsyncDBOptName = "rocksdb.use_fsync"
useAdaptiveMutexDBOptName = "rocksdb.use_adaptive_mutex"
bytesPerSyncDBOptName = "rocksdb.bytes_per_sync"
maxBackgroundJobsDBOptName = "rocksdb.max-background-jobs"
writeBufferSizeCFOptName = "rocksdb.write-buffer-size"
numLevelsCFOptName = "rocksdb.num-levels"
maxWriteBufferNumberCFOptName = "rocksdb.max_write_buffer_number"
minWriteBufferNumberToMergeCFOptName = "rocksdb.min_write_buffer_number_to_merge"
maxBytesForLevelBaseCFOptName = "rocksdb.max_bytes_for_level_base"
maxBytesForLevelMultiplierCFOptName = "rocksdb.max_bytes_for_level_multiplier"
targetFileSizeBaseCFOptName = "rocksdb.target_file_size_base"
targetFileSizeMultiplierCFOptName = "rocksdb.target_file_size_multiplier"
level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
level0SlowdownWritesTriggerCFOptName = "rocksdb.level0_slowdown_writes_trigger"
blockCacheSizeBBTOOptName = "rocksdb.block_cache_size"
bitsPerKeyBBTOOptName = "rocksdb.bits_per_key"
blockSizeBBTOOptName = "rocksdb.block_size"
cacheIndexAndFilterBlocksBBTOOptName = "rocksdb.cache_index_and_filter_blocks"
pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
formatVersionBBTOOptName = "rocksdb.format_version"
asyncIOReadOptName = "rocksdb.read-async-io"
)
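// Illustration only (not part of the original file): these keys are read via appOpts,
// so with the SDK's viper-backed config they would typically be set in an app.toml
// section like the following (values are arbitrary examples):
//
//   [rocksdb]
//   enable-metrics = true
//   report-metrics-interval-secs = 15
//   max-open-files = 16384
//   block_cache_size = 1073741824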
func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
dataDir := filepath.Join(home, "data")
if backendType == dbm.RocksDBBackend {
return openRocksdb(dataDir, appOpts)
}
return dbm.NewDB("application", backendType, dataDir)
}
// openRocksdb loads the existing options, overrides some of them with appOpts, and opens the database.
// An option is overridden only if it is explicitly specified in appOpts.
func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
optionsPath := filepath.Join(dir, "application.db")
dbOpts, cfOpts, err := loadLatestOptions(optionsPath)
if err != nil {
return nil, err
}
// customize rocksdb options
bbtoOpts := bbtoFromAppOpts(appOpts)
dbOpts.SetBlockBasedTableFactory(bbtoOpts)
cfOpts.SetBlockBasedTableFactory(bbtoOpts)
dbOpts = overrideDBOpts(dbOpts, appOpts)
cfOpts = overrideCFOpts(cfOpts, appOpts)
readOpts := readOptsFromAppOpts(appOpts)
enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
reportMetricsIntervalSecs := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
if reportMetricsIntervalSecs == 0 {
reportMetricsIntervalSecs = defaultReportMetricsIntervalSecs
}
return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, readOpts, enableMetrics, reportMetricsIntervalSecs)
}
// loadLatestOptions loads and returns the database and column family options.
// If the options file is not found, the database hasn't been created yet, so default tm-db options are returned.
// If the database exists, it must have exactly one column family named default.
func loadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
if err != nil && strings.HasPrefix(err.Error(), "NotFound: ") {
return newDefaultOptions(), newDefaultOptions(), nil
}
if err != nil {
return nil, nil, err
}
cfNames := latestOpts.ColumnFamilyNames()
cfOpts := latestOpts.ColumnFamilyOpts()
// db should have only one column family named default
ok := len(cfNames) == 1 && cfNames[0] == defaultColumnFamilyName
if !ok {
return nil, nil, ErrUnexpectedConfiguration
}
// return db and cf opts
return latestOpts.Options(), &cfOpts[0], nil
}
// overrideDBOpts merges dbOpts and appOpts; appOpts takes precedence.
func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
maxOpenFiles := appOpts.Get(maxOpenFilesDBOptName)
if maxOpenFiles != nil {
dbOpts.SetMaxOpenFiles(cast.ToInt(maxOpenFiles))
}
maxFileOpeningThreads := appOpts.Get(maxFileOpeningThreadsDBOptName)
if maxFileOpeningThreads != nil {
dbOpts.SetMaxFileOpeningThreads(cast.ToInt(maxFileOpeningThreads))
}
tableCacheNumshardbits := appOpts.Get(tableCacheNumshardbitsDBOptName)
if tableCacheNumshardbits != nil {
dbOpts.SetTableCacheNumshardbits(cast.ToInt(tableCacheNumshardbits))
}
allowMMAPWrites := appOpts.Get(allowMMAPWritesDBOptName)
if allowMMAPWrites != nil {
dbOpts.SetAllowMmapWrites(cast.ToBool(allowMMAPWrites))
}
allowMMAPReads := appOpts.Get(allowMMAPReadsDBOptName)
if allowMMAPReads != nil {
dbOpts.SetAllowMmapReads(cast.ToBool(allowMMAPReads))
}
useFsync := appOpts.Get(useFsyncDBOptName)
if useFsync != nil {
dbOpts.SetUseFsync(cast.ToBool(useFsync))
}
useAdaptiveMutex := appOpts.Get(useAdaptiveMutexDBOptName)
if useAdaptiveMutex != nil {
dbOpts.SetUseAdaptiveMutex(cast.ToBool(useAdaptiveMutex))
}
bytesPerSync := appOpts.Get(bytesPerSyncDBOptName)
if bytesPerSync != nil {
dbOpts.SetBytesPerSync(cast.ToUint64(bytesPerSync))
}
maxBackgroundJobs := appOpts.Get(maxBackgroundJobsDBOptName)
if maxBackgroundJobs != nil {
dbOpts.SetMaxBackgroundJobs(cast.ToInt(maxBackgroundJobs))
}
return dbOpts
}
// overrideCFOpts merges cfOpts and appOpts; appOpts takes precedence.
func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
writeBufferSize := appOpts.Get(writeBufferSizeCFOptName)
if writeBufferSize != nil {
cfOpts.SetWriteBufferSize(cast.ToUint64(writeBufferSize))
}
numLevels := appOpts.Get(numLevelsCFOptName)
if numLevels != nil {
cfOpts.SetNumLevels(cast.ToInt(numLevels))
}
maxWriteBufferNumber := appOpts.Get(maxWriteBufferNumberCFOptName)
if maxWriteBufferNumber != nil {
cfOpts.SetMaxWriteBufferNumber(cast.ToInt(maxWriteBufferNumber))
}
minWriteBufferNumberToMerge := appOpts.Get(minWriteBufferNumberToMergeCFOptName)
if minWriteBufferNumberToMerge != nil {
cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(minWriteBufferNumberToMerge))
}
maxBytesForLevelBase := appOpts.Get(maxBytesForLevelBaseCFOptName)
if maxBytesForLevelBase != nil {
cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(maxBytesForLevelBase))
}
maxBytesForLevelMultiplier := appOpts.Get(maxBytesForLevelMultiplierCFOptName)
if maxBytesForLevelMultiplier != nil {
cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(maxBytesForLevelMultiplier))
}
targetFileSizeBase := appOpts.Get(targetFileSizeBaseCFOptName)
if targetFileSizeBase != nil {
cfOpts.SetTargetFileSizeBase(cast.ToUint64(targetFileSizeBase))
}
targetFileSizeMultiplier := appOpts.Get(targetFileSizeMultiplierCFOptName)
if targetFileSizeMultiplier != nil {
cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(targetFileSizeMultiplier))
}
level0FileNumCompactionTrigger := appOpts.Get(level0FileNumCompactionTriggerCFOptName)
if level0FileNumCompactionTrigger != nil {
cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(level0FileNumCompactionTrigger))
}
level0SlowdownWritesTrigger := appOpts.Get(level0SlowdownWritesTriggerCFOptName)
if level0SlowdownWritesTrigger != nil {
cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(level0SlowdownWritesTrigger))
}
return cfOpts
}
func readOptsFromAppOpts(appOpts types.AppOptions) *grocksdb.ReadOptions {
ro := grocksdb.NewDefaultReadOptions()
asyncIO := appOpts.Get(asyncIOReadOptName)
if asyncIO != nil {
ro.SetAsyncIO(cast.ToBool(asyncIO))
}
return ro
}
func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions {
bbto := defaultBBTO()
blockCacheSize := appOpts.Get(blockCacheSizeBBTOOptName)
if blockCacheSize != nil {
cache := grocksdb.NewLRUCache(cast.ToUint64(blockCacheSize))
bbto.SetBlockCache(cache)
}
bitsPerKey := appOpts.Get(bitsPerKeyBBTOOptName)
if bitsPerKey != nil {
filter := grocksdb.NewBloomFilter(cast.ToFloat64(bitsPerKey))
bbto.SetFilterPolicy(filter)
}
blockSize := appOpts.Get(blockSizeBBTOOptName)
if blockSize != nil {
bbto.SetBlockSize(cast.ToInt(blockSize))
}
cacheIndexAndFilterBlocks := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName)
if cacheIndexAndFilterBlocks != nil {
bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(cacheIndexAndFilterBlocks))
}
pinL0FilterAndIndexBlocksInCache := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName)
if pinL0FilterAndIndexBlocksInCache != nil {
bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(pinL0FilterAndIndexBlocksInCache))
}
formatVersion := appOpts.Get(formatVersionBBTOOptName)
if formatVersion != nil {
bbto.SetFormatVersion(cast.ToInt(formatVersion))
}
return bbto
}
// newRocksDBWithOptions opens rocksdb with provided database and column family options
// newRocksDBWithOptions expects that db has only one column family named default
func newRocksDBWithOptions(
name string,
dir string,
dbOpts *grocksdb.Options,
cfOpts *grocksdb.Options,
readOpts *grocksdb.ReadOptions,
enableMetrics bool,
reportMetricsIntervalSecs int64,
) (*dbm.RocksDB, error) {
dbPath := filepath.Join(dir, name+".db")
// Ensure path exists
if err := os.MkdirAll(dbPath, 0755); err != nil {
return nil, fmt.Errorf("failed to create db path: %w", err)
}
// EnableStatistics adds overhead, so it shouldn't be enabled in production
if enableMetrics {
dbOpts.EnableStatistics()
}
db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{defaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
if err != nil {
return nil, err
}
if enableMetrics {
registerMetrics()
go reportMetrics(db, time.Second*time.Duration(reportMetricsIntervalSecs))
}
wo := grocksdb.NewDefaultWriteOptions()
woSync := grocksdb.NewDefaultWriteOptions()
woSync.SetSync(true)
return dbm.NewRocksDBWithRawDB(db, readOpts, wo, woSync), nil
}
// newDefaultOptions returns the default tm-db options for RocksDB; see for details:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func newDefaultOptions() *grocksdb.Options {
// Default rocksdb options, good enough for most cases, including heavy workloads:
// 1GB table cache, 512MB write buffer (may use 50% more under heavy workloads).
// Compression: snappy by default; linking with -lsnappy is needed to enable it.
bbto := defaultBBTO()
opts := grocksdb.NewDefaultOptions()
opts.SetBlockBasedTableFactory(bbto)
// SetMaxOpenFiles to 4096 seems to provide a reliable performance boost
opts.SetMaxOpenFiles(4096)
opts.SetCreateIfMissing(true)
opts.IncreaseParallelism(runtime.NumCPU())
// 1.5GB maximum memory use for writebuffer.
opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
return opts
}
// defaultBBTO returns the default tm-db bbto options for RocksDB; see for details:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func defaultBBTO() *grocksdb.BlockBasedTableOptions {
bbto := grocksdb.NewDefaultBlockBasedTableOptions()
bbto.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize))
bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10))
return bbto
}
// reportMetrics periodically requests stats from rocksdb and reports to prometheus
// NOTE: should be launched as a goroutine
func reportMetrics(db *grocksdb.DB, interval time.Duration) {
ticker := time.NewTicker(interval)
for range ticker.C {
props, stats, err := getPropsAndStats(db)
if err != nil {
continue
}
rocksdbMetrics.report(props, stats)
}
}
// getPropsAndStats gets statistics from rocksdb
func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
propsLoader := newPropsLoader(db)
props, err := propsLoader.load()
if err != nil {
return nil, nil, err
}
statMap, err := parseSerializedStats(props.OptionsStatistics)
if err != nil {
return nil, nil, err
}
statLoader := newStatLoader(statMap)
stats, err := statLoader.load()
if err != nil {
return nil, nil, err
}
return props, stats, nil
}
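To tie the pieces together, here is a hypothetical sketch of how a caller could open the database with metrics enabled. The appOptions map stands in for cosmos-sdk's server/types.AppOptions (which only requires Get); the import path, home directory, and option values are assumptions, not taken from this diff:

package main

import (
	"log"

	dbm "github.com/tendermint/tm-db"

	// assumed import path; adjust to wherever this opendb package lives
	"github.com/0glabs/0g-chain/cmd/opendb"
)

// appOptions is a minimal stand-in for the SDK's AppOptions interface.
type appOptions map[string]interface{}

func (o appOptions) Get(key string) interface{} { return o[key] }

func main() {
	opts := appOptions{
		"rocksdb.enable-metrics":               true,
		"rocksdb.report-metrics-interval-secs": int64(30),
		"rocksdb.max-open-files":               16384,
	}
	// Building with -tags rocksdb selects the rocksdb-aware OpenDB above;
	// without the tag the fallback OpenDB ignores these options.
	db, err := opendb.OpenDB(opts, "/tmp/example-home", dbm.RocksDBBackend)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}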

View File

@ -1,384 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"os"
"path/filepath"
"testing"
"github.com/linxGnu/grocksdb"
"github.com/stretchr/testify/require"
)
type mockAppOptions struct {
opts map[string]interface{}
}
func newMockAppOptions(opts map[string]interface{}) *mockAppOptions {
return &mockAppOptions{
opts: opts,
}
}
func (m *mockAppOptions) Get(key string) interface{} {
return m.opts[key]
}
func TestOpenRocksdb(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
maxOpenFiles int
maxFileOpeningThreads int
writeBufferSize uint64
numLevels int
}{
{
desc: "default options",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 2 options",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
writeBufferSizeCFOptName: 999_999,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 4 options",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
maxFileOpeningThreadsDBOptName: 9,
writeBufferSizeCFOptName: 999_999,
numLevelsCFOptName: 9,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
db, err := openRocksdb(dir, tc.mockAppOptions)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
})
t.Run("db doesn't exist yet", func(t *testing.T) {
defaultOpts := newDefaultOptions()
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
mockAppOpts := newMockAppOptions(map[string]interface{}{})
db, err := openRocksdb(dir, mockAppOpts)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
})
}
func TestLoadLatestOptions(t *testing.T) {
t.Run("db already exists", func(t *testing.T) {
defaultOpts := newDefaultOptions()
const testCasesNum = 3
dbOptsList := make([]*grocksdb.Options, testCasesNum)
cfOptsList := make([]*grocksdb.Options, testCasesNum)
dbOptsList[0] = newDefaultOptions()
cfOptsList[0] = newDefaultOptions()
dbOptsList[1] = newDefaultOptions()
dbOptsList[1].SetMaxOpenFiles(999)
cfOptsList[1] = newDefaultOptions()
cfOptsList[1].SetWriteBufferSize(999_999)
dbOptsList[2] = newDefaultOptions()
dbOptsList[2].SetMaxOpenFiles(999)
dbOptsList[2].SetMaxFileOpeningThreads(9)
cfOptsList[2] = newDefaultOptions()
cfOptsList[2].SetWriteBufferSize(999_999)
cfOptsList[2].SetNumLevels(9)
for _, tc := range []struct {
desc string
dbOpts *grocksdb.Options
cfOpts *grocksdb.Options
maxOpenFiles int
maxFileOpeningThreads int
writeBufferSize uint64
numLevels int
}{
{
desc: "default options",
dbOpts: dbOptsList[0],
cfOpts: cfOptsList[0],
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 2 options",
dbOpts: dbOptsList[1],
cfOpts: cfOptsList[1],
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "change 4 options",
dbOpts: dbOptsList[2],
cfOpts: cfOptsList[2],
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
name := "application"
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
})
t.Run("db doesn't exist yet", func(t *testing.T) {
defaultOpts := newDefaultOptions()
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
})
}
func TestOverrideDBOpts(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
maxOpenFiles int
maxFileOpeningThreads int
}{
{
desc: "override nothing",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
},
{
desc: "override max-open-files",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
},
{
desc: "override max-file-opening-threads",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxFileOpeningThreadsDBOptName: 9,
}),
maxOpenFiles: defaultOpts.GetMaxOpenFiles(),
maxFileOpeningThreads: 9,
},
{
desc: "override max-open-files and max-file-opening-threads",
mockAppOptions: newMockAppOptions(map[string]interface{}{
maxOpenFilesDBOptName: 999,
maxFileOpeningThreadsDBOptName: 9,
}),
maxOpenFiles: 999,
maxFileOpeningThreads: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
dbOpts := newDefaultOptions()
dbOpts = overrideDBOpts(dbOpts, tc.mockAppOptions)
require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
})
}
}
func TestOverrideCFOpts(t *testing.T) {
defaultOpts := newDefaultOptions()
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
writeBufferSize uint64
numLevels int
}{
{
desc: "override nothing",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "override write-buffer-size",
mockAppOptions: newMockAppOptions(map[string]interface{}{
writeBufferSizeCFOptName: 999_999,
}),
writeBufferSize: 999_999,
numLevels: defaultOpts.GetNumLevels(),
},
{
desc: "override num-levels",
mockAppOptions: newMockAppOptions(map[string]interface{}{
numLevelsCFOptName: 9,
}),
writeBufferSize: defaultOpts.GetWriteBufferSize(),
numLevels: 9,
},
{
desc: "override write-buffer-size and num-levels",
mockAppOptions: newMockAppOptions(map[string]interface{}{
writeBufferSizeCFOptName: 999_999,
numLevelsCFOptName: 9,
}),
writeBufferSize: 999_999,
numLevels: 9,
},
} {
t.Run(tc.desc, func(t *testing.T) {
cfOpts := newDefaultOptions()
cfOpts = overrideCFOpts(cfOpts, tc.mockAppOptions)
require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
})
}
}
func TestReadOptsFromAppOpts(t *testing.T) {
for _, tc := range []struct {
desc string
mockAppOptions *mockAppOptions
asyncIO bool
}{
{
desc: "default options",
mockAppOptions: newMockAppOptions(map[string]interface{}{}),
asyncIO: false,
},
{
desc: "set asyncIO option to true",
mockAppOptions: newMockAppOptions(map[string]interface{}{
asyncIOReadOptName: true,
}),
asyncIO: true,
},
} {
t.Run(tc.desc, func(t *testing.T) {
readOpts := readOptsFromAppOpts(tc.mockAppOptions)
require.Equal(t, tc.asyncIO, readOpts.IsAsyncIO())
})
}
}
func TestNewRocksDBWithOptions(t *testing.T) {
defaultOpts := newDefaultOptions()
name := "application"
dir, err := os.MkdirTemp("", "rocksdb")
require.NoError(t, err)
defer func() {
err := os.RemoveAll(dir)
require.NoError(t, err)
}()
dbOpts := newDefaultOptions()
dbOpts.SetMaxOpenFiles(999)
cfOpts := newDefaultOptions()
cfOpts.SetWriteBufferSize(999_999)
db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
require.NoError(t, err)
require.NoError(t, db.Close())
dbOpts, cfOpts, err = loadLatestOptions(filepath.Join(dir, "application.db"))
require.NoError(t, err)
require.Equal(t, 999, dbOpts.GetMaxOpenFiles())
require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize())
require.Equal(t, defaultOpts.GetNumLevels(), dbOpts.GetNumLevels())
}
func TestNewDefaultOptions(t *testing.T) {
defaultOpts := newDefaultOptions()
maxOpenFiles := defaultOpts.GetMaxOpenFiles()
require.Equal(t, 4096, maxOpenFiles)
}

View File

@ -1,87 +0,0 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"errors"
"fmt"
"strings"
)
type propsGetter interface {
GetProperty(propName string) (value string)
GetIntProperty(propName string) (value uint64, success bool)
}
type propsLoader struct {
db propsGetter
errorMsgs []string
}
func newPropsLoader(db propsGetter) *propsLoader {
return &propsLoader{
db: db,
errorMsgs: make([]string, 0),
}
}
func (l *propsLoader) load() (*properties, error) {
props := &properties{
BaseLevel: l.getIntProperty("rocksdb.base-level"),
BlockCacheCapacity: l.getIntProperty("rocksdb.block-cache-capacity"),
BlockCachePinnedUsage: l.getIntProperty("rocksdb.block-cache-pinned-usage"),
BlockCacheUsage: l.getIntProperty("rocksdb.block-cache-usage"),
CurSizeActiveMemTable: l.getIntProperty("rocksdb.cur-size-active-mem-table"),
CurSizeAllMemTables: l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
EstimateLiveDataSize: l.getIntProperty("rocksdb.estimate-live-data-size"),
EstimateNumKeys: l.getIntProperty("rocksdb.estimate-num-keys"),
EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
LiveSSTFilesSize: l.getIntProperty("rocksdb.live-sst-files-size"),
SizeAllMemTables: l.getIntProperty("rocksdb.size-all-mem-tables"),
OptionsStatistics: l.getProperty("rocksdb.options-statistics"),
}
if len(l.errorMsgs) != 0 {
errorMsg := strings.Join(l.errorMsgs, ";")
return nil, errors.New(errorMsg)
}
return props, nil
}
func (l *propsLoader) getProperty(propName string) string {
value := l.db.GetProperty(propName)
if value == "" {
l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
return ""
}
return value
}
func (l *propsLoader) getIntProperty(propName string) uint64 {
value, ok := l.db.GetIntProperty(propName)
if !ok {
l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
return 0
}
return value
}
type properties struct {
BaseLevel uint64
BlockCacheCapacity uint64
BlockCachePinnedUsage uint64
BlockCacheUsage uint64
CurSizeActiveMemTable uint64
CurSizeAllMemTables uint64
EstimateLiveDataSize uint64
EstimateNumKeys uint64
EstimateTableReadersMem uint64
LiveSSTFilesSize uint64
SizeAllMemTables uint64
OptionsStatistics string
}
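Note the design choice above: the loader accumulates every failed lookup in errorMsgs and only then returns a combined error, so a single report cycle surfaces all missing properties at once. A small illustrative sketch; the fakeProps type and demoPropsLoader function are hypothetical and not part of the original code:

//go:build rocksdb

package opendb

// fakeProps simulates a database where no property can be read.
type fakeProps struct{}

func (fakeProps) GetProperty(string) string            { return "" }
func (fakeProps) GetIntProperty(string) (uint64, bool) { return 0, false }

func demoPropsLoader() error {
	// load returns a single error joining every missing property with ";".
	_, err := newPropsLoader(fakeProps{}).load()
	return err
}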

Some files were not shown because too many files have changed in this diff.