Compare commits

...

18 Commits
dev ... v0.24.2

Author SHA1 Message Date
drklee3
7e6451a528
Fix changelog unhandled merge conflict (#1772) 2023-11-17 14:06:00 -08:00
mergify[bot]
b97aa51c0b
fix: update ledger-cosmos-go v0.13.1 to resolve signing error with cosmos ledger app 2.34.12 (backport #1770) (#1771)
* fix: update ledger-cosmos-go v0.13.1 to resolve signing error with cosmos ledger app 2.34.12 (#1770)

* Update ledger-cosmos-go v0.13.1 with cosmos fork update

* Bump cosmos-sdk v0.46.11-kava.2

* Update changelog

* Update cosmos-sdk tag v0.46.11-kava.3

Incorrect kava.2 tag

(cherry picked from commit 39146747ac)

# Conflicts:
#	CHANGELOG.md
#	go.mod
#	go.sum

* Update conflicts in go.mod

* Update changelog

---------

Co-authored-by: drklee3 <derrick@dlee.dev>
2023-11-17 14:01:41 -08:00
mergify[bot]
2f24120b31 fix: prevent goleveldb panic on large genesis files (backport #1631) (#1747)
* fix: prevent goleveldb panic on large genesis files (#1631)

* Use patched cometbft

* Temporarily disable evmutil fully-backed invariant

* chore: use patched cometbft v0.34.27-kava.0

* Revert "Temporarily disable evmutil fully-backed invariant"

This reverts commit 66c2357d8b505e601c5ecf6a28faa96696e33d6c.

* Revert change to goleveldb version

* Update changelog

* Update changelog pr

(cherry picked from commit bf5db8a7bd4d875d3d0fed51ea5e0248e827f63b)

# Conflicts:
#	CHANGELOG.md

* Fix conflicts in changelog

---------

Co-authored-by: drklee3 <derrick@dlee.dev>
2023-10-25 12:49:00 -07:00
mergify[bot]
e7f598d7ec Make read-async-io configurable (#1732) (#1738)
* Make read-async-io configurable

* Added unit-test for read options configuration

(cherry picked from commit 0598b99063)

Co-authored-by: Evgeniy Scherbina <evgeniy.shcherbina.es@gmail.com>
2023-10-25 12:49:00 -07:00
mergify[bot]
f8ca05626f Add metrics for rocksdb bloom filter and lsm tree (#1710) (#1728)
* Add metrics for rocksdb bloom filter

* Add metrics for rocksdb lsm tree

* Add metrics for rocksdb lsm tree

* Add metrics for rocksdb detailed cache

* Fix tests

* Add help for metrics

* Add help for metrics

(cherry picked from commit 9aefbac0e8)

Co-authored-by: Evgeniy Scherbina <evgeniy.shcherbina.es@gmail.com>
2023-10-25 12:49:00 -07:00
mergify[bot]
a6f771e49c Add metrics for rocksdb query latency (#1709) (#1722)
* Add metrics for rocksdb query latency

* Add metrics for rocksdb write stalling

* Add metrics for rocksdb write stall histogram

* Fix tests

(cherry picked from commit 75c86a772b)

Co-authored-by: Evgeniy Scherbina <evgeniy.shcherbina.es@gmail.com>
2023-10-25 12:49:00 -07:00
mergify[bot]
a442d20692 Add max-background-jobs rocksdb option (#1708) (#1716)
(cherry picked from commit 3c8394f17a)

Co-authored-by: Evgeniy Scherbina <evgeniy.shcherbina.es@gmail.com>
2023-10-25 12:49:00 -07:00
mergify[bot]
e35747a5f5 Make rocksdb configurable (backport #1658) (#1702)
* Make rocksdb configurable (#1658)

* Make rocksdb configurable

* Make sure rocksdb tests are running in CI

* Updating ci-rocksdb-build workflow

* Remove test.sh

* Update tm-db dependency

(cherry picked from commit 90fbe1aad7)

* Rocksdb Metrics (#1692)

* Rocksdb Metrics

* Add rocksdb namespace for options

* Adding help to the metrics

* CR's fixes

* CR's fixes

* CR's fixes

* Increase number of options to configure rocksdb (#1696)

---------

Co-authored-by: Evgeniy Scherbina <evgeniy.shcherbina.es@gmail.com>
2023-10-25 12:49:00 -07:00
Robert Pirtle
bdfcf56fba prepare v0.24.1 release (#1758)
* prepare v0.24.1 release

* add staking_rewards migration doc
2023-10-25 12:49:00 -07:00
mergify[bot]
db5f89b9be feat(community): add CLI cmd for annualized-rewards (backport #1756) (#1757)
* feat(community): add CLI cmd for annualized-rewards (#1756)

(cherry picked from commit 48ee996f61)

* resolve conflicts

---------

Co-authored-by: Robert Pirtle <Astropirtle@gmail.com>
2023-10-25 12:49:00 -07:00
mergify[bot]
a9583b16f4 feat(community): add AnnualizedRewards grpc query (backport #1751) (#1754)
* feat(community): add AnnualizedRewards grpc query (#1751)

* add annualized_reward query proto

* use sdkmath.LegacyDec to match RPS param...

* add AnnualizedRewards grpc query

* add changelog entry

* simplify calculation & expand test cases

(cherry picked from commit 0efe7f2281)

* fix conflicts, remove community param references

* backport update to lint CI

* disable internal testnet genesis check

* fix initialization order of keepers in app.go

---------

Co-authored-by: Robert Pirtle <Astropirtle@gmail.com>
2023-10-25 12:49:00 -07:00
mergify[bot]
e7e2cbf41a
docs: update unused cosmos modules in swagger docs (#1664) (#1693)
(cherry picked from commit ebcebc4be3)

Co-authored-by: Ruaridh <rhuairahrighairidh@users.noreply.github.com>
2023-09-01 09:41:29 -07:00
Robert Pirtle
35358df72a
unregister x/metrics & call BeginBlocker directly (#1691)
registering x/metrics to the module manager breaks the AppHash because
its consensus version is added to the ModuleVersionMap which affects the
AppHash.

this commit unregisters the module so it is not consensus breaking.
instead, it directly calls the BeginBlock before running the module
manager's.
2023-08-28 12:00:25 -07:00
mergify[bot]
f898e3aff4
feat(metrics): add timing metrics to abci methods (backport #1669) (#1685)
* feat(metrics): add timing metrics to abci methods (#1669)

* feat(metrics): add timing metrics to abci methods

* update changelog

(cherry picked from commit 8b6bbd36f4)

# Conflicts:
#	CHANGELOG.md

* fix changelog conflicts

---------

Co-authored-by: Robert Pirtle <Astropirtle@gmail.com>
2023-08-25 16:11:45 -07:00
mergify[bot]
6c565343f7
feat(x/metrics): add module for emitting custom chain metrics (backport #1668) (#1677)
* feat(x/metrics): add module for emitting custom chain metrics (#1668)

* initialize x/metrics with metrics collection

* include global labels in x/metrics metrics

* add x/metrics spec

* add x/metrics test coverage

* update changelog

(cherry picked from commit 9a0aed7626)

# Conflicts:
#	CHANGELOG.md

* fix changelog conflicts

---------

Co-authored-by: Robert Pirtle <Astropirtle@gmail.com>
2023-08-25 15:14:14 -07:00
Robert Pirtle
48c845b941 chore: support $(GO_BIN) in make install / build 2023-08-09 16:10:43 -07:00
Nick DeLuca
2eee058b78
add kava 14 migration instructions in prep to tag v0.24.0 (#1647) 2023-06-30 14:09:20 -07:00
Robert Pirtle
1d031896cb
feat: add upgrade handlers for v0.24.x (#1619)
* setup skeleton for upgrade handlers

* initialize allowed_cosmos_denoms param

* allow eip712 signing of cosmos coin conversion msgs

* update stability committee ParamsChangePermission

* e2e test the upgrade handler
2023-06-15 12:21:54 -07:00
57 changed files with 4341 additions and 9077 deletions

View File

@ -33,37 +33,38 @@ jobs:
run: make test
- name: run e2e tests
run: make docker-build test-e2e
validate-internal-testnet-genesis:
runs-on: ubuntu-latest
steps:
- name: checkout repo from current commit
uses: actions/checkout@v3
- name: save version of kava that will be deployed if this pr is merged
id: kava-version
run: |
echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
- name: checkout repo from master
uses: actions/checkout@v3
with:
ref: master
- name: checkout version of kava that will be deployed if this pr is merged
run: |
git pull -p
git checkout $KAVA_VERSION
env:
KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: "1.20"
check-latest: true
cache: true
- name: build kava cli
run: make install
- name: checkout repo from current commit to validate current branch's genesis
uses: actions/checkout@v3
- name: validate testnet genesis
run: kava validate-genesis ci/env/kava-internal-testnet/genesis.json
# this is only applicable for PRs based on master. Disabling for this release branch.
# validate-internal-testnet-genesis:
# runs-on: ubuntu-latest
# steps:
# - name: checkout repo from current commit
# uses: actions/checkout@v3
# - name: save version of kava that will be deployed if this pr is merged
# id: kava-version
# run: |
# echo "KAVA_VERSION=$(cat ./ci/env/kava-internal-testnet/KAVA.VERSION)" >> $GITHUB_OUTPUT
# - name: checkout repo from master
# uses: actions/checkout@v3
# with:
# ref: master
# - name: checkout version of kava that will be deployed if this pr is merged
# run: |
# git pull -p
# git checkout $KAVA_VERSION
# env:
# KAVA_VERSION: ${{ steps.kava-version.outputs.KAVA_VERSION }}
# - name: Set up Go
# uses: actions/setup-go@v3
# with:
# go-version: "1.20"
# check-latest: true
# cache: true
# - name: build kava cli
# run: make install
# - name: checkout repo from current commit to validate current branch's genesis
# uses: actions/checkout@v3
# - name: validate testnet genesis
# run: kava validate-genesis ci/env/kava-internal-testnet/genesis.json
validate-protonet-genesis:
runs-on: ubuntu-latest
steps:

View File

@ -14,3 +14,4 @@ jobs:
with:
github_token: ${{ secrets.github_token }}
reporter: github-pr-review
golangci_lint_flags: --timeout 10m

View File

@ -1,5 +1,8 @@
name: Continuous Integration (Rocksdb Build)
env:
ROCKSDB_VERSION: v8.1.1
on:
workflow_call:
jobs:
@ -16,7 +19,29 @@ jobs:
cache: true
- name: build rocksdb dependency
run: bash ${GITHUB_WORKSPACE}/.github/scripts/install-rocksdb.sh
env:
ROCKSDB_VERSION: v7.10.2
- name: build application
run: make build COSMOS_BUILD_OPTIONS=rocksdb
test:
runs-on: ubuntu-latest
steps:
- name: install RocksDB dependencies
run: sudo apt-get update
&& sudo apt-get install -y git make gcc libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
- name: install RocksDB as shared library
run: git clone https://github.com/facebook/rocksdb.git
&& cd rocksdb
&& git checkout $ROCKSDB_VERSION
&& sudo make -j$(nproc) install-shared
&& sudo ldconfig
- name: checkout repo from current commit
uses: actions/checkout@v3
with:
submodules: true
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: "1.20"
check-latest: true
cache: true
- name: run unit tests
run: make test-rocksdb

View File

@ -36,6 +36,22 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
## [v0.24.2](https://github.com/Kava-Labs/kava/releases/tag/v0.24.2)
### Bug Fixes
- (deps) [#1770] Bump ledger-cosmos-go to v0.13.1 to resolve signing error with
cosmos ledger app 2.34.12
## [v0.24.1](https://github.com/Kava-Labs/kava/releases/tag/v0.24.1)
### Features
- (metrics) [#1668] Adds non-state breaking x/metrics module for custom telemetry.
- (metrics) [#1669] Add performance timing metrics to all Begin/EndBlockers
- (community) [#1751] Add `AnnualizedRewards` query endpoint
## [v0.24.0](https://github.com/Kava-Labs/kava/releases/tag/v0.24.0)
### Features
- (evmutil) [#1590] & [#1596] Add allow list param of sdk native denoms that can be transferred to evm
- (evmutil) [#1591] & [#1596] Configure module to support deploying ERC20KavaWrappedCosmosCoin contracts
@ -98,6 +114,9 @@ Ref: https://keepachangelog.com/en/1.0.0/
### Bug Fixes
- (x/incentive) [#1550] Fix validation on genesis reward accumulation time.
- (deps) [#1622] Bump tm-db to v0.6.7-kava.3 to return rocksdb open error
- (deps) [#1631] Bump cometbft to v0.34.27-kava.0 to avoid goleveldb panic on large
genesis files.
## [v0.16.1]
@ -268,7 +287,15 @@ the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.38.4/CHANGELOG.md).
- [#257](https://github.com/Kava-Labs/kava/pulls/257) Include scripts to run
large-scale simulations remotely using aws-batch
[#1770]: https://github.com/Kava-Labs/kava/pull/1770
[#1755]: https://github.com/Kava-Labs/kava/pull/1755
[#1761]: https://github.com/Kava-Labs/kava/pull/1761
[#1752]: https://github.com/Kava-Labs/kava/pull/1752
[#1751]: https://github.com/Kava-Labs/kava/pull/1751
[#1669]: https://github.com/Kava-Labs/kava/pull/1669
[#1668]: https://github.com/Kava-Labs/kava/pull/1668
[#1624]: https://github.com/Kava-Labs/kava/pull/1624
[#1631]: https://github.com/Kava-Labs/kava/pull/1631
[#1622]: https://github.com/Kava-Labs/kava/pull/1622
[#1614]: https://github.com/Kava-Labs/kava/pull/1614
[#1610]: https://github.com/Kava-Labs/kava/pull/1610

View File

@ -10,7 +10,7 @@ WORKDIR /root
# default home directory is /root
# install rocksdb
ARG rocksdb_version=v7.10.2
ARG rocksdb_version=v8.1.1
ENV ROCKSDB_VERSION=$rocksdb_version
RUN git clone https://github.com/facebook/rocksdb.git \

View File

@ -3,6 +3,8 @@
################################################################################
PROJECT_NAME := kava# unique namespace for project
GO_BIN ?= go
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
GIT_COMMIT := $(shell git rev-parse HEAD)
GIT_COMMIT_SHORT := $(shell git rev-parse --short HEAD)
@ -186,16 +188,16 @@ all: install
build: go.sum
ifeq ($(OS), Windows_NT)
go build -mod=readonly $(BUILD_FLAGS) -o out/$(shell go env GOOS)/kava.exe ./cmd/kava
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/kava.exe ./cmd/kava
else
go build -mod=readonly $(BUILD_FLAGS) -o out/$(shell go env GOOS)/kava ./cmd/kava
$(GO_BIN) build -mod=readonly $(BUILD_FLAGS) -o out/$(shell $(GO_BIN) env GOOS)/kava ./cmd/kava
endif
build-linux: go.sum
LEDGER_ENABLED=false GOOS=linux GOARCH=amd64 $(MAKE) build
install: go.sum
go install -mod=readonly $(BUILD_FLAGS) ./cmd/kava
$(GO_BIN) install -mod=readonly $(BUILD_FLAGS) ./cmd/kava
########################################
### Tools & dependencies
@ -300,6 +302,9 @@ test-e2e: docker-build
test:
@go test $$(go list ./... | grep -v 'contrib' | grep -v 'tests/e2e')
test-rocksdb:
@go test -tags=rocksdb ./cmd/kava/opendb
# Run cli integration tests
# `-p 4` to use 4 cores, `-tags cli_test` to tell go not to ignore the cli package
# These tests use the `kvd` or `kvcli` binaries in the build dir, or in `$BUILDDIR` if that env var is set.

View File

@ -24,13 +24,13 @@ Reference implementation of Kava, a blockchain for cross-chain DeFi. Built using
## Mainnet
The current recommended version of the software for mainnet is [v0.23.0](https://github.com/Kava-Labs/kava/releases/tag/v0.23.0). The master branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
The current recommended version of the software for mainnet is [v0.24.1](https://github.com/Kava-Labs/kava/releases/tag/v0.24.1). The master branch of this repository often contains considerable development work since the last mainnet release and is __not__ runnable on mainnet.
### Installation and Setup
For detailed instructions see [the Kava docs](https://docs.kava.io/docs/participate/validator-node).
```bash
git checkout v0.23.0
git checkout v0.24.1
make install
```

View File

@ -139,6 +139,8 @@ import (
"github.com/kava-labs/kava/x/liquid"
liquidkeeper "github.com/kava-labs/kava/x/liquid/keeper"
liquidtypes "github.com/kava-labs/kava/x/liquid/types"
metrics "github.com/kava-labs/kava/x/metrics"
metricstypes "github.com/kava-labs/kava/x/metrics/types"
pricefeed "github.com/kava-labs/kava/x/pricefeed"
pricefeedkeeper "github.com/kava-labs/kava/x/pricefeed/keeper"
pricefeedtypes "github.com/kava-labs/kava/x/pricefeed/types"
@ -261,6 +263,7 @@ type Options struct {
MempoolAuthAddresses []sdk.AccAddress
EVMTrace string
EVMMaxGasWanted uint64
TelemetryOptions metricstypes.TelemetryOptions
}
// DefaultOptions is a sensible default Options value.
@ -330,6 +333,12 @@ type App struct {
// configurator
configurator module.Configurator
// backported x/metrics Metrics
// to prevent AppHash mismatch, the module is not registered to the module manager.
// instead, the module's BeginBlocker is called directly.
// this way, its consensus version has no bearing on the ModuleVersionMap included in the AppHash.
metrics *metricstypes.Metrics
}
func init() {
@ -383,6 +392,7 @@ func NewApp(
keys: keys,
tkeys: tkeys,
memKeys: memKeys,
metrics: metricstypes.NewMetrics(options.TelemetryOptions),
}
// init params keeper and subspaces
@ -631,14 +641,6 @@ func NewApp(
&app.distrKeeper,
)
// x/community's deposit/withdraw to lend proposals depend on hard keeper.
app.communityKeeper = communitykeeper.NewKeeper(
app.accountKeeper,
app.bankKeeper,
&cdpKeeper,
app.distrKeeper,
&hardKeeper,
)
app.kavadistKeeper = kavadistkeeper.NewKeeper(
appCodec,
keys[kavadisttypes.StoreKey],
@ -659,6 +661,17 @@ func NewApp(
authtypes.FeeCollectorName,
)
// x/community's deposit/withdraw to lend proposals depend on hard keeper.
app.communityKeeper = communitykeeper.NewKeeper(
app.accountKeeper,
app.bankKeeper,
&cdpKeeper,
app.distrKeeper,
&hardKeeper,
&app.mintKeeper,
app.stakingKeeper,
)
app.incentiveKeeper = incentivekeeper.NewKeeper(
appCodec,
keys[incentivetypes.StoreKey],
@ -1010,6 +1023,10 @@ func (app *App) RegisterServices(cfg module.Configurator) {
// BeginBlocker contains app specific logic for the BeginBlock abci call.
func (app *App) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock {
// call the metrics BeginBlocker directly instead of registering the module to the module manager.
// all consensus versions of modules registered to the module manager contribute to the AppHash.
// to prevent the backport of x/metrics from being consensus breaking, it is called directly.
metrics.BeginBlocker(ctx, app.metrics)
return app.mm.BeginBlock(ctx, req)
}

View File

@ -1,3 +1,251 @@
package app
func (app App) RegisterUpgradeHandlers() {}
import (
"fmt"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
evmkeeper "github.com/evmos/ethermint/x/evm/keeper"
evmtypes "github.com/evmos/ethermint/x/evm/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
committeekeeper "github.com/kava-labs/kava/x/committee/keeper"
committeetypes "github.com/kava-labs/kava/x/committee/types"
evmutilkeeper "github.com/kava-labs/kava/x/evmutil/keeper"
evmutiltypes "github.com/kava-labs/kava/x/evmutil/types"
)
const (
MainnetUpgradeName = "v0.24.0"
TestnetUpgradeName = "v0.24.0-alpha.0"
MainnetAtomDenom = "ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2"
TestnetHardDenom = "hard"
MainnetStabilityCommitteeId = uint64(1)
TestnetStabilityCommitteeId = uint64(1)
)
var (
// Committee permission for changing AllowedCosmosDenoms param
AllowedParamsChangeAllowedCosmosDenoms = committeetypes.AllowedParamsChange{
Subspace: evmutiltypes.ModuleName,
Key: "AllowedCosmosDenoms",
}
// EIP712 allowed message for MsgConvertCosmosCoinToERC20
EIP712AllowedMsgConvertCosmosCoinToERC20 = evmtypes.EIP712AllowedMsg{
MsgTypeUrl: "/kava.evmutil.v1beta1.MsgConvertCosmosCoinToERC20",
MsgValueTypeName: "MsgConvertCosmosCoinToERC20",
ValueTypes: []evmtypes.EIP712MsgAttrType{
{
Name: "initiator",
Type: "string",
},
{
Name: "receiver",
Type: "string",
},
{
Name: "amount",
Type: "Coin",
},
},
NestedTypes: nil,
}
// EIP712 allowed message for MsgConvertCosmosCoinFromERC20
EIP712AllowedMsgConvertCosmosCoinFromERC20 = evmtypes.EIP712AllowedMsg{
MsgTypeUrl: "/kava.evmutil.v1beta1.MsgConvertCosmosCoinFromERC20",
MsgValueTypeName: "MsgConvertCosmosCoinFromERC20",
ValueTypes: []evmtypes.EIP712MsgAttrType{
{
Name: "initiator",
Type: "string",
},
{
Name: "receiver",
Type: "string",
},
{
Name: "amount",
Type: "Coin",
},
},
NestedTypes: nil,
}
)
func (app App) RegisterUpgradeHandlers() {
// register upgrade handler for mainnet
app.upgradeKeeper.SetUpgradeHandler(MainnetUpgradeName, MainnetUpgradeHandler(app))
// register upgrade handler for testnet
app.upgradeKeeper.SetUpgradeHandler(TestnetUpgradeName, TestnetUpgradeHandler(app))
upgradeInfo, err := app.upgradeKeeper.ReadUpgradeInfoFromDisk()
if err != nil {
panic(err)
}
doUpgrade := upgradeInfo.Name == MainnetUpgradeName || upgradeInfo.Name == TestnetUpgradeName
if doUpgrade && !app.upgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
storeUpgrades := storetypes.StoreUpgrades{}
// configure store loader that checks if version == upgradeHeight and applies store upgrades
app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
}
}
func MainnetUpgradeHandler(app App) upgradetypes.UpgradeHandler {
return func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
app.Logger().Info("running mainnet upgrade handler")
toVM, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
if err != nil {
return toVM, err
}
app.Logger().Info("initializing allowed_cosmos_denoms param of x/evmutil")
allowedDenoms := []evmutiltypes.AllowedCosmosCoinERC20Token{
{
CosmosDenom: MainnetAtomDenom,
// erc20 contract metadata
Name: "ATOM",
Symbol: "ATOM",
Decimals: 6,
},
}
InitializeEvmutilAllowedCosmosDenoms(ctx, &app.evmutilKeeper, allowedDenoms)
app.Logger().Info("allowing cosmos coin conversion messaged in EIP712 signing")
AllowEip712SigningForConvertMessages(ctx, app.evmKeeper)
app.Logger().Info("allowing stability committee to update x/evmutil AllowedCosmosDenoms param")
AddAllowedCosmosDenomsParamChangeToStabilityCommittee(
ctx,
app.interfaceRegistry,
&app.committeeKeeper,
MainnetStabilityCommitteeId,
)
return toVM, nil
}
}
func TestnetUpgradeHandler(app App) upgradetypes.UpgradeHandler {
return func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
app.Logger().Info("running testnet upgrade handler")
toVM, err := app.mm.RunMigrations(ctx, app.configurator, fromVM)
if err != nil {
return toVM, err
}
app.Logger().Info("initializing allowed_cosmos_denoms param of x/evmutil")
// on testnet, IBC is not enabled. we initialize HARD tokens for conversion to EVM.
allowedDenoms := []evmutiltypes.AllowedCosmosCoinERC20Token{
{
CosmosDenom: TestnetHardDenom,
// erc20 contract metadata
Name: "HARD",
Symbol: "HARD",
Decimals: 6,
},
}
InitializeEvmutilAllowedCosmosDenoms(ctx, &app.evmutilKeeper, allowedDenoms)
app.Logger().Info("allowing cosmos coin conversion messaged in EIP712 signing")
AllowEip712SigningForConvertMessages(ctx, app.evmKeeper)
app.Logger().Info("allowing stability committee to update x/evmutil AllowedCosmosDenoms param")
AddAllowedCosmosDenomsParamChangeToStabilityCommittee(
ctx,
app.interfaceRegistry,
&app.committeeKeeper,
TestnetStabilityCommitteeId,
)
return toVM, nil
}
}
// InitializeEvmutilAllowedCosmosDenoms sets the AllowedCosmosDenoms parameter of the x/evmutil module.
// This new parameter controls what cosmos denoms are allowed to be converted to ERC20 tokens.
func InitializeEvmutilAllowedCosmosDenoms(
ctx sdk.Context,
evmutilKeeper *evmutilkeeper.Keeper,
allowedCoins []evmutiltypes.AllowedCosmosCoinERC20Token,
) {
params := evmutilKeeper.GetParams(ctx)
params.AllowedCosmosDenoms = allowedCoins
if err := params.Validate(); err != nil {
panic(fmt.Sprintf("x/evmutil params are not valid: %s", err))
}
evmutilKeeper.SetParams(ctx, params)
}
// AllowEip712SigningForConvertMessages adds the cosmos coin conversion messages to the
// allowed message types for EIP712 signing.
// The newly allowed messages are:
// - MsgConvertCosmosCoinToERC20
// - MsgConvertCosmosCoinFromERC20
func AllowEip712SigningForConvertMessages(ctx sdk.Context, evmKeeper *evmkeeper.Keeper) {
params := evmKeeper.GetParams(ctx)
params.EIP712AllowedMsgs = append(
params.EIP712AllowedMsgs,
EIP712AllowedMsgConvertCosmosCoinToERC20,
EIP712AllowedMsgConvertCosmosCoinFromERC20,
)
if err := params.Validate(); err != nil {
panic(fmt.Sprintf("x/evm params are not valid: %s", err))
}
evmKeeper.SetParams(ctx, params)
}
// AddAllowedCosmosDenomsParamChangeToStabilityCommittee enables the stability committee
// to update the AllowedCosmosDenoms parameter of x/evmutil.
func AddAllowedCosmosDenomsParamChangeToStabilityCommittee(
ctx sdk.Context,
cdc codectypes.InterfaceRegistry,
committeeKeeper *committeekeeper.Keeper,
committeeId uint64,
) {
// get committee
committee, foundCommittee := committeeKeeper.GetCommittee(ctx, committeeId)
if !foundCommittee {
panic(fmt.Sprintf("expected to find committee with id %d but found none", committeeId))
}
permissions := committee.GetPermissions()
// find & update the ParamsChangePermission
foundPermission := false
for i, permission := range permissions {
if paramsChangePermission, ok := permission.(*committeetypes.ParamsChangePermission); ok {
foundPermission = true
paramsChangePermission.AllowedParamsChanges = append(
paramsChangePermission.AllowedParamsChanges,
AllowedParamsChangeAllowedCosmosDenoms,
)
permissions[i] = paramsChangePermission
break
}
}
// error if permission was not found & updated
if !foundPermission {
panic(fmt.Sprintf("no ParamsChangePermission found on committee with id %d", committeeId))
}
// update permissions
committee.SetPermissions(permissions)
if err := committee.Validate(); err != nil {
panic(fmt.Sprintf("stability committee (id=%d) is invalid: %s", committeeId, err))
}
// save permission changes
committeeKeeper.SetCommittee(ctx, committee)
}

View File

@ -245,7 +245,11 @@
}
},
"paths": {
"exclude": ["^/cosmos/authz/.*", "^/cosmos/feegrant/.*"]
"exclude": [
"^/cosmos/feegrant/.*",
"^/cosmos/nft/.*",
"^/cosmos/group/.*"
]
}
},
{
@ -266,6 +270,12 @@
"to": "Ibc$1"
}
]
},
"paths": {
"exclude": [
"^/ibc/apps/interchain_accounts/.*",
"^/ibc/apps/fee/.*"
]
}
},
{

File diff suppressed because it is too large Load Diff

View File

@ -24,6 +24,7 @@ import (
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/app/params"
metricstypes "github.com/kava-labs/kava/x/metrics/types"
)
const (
@ -99,6 +100,7 @@ func (ac appCreator) newApp(
MempoolAuthAddresses: mempoolAuthAddresses,
EVMTrace: cast.ToString(appOpts.Get(ethermintflags.EVMTracer)),
EVMMaxGasWanted: cast.ToUint64(appOpts.Get(ethermintflags.EVMMaxTxGasWanted)),
TelemetryOptions: metricstypes.TelemetryOptionsFromAppOpts(appOpts),
},
baseapp.SetPruning(pruningOpts),
baseapp.SetMinGasPrices(strings.Replace(cast.ToString(appOpts.Get(server.FlagMinGasPrices)), ";", ",", -1)),

View File

@ -23,6 +23,7 @@ import (
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/app/params"
kavaclient "github.com/kava-labs/kava/client"
"github.com/kava-labs/kava/cmd/kava/opendb"
)
// EnvPrefix is the prefix environment variables must have to configure the app.
@ -105,13 +106,15 @@ func addSubCmds(rootCmd *cobra.Command, encodingConfig params.EncodingConfig, de
encodingConfig: encodingConfig,
}
opts := ethermintserver.StartOptions{
AppCreator: ac.newApp,
DefaultNodeHome: app.DefaultNodeHome,
DBOpener: opendb.OpenDB,
}
// ethermintserver adds additional flags to start the JSON-RPC server for evm support
ethermintserver.AddCommands(
rootCmd,
ethermintserver.NewDefaultStartOptions(
ac.newApp,
app.DefaultNodeHome,
),
opts,
ac.appExport,
ac.addStartCmdFlags,
)

499
cmd/kava/opendb/metrics.go Normal file
View File

@ -0,0 +1,499 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
)
// rocksdbMetrics will be initialized in registerMetrics() if enableRocksdbMetrics flag set to true
var rocksdbMetrics *Metrics
// Metrics contains all rocksdb metrics which will be reported to prometheus
type Metrics struct {
// Keys
NumberKeysWritten metrics.Gauge
NumberKeysRead metrics.Gauge
NumberKeysUpdated metrics.Gauge
EstimateNumKeys metrics.Gauge
// Files
NumberFileOpens metrics.Gauge
NumberFileErrors metrics.Gauge
// Memory
BlockCacheUsage metrics.Gauge
EstimateTableReadersMem metrics.Gauge
CurSizeAllMemTables metrics.Gauge
BlockCachePinnedUsage metrics.Gauge
// Cache
BlockCacheMiss metrics.Gauge
BlockCacheHit metrics.Gauge
BlockCacheAdd metrics.Gauge
BlockCacheAddFailures metrics.Gauge
// Detailed Cache
BlockCacheIndexMiss metrics.Gauge
BlockCacheIndexHit metrics.Gauge
BlockCacheIndexBytesInsert metrics.Gauge
BlockCacheFilterMiss metrics.Gauge
BlockCacheFilterHit metrics.Gauge
BlockCacheFilterBytesInsert metrics.Gauge
BlockCacheDataMiss metrics.Gauge
BlockCacheDataHit metrics.Gauge
BlockCacheDataBytesInsert metrics.Gauge
// Latency
DBGetMicrosP50 metrics.Gauge
DBGetMicrosP95 metrics.Gauge
DBGetMicrosP99 metrics.Gauge
DBGetMicrosP100 metrics.Gauge
DBGetMicrosCount metrics.Gauge
DBWriteMicrosP50 metrics.Gauge
DBWriteMicrosP95 metrics.Gauge
DBWriteMicrosP99 metrics.Gauge
DBWriteMicrosP100 metrics.Gauge
DBWriteMicrosCount metrics.Gauge
// Write Stall
StallMicros metrics.Gauge
DBWriteStallP50 metrics.Gauge
DBWriteStallP95 metrics.Gauge
DBWriteStallP99 metrics.Gauge
DBWriteStallP100 metrics.Gauge
DBWriteStallCount metrics.Gauge
DBWriteStallSum metrics.Gauge
// Bloom Filter
BloomFilterUseful metrics.Gauge
BloomFilterFullPositive metrics.Gauge
BloomFilterFullTruePositive metrics.Gauge
// LSM Tree Stats
LastLevelReadBytes metrics.Gauge
LastLevelReadCount metrics.Gauge
NonLastLevelReadBytes metrics.Gauge
NonLastLevelReadCount metrics.Gauge
GetHitL0 metrics.Gauge
GetHitL1 metrics.Gauge
GetHitL2AndUp metrics.Gauge
}
// registerMetrics registers metrics in prometheus and initializes rocksdbMetrics variable
func registerMetrics() {
if rocksdbMetrics != nil {
// metrics already registered
return
}
labels := make([]string, 0)
rocksdbMetrics = &Metrics{
// Keys
NumberKeysWritten: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_written",
Help: "",
}, labels),
NumberKeysRead: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_read",
Help: "",
}, labels),
NumberKeysUpdated: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "number_keys_updated",
Help: "",
}, labels),
EstimateNumKeys: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "key",
Name: "estimate_num_keys",
Help: "estimated number of total keys in the active and unflushed immutable memtables and storage",
}, labels),
// Files
NumberFileOpens: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_opens",
Help: "",
}, labels),
NumberFileErrors: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "file",
Name: "number_file_errors",
Help: "",
}, labels),
// Memory
BlockCacheUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_usage",
Help: "memory size for the entries residing in block cache",
}, labels),
EstimateTableReadersMem: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "estimate_table_readers_mem",
Help: "estimated memory used for reading SST tables, excluding memory used in block cache (e.g., filter and index blocks)",
}, labels),
CurSizeAllMemTables: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "cur_size_all_mem_tables",
Help: "approximate size of active and unflushed immutable memtables (bytes)",
}, labels),
BlockCachePinnedUsage: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "memory",
Name: "block_cache_pinned_usage",
Help: "returns the memory size for the entries being pinned",
}, labels),
// Cache
BlockCacheMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_miss",
Help: "block_cache_miss == block_cache_index_miss + block_cache_filter_miss + block_cache_data_miss",
}, labels),
BlockCacheHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_hit",
Help: "block_cache_hit == block_cache_index_hit + block_cache_filter_hit + block_cache_data_hit",
}, labels),
BlockCacheAdd: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add",
Help: "number of blocks added to block cache",
}, labels),
BlockCacheAddFailures: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "cache",
Name: "block_cache_add_failures",
Help: "number of failures when adding blocks to block cache",
}, labels),
// Detailed Cache
BlockCacheIndexMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_miss",
Help: "",
}, labels),
BlockCacheIndexHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_hit",
Help: "",
}, labels),
BlockCacheIndexBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_index_bytes_insert",
Help: "",
}, labels),
BlockCacheFilterMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_miss",
Help: "",
}, labels),
BlockCacheFilterHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_hit",
Help: "",
}, labels),
BlockCacheFilterBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_filter_bytes_insert",
Help: "",
}, labels),
BlockCacheDataMiss: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_miss",
Help: "",
}, labels),
BlockCacheDataHit: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_hit",
Help: "",
}, labels),
BlockCacheDataBytesInsert: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "detailed_cache",
Name: "block_cache_data_bytes_insert",
Help: "",
}, labels),
// Latency
DBGetMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p50",
Help: "",
}, labels),
DBGetMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p95",
Help: "",
}, labels),
DBGetMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p99",
Help: "",
}, labels),
DBGetMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_p100",
Help: "",
}, labels),
DBGetMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_get_micros_count",
Help: "",
}, labels),
DBWriteMicrosP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p50",
Help: "",
}, labels),
DBWriteMicrosP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p95",
Help: "",
}, labels),
DBWriteMicrosP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p99",
Help: "",
}, labels),
DBWriteMicrosP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_p100",
Help: "",
}, labels),
DBWriteMicrosCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "latency",
Name: "db_write_micros_count",
Help: "",
}, labels),
// Write Stall
StallMicros: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "stall_micros",
Help: "Writer has to wait for compaction or flush to finish.",
}, labels),
DBWriteStallP50: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p50",
Help: "",
}, labels),
DBWriteStallP95: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p95",
Help: "",
}, labels),
DBWriteStallP99: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p99",
Help: "",
}, labels),
DBWriteStallP100: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_p100",
Help: "",
}, labels),
DBWriteStallCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_count",
Help: "",
}, labels),
DBWriteStallSum: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "stall",
Name: "db_write_stall_sum",
Help: "",
}, labels),
// Bloom Filter
BloomFilterUseful: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_useful",
Help: "number of times bloom filter has avoided file reads, i.e., negatives.",
}, labels),
BloomFilterFullPositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_positive",
Help: "number of times bloom FullFilter has not avoided the reads.",
}, labels),
BloomFilterFullTruePositive: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "filter",
Name: "bloom_filter_full_true_positive",
Help: "number of times bloom FullFilter has not avoided the reads and data actually exist.",
}, labels),
// LSM Tree Stats
LastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_bytes",
Help: "",
}, labels),
LastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "last_level_read_count",
Help: "",
}, labels),
NonLastLevelReadBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_bytes",
Help: "",
}, labels),
NonLastLevelReadCount: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "non_last_level_read_count",
Help: "",
}, labels),
GetHitL0: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l0",
Help: "number of Get() queries served by L0",
}, labels),
GetHitL1: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l1",
Help: "number of Get() queries served by L1",
}, labels),
GetHitL2AndUp: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: "rocksdb",
Subsystem: "lsm",
Name: "get_hit_l2_and_up",
Help: "number of Get() queries served by L2 and up",
}, labels),
}
}
// report reports metrics to prometheus based on rocksdb props and stats.
// It is a straight field-by-field mapping: values read via the rocksdb
// property API come from props, counters/histograms parsed from the
// serialized statistics string come from stats. Integer counters are
// converted to float64 for the gauges; histogram fields (P50..P100,
// Count, Sum) are already float64 and are set directly.
func (m *Metrics) report(props *properties, stats *stats) {
	// Keys
	m.NumberKeysWritten.Set(float64(stats.NumberKeysWritten))
	m.NumberKeysRead.Set(float64(stats.NumberKeysRead))
	m.NumberKeysUpdated.Set(float64(stats.NumberKeysUpdated))
	m.EstimateNumKeys.Set(float64(props.EstimateNumKeys))
	// Files
	m.NumberFileOpens.Set(float64(stats.NumberFileOpens))
	m.NumberFileErrors.Set(float64(stats.NumberFileErrors))
	// Memory
	m.BlockCacheUsage.Set(float64(props.BlockCacheUsage))
	m.EstimateTableReadersMem.Set(float64(props.EstimateTableReadersMem))
	m.CurSizeAllMemTables.Set(float64(props.CurSizeAllMemTables))
	m.BlockCachePinnedUsage.Set(float64(props.BlockCachePinnedUsage))
	// Cache
	m.BlockCacheMiss.Set(float64(stats.BlockCacheMiss))
	m.BlockCacheHit.Set(float64(stats.BlockCacheHit))
	m.BlockCacheAdd.Set(float64(stats.BlockCacheAdd))
	m.BlockCacheAddFailures.Set(float64(stats.BlockCacheAddFailures))
	// Detailed Cache (per block type: index / filter / data)
	m.BlockCacheIndexMiss.Set(float64(stats.BlockCacheIndexMiss))
	m.BlockCacheIndexHit.Set(float64(stats.BlockCacheIndexHit))
	m.BlockCacheIndexBytesInsert.Set(float64(stats.BlockCacheIndexBytesInsert))
	m.BlockCacheFilterMiss.Set(float64(stats.BlockCacheFilterMiss))
	m.BlockCacheFilterHit.Set(float64(stats.BlockCacheFilterHit))
	m.BlockCacheFilterBytesInsert.Set(float64(stats.BlockCacheFilterBytesInsert))
	m.BlockCacheDataMiss.Set(float64(stats.BlockCacheDataMiss))
	m.BlockCacheDataHit.Set(float64(stats.BlockCacheDataHit))
	m.BlockCacheDataBytesInsert.Set(float64(stats.BlockCacheDataBytesInsert))
	// Latency (percentiles from the rocksdb histogram, in microseconds)
	m.DBGetMicrosP50.Set(stats.DBGetMicros.P50)
	m.DBGetMicrosP95.Set(stats.DBGetMicros.P95)
	m.DBGetMicrosP99.Set(stats.DBGetMicros.P99)
	m.DBGetMicrosP100.Set(stats.DBGetMicros.P100)
	m.DBGetMicrosCount.Set(stats.DBGetMicros.Count)
	m.DBWriteMicrosP50.Set(stats.DBWriteMicros.P50)
	m.DBWriteMicrosP95.Set(stats.DBWriteMicros.P95)
	m.DBWriteMicrosP99.Set(stats.DBWriteMicros.P99)
	m.DBWriteMicrosP100.Set(stats.DBWriteMicros.P100)
	m.DBWriteMicrosCount.Set(stats.DBWriteMicros.Count)
	// Write Stall
	m.StallMicros.Set(float64(stats.StallMicros))
	m.DBWriteStallP50.Set(stats.DBWriteStallHistogram.P50)
	m.DBWriteStallP95.Set(stats.DBWriteStallHistogram.P95)
	m.DBWriteStallP99.Set(stats.DBWriteStallHistogram.P99)
	m.DBWriteStallP100.Set(stats.DBWriteStallHistogram.P100)
	m.DBWriteStallCount.Set(stats.DBWriteStallHistogram.Count)
	m.DBWriteStallSum.Set(stats.DBWriteStallHistogram.Sum)
	// Bloom Filter
	m.BloomFilterUseful.Set(float64(stats.BloomFilterUseful))
	m.BloomFilterFullPositive.Set(float64(stats.BloomFilterFullPositive))
	m.BloomFilterFullTruePositive.Set(float64(stats.BloomFilterFullTruePositive))
	// LSM Tree Stats
	m.LastLevelReadBytes.Set(float64(stats.LastLevelReadBytes))
	m.LastLevelReadCount.Set(float64(stats.LastLevelReadCount))
	m.NonLastLevelReadBytes.Set(float64(stats.NonLastLevelReadBytes))
	m.NonLastLevelReadCount.Set(float64(stats.NonLastLevelReadCount))
	m.GetHitL0.Set(float64(stats.GetHitL0))
	m.GetHitL1.Set(float64(stats.GetHitL1))
	m.GetHitL2AndUp.Set(float64(stats.GetHitL2AndUp))
}

18
cmd/kava/opendb/opendb.go Normal file
View File

@ -0,0 +1,18 @@
//go:build !rocksdb
// +build !rocksdb
package opendb
import (
"path/filepath"
"github.com/cosmos/cosmos-sdk/server/types"
dbm "github.com/tendermint/tm-db"
)
// OpenDB is a copy of default DBOpener function used by ethermint, see for details:
// https://github.com/evmos/ethermint/blob/07cf2bd2b1ce9bdb2e44ec42a39e7239292a14af/server/start.go#L647
func OpenDB(_ types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
	// non-rocksdb build: always open via the stock tm-db constructor
	return dbm.NewDB("application", backendType, filepath.Join(home, "data"))
}

View File

@ -0,0 +1,397 @@
//go:build rocksdb
// +build rocksdb
// Copyright 2023 Kava Labs, Inc.
// Copyright 2023 Cronos Labs, Inc.
//
// Derived from https://github.com/crypto-org-chain/cronos@496ce7e
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opendb
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/cosmos/cosmos-sdk/server/types"
"github.com/linxGnu/grocksdb"
"github.com/spf13/cast"
dbm "github.com/tendermint/tm-db"
)
// ErrUnexpectedConfiguration is returned when an existing database has more
// than the single "default" column family expected by tm-db.
var ErrUnexpectedConfiguration = errors.New("unexpected rocksdb configuration, rocksdb should have only one column family named default")

const (
	// default tm-db block cache size for RocksDB
	defaultBlockCacheSize = 1 << 30

	// the single column family tm-db operates on
	defaultColumnFamilyName = "default"

	// metrics-related app options
	enableMetricsOptName             = "rocksdb.enable-metrics"
	reportMetricsIntervalSecsOptName = "rocksdb.report-metrics-interval-secs"
	defaultReportMetricsIntervalSecs = 15

	// database-level (DBOptions) app option keys
	maxOpenFilesDBOptName           = "rocksdb.max-open-files"
	maxFileOpeningThreadsDBOptName  = "rocksdb.max-file-opening-threads"
	tableCacheNumshardbitsDBOptName = "rocksdb.table_cache_numshardbits"
	allowMMAPWritesDBOptName        = "rocksdb.allow_mmap_writes"
	allowMMAPReadsDBOptName         = "rocksdb.allow_mmap_reads"
	useFsyncDBOptName               = "rocksdb.use_fsync"
	useAdaptiveMutexDBOptName       = "rocksdb.use_adaptive_mutex"
	bytesPerSyncDBOptName           = "rocksdb.bytes_per_sync"
	maxBackgroundJobsDBOptName      = "rocksdb.max-background-jobs"

	// column-family (CFOptions) app option keys
	writeBufferSizeCFOptName                = "rocksdb.write-buffer-size"
	numLevelsCFOptName                      = "rocksdb.num-levels"
	maxWriteBufferNumberCFOptName           = "rocksdb.max_write_buffer_number"
	minWriteBufferNumberToMergeCFOptName    = "rocksdb.min_write_buffer_number_to_merge"
	maxBytesForLevelBaseCFOptName           = "rocksdb.max_bytes_for_level_base"
	maxBytesForLevelMultiplierCFOptName     = "rocksdb.max_bytes_for_level_multiplier"
	targetFileSizeBaseCFOptName             = "rocksdb.target_file_size_base"
	targetFileSizeMultiplierCFOptName       = "rocksdb.target_file_size_multiplier"
	level0FileNumCompactionTriggerCFOptName = "rocksdb.level0_file_num_compaction_trigger"
	level0SlowdownWritesTriggerCFOptName    = "rocksdb.level0_slowdown_writes_trigger"

	// block-based table (BBTO) app option keys
	blockCacheSizeBBTOOptName                   = "rocksdb.block_cache_size"
	bitsPerKeyBBTOOptName                       = "rocksdb.bits_per_key"
	blockSizeBBTOOptName                        = "rocksdb.block_size"
	cacheIndexAndFilterBlocksBBTOOptName        = "rocksdb.cache_index_and_filter_blocks"
	pinL0FilterAndIndexBlocksInCacheBBTOOptName = "rocksdb.pin_l0_filter_and_index_blocks_in_cache"
	formatVersionBBTOOptName                    = "rocksdb.format_version"

	// read-options app option keys
	asyncIOReadOptName = "rocksdb.read-async-io"
)
// OpenDB opens the application database. The RocksDB backend goes through
// the options-aware openRocksdb path; every other backend falls through to
// the stock tm-db constructor.
func OpenDB(appOpts types.AppOptions, home string, backendType dbm.BackendType) (dbm.DB, error) {
	dataDir := filepath.Join(home, "data")
	if backendType != dbm.RocksDBBackend {
		return dbm.NewDB("application", backendType, dataDir)
	}
	return openRocksdb(filepath.Join(dataDir, "application.db"), appOpts)
}
// openRocksdb loads the last persisted rocksdb options, layers overrides
// from appOpts on top of them and opens the database. An option is
// overridden only when it is explicitly specified in appOpts.
func openRocksdb(dir string, appOpts types.AppOptions) (dbm.DB, error) {
	dbOpts, cfOpts, err := loadLatestOptions(dir)
	if err != nil {
		return nil, err
	}
	// install the block-based table options first, then apply scalar overrides
	bbto := bbtoFromAppOpts(appOpts)
	dbOpts.SetBlockBasedTableFactory(bbto)
	cfOpts.SetBlockBasedTableFactory(bbto)
	dbOpts = overrideDBOpts(dbOpts, appOpts)
	cfOpts = overrideCFOpts(cfOpts, appOpts)
	readOpts := readOptsFromAppOpts(appOpts)

	enableMetrics := cast.ToBool(appOpts.Get(enableMetricsOptName))
	interval := cast.ToInt64(appOpts.Get(reportMetricsIntervalSecsOptName))
	if interval == 0 {
		// fall back to the default reporting period when unset
		interval = defaultReportMetricsIntervalSecs
	}
	return newRocksDBWithOptions("application", dir, dbOpts, cfOpts, readOpts, enableMetrics, interval)
}
// loadLatestOptions returns the database and column family options persisted
// in dir. When no OPTIONS file exists the database hasn't been created yet,
// so default tm-db options are returned instead. An existing database must
// contain exactly one column family named "default".
func loadLatestOptions(dir string) (*grocksdb.Options, *grocksdb.Options, error) {
	latestOpts, err := grocksdb.LoadLatestOptions(dir, grocksdb.NewDefaultEnv(), true, grocksdb.NewLRUCache(defaultBlockCacheSize))
	if err != nil {
		// "NotFound: " signals a missing OPTIONS file, i.e. a fresh database
		if strings.HasPrefix(err.Error(), "NotFound: ") {
			return newDefaultOptions(), newDefaultOptions(), nil
		}
		return nil, nil, err
	}
	names := latestOpts.ColumnFamilyNames()
	cfOpts := latestOpts.ColumnFamilyOpts()
	// reject any layout other than a single "default" column family
	if len(names) != 1 || names[0] != defaultColumnFamilyName {
		return nil, nil, ErrUnexpectedConfiguration
	}
	return latestOpts.Options(), &cfOpts[0], nil
}
// overrideDBOpts applies database-level settings from appOpts on top of
// dbOpts; only keys explicitly present in appOpts are overridden.
func overrideDBOpts(dbOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
	if v := appOpts.Get(maxOpenFilesDBOptName); v != nil {
		dbOpts.SetMaxOpenFiles(cast.ToInt(v))
	}
	if v := appOpts.Get(maxFileOpeningThreadsDBOptName); v != nil {
		dbOpts.SetMaxFileOpeningThreads(cast.ToInt(v))
	}
	if v := appOpts.Get(tableCacheNumshardbitsDBOptName); v != nil {
		dbOpts.SetTableCacheNumshardbits(cast.ToInt(v))
	}
	if v := appOpts.Get(allowMMAPWritesDBOptName); v != nil {
		dbOpts.SetAllowMmapWrites(cast.ToBool(v))
	}
	if v := appOpts.Get(allowMMAPReadsDBOptName); v != nil {
		dbOpts.SetAllowMmapReads(cast.ToBool(v))
	}
	if v := appOpts.Get(useFsyncDBOptName); v != nil {
		dbOpts.SetUseFsync(cast.ToBool(v))
	}
	if v := appOpts.Get(useAdaptiveMutexDBOptName); v != nil {
		dbOpts.SetUseAdaptiveMutex(cast.ToBool(v))
	}
	if v := appOpts.Get(bytesPerSyncDBOptName); v != nil {
		dbOpts.SetBytesPerSync(cast.ToUint64(v))
	}
	if v := appOpts.Get(maxBackgroundJobsDBOptName); v != nil {
		dbOpts.SetMaxBackgroundJobs(cast.ToInt(v))
	}
	return dbOpts
}
// overrideCFOpts applies column-family settings from appOpts on top of
// cfOpts; only keys explicitly present in appOpts are overridden.
func overrideCFOpts(cfOpts *grocksdb.Options, appOpts types.AppOptions) *grocksdb.Options {
	if v := appOpts.Get(writeBufferSizeCFOptName); v != nil {
		cfOpts.SetWriteBufferSize(cast.ToUint64(v))
	}
	if v := appOpts.Get(numLevelsCFOptName); v != nil {
		cfOpts.SetNumLevels(cast.ToInt(v))
	}
	if v := appOpts.Get(maxWriteBufferNumberCFOptName); v != nil {
		cfOpts.SetMaxWriteBufferNumber(cast.ToInt(v))
	}
	if v := appOpts.Get(minWriteBufferNumberToMergeCFOptName); v != nil {
		cfOpts.SetMinWriteBufferNumberToMerge(cast.ToInt(v))
	}
	if v := appOpts.Get(maxBytesForLevelBaseCFOptName); v != nil {
		cfOpts.SetMaxBytesForLevelBase(cast.ToUint64(v))
	}
	if v := appOpts.Get(maxBytesForLevelMultiplierCFOptName); v != nil {
		cfOpts.SetMaxBytesForLevelMultiplier(cast.ToFloat64(v))
	}
	if v := appOpts.Get(targetFileSizeBaseCFOptName); v != nil {
		cfOpts.SetTargetFileSizeBase(cast.ToUint64(v))
	}
	if v := appOpts.Get(targetFileSizeMultiplierCFOptName); v != nil {
		cfOpts.SetTargetFileSizeMultiplier(cast.ToInt(v))
	}
	if v := appOpts.Get(level0FileNumCompactionTriggerCFOptName); v != nil {
		cfOpts.SetLevel0FileNumCompactionTrigger(cast.ToInt(v))
	}
	if v := appOpts.Get(level0SlowdownWritesTriggerCFOptName); v != nil {
		cfOpts.SetLevel0SlowdownWritesTrigger(cast.ToInt(v))
	}
	return cfOpts
}
// readOptsFromAppOpts builds rocksdb read options from appOpts; currently
// only the async-IO flag is configurable.
func readOptsFromAppOpts(appOpts types.AppOptions) *grocksdb.ReadOptions {
	readOpts := grocksdb.NewDefaultReadOptions()
	if v := appOpts.Get(asyncIOReadOptName); v != nil {
		readOpts.SetAsyncIO(cast.ToBool(v))
	}
	return readOpts
}
// bbtoFromAppOpts builds block-based table options, starting from the
// defaults and overriding any key explicitly present in appOpts.
func bbtoFromAppOpts(appOpts types.AppOptions) *grocksdb.BlockBasedTableOptions {
	bbto := defaultBBTO()
	if v := appOpts.Get(blockCacheSizeBBTOOptName); v != nil {
		bbto.SetBlockCache(grocksdb.NewLRUCache(cast.ToUint64(v)))
	}
	if v := appOpts.Get(bitsPerKeyBBTOOptName); v != nil {
		bbto.SetFilterPolicy(grocksdb.NewBloomFilter(cast.ToFloat64(v)))
	}
	if v := appOpts.Get(blockSizeBBTOOptName); v != nil {
		bbto.SetBlockSize(cast.ToInt(v))
	}
	if v := appOpts.Get(cacheIndexAndFilterBlocksBBTOOptName); v != nil {
		bbto.SetCacheIndexAndFilterBlocks(cast.ToBool(v))
	}
	if v := appOpts.Get(pinL0FilterAndIndexBlocksInCacheBBTOOptName); v != nil {
		bbto.SetPinL0FilterAndIndexBlocksInCache(cast.ToBool(v))
	}
	if v := appOpts.Get(formatVersionBBTOOptName); v != nil {
		bbto.SetFormatVersion(cast.ToInt(v))
	}
	return bbto
}
// newRocksDBWithOptions opens rocksdb with the provided database and column
// family options; the database is expected to have a single column family
// named "default". When enableMetrics is set, rocksdb statistics collection
// is enabled and a background reporting goroutine is started.
func newRocksDBWithOptions(
	name string,
	dir string,
	dbOpts *grocksdb.Options,
	cfOpts *grocksdb.Options,
	readOpts *grocksdb.ReadOptions,
	enableMetrics bool,
	reportMetricsIntervalSecs int64,
) (*dbm.RocksDB, error) {
	dbPath := filepath.Join(dir, name+".db")
	if err := os.MkdirAll(dbPath, 0755); err != nil {
		return nil, fmt.Errorf("failed to create db path: %w", err)
	}
	// statistics collection adds overhead, so it is only turned on when
	// metrics are requested
	if enableMetrics {
		dbOpts.EnableStatistics()
	}
	db, _, err := grocksdb.OpenDbColumnFamilies(dbOpts, dbPath, []string{defaultColumnFamilyName}, []*grocksdb.Options{cfOpts})
	if err != nil {
		return nil, err
	}
	if enableMetrics {
		registerMetrics()
		go reportMetrics(db, time.Duration(reportMetricsIntervalSecs)*time.Second)
	}
	writeOpts := grocksdb.NewDefaultWriteOptions()
	syncWriteOpts := grocksdb.NewDefaultWriteOptions()
	syncWriteOpts.SetSync(true)
	return dbm.NewRocksDBWithRawDB(db, readOpts, writeOpts, syncWriteOpts), nil
}
// newDefaultOptions returns default tm-db options for RocksDB, see for details:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func newDefaultOptions() *grocksdb.Options {
	opts := grocksdb.NewDefaultOptions()
	// block-based table with a 1GB LRU cache and bloom filter (see defaultBBTO)
	opts.SetBlockBasedTableFactory(defaultBBTO())
	// 4096 open files seems to provide a reliable performance boost
	opts.SetMaxOpenFiles(4096)
	opts.SetCreateIfMissing(true)
	opts.IncreaseParallelism(runtime.NumCPU())
	// level-style compaction with a 512MB write buffer; heavy write loads may
	// use noticeably more memtable memory than that
	opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
	return opts
}
// defaultBBTO returns default tm-db bbto options for RocksDB, see for details:
// https://github.com/Kava-Labs/tm-db/blob/94ff76d31724965f8883cddebabe91e0d01bc03f/rocksdb.go#L30
func defaultBBTO() *grocksdb.BlockBasedTableOptions {
	opts := grocksdb.NewDefaultBlockBasedTableOptions()
	// 10 bits per key bloom filter and a 1GB LRU block cache
	opts.SetFilterPolicy(grocksdb.NewBloomFilter(10))
	opts.SetBlockCache(grocksdb.NewLRUCache(defaultBlockCacheSize))
	return opts
}
// reportMetrics periodically requests stats from rocksdb and reports them to
// prometheus. It never returns and should be launched as a goroutine.
//
// The previous implementation wrapped the single ticker receive in a
// one-case select inside an infinite for loop; ranging over the ticker
// channel is the idiomatic equivalent (staticcheck S1000).
func reportMetrics(db *grocksdb.DB, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // unreachable today, but correct if the loop ever gains an exit
	for range ticker.C {
		props, stats, err := getPropsAndStats(db)
		if err != nil {
			// best-effort reporting: skip this cycle and retry on the next tick
			continue
		}
		rocksdbMetrics.report(props, stats)
	}
}
// getPropsAndStats collects the current rocksdb properties and parses the
// serialized statistics string into a stats struct.
func getPropsAndStats(db *grocksdb.DB) (*properties, *stats, error) {
	props, err := newPropsLoader(db).load()
	if err != nil {
		return nil, nil, err
	}
	// the statistics come back as one serialized string; split it into a map
	// before loading typed values out of it
	statMap, err := parseSerializedStats(props.OptionsStatistics)
	if err != nil {
		return nil, nil, err
	}
	stats, err := newStatLoader(statMap).load()
	if err != nil {
		return nil, nil, err
	}
	return props, stats, nil
}

View File

@ -0,0 +1,384 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"os"
"path/filepath"
"testing"
"github.com/linxGnu/grocksdb"
"github.com/stretchr/testify/require"
)
// mockAppOptions is a minimal map-backed stand-in for types.AppOptions,
// used by the tests to feed rocksdb option overrides.
type mockAppOptions struct {
	opts map[string]interface{}
}

// newMockAppOptions wraps opts in a mockAppOptions.
func newMockAppOptions(opts map[string]interface{}) *mockAppOptions {
	m := mockAppOptions{opts: opts}
	return &m
}

// Get returns the value stored under key, or nil when the key is absent.
func (m *mockAppOptions) Get(key string) interface{} {
	value, ok := m.opts[key]
	if !ok {
		return nil
	}
	return value
}
// TestOpenRocksdb verifies that openRocksdb applies appOpts overrides and
// persists them, both when a database already exists and when it is created
// fresh with defaults.
func TestOpenRocksdb(t *testing.T) {
	t.Run("db already exists", func(t *testing.T) {
		defaultOpts := newDefaultOptions()
		for _, tc := range []struct {
			desc                  string
			mockAppOptions        *mockAppOptions
			maxOpenFiles          int
			maxFileOpeningThreads int
			writeBufferSize       uint64
			numLevels             int
		}{
			{
				desc:                  "default options",
				mockAppOptions:        newMockAppOptions(map[string]interface{}{}),
				maxOpenFiles:          defaultOpts.GetMaxOpenFiles(),
				maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
				writeBufferSize:       defaultOpts.GetWriteBufferSize(),
				numLevels:             defaultOpts.GetNumLevels(),
			},
			{
				desc: "change 2 options",
				mockAppOptions: newMockAppOptions(map[string]interface{}{
					maxOpenFilesDBOptName:    999,
					writeBufferSizeCFOptName: 999_999,
				}),
				maxOpenFiles:          999,
				maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
				writeBufferSize:       999_999,
				numLevels:             defaultOpts.GetNumLevels(),
			},
			{
				desc: "change 4 options",
				mockAppOptions: newMockAppOptions(map[string]interface{}{
					maxOpenFilesDBOptName:          999,
					maxFileOpeningThreadsDBOptName: 9,
					writeBufferSizeCFOptName:       999_999,
					numLevelsCFOptName:             9,
				}),
				maxOpenFiles:          999,
				maxFileOpeningThreads: 9,
				writeBufferSize:       999_999,
				numLevels:             9,
			},
		} {
			t.Run(tc.desc, func(t *testing.T) {
				dir, err := os.MkdirTemp("", "rocksdb")
				require.NoError(t, err)
				defer func() {
					err := os.RemoveAll(dir)
					require.NoError(t, err)
				}()
				db, err := openRocksdb(dir, tc.mockAppOptions)
				require.NoError(t, err)
				require.NoError(t, db.Close())
				// reload the persisted OPTIONS file and confirm the overrides stuck
				dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
				require.NoError(t, err)
				require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
				require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
				require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
				require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
			})
		}
	})
	t.Run("db doesn't exist yet", func(t *testing.T) {
		defaultOpts := newDefaultOptions()
		dir, err := os.MkdirTemp("", "rocksdb")
		require.NoError(t, err)
		defer func() {
			err := os.RemoveAll(dir)
			require.NoError(t, err)
		}()
		// no overrides: a fresh database must end up with the default options
		mockAppOpts := newMockAppOptions(map[string]interface{}{})
		db, err := openRocksdb(dir, mockAppOpts)
		require.NoError(t, err)
		require.NoError(t, db.Close())
		dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
		require.NoError(t, err)
		require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
		require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
		require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
		require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
	})
}
// TestLoadLatestOptions verifies that options written by
// newRocksDBWithOptions round-trip through loadLatestOptions, and that
// defaults are returned for a directory with no database.
func TestLoadLatestOptions(t *testing.T) {
	t.Run("db already exists", func(t *testing.T) {
		defaultOpts := newDefaultOptions()
		const testCasesNum = 3
		// build three db/cf option pairs: defaults, 2 overrides, 4 overrides
		dbOptsList := make([]*grocksdb.Options, testCasesNum)
		cfOptsList := make([]*grocksdb.Options, testCasesNum)
		dbOptsList[0] = newDefaultOptions()
		cfOptsList[0] = newDefaultOptions()
		dbOptsList[1] = newDefaultOptions()
		dbOptsList[1].SetMaxOpenFiles(999)
		cfOptsList[1] = newDefaultOptions()
		cfOptsList[1].SetWriteBufferSize(999_999)
		dbOptsList[2] = newDefaultOptions()
		dbOptsList[2].SetMaxOpenFiles(999)
		dbOptsList[2].SetMaxFileOpeningThreads(9)
		cfOptsList[2] = newDefaultOptions()
		cfOptsList[2].SetWriteBufferSize(999_999)
		cfOptsList[2].SetNumLevels(9)
		for _, tc := range []struct {
			desc                  string
			dbOpts                *grocksdb.Options
			cfOpts                *grocksdb.Options
			maxOpenFiles          int
			maxFileOpeningThreads int
			writeBufferSize       uint64
			numLevels             int
		}{
			{
				desc:                  "default options",
				dbOpts:                dbOptsList[0],
				cfOpts:                cfOptsList[0],
				maxOpenFiles:          defaultOpts.GetMaxOpenFiles(),
				maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
				writeBufferSize:       defaultOpts.GetWriteBufferSize(),
				numLevels:             defaultOpts.GetNumLevels(),
			},
			{
				desc:                  "change 2 options",
				dbOpts:                dbOptsList[1],
				cfOpts:                cfOptsList[1],
				maxOpenFiles:          999,
				maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
				writeBufferSize:       999_999,
				numLevels:             defaultOpts.GetNumLevels(),
			},
			{
				desc:                  "change 4 options",
				dbOpts:                dbOptsList[2],
				cfOpts:                cfOptsList[2],
				maxOpenFiles:          999,
				maxFileOpeningThreads: 9,
				writeBufferSize:       999_999,
				numLevels:             9,
			},
		} {
			t.Run(tc.desc, func(t *testing.T) {
				name := "application"
				dir, err := os.MkdirTemp("", "rocksdb")
				require.NoError(t, err)
				defer func() {
					err := os.RemoveAll(dir)
					require.NoError(t, err)
				}()
				// create the database so an OPTIONS file is persisted, then reload it
				db, err := newRocksDBWithOptions(name, dir, tc.dbOpts, tc.cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
				require.NoError(t, err)
				require.NoError(t, db.Close())
				dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
				require.NoError(t, err)
				require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
				require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
				require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
				require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
			})
		}
	})
	t.Run("db doesn't exist yet", func(t *testing.T) {
		defaultOpts := newDefaultOptions()
		dir, err := os.MkdirTemp("", "rocksdb")
		require.NoError(t, err)
		defer func() {
			err := os.RemoveAll(dir)
			require.NoError(t, err)
		}()
		// no database on disk: loadLatestOptions must fall back to defaults
		dbOpts, cfOpts, err := loadLatestOptions(filepath.Join(dir, "application.db"))
		require.NoError(t, err)
		require.Equal(t, defaultOpts.GetMaxOpenFiles(), dbOpts.GetMaxOpenFiles())
		require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
		require.Equal(t, defaultOpts.GetWriteBufferSize(), cfOpts.GetWriteBufferSize())
		require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
	})
}
// TestOverrideDBOpts verifies that overrideDBOpts changes only the
// database-level options present in appOpts and leaves the rest at defaults.
func TestOverrideDBOpts(t *testing.T) {
	defaultOpts := newDefaultOptions()
	for _, tc := range []struct {
		desc                  string
		mockAppOptions        *mockAppOptions
		maxOpenFiles          int
		maxFileOpeningThreads int
	}{
		{
			desc:                  "override nothing",
			mockAppOptions:        newMockAppOptions(map[string]interface{}{}),
			maxOpenFiles:          defaultOpts.GetMaxOpenFiles(),
			maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
		},
		{
			desc: "override max-open-files",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				maxOpenFilesDBOptName: 999,
			}),
			maxOpenFiles:          999,
			maxFileOpeningThreads: defaultOpts.GetMaxFileOpeningThreads(),
		},
		{
			desc: "override max-file-opening-threads",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				maxFileOpeningThreadsDBOptName: 9,
			}),
			maxOpenFiles:          defaultOpts.GetMaxOpenFiles(),
			maxFileOpeningThreads: 9,
		},
		{
			desc: "override max-open-files and max-file-opening-threads",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				maxOpenFilesDBOptName:          999,
				maxFileOpeningThreadsDBOptName: 9,
			}),
			maxOpenFiles:          999,
			maxFileOpeningThreads: 9,
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			dbOpts := newDefaultOptions()
			dbOpts = overrideDBOpts(dbOpts, tc.mockAppOptions)
			require.Equal(t, tc.maxOpenFiles, dbOpts.GetMaxOpenFiles())
			require.Equal(t, tc.maxFileOpeningThreads, dbOpts.GetMaxFileOpeningThreads())
		})
	}
}
// TestOverrideCFOpts verifies that overrideCFOpts changes only the
// column-family options present in appOpts and leaves the rest at defaults.
func TestOverrideCFOpts(t *testing.T) {
	defaultOpts := newDefaultOptions()
	for _, tc := range []struct {
		desc            string
		mockAppOptions  *mockAppOptions
		writeBufferSize uint64
		numLevels       int
	}{
		{
			desc:            "override nothing",
			mockAppOptions:  newMockAppOptions(map[string]interface{}{}),
			writeBufferSize: defaultOpts.GetWriteBufferSize(),
			numLevels:       defaultOpts.GetNumLevels(),
		},
		{
			desc: "override write-buffer-size",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				writeBufferSizeCFOptName: 999_999,
			}),
			writeBufferSize: 999_999,
			numLevels:       defaultOpts.GetNumLevels(),
		},
		{
			desc: "override num-levels",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				numLevelsCFOptName: 9,
			}),
			writeBufferSize: defaultOpts.GetWriteBufferSize(),
			numLevels:       9,
		},
		{
			desc: "override write-buffer-size and num-levels",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				writeBufferSizeCFOptName: 999_999,
				numLevelsCFOptName:       9,
			}),
			writeBufferSize: 999_999,
			numLevels:       9,
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			cfOpts := newDefaultOptions()
			cfOpts = overrideCFOpts(cfOpts, tc.mockAppOptions)
			require.Equal(t, tc.writeBufferSize, cfOpts.GetWriteBufferSize())
			require.Equal(t, tc.numLevels, cfOpts.GetNumLevels())
		})
	}
}
// TestReadOptsFromAppOpts verifies that the read-async-io flag from the app
// options is reflected in the constructed grocksdb read options, defaulting
// to false when unset.
func TestReadOptsFromAppOpts(t *testing.T) {
	testCases := []struct {
		desc           string
		mockAppOptions *mockAppOptions
		asyncIO        bool
	}{
		{
			desc:           "default options",
			mockAppOptions: newMockAppOptions(map[string]interface{}{}),
			asyncIO:        false,
		},
		{
			desc: "set asyncIO option to true",
			mockAppOptions: newMockAppOptions(map[string]interface{}{
				asyncIOReadOptName: true,
			}),
			asyncIO: true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			opts := readOptsFromAppOpts(tc.mockAppOptions)
			require.Equal(t, tc.asyncIO, opts.IsAsyncIO())
		})
	}
}
// TestNewRocksDBWithOptions opens a database with customized DB and
// column-family options, closes it, and verifies via loadLatestOptions that
// the overridden values were persisted while untouched options kept their
// defaults.
func TestNewRocksDBWithOptions(t *testing.T) {
	defaultOpts := newDefaultOptions()

	name := "application"
	dir, err := os.MkdirTemp("", "rocksdb")
	require.NoError(t, err)
	defer func() {
		err := os.RemoveAll(dir)
		require.NoError(t, err)
	}()

	dbOpts := newDefaultOptions()
	dbOpts.SetMaxOpenFiles(999)
	cfOpts := newDefaultOptions()
	cfOpts.SetWriteBufferSize(999_999)

	db, err := newRocksDBWithOptions(name, dir, dbOpts, cfOpts, grocksdb.NewDefaultReadOptions(), true, defaultReportMetricsIntervalSecs)
	require.NoError(t, err)
	require.NoError(t, db.Close())

	dbOpts, cfOpts, err = loadLatestOptions(filepath.Join(dir, "application.db"))
	require.NoError(t, err)
	require.Equal(t, 999, dbOpts.GetMaxOpenFiles())
	require.Equal(t, defaultOpts.GetMaxFileOpeningThreads(), dbOpts.GetMaxFileOpeningThreads())
	require.Equal(t, uint64(999_999), cfOpts.GetWriteBufferSize())
	// Fix: num-levels is a column-family option (see overrideCFOpts and
	// TestOverrideCFOpts); the original asserted dbOpts.GetNumLevels(),
	// which didn't exercise the persisted CF options at all.
	require.Equal(t, defaultOpts.GetNumLevels(), cfOpts.GetNumLevels())
}
// TestNewDefaultOptions pins the default max-open-files value so an
// accidental change to newDefaultOptions is caught.
func TestNewDefaultOptions(t *testing.T) {
	opts := newDefaultOptions()
	require.Equal(t, 4096, opts.GetMaxOpenFiles())
}

View File

@ -0,0 +1,87 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strings"
"errors"
)
// propsGetter abstracts the subset of the rocksdb database API needed to
// read string and integer properties, so it can be mocked in tests.
type propsGetter interface {
	GetProperty(propName string) (value string)
	GetIntProperty(propName string) (value uint64, success bool)
}

// propsLoader reads a fixed set of rocksdb properties from a propsGetter,
// accumulating error messages instead of failing on the first problem.
type propsLoader struct {
	db propsGetter
	// errorMsgs collects one message per property that could not be read;
	// load() joins them into a single error.
	errorMsgs []string
}

// newPropsLoader returns a propsLoader backed by the given property source.
func newPropsLoader(db propsGetter) *propsLoader {
	return &propsLoader{
		db:        db,
		errorMsgs: make([]string, 0),
	}
}
// load reads every supported rocksdb property and returns them as a single
// properties struct. All lookups are attempted even if some fail; if any
// property could not be read, the accumulated messages are returned as one
// error (joined with ";") and the partially-populated struct is discarded.
func (l *propsLoader) load() (*properties, error) {
	props := &properties{
		BaseLevel:               l.getIntProperty("rocksdb.base-level"),
		BlockCacheCapacity:      l.getIntProperty("rocksdb.block-cache-capacity"),
		BlockCachePinnedUsage:   l.getIntProperty("rocksdb.block-cache-pinned-usage"),
		BlockCacheUsage:         l.getIntProperty("rocksdb.block-cache-usage"),
		CurSizeActiveMemTable:   l.getIntProperty("rocksdb.cur-size-active-mem-table"),
		CurSizeAllMemTables:     l.getIntProperty("rocksdb.cur-size-all-mem-tables"),
		EstimateLiveDataSize:    l.getIntProperty("rocksdb.estimate-live-data-size"),
		EstimateNumKeys:         l.getIntProperty("rocksdb.estimate-num-keys"),
		EstimateTableReadersMem: l.getIntProperty("rocksdb.estimate-table-readers-mem"),
		LiveSSTFilesSize:        l.getIntProperty("rocksdb.live-sst-files-size"),
		SizeAllMemTables:        l.getIntProperty("rocksdb.size-all-mem-tables"),
		OptionsStatistics:       l.getProperty("rocksdb.options-statistics"),
	}
	if len(l.errorMsgs) != 0 {
		errorMsg := strings.Join(l.errorMsgs, ";")
		return nil, errors.New(errorMsg)
	}
	return props, nil
}
// getProperty fetches a string property, recording an error message when the
// database returns an empty value.
func (l *propsLoader) getProperty(propName string) string {
	v := l.db.GetProperty(propName)
	if v != "" {
		return v
	}
	l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("property %v is empty", propName))
	return ""
}
// getIntProperty fetches an integer property, recording an error message and
// returning zero when the database reports the lookup failed.
func (l *propsLoader) getIntProperty(propName string) uint64 {
	if v, ok := l.db.GetIntProperty(propName); ok {
		return v
	}
	l.errorMsgs = append(l.errorMsgs, fmt.Sprintf("can't get %v int property", propName))
	return 0
}
// properties holds a snapshot of the rocksdb properties read by propsLoader.
// All integer fields come from GetIntProperty; OptionsStatistics is the raw
// serialized statistics string (later parsed by parseSerializedStats).
type properties struct {
	BaseLevel               uint64
	BlockCacheCapacity      uint64
	BlockCachePinnedUsage   uint64
	BlockCacheUsage         uint64
	CurSizeActiveMemTable   uint64
	CurSizeAllMemTables     uint64
	EstimateLiveDataSize    uint64
	EstimateNumKeys         uint64
	EstimateTableReadersMem uint64
	LiveSSTFilesSize        uint64
	SizeAllMemTables        uint64
	OptionsStatistics       string
}

View File

@ -0,0 +1,112 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"testing"
"github.com/stretchr/testify/require"
)
// mockPropsGetter is a map-backed test double implementing propsGetter.
type mockPropsGetter struct {
	props    map[string]string
	intProps map[string]uint64
}

// newMockPropsGetter builds a mockPropsGetter from canned property maps.
func newMockPropsGetter(
	props map[string]string,
	intProps map[string]uint64,
) *mockPropsGetter {
	return &mockPropsGetter{
		props:    props,
		intProps: intProps,
	}
}

// GetProperty returns the canned string property ("" when absent).
func (m *mockPropsGetter) GetProperty(propName string) string {
	return m.props[propName]
}

// GetIntProperty returns the canned integer property and whether it exists.
func (m *mockPropsGetter) GetIntProperty(propName string) (uint64, bool) {
	prop, ok := m.intProps[propName]
	return prop, ok
}
// TestPropsLoader verifies that propsLoader.load returns a fully-populated
// properties struct when every property is available, and a nil struct plus
// an error when either the string or integer properties are missing.
func TestPropsLoader(t *testing.T) {
	defaultProps := map[string]string{
		"rocksdb.options-statistics": "1",
	}
	defaultIntProps := map[string]uint64{
		"rocksdb.base-level":                 1,
		"rocksdb.block-cache-capacity":       2,
		"rocksdb.block-cache-pinned-usage":   3,
		"rocksdb.block-cache-usage":          4,
		"rocksdb.cur-size-active-mem-table":  5,
		"rocksdb.cur-size-all-mem-tables":    6,
		"rocksdb.estimate-live-data-size":    7,
		"rocksdb.estimate-num-keys":          8,
		"rocksdb.estimate-table-readers-mem": 9,
		"rocksdb.live-sst-files-size":        10,
		"rocksdb.size-all-mem-tables":        11,
	}
	missingProps := make(map[string]string)
	missingIntProps := make(map[string]uint64)
	// Expected struct mirrors the canned values above, field by field.
	defaultExpectedProps := properties{
		BaseLevel:               1,
		BlockCacheCapacity:      2,
		BlockCachePinnedUsage:   3,
		BlockCacheUsage:         4,
		CurSizeActiveMemTable:   5,
		CurSizeAllMemTables:     6,
		EstimateLiveDataSize:    7,
		EstimateNumKeys:         8,
		EstimateTableReadersMem: 9,
		LiveSSTFilesSize:        10,
		SizeAllMemTables:        11,
		OptionsStatistics:       "1",
	}
	for _, tc := range []struct {
		desc          string
		props         map[string]string
		intProps      map[string]uint64
		expectedProps *properties
		success       bool
	}{
		{
			desc:          "success case",
			props:         defaultProps,
			intProps:      defaultIntProps,
			expectedProps: &defaultExpectedProps,
			success:       true,
		},
		{
			desc:          "missing props",
			props:         missingProps,
			intProps:      defaultIntProps,
			expectedProps: nil,
			success:       false,
		},
		{
			desc:          "missing integer props",
			props:         defaultProps,
			intProps:      missingIntProps,
			expectedProps: nil,
			success:       false,
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			mockPropsGetter := newMockPropsGetter(tc.props, tc.intProps)
			propsLoader := newPropsLoader(mockPropsGetter)
			actualProps, err := propsLoader.load()
			if tc.success {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			require.Equal(t, tc.expectedProps, actualProps)
		})
	}
}

View File

@ -0,0 +1,111 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strings"
"errors"
)
// stat represents one line from rocksdb statistics data; a stat may have one
// or more properties.
// examples:
//   - rocksdb.block.cache.miss COUNT : 5
//   - rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
//
// `rocksdb.compaction.times.micros` is the name of the stat; P50, COUNT,
// SUM, etc. are its props (stored as raw strings, parsed later by statLoader).
type stat struct {
	name  string
	props map[string]string
}
// parseSerializedStats parses serializedStats into a map of stat objects
// keyed by stat name.
//
// Expected input is one stat per line, e.g.:
//
//	rocksdb.block.cache.miss COUNT : 5
//	rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
func parseSerializedStats(serializedStats string) (map[string]*stat, error) {
	serializedStatList := strings.Split(serializedStats, "\n")
	// Defensive guard only: strings.Split always returns at least one
	// element, so this branch is unreachable in practice.
	if len(serializedStatList) == 0 {
		return nil, errors.New("serializedStats is empty")
	}
	// Drop only a trailing empty element produced by a terminating newline.
	// The previous implementation removed the last element unconditionally,
	// which silently discarded the final stat when the input had no trailing
	// newline.
	if last := len(serializedStatList) - 1; serializedStatList[last] == "" {
		serializedStatList = serializedStatList[:last]
	}

	stats := make(map[string]*stat, len(serializedStatList))
	// Parse line by line; the first malformed line aborts the whole parse.
	for _, serializedStat := range serializedStatList {
		stat, err := parseSerializedStat(serializedStat)
		if err != nil {
			return nil, err
		}
		stats[stat.name] = stat
	}
	return stats, nil
}
// parseSerializedStat parses a single serialized line into a stat object.
// example of serializedStat:
// rocksdb.block.cache.miss COUNT : 5
//
// The line is split on spaces into a name followed by (key, ":", value)
// triples; validateTokens checks that shape up front.
func parseSerializedStat(serializedStat string) (*stat, error) {
	tokens := strings.Split(serializedStat, " ")
	tokensNum := len(tokens)
	if err := validateTokens(tokens); err != nil {
		return nil, fmt.Errorf("tokens are invalid: %v", err)
	}
	props := make(map[string]string)
	// Walk the triples after the name: tokens[idx] = key, [idx+1] = sep,
	// [idx+2] = value.
	for idx := 1; idx < tokensNum; idx += 3 {
		// never should happen (validateTokens guarantees full triples), but
		// double check to avoid an unexpected panic
		if idx+2 >= tokensNum {
			break
		}
		key := tokens[idx]
		sep := tokens[idx+1]
		value := tokens[idx+2]
		if err := validateStatProperty(key, value, sep); err != nil {
			return nil, fmt.Errorf("invalid stat property: %v", err)
		}
		props[key] = value
	}
	return &stat{
		name:  tokens[0],
		props: props,
	}, nil
}
// validateTokens checks that a token list has the shape of a stat line:
// a non-empty name followed by one or more complete (key, sep, value)
// triples, i.e. len(tokens) >= 4 and (len(tokens)-1) divisible by 3.
func validateTokens(tokens []string) error {
	n := len(tokens)
	if n < 4 || (n-1)%3 != 0 {
		return fmt.Errorf("invalid number of tokens: %v, tokens: %v", n, tokens)
	}
	if tokens[0] == "" {
		return fmt.Errorf("stat name shouldn't be empty")
	}
	return nil
}
// validateStatProperty checks one (key, sep, value) triple: key and value
// must be non-empty and joined by the ":" separator.
func validateStatProperty(key, value, sep string) error {
	switch {
	case key == "":
		return fmt.Errorf("key shouldn't be empty")
	case sep != ":":
		return fmt.Errorf("separator should be :")
	case value == "":
		return fmt.Errorf("value shouldn't be empty")
	default:
		return nil
	}
}

View File

@ -0,0 +1,208 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestParseSerializedStats covers the happy path (counters and histogram
// lines), several malformed-line errors, and the empty-input case, which
// yields an empty (non-nil) map. NOTE: the raw-string fixtures end with a
// trailing newline, matching the format emitted by rocksdb.
func TestParseSerializedStats(t *testing.T) {
	defaultSerializedStats := `rocksdb.block.cache.miss COUNT : 1
rocksdb.block.cache.hit COUNT : 2
rocksdb.block.cache.add COUNT : 3
rocksdb.block.cache.add.failures COUNT : 4
rocksdb.compaction.times.micros P50 : 1 P95 : 2 P99 : 3 P100 : 4 COUNT : 5 SUM : 6
rocksdb.compaction.times.cpu_micros P50 : 7 P95 : 8 P99 : 9 P100 : 10 COUNT : 11 SUM : 12
`
	defaultExpectedStatMap := map[string]*stat{
		"rocksdb.block.cache.miss": {
			name: "rocksdb.block.cache.miss",
			props: map[string]string{
				"COUNT": "1",
			},
		},
		"rocksdb.block.cache.hit": {
			name: "rocksdb.block.cache.hit",
			props: map[string]string{
				"COUNT": "2",
			},
		},
		"rocksdb.block.cache.add": {
			name: "rocksdb.block.cache.add",
			props: map[string]string{
				"COUNT": "3",
			},
		},
		"rocksdb.block.cache.add.failures": {
			name: "rocksdb.block.cache.add.failures",
			props: map[string]string{
				"COUNT": "4",
			},
		},
		"rocksdb.compaction.times.micros": {
			name: "rocksdb.compaction.times.micros",
			props: map[string]string{
				"P50":   "1",
				"P95":   "2",
				"P99":   "3",
				"P100":  "4",
				"COUNT": "5",
				"SUM":   "6",
			},
		},
		"rocksdb.compaction.times.cpu_micros": {
			name: "rocksdb.compaction.times.cpu_micros",
			props: map[string]string{
				"P50":   "7",
				"P95":   "8",
				"P99":   "9",
				"P100":  "10",
				"COUNT": "11",
				"SUM":   "12",
			},
		},
	}
	for _, tc := range []struct {
		desc            string
		serializedStats string
		expectedStatMap map[string]*stat
		errMsg          string
	}{
		{
			desc:            "success case",
			serializedStats: defaultSerializedStats,
			expectedStatMap: defaultExpectedStatMap,
			errMsg:          "",
		},
		{
			desc: "missing value #1",
			serializedStats: `rocksdb.block.cache.miss COUNT :
`,
			expectedStatMap: nil,
			errMsg:          "invalid number of tokens",
		},
		{
			desc: "missing value #2",
			serializedStats: `rocksdb.compaction.times.micros P50 : 1 P95 :
`,
			expectedStatMap: nil,
			errMsg:          "invalid number of tokens",
		},
		{
			desc: "missing stat name",
			serializedStats: ` COUNT : 1
`,
			expectedStatMap: nil,
			errMsg:          "stat name shouldn't be empty",
		},
		{
			desc:            "empty stat",
			serializedStats: ``,
			expectedStatMap: make(map[string]*stat),
			errMsg:          "",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			actualStatMap, err := parseSerializedStats(tc.serializedStats)
			if tc.errMsg == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.errMsg)
			}
			require.Equal(t, tc.expectedStatMap, actualStatMap)
		})
	}
}
// TestValidateTokens covers the token-shape validation: a valid
// name+triple list, two incomplete-triple failures, and an empty stat name.
func TestValidateTokens(t *testing.T) {
	for _, tc := range []struct {
		desc   string
		tokens []string
		errMsg string
	}{
		{
			desc:   "success case",
			tokens: []string{"name", "key", ":", "value"},
			errMsg: "",
		},
		{
			desc:   "missing value #1",
			tokens: []string{"name", "key", ":"},
			errMsg: "invalid number of tokens",
		},
		{
			desc:   "missing value #2",
			tokens: []string{"name", "key", ":", "value", "key2", ":"},
			errMsg: "invalid number of tokens",
		},
		{
			desc:   "empty stat name",
			tokens: []string{"", "key", ":", "value"},
			errMsg: "stat name shouldn't be empty",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			err := validateTokens(tc.tokens)
			if tc.errMsg == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.errMsg)
			}
		})
	}
}
// TestValidateStatProperty covers the per-triple validation: success, empty
// key, empty value, and a wrong separator.
func TestValidateStatProperty(t *testing.T) {
	for _, tc := range []struct {
		desc   string
		key    string
		value  string
		sep    string
		errMsg string
	}{
		{
			desc:   "success case",
			key:    "key",
			value:  "value",
			sep:    ":",
			errMsg: "",
		},
		{
			desc:   "missing key",
			key:    "",
			value:  "value",
			sep:    ":",
			errMsg: "key shouldn't be empty",
		},
		{
			desc:   "missing value",
			key:    "key",
			value:  "",
			sep:    ":",
			errMsg: "value shouldn't be empty",
		},
		{
			desc:   "invalid separator",
			key:    "key",
			value:  "value",
			sep:    "#",
			errMsg: "separator should be :",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			err := validateStatProperty(tc.key, tc.value, tc.sep)
			if tc.errMsg == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.errMsg)
			}
		})
	}
}

View File

@ -0,0 +1,284 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"fmt"
"strconv"
)
// Property keys that appear within a serialized stat line. Histogram stats
// expose the percentile buckets plus COUNT and SUM; plain counters expose
// COUNT only.
const (
	sum   = "SUM"
	count = "COUNT"
	p50   = "P50"
	p95   = "P95"
	p99   = "P99"
	p100  = "P100"
)

type statLoader struct {
	// statMap contains map of stat objects returned by parseSerializedStats function
	// example of stats:
	// #1: rocksdb.block.cache.miss COUNT : 5
	// #2: rocksdb.compaction.times.micros P50 : 21112 P95 : 21112 P99 : 21112 P100 : 21112 COUNT : 1 SUM : 21112
	// #1 case will be cast into int64
	// #2 case will be cast into float64Histogram
	statMap map[string]*stat

	// NOTE: some methods accumulate errors instead of returning them, these methods are private and not intended to use outside
	errors []error
}

// newStatLoader returns a statLoader over the given parsed stat map.
func newStatLoader(statMap map[string]*stat) *statLoader {
	return &statLoader{
		statMap: statMap,
		errors:  make([]error, 0),
	}
}
// stats is the typed view of the rocksdb statistics consumed by the metrics
// reporter: plain counters as int64, histogram stats as *float64Histogram.
type stats struct {
	NumberKeysWritten int64
	NumberKeysRead    int64
	NumberKeysUpdated int64

	// total block cache misses
	// BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
	//                     BLOCK_CACHE_FILTER_MISS +
	//                     BLOCK_CACHE_DATA_MISS;
	// BLOCK_CACHE_INDEX_MISS: # of times cache miss when accessing index block from block cache.
	// BLOCK_CACHE_FILTER_MISS: # of times cache miss when accessing filter block from block cache.
	// BLOCK_CACHE_DATA_MISS: # of times cache miss when accessing data block from block cache.
	BlockCacheMiss int64

	// total block cache hit
	// BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
	//                    BLOCK_CACHE_FILTER_HIT +
	//                    BLOCK_CACHE_DATA_HIT;
	// BLOCK_CACHE_INDEX_HIT: # of times cache hit when accessing index block from block cache.
	// BLOCK_CACHE_FILTER_HIT: # of times cache hit when accessing filter block from block cache.
	// BLOCK_CACHE_DATA_HIT: # of times cache hit when accessing data block from block cache.
	BlockCacheHit int64

	// # of blocks added to block cache.
	BlockCacheAdd int64
	// # of failures when adding blocks to block cache.
	BlockCacheAddFailures int64

	BlockCacheIndexMiss         int64
	BlockCacheIndexHit          int64
	BlockCacheIndexBytesInsert  int64
	BlockCacheFilterMiss        int64
	BlockCacheFilterHit         int64
	BlockCacheFilterBytesInsert int64
	BlockCacheDataMiss          int64
	BlockCacheDataHit           int64
	BlockCacheDataBytesInsert   int64

	CompactReadBytes  int64 // Bytes read during compaction
	CompactWriteBytes int64 // Bytes written during compaction

	CompactionTimesMicros      *float64Histogram
	CompactionTimesCPUMicros   *float64Histogram
	NumFilesInSingleCompaction *float64Histogram

	// Read amplification statistics.
	// Read amplification can be calculated using this formula
	// (READ_AMP_TOTAL_READ_BYTES / READ_AMP_ESTIMATE_USEFUL_BYTES)
	//
	// REQUIRES: ReadOptions::read_amp_bytes_per_bit to be enabled
	// TODO(yevhenii): seems not working?
	ReadAmpEstimateUsefulBytes int64 // Estimate of total bytes actually used.
	ReadAmpTotalReadBytes      int64 // Total size of loaded data blocks.

	NumberFileOpens  int64
	NumberFileErrors int64

	// # of times bloom filter has avoided file reads, i.e., negatives.
	BloomFilterUseful int64
	// # of times bloom FullFilter has not avoided the reads.
	BloomFilterFullPositive int64
	// # of times bloom FullFilter has not avoided the reads and data actually
	// exist.
	BloomFilterFullTruePositive int64

	// # of memtable hits.
	MemtableHit int64
	// # of memtable misses.
	MemtableMiss int64

	// # of Get() queries served by L0
	GetHitL0 int64
	// # of Get() queries served by L1
	GetHitL1 int64
	// # of Get() queries served by L2 and up
	GetHitL2AndUp int64

	// The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
	// DB::Merge(), and DB::Write().
	BytesWritten int64
	// The number of uncompressed bytes read from DB::Get(). It could be
	// either from memtables, cache, or table files.
	// For the number of logical bytes read from DB::MultiGet(),
	// please use NUMBER_MULTIGET_BYTES_READ.
	BytesRead int64

	// Writer has to wait for compaction or flush to finish.
	StallMicros int64

	DBWriteStallHistogram *float64Histogram

	// Last level and non-last level read statistics
	LastLevelReadBytes    int64
	LastLevelReadCount    int64
	NonLastLevelReadBytes int64
	NonLastLevelReadCount int64

	DBGetMicros   *float64Histogram
	DBWriteMicros *float64Histogram

	// Value size distribution in each operation
	BytesPerRead     *float64Histogram
	BytesPerWrite    *float64Histogram
	BytesPerMultiget *float64Histogram

	// Time spent flushing memtable to disk
	FlushMicros *float64Histogram
}
// float64Histogram mirrors a rocksdb histogram stat: aggregate Sum/Count plus
// the P50/P95/P99/P100 percentile values, all parsed as float64.
type float64Histogram struct {
	Sum   float64
	Count float64
	P50   float64
	P95   float64
	P99   float64
	P100  float64
}
// error flattens every accumulated lookup/parse error into a single error,
// or returns nil when nothing failed.
func (l *statLoader) error() error {
	if len(l.errors) != 0 {
		return fmt.Errorf("%v", l.errors)
	}
	return nil
}
// load converts the raw stat map into a typed stats struct. Every getter
// records failures into l.errors instead of returning them, so all fields
// are attempted; if anything failed, the combined error is returned and the
// partially-built struct is discarded.
func (l *statLoader) load() (*stats, error) {
	stats := &stats{
		NumberKeysWritten:           l.getInt64StatValue("rocksdb.number.keys.written", count),
		NumberKeysRead:              l.getInt64StatValue("rocksdb.number.keys.read", count),
		NumberKeysUpdated:           l.getInt64StatValue("rocksdb.number.keys.updated", count),
		BlockCacheMiss:              l.getInt64StatValue("rocksdb.block.cache.miss", count),
		BlockCacheHit:               l.getInt64StatValue("rocksdb.block.cache.hit", count),
		BlockCacheAdd:               l.getInt64StatValue("rocksdb.block.cache.add", count),
		BlockCacheAddFailures:       l.getInt64StatValue("rocksdb.block.cache.add.failures", count),
		BlockCacheIndexMiss:         l.getInt64StatValue("rocksdb.block.cache.index.miss", count),
		BlockCacheIndexHit:          l.getInt64StatValue("rocksdb.block.cache.index.hit", count),
		BlockCacheIndexBytesInsert:  l.getInt64StatValue("rocksdb.block.cache.index.bytes.insert", count),
		BlockCacheFilterMiss:        l.getInt64StatValue("rocksdb.block.cache.filter.miss", count),
		BlockCacheFilterHit:         l.getInt64StatValue("rocksdb.block.cache.filter.hit", count),
		BlockCacheFilterBytesInsert: l.getInt64StatValue("rocksdb.block.cache.filter.bytes.insert", count),
		BlockCacheDataMiss:          l.getInt64StatValue("rocksdb.block.cache.data.miss", count),
		BlockCacheDataHit:           l.getInt64StatValue("rocksdb.block.cache.data.hit", count),
		BlockCacheDataBytesInsert:   l.getInt64StatValue("rocksdb.block.cache.data.bytes.insert", count),
		CompactReadBytes:            l.getInt64StatValue("rocksdb.compact.read.bytes", count),
		CompactWriteBytes:           l.getInt64StatValue("rocksdb.compact.write.bytes", count),
		CompactionTimesMicros:       l.getFloat64HistogramStatValue("rocksdb.compaction.times.micros"),
		CompactionTimesCPUMicros:    l.getFloat64HistogramStatValue("rocksdb.compaction.times.cpu_micros"),
		NumFilesInSingleCompaction:  l.getFloat64HistogramStatValue("rocksdb.numfiles.in.singlecompaction"),
		ReadAmpEstimateUsefulBytes:  l.getInt64StatValue("rocksdb.read.amp.estimate.useful.bytes", count),
		ReadAmpTotalReadBytes:       l.getInt64StatValue("rocksdb.read.amp.total.read.bytes", count),
		NumberFileOpens:             l.getInt64StatValue("rocksdb.no.file.opens", count),
		NumberFileErrors:            l.getInt64StatValue("rocksdb.no.file.errors", count),
		BloomFilterUseful:           l.getInt64StatValue("rocksdb.bloom.filter.useful", count),
		BloomFilterFullPositive:     l.getInt64StatValue("rocksdb.bloom.filter.full.positive", count),
		BloomFilterFullTruePositive: l.getInt64StatValue("rocksdb.bloom.filter.full.true.positive", count),
		MemtableHit:                 l.getInt64StatValue("rocksdb.memtable.hit", count),
		MemtableMiss:                l.getInt64StatValue("rocksdb.memtable.miss", count),
		GetHitL0:                    l.getInt64StatValue("rocksdb.l0.hit", count),
		GetHitL1:                    l.getInt64StatValue("rocksdb.l1.hit", count),
		GetHitL2AndUp:               l.getInt64StatValue("rocksdb.l2andup.hit", count),
		BytesWritten:                l.getInt64StatValue("rocksdb.bytes.written", count),
		BytesRead:                   l.getInt64StatValue("rocksdb.bytes.read", count),
		StallMicros:                 l.getInt64StatValue("rocksdb.stall.micros", count),
		DBWriteStallHistogram:       l.getFloat64HistogramStatValue("rocksdb.db.write.stall"),
		LastLevelReadBytes:          l.getInt64StatValue("rocksdb.last.level.read.bytes", count),
		LastLevelReadCount:          l.getInt64StatValue("rocksdb.last.level.read.count", count),
		NonLastLevelReadBytes:       l.getInt64StatValue("rocksdb.non.last.level.read.bytes", count),
		NonLastLevelReadCount:       l.getInt64StatValue("rocksdb.non.last.level.read.count", count),
		DBGetMicros:                 l.getFloat64HistogramStatValue("rocksdb.db.get.micros"),
		DBWriteMicros:               l.getFloat64HistogramStatValue("rocksdb.db.write.micros"),
		BytesPerRead:                l.getFloat64HistogramStatValue("rocksdb.bytes.per.read"),
		BytesPerWrite:               l.getFloat64HistogramStatValue("rocksdb.bytes.per.write"),
		BytesPerMultiget:            l.getFloat64HistogramStatValue("rocksdb.bytes.per.multiget"),
		FlushMicros:                 l.getFloat64HistogramStatValue("rocksdb.db.flush.micros"),
	}

	err := l.error()
	if err != nil {
		return nil, err
	}

	return stats, nil
}
// getFloat64HistogramStatValue converts a histogram stat (SUM, COUNT and the
// percentile props) into a float64Histogram; missing or unparseable props
// are recorded in l.errors and contribute zero values.
func (l *statLoader) getFloat64HistogramStatValue(statName string) *float64Histogram {
	return &float64Histogram{
		Sum:   l.getFloat64StatValue(statName, sum),
		Count: l.getFloat64StatValue(statName, count),
		P50:   l.getFloat64StatValue(statName, p50),
		P95:   l.getFloat64StatValue(statName, p95),
		P99:   l.getFloat64StatValue(statName, p99),
		P100:  l.getFloat64StatValue(statName, p100),
	}
}
// getInt64StatValue parses the named property of a stat as a base-10 int64.
// Failures are recorded in l.errors and yield zero.
func (l *statLoader) getInt64StatValue(statName, propName string) int64 {
	raw := l.getStatValue(statName, propName)
	if raw == "" {
		l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
		return 0
	}
	parsed, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		l.errors = append(l.errors, fmt.Errorf("can't parse int: %v", err))
		return 0
	}
	return parsed
}
// getFloat64StatValue parses the named property of a stat as a float64.
// Failures are recorded in l.errors and yield zero.
func (l *statLoader) getFloat64StatValue(statName, propName string) float64 {
	raw := l.getStatValue(statName, propName)
	if raw == "" {
		l.errors = append(l.errors, fmt.Errorf("can't get stat by name: %v", statName))
		return 0
	}
	parsed, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		l.errors = append(l.errors, fmt.Errorf("can't parse float: %v", err))
		return 0
	}
	return parsed
}
// getStatValue looks up a stat by name and returns the raw string value of
// one of its properties. A missing stat or property is recorded in l.errors
// and yields "".
func (l *statLoader) getStatValue(statName, propName string) string {
	s, found := l.statMap[statName]
	if !found {
		l.errors = append(l.errors, fmt.Errorf("stat %v doesn't exist", statName))
		return ""
	}
	v, found := s.props[propName]
	if !found {
		l.errors = append(l.errors, fmt.Errorf("stat %v doesn't have %v property", statName, propName))
		return ""
	}
	return v
}

View File

@ -0,0 +1,90 @@
//go:build rocksdb
// +build rocksdb
package opendb
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestStatsLoader feeds statLoader.load a complete stat map (every counter
// stat sharing one COUNT=1 fixture, every histogram stat sharing one
// P50..SUM fixture) and spot-checks counter and histogram conversions.
func TestStatsLoader(t *testing.T) {
	// Shared fixture for plain counter stats.
	defaultStat := stat{
		props: map[string]string{
			"COUNT": "1",
		},
	}
	// Shared fixture for histogram stats.
	defaultHistogramStat := stat{
		props: map[string]string{
			"P50":   "1",
			"P95":   "2",
			"P99":   "3",
			"P100":  "4",
			"COUNT": "5",
			"SUM":   "6",
		},
	}
	defaultStatMap := map[string]*stat{
		"rocksdb.number.keys.written":             &defaultStat,
		"rocksdb.number.keys.read":                &defaultStat,
		"rocksdb.number.keys.updated":             &defaultStat,
		"rocksdb.block.cache.miss":                &defaultStat,
		"rocksdb.block.cache.hit":                 &defaultStat,
		"rocksdb.block.cache.add":                 &defaultStat,
		"rocksdb.block.cache.add.failures":        &defaultStat,
		"rocksdb.block.cache.index.miss":          &defaultStat,
		"rocksdb.block.cache.index.hit":           &defaultStat,
		"rocksdb.block.cache.index.bytes.insert":  &defaultStat,
		"rocksdb.block.cache.filter.miss":         &defaultStat,
		"rocksdb.block.cache.filter.hit":          &defaultStat,
		"rocksdb.block.cache.filter.bytes.insert": &defaultStat,
		"rocksdb.block.cache.data.miss":           &defaultStat,
		"rocksdb.block.cache.data.hit":            &defaultStat,
		"rocksdb.block.cache.data.bytes.insert":   &defaultStat,
		"rocksdb.compact.read.bytes":              &defaultStat,
		"rocksdb.compact.write.bytes":             &defaultStat,
		"rocksdb.compaction.times.micros":         &defaultHistogramStat,
		"rocksdb.compaction.times.cpu_micros":     &defaultHistogramStat,
		"rocksdb.numfiles.in.singlecompaction":    &defaultHistogramStat,
		"rocksdb.read.amp.estimate.useful.bytes":  &defaultStat,
		"rocksdb.read.amp.total.read.bytes":       &defaultStat,
		"rocksdb.no.file.opens":                   &defaultStat,
		"rocksdb.no.file.errors":                  &defaultStat,
		"rocksdb.bloom.filter.useful":             &defaultStat,
		"rocksdb.bloom.filter.full.positive":      &defaultStat,
		"rocksdb.bloom.filter.full.true.positive": &defaultStat,
		"rocksdb.memtable.hit":                    &defaultStat,
		"rocksdb.memtable.miss":                   &defaultStat,
		"rocksdb.l0.hit":                          &defaultStat,
		"rocksdb.l1.hit":                          &defaultStat,
		"rocksdb.l2andup.hit":                     &defaultStat,
		"rocksdb.bytes.written":                   &defaultStat,
		"rocksdb.bytes.read":                      &defaultStat,
		"rocksdb.stall.micros":                    &defaultStat,
		"rocksdb.db.write.stall":                  &defaultHistogramStat,
		"rocksdb.last.level.read.bytes":           &defaultStat,
		"rocksdb.last.level.read.count":           &defaultStat,
		"rocksdb.non.last.level.read.bytes":       &defaultStat,
		"rocksdb.non.last.level.read.count":       &defaultStat,
		"rocksdb.db.get.micros":                   &defaultHistogramStat,
		"rocksdb.db.write.micros":                 &defaultHistogramStat,
		"rocksdb.bytes.per.read":                  &defaultHistogramStat,
		"rocksdb.bytes.per.write":                 &defaultHistogramStat,
		"rocksdb.bytes.per.multiget":              &defaultHistogramStat,
		"rocksdb.db.flush.micros":                 &defaultHistogramStat,
	}
	statLoader := newStatLoader(defaultStatMap)
	stats, err := statLoader.load()
	require.NoError(t, err)
	require.Equal(t, stats.NumberKeysWritten, int64(1))
	require.Equal(t, stats.NumberKeysRead, int64(1))
	require.Equal(t, stats.CompactionTimesMicros.P50, float64(1))
	require.Equal(t, stats.CompactionTimesMicros.P95, float64(2))
	require.Equal(t, stats.CompactionTimesMicros.P99, float64(3))
	require.Equal(t, stats.CompactionTimesMicros.P100, float64(4))
	require.Equal(t, stats.CompactionTimesMicros.Count, float64(5))
	require.Equal(t, stats.CompactionTimesMicros.Sum, float64(6))
}

View File

@ -188,6 +188,8 @@
- [CommunityPoolLendWithdrawProposal](#kava.community.v1beta1.CommunityPoolLendWithdrawProposal)
- [kava/community/v1beta1/query.proto](#kava/community/v1beta1/query.proto)
- [QueryAnnualizedRewardsRequest](#kava.community.v1beta1.QueryAnnualizedRewardsRequest)
- [QueryAnnualizedRewardsResponse](#kava.community.v1beta1.QueryAnnualizedRewardsResponse)
- [QueryBalanceRequest](#kava.community.v1beta1.QueryBalanceRequest)
- [QueryBalanceResponse](#kava.community.v1beta1.QueryBalanceResponse)
- [QueryTotalBalanceRequest](#kava.community.v1beta1.QueryTotalBalanceRequest)
@ -2977,6 +2979,31 @@ CommunityPoolLendWithdrawProposal withdraws a lend position back to the communit
<a name="kava.community.v1beta1.QueryAnnualizedRewardsRequest"></a>
### QueryAnnualizedRewardsRequest
QueryAnnualizedRewardsRequest defines the request type for querying the annualized rewards.
<a name="kava.community.v1beta1.QueryAnnualizedRewardsResponse"></a>
### QueryAnnualizedRewardsResponse
QueryAnnualizedRewardsResponse defines the response type for querying the annualized rewards.
| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `staking_rewards` | [string](#string) | | staking_rewards is the calculated annualized staking rewards percentage rate |
<a name="kava.community.v1beta1.QueryBalanceRequest"></a>
### QueryBalanceRequest
@ -3043,6 +3070,7 @@ Query defines the gRPC querier service for x/community.
| ----------- | ------------ | ------------- | ------------| ------- | -------- |
| `Balance` | [QueryBalanceRequest](#kava.community.v1beta1.QueryBalanceRequest) | [QueryBalanceResponse](#kava.community.v1beta1.QueryBalanceResponse) | Balance queries the balance of all coins of x/community module. | GET|/kava/community/v1beta1/balance|
| `TotalBalance` | [QueryTotalBalanceRequest](#kava.community.v1beta1.QueryTotalBalanceRequest) | [QueryTotalBalanceResponse](#kava.community.v1beta1.QueryTotalBalanceResponse) | TotalBalance queries the balance of all coins, including x/distribution, x/community, and supplied balances. | GET|/kava/community/v1beta1/total_balance|
| `AnnualizedRewards` | [QueryAnnualizedRewardsRequest](#kava.community.v1beta1.QueryAnnualizedRewardsRequest) | [QueryAnnualizedRewardsResponse](#kava.community.v1beta1.QueryAnnualizedRewardsResponse) | AnnualizedRewards calculates and returns the current annualized reward percentages, like staking rewards, for the chain. | GET|/kava/community/v1beta1/annualized_rewards|
<!-- end services -->

57
go.mod
View File

@ -12,31 +12,34 @@ require (
github.com/cosmos/ibc-go/v6 v6.1.1
github.com/ethereum/go-ethereum v1.10.26
github.com/evmos/ethermint v0.21.0
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.3
github.com/golang/protobuf v1.5.3
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/linxGnu/grocksdb v1.8.0
github.com/pelletier/go-toml/v2 v2.0.6
github.com/prometheus/client_golang v1.14.0
github.com/spf13/cast v1.5.0
github.com/spf13/cobra v1.6.1
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.2
github.com/stretchr/testify v1.8.3
github.com/subosito/gotenv v1.4.2
github.com/tendermint/tendermint v0.34.27
github.com/tendermint/tm-db v0.6.7
golang.org/x/crypto v0.6.0
google.golang.org/genproto v0.0.0-20230202175211-008b39050e57
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.28.2-0.20230208135220-49eaa78c6c9c
golang.org/x/crypto v0.14.0
google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13
google.golang.org/grpc v1.58.3
google.golang.org/protobuf v1.31.0
sigs.k8s.io/yaml v1.3.0
)
require (
cloud.google.com/go v0.107.0 // indirect
cloud.google.com/go/compute v1.15.1 // indirect
cloud.google.com/go v0.110.8 // indirect
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.8.0 // indirect
cloud.google.com/go/storage v1.27.0 // indirect
cloud.google.com/go/iam v1.1.2 // indirect
cloud.google.com/go/storage v1.30.1 // indirect
filippo.io/edwards25519 v1.0.0-rc.1 // indirect
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
github.com/99designs/keyring v1.2.1 // indirect
@ -64,7 +67,7 @@ require (
github.com/cosmos/btcutil v1.0.5 // indirect
github.com/cosmos/gogoproto v1.4.6 // indirect
github.com/cosmos/iavl v0.19.5 // indirect
github.com/cosmos/ledger-cosmos-go v0.12.2 // indirect
github.com/cosmos/ledger-cosmos-go v0.13.1 // indirect
github.com/creachadair/taskgroup v0.3.2 // indirect
github.com/danieljoos/wincred v1.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
@ -84,7 +87,6 @@ require (
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-gonic/gin v1.8.1 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
@ -93,16 +95,17 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
github.com/gogo/gateway v1.1.0 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/flatbuffers v1.12.1 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/orderedcode v0.0.1 // indirect
github.com/google/s2a-go v0.1.4 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gorilla/handlers v1.5.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
@ -129,7 +132,6 @@ require (
github.com/klauspost/compress v1.15.15 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/linxGnu/grocksdb v1.7.15 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@ -148,7 +150,6 @@ require (
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.40.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
@ -176,19 +177,21 @@ require (
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/zondax/hid v0.9.1 // indirect
github.com/zondax/ledger-go v0.14.1 // indirect
github.com/zondax/ledger-go v0.14.2 // indirect
go.etcd.io/bbolt v1.3.7 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/api v0.107.0 // indirect
google.golang.org/api v0.128.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@ -202,7 +205,7 @@ replace (
// Use rocksdb 7.9.2
github.com/cometbft/cometbft-db => github.com/kava-labs/cometbft-db v0.7.0-rocksdb-v7.9.2-kava.1
// Use cosmos-sdk fork with backported fix for unsafe-reset-all, staking transfer events, and custom tally handler support
github.com/cosmos/cosmos-sdk => github.com/kava-labs/cosmos-sdk v0.46.11-kava.1
github.com/cosmos/cosmos-sdk => github.com/kava-labs/cosmos-sdk v0.46.11-kava.3
// See https://github.com/cosmos/cosmos-sdk/pull/13093
github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.2
// Use ethermint fork that respects min-gas-price with NoBaseFee true and london enabled, and includes eip712 support
@ -214,7 +217,7 @@ replace (
// Downgraded to avoid bugs in following commits which causes "version does not exist" errors
github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
// Use cometbft fork of tendermint
github.com/tendermint/tendermint => github.com/cometbft/cometbft v0.34.27
github.com/tendermint/tendermint => github.com/kava-labs/cometbft v0.34.27-kava.0
// Indirect dependencies still use tendermint/tm-db
github.com/tendermint/tm-db => github.com/kava-labs/tm-db v0.6.7-kava.3
github.com/tendermint/tm-db => github.com/kava-labs/tm-db v0.6.7-kava.4
)

110
go.sum
View File

@ -34,8 +34,8 @@ cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w9
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww=
cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk=
cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
@ -73,8 +73,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
@ -114,13 +114,12 @@ cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y97
cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4=
cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
@ -177,8 +176,9 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
@ -354,8 +354,6 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE
github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI=
github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA=
github.com/coinbase/rosetta-sdk-go v0.7.9/go.mod h1:0/knutI7XGVqXmmH4OQD8OckFrbQ8yMsUZTG7FXCR2M=
github.com/cometbft/cometbft v0.34.27 h1:ri6BvmwjWR0gurYjywcBqRe4bbwc3QVs9KRcCzgh/J0=
github.com/cometbft/cometbft v0.34.27/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw=
github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4=
github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak=
github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ=
@ -384,8 +382,8 @@ github.com/cosmos/ibc-go/v6 v6.1.1 h1:oqqMNyjj6SLQF8rvgCaDGwfdITEIsbhs8F77/8xvRI
github.com/cosmos/ibc-go/v6 v6.1.1/go.mod h1:NL17FpFAaWjRFVb1T7LUKuOoMSsATPpu+Icc4zL5/Ik=
github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo=
github.com/cosmos/keyring v1.2.0/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA=
github.com/cosmos/ledger-cosmos-go v0.12.2 h1:/XYaBlE2BJxtvpkHiBm97gFGSGmYGKunKyF3nNqAXZA=
github.com/cosmos/ledger-cosmos-go v0.12.2/go.mod h1:ZcqYgnfNJ6lAXe4HPtWgarNEY+B74i+2/8MhZw4ziiI=
github.com/cosmos/ledger-cosmos-go v0.13.1 h1:12ac9+GwBb9BjP7X5ygpFk09Itwzjzfmg6A2CWFjoVs=
github.com/cosmos/ledger-cosmos-go v0.13.1/go.mod h1:5tv2RVJEd2+Y38TIQN4CRjJeQGyqOEiKJDfqhk5UjqE=
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
@ -550,8 +548,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -624,8 +622,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@ -645,6 +643,8 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -654,8 +654,8 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4=
github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@ -665,8 +665,8 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@ -802,14 +802,16 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kava-labs/cometbft v0.34.27-kava.0 h1:FUEGRkF3xtrJH+h9A5G4eA2skf7QaNoOCPaoVqHkh8k=
github.com/kava-labs/cometbft v0.34.27-kava.0/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw=
github.com/kava-labs/cometbft-db v0.7.0-rocksdb-v7.9.2-kava.1 h1:EZnZAkZ+dqK+1OM4AK+e6wYH8a5xuyg4yFTR4Ez3AXk=
github.com/kava-labs/cometbft-db v0.7.0-rocksdb-v7.9.2-kava.1/go.mod h1:mI/4J4IxRzPrXvMiwefrt0fucGwaQ5Hm9IKS7HnoJeI=
github.com/kava-labs/cosmos-sdk v0.46.11-kava.1 h1:3VRpm4zf/gQgmpRVd1p99/2P8ZecAu2FVAXHru5caIo=
github.com/kava-labs/cosmos-sdk v0.46.11-kava.1/go.mod h1:bG4AkW9bqc8ycrryyKGQEl3YV9BY2wr6HggGq8kvcgM=
github.com/kava-labs/cosmos-sdk v0.46.11-kava.3 h1:TOhyyW/xHso/9uIOgYdsrOWDIhXi6foORWZxVRe/wS0=
github.com/kava-labs/cosmos-sdk v0.46.11-kava.3/go.mod h1:bSUUbmVwWkv1ZNVTWrQHa/i+73xIUvYYPsCvl5doiCs=
github.com/kava-labs/ethermint v0.21.0-kava-v23-1 h1:5TSyCtPvFdMuSe8p2iMVqXmFBlK3lHyjaT9EqN752aI=
github.com/kava-labs/ethermint v0.21.0-kava-v23-1/go.mod h1:rdm6AinxZ4dzPEv/cjH+/AGyTbKufJ3RE7M2MDyklH0=
github.com/kava-labs/tm-db v0.6.7-kava.3 h1:4vyAh+NyZ1xTjCt0utNT6FJHnsZK1I19xwZeJttdRXQ=
github.com/kava-labs/tm-db v0.6.7-kava.3/go.mod h1:70tpLhNfwCP64nAlq+bU+rOiVfWr3Nnju1D1nhGDGKs=
github.com/kava-labs/tm-db v0.6.7-kava.4 h1:M2RibOKmbi+k2OhAFry8z9+RJF0CYuDETB7/PrSdoro=
github.com/kava-labs/tm-db v0.6.7-kava.4/go.mod h1:70tpLhNfwCP64nAlq+bU+rOiVfWr3Nnju1D1nhGDGKs=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
@ -849,8 +851,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linxGnu/grocksdb v1.7.15 h1:AEhP28lkeAybv5UYNYviYISpR6bJejEnKuYbnWAnxx0=
github.com/linxGnu/grocksdb v1.7.15/go.mod h1:pY55D0o+r8yUYLq70QmhdudxYvoDb9F+9puf4m3/W+U=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@ -1130,8 +1132,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@ -1192,8 +1194,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo=
github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c=
github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320=
github.com/zondax/ledger-go v0.14.2 h1:NDaba434N7JUVKg4P4nFv6TOjSkUosYb9sdiw3c61Zk=
github.com/zondax/ledger-go v0.14.2/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
@ -1243,8 +1245,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1355,8 +1358,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1382,8 +1385,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1398,8 +1401,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1505,14 +1508,14 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1522,9 +1525,10 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1661,8 +1665,8 @@ google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ
google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
google.golang.org/api v0.107.0 h1:I2SlFjD8ZWabaIFOfeEDg3pf0BHJDh6iYQ1ic3Yu/UU=
google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg=
google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1782,8 +1786,12 @@ google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqw
google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=
google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
google.golang.org/genproto v0.0.0-20230202175211-008b39050e57 h1:vArvWooPH749rNHpBGgVl+U9B9dATjiEhJzcWGlovNs=
google.golang.org/genproto v0.0.0-20230202175211-008b39050e57/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 h1:U7+wNaVuSTaUqNvK2+osJ9ejEZxbjHHk8F2b6Hpx0AE=
google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -1826,8 +1834,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1844,8 +1852,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.2-0.20230208135220-49eaa78c6c9c h1:gDe3xeLH/W6iv5d9xQBo6IwJbCdVcZRiV8xuix6FJW8=
google.golang.org/protobuf v1.28.2-0.20230208135220-49eaa78c6c9c/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

70
migrate/v0_24/migrate.md Normal file
View File

@ -0,0 +1,70 @@
# Kava 14 Upgrade Instructions
## Software Version and Key Dates
- The version of `kava` for Kava 14 is v0.24.0
- The Kava 13 chain will be shut down with a `SoftwareUpgradeProposal` that activates at approximately 15:00 UTC on July 12th, 2023.
## Configuration Changes
**For validators with existing configurations, it is recommended to set `evm.max-tx-gas-wanted = 0` in app.toml to avoid proposing blocks that exceed the block gas limit.**
In previous versions, the default was non-zero and was used to mitigate DDoS style gas attacks. However, this setting is not required anymore and can safely be set to zero.
### On the day of the upgrade
**The kava chain is expected to halt at block height 5597000. Do not stop your node and begin the upgrade before the upgrade height, or you may go offline and be unable to recover until after the upgrade!**
**Make sure the kava process is stopped before proceeding and that you have backed up your validator**. Failure to backup your validator could make it impossible to restart your node if the upgrade fails.
**Ensure you are using golang 1.20.x and not a different version.** Golang 1.19 and below may cause app hash mismatches!
1. Update to v0.24.0
```sh
# check go version - look for 1.20!
go version
# go version go1.20.5 linux/arm64
# in the `kava` folder
git fetch
git checkout v0.24.0
# Note: Golang 1.20 must be installed before this step
make install
# verify versions
kava version --long
# name: kava
# server_name: kava
# version: 0.24.0
# commit: <commit placeholder>
# build_tags: netgo ledger,
# go: go version go1.20.5 linux/arm64
# build_deps:
# ...
# cosmos_sdk_version: v0.46.11
# Restart node -
kava start
```
### Risks
As a validator, performing the upgrade procedure on your consensus nodes carries a heightened risk of double-signing and being slashed. The most important piece of this procedure is verifying your software version and genesis file hash before starting your validator and signing.
The riskiest thing a validator can do is discover that they made a mistake and repeat the upgrade procedure again during the network startup. If you discover a mistake in the process, the best thing to do is wait for the network to start before correcting it. If the network is halted and you have started with a different genesis file than the expected one, seek advice from a Kava developer before resetting your validator.
### Recovery
Prior to applying the Kava 14 upgrade, validators are encouraged to take a full data snapshot at the upgrade height before proceeding. Snap-shotting depends heavily on infrastructure, but generally this can be done by backing up the .kava directory.
It is critically important to back-up the .kava/data/priv_validator_state.json file after stopping your kava process. This file is updated every block as your validator participates in consensus rounds. It is a critical file needed to prevent double-signing, in case the upgrade fails and the previous chain needs to be restarted.
In the event that the upgrade does not succeed, validators and operators must downgrade back to v0.23.x of the Kava software and restore to their latest snapshot before restarting their nodes.
### Coordination
If the Kava 14 chain does not launch by July 13th, 2023 at 00:00 UTC, the launch should be considered a failure. In the event of launch failure, coordination will occur in the [Kava discord](https://discord.com/invite/kQzh3Uv).

View File

@ -0,0 +1,80 @@
# Migrate Staking Reward Calculation to Endpoint
Kava 15 (v0.25.x) is changing the mechanism for staking rewards, which will no longer be inflationary but will be paid out of the community module. In order to continue displaying expected yields or APYs to users, wallets and explorers will need to update.
To accommodate an easy transition to these new changes, a new endpoint has been backported to v0.24.
The endpoint calculates staking rewards for the current kava version and is forward compatible with future changes.
All consumers who display yearly staking reward percentages are encouraged to migrate from the standard calculation to using the endpoint, as the standard calculation will no longer be accurate in the near-future.
Endpoint: `/kava/community/v1beta1/annualized_rewards`
Example Response:
```json
{
"staking_rewards": "0.203023625910000000"
}
```
## Before Kava 15
The staking APR is calculated the same way as other cosmos-sdk chains. Various parameters are fetched and then combined in this calculation:
```
staking_apr ≈ mint_inflation *
(1 - distribution_params.community_tax) *
(total_supply_ukava/pool.bonded_tokens)
```
_Note this doesn't include transaction fees paid to stakers._
Endpoints used:
* https://api.data.kava.io/cosmos/mint/v1beta1/params
* https://api.data.kava.io/cosmos/distribution/v1beta1/params
* https://api.data.kava.io/cosmos/bank/v1beta1/supply/by_denom?denom=ukava
* https://api.data.kava.io/cosmos/staking/v1beta1/pool
Informational Endpoints
* https://api.data.kava.io/cosmos/mint/v1beta1/inflation
* https://api.data.kava.io/cosmos/mint/v1beta1/annual_provisions
## After Kava 15
Kava 15 will implement new staking rewards as ratified in this proposal: https://www.mintscan.io/kava/proposals/141. They will come into effect at the “switchover time” on 2024-01-01.
* All delegating and claiming transactions remain unchanged. There is no change in how rewards are claimed or how claimable balances are queried.
* After the switchover time, inflation will be set to zero (and rewards will be paid from the community module account).
* After the switchover time, rewards are paid out according to:
```
staking apy ≈ community_params.staking_rewards_per_second *
seconds_per_year / pool.bonded_tokens
```
_Note this doesn't include transaction fees paid to stakers._
* There will be a new endpoint `kava/community/v1beta1/annualized_rewards`
* before the switchover time, it will return the current staking APY (calculated in the previous section)
* after the switchover time, it will return the new staking APY above
* Existing endpoints above will remain active, but the params will change such that the old apr calculation will return 0.
* https://api.data.kava.io/cosmos/mint/v1beta1/params
* no format changes
* `inflation_max` and `inflation_min` will be 0.0
* https://api.data.kava.io/cosmos/distribution/v1beta1/params
* no format changes
* `community_tax` will be 0.0
* https://api.data.kava.io/cosmos/bank/v1beta1/supply/by_denom?denom=ukava
* no changes
* https://api.data.kava.io/cosmos/staking/v1beta1/pool
* no changes
* https://api.data.kava.io/cosmos/mint/v1beta1/inflation
* no format changes
* `inflation` will be 0.0
* https://api.data.kava.io/cosmos/mint/v1beta1/annual_provisions
* no format changes
* `annual_provisions` will be 0.0

View File

@ -2,6 +2,7 @@ syntax = "proto3";
package kava.community.v1beta1;
import "cosmos/base/v1beta1/coin.proto";
import "cosmos_proto/cosmos.proto";
import "gogoproto/gogo.proto";
import "google/api/annotations.proto";
@ -19,6 +20,12 @@ service Query {
rpc TotalBalance(QueryTotalBalanceRequest) returns (QueryTotalBalanceResponse) {
option (google.api.http).get = "/kava/community/v1beta1/total_balance";
}
// AnnualizedRewards calculates and returns the current annualized reward percentages,
// like staking rewards, for the chain.
rpc AnnualizedRewards(QueryAnnualizedRewardsRequest) returns (QueryAnnualizedRewardsResponse) {
option (google.api.http).get = "/kava/community/v1beta1/annualized_rewards";
}
}
// QueryBalanceRequest defines the request type for querying x/community balance.
@ -44,3 +51,16 @@ message QueryTotalBalanceResponse {
(gogoproto.nullable) = false
];
}
// QueryAnnualizedRewardsRequest defines the request type for querying the annualized rewards.
// It is intentionally empty: the calculation takes no parameters.
message QueryAnnualizedRewardsRequest {}

// QueryAnnualizedRewardsResponse defines the response type for querying the annualized rewards.
message QueryAnnualizedRewardsResponse {
  // staking_rewards is the calculated annualized staking rewards percentage rate,
  // encoded as a decimal string (e.g. "0.203023625910000000" ~= 20.3%).
  // It is non-nullable and maps to cosmossdk.io/math.LegacyDec in generated Go code.
  string staking_rewards = 1 [
    (cosmos_proto.scalar) = "cosmos.Dec",
    (gogoproto.customtype) = "cosmossdk.io/math.LegacyDec",
    (gogoproto.nullable) = false
  ];
}

View File

@ -4,7 +4,7 @@ E2E_KAVA_FUNDED_ACCOUNT_MNEMONIC='tent fitness boat among census primary pipe no
# E2E_KVTOOL_KAVA_CONFIG_TEMPLATE is the kvtool template used to start the chain. See the `kava.configTemplate` flag in kvtool.
# Note that the config template must support overriding the docker image tag via the KAVA_TAG variable.
E2E_KVTOOL_KAVA_CONFIG_TEMPLATE="master"
E2E_KVTOOL_KAVA_CONFIG_TEMPLATE="v0.23"
# E2E_INCLUDE_IBC_TESTS when true will start a 2nd chain & open an IBC channel. It will enable all IBC tests.
E2E_INCLUDE_IBC_TESTS=true
@ -15,14 +15,14 @@ E2E_SKIP_SHUTDOWN=false
# The following variables should be defined to run an upgrade.
# E2E_INCLUDE_AUTOMATED_UPGRADE when true enables the automated upgrade & corresponding tests in the suite.
E2E_INCLUDE_AUTOMATED_UPGRADE=false
E2E_INCLUDE_AUTOMATED_UPGRADE=true
# E2E_KAVA_UPGRADE_NAME is the name of the upgrade that must be in the current local image.
E2E_KAVA_UPGRADE_NAME=
E2E_KAVA_UPGRADE_NAME=v0.24.0
# E2E_KAVA_UPGRADE_HEIGHT is the height at which the upgrade will be applied.
# If IBC tests are enabled this should be >30. Otherwise, this should be >10.
E2E_KAVA_UPGRADE_HEIGHT=
E2E_KAVA_UPGRADE_HEIGHT=35
# E2E_KAVA_UPGRADE_BASE_IMAGE_TAG is the tag of the docker image the chain should upgrade from.
E2E_KAVA_UPGRADE_BASE_IMAGE_TAG=
E2E_KAVA_UPGRADE_BASE_IMAGE_TAG=v0.23.1
# E2E_KAVA_ERC20_ADDRESS is the address of a pre-deployed ERC20 token.
# The E2E_KAVA_FUNDED_ACCOUNT_MNEMONIC account should have a balance.

View File

@ -2,6 +2,16 @@ package e2e_test
import (
"fmt"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
evmtypes "github.com/evmos/ethermint/x/evm/types"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/tests/util"
committeetypes "github.com/kava-labs/kava/x/committee/types"
evmutiltypes "github.com/kava-labs/kava/x/evmutil/types"
)
// TestUpgradeHandler can be used to run tests post-upgrade. If an upgrade is enabled, all tests
@ -12,7 +22,110 @@ func (suite *IntegrationTestSuite) TestUpgradeHandler() {
fmt.Println("An upgrade has run!")
suite.True(true)
// Thorough testing of the upgrade handler for v0.24 depends on:
// - chain starting from v0.23 template
// - funded account has ibc denom for ATOM
// - Stability committee existing with committee id 1
// Uncomment & use these contexts to compare chain state before & after the upgrade occurs.
// beforeUpgradeCtx := util.CtxAtHeight(suite.UpgradeHeight - 1)
// afterUpgradeCtx := util.CtxAtHeight(suite.UpgradeHeight)
beforeUpgradeCtx := util.CtxAtHeight(suite.UpgradeHeight - 1)
afterUpgradeCtx := util.CtxAtHeight(suite.UpgradeHeight)
// check x/evmutil module consensus version has been updated
suite.Run("x/evmutil consensus version 1 -> 2", func() {
before, err := suite.Kava.Upgrade.ModuleVersions(
beforeUpgradeCtx,
&upgradetypes.QueryModuleVersionsRequest{
ModuleName: evmutiltypes.ModuleName,
},
)
suite.NoError(err)
suite.Equal(uint64(1), before.ModuleVersions[0].Version)
after, err := suite.Kava.Upgrade.ModuleVersions(
afterUpgradeCtx,
&upgradetypes.QueryModuleVersionsRequest{
ModuleName: evmutiltypes.ModuleName,
},
)
suite.NoError(err)
suite.Equal(uint64(2), after.ModuleVersions[0].Version)
})
// check evmutil params before & after upgrade
suite.Run("x/evmutil AllowedCosmosDenoms updated", func() {
before, err := suite.Kava.Evmutil.Params(beforeUpgradeCtx, &evmutiltypes.QueryParamsRequest{})
suite.NoError(err)
suite.Len(before.Params.AllowedCosmosDenoms, 0)
after, err := suite.Kava.Evmutil.Params(afterUpgradeCtx, &evmutiltypes.QueryParamsRequest{})
suite.NoError(err)
suite.Len(after.Params.AllowedCosmosDenoms, 1)
tokenInfo := after.Params.AllowedCosmosDenoms[0]
suite.Equal(app.MainnetAtomDenom, tokenInfo.CosmosDenom)
})
// check x/evm param for allowed eip712 messages
// use of these messages is performed in e2e_convert_cosmos_coins_test.go
suite.Run("EIP712 signing allowed for new messages", func() {
before, err := suite.Kava.Evm.Params(
beforeUpgradeCtx,
&evmtypes.QueryParamsRequest{},
)
suite.NoError(err)
suite.NotContains(before.Params.EIP712AllowedMsgs, app.EIP712AllowedMsgConvertCosmosCoinToERC20)
suite.NotContains(before.Params.EIP712AllowedMsgs, app.EIP712AllowedMsgConvertCosmosCoinFromERC20)
after, err := suite.Kava.Evm.Params(
afterUpgradeCtx,
&evmtypes.QueryParamsRequest{},
)
suite.NoError(err)
suite.Contains(after.Params.EIP712AllowedMsgs, app.EIP712AllowedMsgConvertCosmosCoinToERC20)
suite.Contains(after.Params.EIP712AllowedMsgs, app.EIP712AllowedMsgConvertCosmosCoinFromERC20)
})
// check stability committee permissions were updated
suite.Run("stability committee ParamsChangePermission adds AllowedCosmosDenoms", func() {
before, err := suite.Kava.Committee.Committee(
beforeUpgradeCtx,
&committeetypes.QueryCommitteeRequest{
CommitteeId: app.MainnetStabilityCommitteeId,
},
)
suite.NoError(err)
fmt.Println("BEFORE: ", before.Committee)
suite.NotContains(
suite.getParamsChangePerm(before.Committee),
app.AllowedParamsChangeAllowedCosmosDenoms,
)
after, err := suite.Kava.Committee.Committee(
afterUpgradeCtx,
&committeetypes.QueryCommitteeRequest{
CommitteeId: app.MainnetStabilityCommitteeId,
},
)
suite.NoError(err)
fmt.Println("AFTER: ", after.Committee)
suite.Contains(
suite.getParamsChangePerm(after.Committee),
app.AllowedParamsChangeAllowedCosmosDenoms,
)
})
}
// getParamsChangePerm unpacks the given Any into a committee and returns the
// AllowedParamsChanges of its ParamsChangePermission. It panics if the Any
// cannot be unpacked or if no ParamsChangePermission is present.
func (suite *IntegrationTestSuite) getParamsChangePerm(anyComm *codectypes.Any) []committeetypes.AllowedParamsChange {
	var committee committeetypes.Committee
	if err := suite.Kava.EncodingConfig.Marshaler.UnpackAny(anyComm, &committee); err != nil {
		panic(err)
	}
	for _, perm := range committee.GetPermissions() {
		paramsChangePerm, ok := perm.(*committeetypes.ParamsChangePermission)
		if !ok {
			continue
		}
		return paramsChangePerm.AllowedParamsChanges
	}
	panic("no ParamsChangePermission found for stability committee")
}

View File

@ -2,7 +2,9 @@ package auction
import (
"errors"
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/auction/keeper"
@ -12,6 +14,8 @@ import (
// BeginBlocker closes all expired auctions at the end of each block. It panics if
// there's an error other than ErrAuctionNotFound.
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
err := k.CloseExpiredAuctions(ctx)
if err != nil && !errors.Is(err, types.ErrAuctionNotFound) {
panic(err)

View File

@ -1,13 +1,19 @@
package bep3
import (
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/bep3/keeper"
"github.com/kava-labs/kava/x/bep3/types"
)
// BeginBlocker on every block expires outdated atomic swaps and removes closed
// swap from long term storage (default storage time of 1 week)
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
k.UpdateTimeBasedSupplyLimits(ctx)
k.UpdateExpiredAtomicSwaps(ctx)
k.DeleteClosedAtomicSwapsFromLongtermStorage(ctx)

View File

@ -2,17 +2,22 @@ package cdp
import (
"errors"
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/kava-labs/kava/x/cdp/keeper"
"github.com/kava-labs/kava/x/cdp/types"
pricefeedtypes "github.com/kava-labs/kava/x/pricefeed/types"
)
// BeginBlocker compounds the debt in outstanding cdps and liquidates cdps that are below the required collateralization ratio
func BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
params := k.GetParams(ctx)
for _, cp := range params.CollateralParams {

View File

@ -1,14 +1,20 @@
package committee
import (
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/kava-labs/kava/x/committee/keeper"
"github.com/kava-labs/kava/x/committee/types"
)
// BeginBlocker runs at the start of every block.
func BeginBlocker(ctx sdk.Context, _ abci.RequestBeginBlock, k keeper.Keeper) {
	// Emit a telemetry measurement of this module's begin-block duration when
	// the function returns (time.Now() is captured here, at defer time).
	defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
	// All per-block committee work is delegated to the keeper.
	k.ProcessProposals(ctx)
}

View File

@ -19,7 +19,8 @@ func GetQueryCmd() *cobra.Command {
}
commands := []*cobra.Command{
GetCmdQueryBalance(),
getCmdQueryBalance(),
getCmdQueryAnnualizedRewards(),
}
for _, cmd := range commands {
@ -31,8 +32,8 @@ func GetQueryCmd() *cobra.Command {
return communityQueryCmd
}
// GetCmdQueryBalance implements a command to return the current community pool balance.
func GetCmdQueryBalance() *cobra.Command {
// getCmdQueryBalance implements a command to return the current community pool balance.
func getCmdQueryBalance() *cobra.Command {
return &cobra.Command{
Use: "balance",
Short: "Query the current balance of the community module account",
@ -53,3 +54,26 @@ func GetCmdQueryBalance() *cobra.Command {
},
}
}
// getCmdQueryAnnualizedRewards implements a command to return the current annualized rewards.
func getCmdQueryAnnualizedRewards() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "annualized-rewards",
		Short: "Query a current calculation of annualized rewards for the chain.",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Build a query-only client context from the command flags.
			cliCtx, err := client.GetClientQueryContext(cmd)
			if err != nil {
				return err
			}
			res, err := types.NewQueryClient(cliCtx).AnnualizedRewards(cmd.Context(), &types.QueryAnnualizedRewardsRequest{})
			if err != nil {
				return err
			}
			return cliCtx.PrintProto(res)
		},
	}
	return cmd
}

View File

@ -3,6 +3,7 @@ package keeper
import (
"context"
sdkmath "cosmossdk.io/math"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/community/types"
)
@ -45,3 +46,33 @@ func (s queryServer) TotalBalance(
Pool: totalBalance,
}, nil
}
// AnnualizedRewards calculates the annualized rewards for the chain.
// This method is backported from v0.25.x to allow for early migration.
func (s queryServer) AnnualizedRewards(
	c context.Context,
	req *types.QueryAnnualizedRewardsRequest,
) (*types.QueryAnnualizedRewardsResponse, error) {
	ctx := sdk.UnwrapSDKContext(c)

	// Always zero in this version: community-funded staking rewards are a
	// v0.25.x concept; the endpoint exists here only for early migration.
	rewardsPerSecond := sdkmath.LegacyZeroDec()

	// Gather the chain state the calculation depends on.
	bondDenom := s.keeper.stakingKeeper.BondDenom(ctx)
	supply := s.keeper.bankKeeper.GetSupply(ctx, bondDenom).Amount
	bonded := s.keeper.stakingKeeper.TotalBondedTokens(ctx)

	// These keepers return sdk.Dec; this module works in sdkmath.LegacyDec.
	inflation := convertDecToLegacyDec(s.keeper.mintKeeper.GetMinter(ctx).Inflation)
	tax := convertDecToLegacyDec(s.keeper.distrKeeper.GetCommunityTax(ctx))

	rate := CalculateStakingAnnualPercentage(supply, bonded, inflation, tax, rewardsPerSecond)

	return &types.QueryAnnualizedRewardsResponse{
		StakingRewards: rate,
	}, nil
}
// convertDecToLegacyDec is a helper method for converting between new and old Dec types.
// The current version of cosmos-sdk in this repo uses sdk.Dec, while this module uses
// sdkmath.LegacyDec in its parameters. Both share the same fixed precision, so the
// conversion is lossless: it rebuilds the LegacyDec from the raw big.Int at sdk.Precision.
// TODO: remove me after upgrade to cosmos-sdk v50 (LegacyDec is everywhere)
func convertDecToLegacyDec(in sdk.Dec) sdkmath.LegacyDec {
	return sdkmath.LegacyNewDecFromBigIntWithPrec(in.BigInt(), sdk.Precision)
}

View File

@ -9,18 +9,20 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/stretchr/testify/suite"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/x/community/keeper"
"github.com/kava-labs/kava/x/community/testutil"
"github.com/kava-labs/kava/x/community/types"
)
type grpcQueryTestSuite struct {
KeeperTestSuite
testutil.Suite
queryClient types.QueryClient
}
func (suite *grpcQueryTestSuite) SetupTest() {
suite.KeeperTestSuite.SetupTest()
suite.Suite.SetupTest()
queryHelper := baseapp.NewQueryServerTestHelper(suite.Ctx, suite.App.InterfaceRegistry())
types.RegisterQueryServer(queryHelper, keeper.NewQueryServerImpl(suite.Keeper))
@ -144,3 +146,105 @@ func (suite *grpcQueryTestSuite) TestGrpcQueryTotalBalance() {
})
}
}
// backported from v0.25.x. Does not actually use `rewardsPerSec` because concept does not exist.
// NOTE: this test makes use of the fact that there is always an initial 1e6 bonded tokens.
// To adjust the bonded ratio, it adjusts the total supply by minting tokens.
func (suite *grpcQueryTestSuite) TestGrpcQueryAnnualizedRewards() {
	testCases := []struct {
		name        string
		bondedRatio sdk.Dec
		inflation   sdk.Dec
		// rewardsPerSec is unused by the v0.24 endpoint; kept for parity with v0.25.x.
		rewardsPerSec sdkmath.LegacyDec
		// communityTax may be left unset (nil); the test body defaults it to zero.
		communityTax sdk.Dec
		expectedRate sdkmath.LegacyDec
	}{
		{
			name:          "sanity check: no inflation, no rewards => 0%",
			bondedRatio:   sdk.MustNewDecFromStr("0.3456"),
			inflation:     sdk.ZeroDec(),
			rewardsPerSec: sdkmath.LegacyZeroDec(),
			expectedRate:  sdkmath.LegacyZeroDec(),
		},
		{
			name:          "inflation sanity check: 100% inflation, 100% bonded => 100%",
			bondedRatio:   sdk.OneDec(),
			inflation:     sdk.OneDec(),
			rewardsPerSec: sdkmath.LegacyZeroDec(),
			expectedRate:  sdkmath.LegacyOneDec(),
		},
		{
			name:          "inflation sanity check: 100% community tax => 0%",
			bondedRatio:   sdk.OneDec(),
			inflation:     sdk.OneDec(),
			communityTax:  sdk.OneDec(),
			rewardsPerSec: sdkmath.LegacyZeroDec(),
			expectedRate:  sdkmath.LegacyZeroDec(),
		},
		{
			name:         "inflation enabled: realistic example",
			bondedRatio:  sdk.MustNewDecFromStr("0.148"),
			inflation:    sdk.MustNewDecFromStr("0.595"),
			communityTax: sdk.MustNewDecFromStr("0.9495"),
			rewardsPerSec: sdkmath.LegacyZeroDec(),
			// expect ~20.23%
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.203023625910000000"),
		},
	}

	for _, tc := range testCases {
		suite.Run(tc.name, func() {
			suite.SetupTest()

			// set inflation
			mk := suite.App.GetMintKeeper()
			minter := mk.GetMinter(suite.Ctx)
			minter.Inflation = tc.inflation
			mk.SetMinter(suite.Ctx, minter)

			// set community tax (defaulting to zero when the case leaves it unset)
			communityTax := sdk.ZeroDec()
			if !tc.communityTax.IsNil() {
				communityTax = tc.communityTax
			}
			dk := suite.App.GetDistrKeeper()
			distParams := dk.GetParams(suite.Ctx)
			distParams.CommunityTax = communityTax
			dk.SetParams(suite.Ctx, distParams)

			// set bonded tokens
			suite.adjustBondedRatio(tc.bondedRatio)

			// query for annualized rewards
			res, err := suite.queryClient.AnnualizedRewards(suite.Ctx, &types.QueryAnnualizedRewardsRequest{})

			// verify results match expected
			suite.Require().NoError(err)
			suite.Equal(tc.expectedRate, res.StakingRewards)
		})
	}
}
// adjustBondedRatio changes the ratio of bonded coins.
// It leverages the fact that there is a constant number of bonded tokens
// and adjusts the total supply to change the bonded ratio.
// Returns the new total supply of the bond denom.
func (suite *grpcQueryTestSuite) adjustBondedRatio(desiredRatio sdk.Dec) sdkmath.Int {
	// from the InitGenesis validator
	bondedTokens := sdkmath.NewInt(1e6)
	bondDenom := suite.App.GetStakingKeeper().BondDenom(suite.Ctx)

	// first, burn all non-delegated coins (bonded ratio = 100%)
	suite.App.DeleteGenesisValidatorCoins(suite.T(), suite.Ctx)

	if desiredRatio.Equal(sdk.OneDec()) {
		return bondedTokens
	}

	// mint new tokens to adjust the bond ratio:
	// newTotalSupply = bondedTokens / desiredRatio, truncated to an integer
	newTotalSupply := sdk.NewDecFromInt(bondedTokens).Quo(desiredRatio).TruncateInt()
	coinsToMint := newTotalSupply.Sub(bondedTokens)
	err := suite.App.FundAccount(suite.Ctx, app.RandomAddress(), sdk.NewCoins(sdk.NewCoin(bondDenom, coinsToMint)))
	suite.Require().NoError(err)

	return newTotalSupply
}

View File

@ -16,12 +16,22 @@ type Keeper struct {
distrKeeper types.DistributionKeeper
hardKeeper types.HardKeeper
moduleAddress sdk.AccAddress
mintKeeper types.MintKeeper
stakingKeeper types.StakingKeeper
legacyCommunityPoolAddress sdk.AccAddress
}
// NewKeeper creates a new community Keeper instance
func NewKeeper(ak types.AccountKeeper, bk types.BankKeeper, ck types.CdpKeeper, dk types.DistributionKeeper, hk types.HardKeeper) Keeper {
func NewKeeper(
ak types.AccountKeeper,
bk types.BankKeeper,
ck types.CdpKeeper,
dk types.DistributionKeeper,
hk types.HardKeeper,
mk types.MintKeeper,
sk types.StakingKeeper,
) Keeper {
// ensure community module account is set
addr := ak.GetModuleAddress(types.ModuleAccountName)
if addr == nil {
@ -37,6 +47,8 @@ func NewKeeper(ak types.AccountKeeper, bk types.BankKeeper, ck types.CdpKeeper,
cdpKeeper: ck,
distrKeeper: dk,
hardKeeper: hk,
mintKeeper: mk,
stakingKeeper: sk,
moduleAddress: addr,
legacyCommunityPoolAddress: legacyAddr,

View File

@ -0,0 +1,27 @@
package keeper
import (
sdkmath "cosmossdk.io/math"
)
// SecondsPerYear is the number of seconds in a 365-day year.
const SecondsPerYear = 365 * 24 * 3600

// CalculateStakingAnnualPercentage returns the annualized staking reward rate.
// It assumes that staking comes from one of two sources depending on if inflation is enabled or not.
func CalculateStakingAnnualPercentage(totalSupply, totalBonded sdkmath.Int, inflationRate, communityTax, rewardsPerSecond sdkmath.LegacyDec) sdkmath.LegacyDec {
	// No rewards are given when nothing is bonded; this also avoids division by zero.
	if totalBonded.IsZero() {
		return sdkmath.LegacyZeroDec()
	}

	// Share of inflationRate * totalSupply that reaches stakers after the community tax.
	stakerShare := sdkmath.LegacyOneDec().Sub(communityTax)

	// Yearly payout from inflation (mint & distribution modules).
	inflationPerYear := inflationRate.MulInt(totalSupply).Mul(stakerShare)

	// Yearly payout from the community module.
	communityPerYear := rewardsPerSecond.Mul(sdkmath.LegacyNewDec(SecondsPerYear))

	// Total yearly payout relative to the bonded stake gives the percent return.
	return inflationPerYear.Add(communityPerYear).QuoInt(totalBonded)
}

View File

@ -0,0 +1,189 @@
package keeper_test
import (
"math/big"
"testing"
sdkmath "cosmossdk.io/math"
"github.com/stretchr/testify/require"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/community/keeper"
)
// TestStakingRewardsCalculator pins CalculateStakingAnnualPercentage with a table of
// hand-verified cases covering the inflation-only path, the rewards-per-second-only
// path, zero/boundary inputs, and extreme magnitudes near the Dec overflow limit.
func TestStakingRewardsCalculator(t *testing.T) {
	// 2^205 and 2^230: the largest exponents that do not overflow LegacyDec in
	// the inflation and rewards-per-second positions respectively (see cases below).
	hugeInflation := new(big.Int).Exp(big.NewInt(2), big.NewInt(205), nil)
	hugeRewardsPerSec := new(big.Int).Exp(big.NewInt(2), big.NewInt(230), nil)

	testCases := []struct {
		name         string
		totalSupply  sdkmath.Int
		totalBonded  sdkmath.Int
		inflation    sdkmath.LegacyDec
		communityTax sdkmath.LegacyDec
		perSecReward sdkmath.LegacyDec
		expectedRate sdkmath.LegacyDec
	}{
		{
			name:         "no inflation, no rewards per sec -> 0%",
			totalSupply:  sdkmath.ZeroInt(),
			totalBonded:  sdkmath.ZeroInt(),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyZeroDec(),
			expectedRate: sdkmath.LegacyZeroDec(),
		},
		//
		//
		// inflation-only
		//
		//
		{
			name:         "inflation only: no bonded tokens -> 0%",
			totalSupply:  sdk.NewInt(42),
			totalBonded:  sdkmath.ZeroInt(),
			inflation:    sdkmath.LegacyOneDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyZeroDec(),
			expectedRate: sdkmath.LegacyZeroDec(),
		},
		{
			name:         "inflation only: 0% inflation -> 0%",
			totalSupply:  sdk.NewInt(123),
			totalBonded:  sdkmath.NewInt(45),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyZeroDec(),
			expectedRate: sdkmath.LegacyZeroDec(),
		},
		{
			name:         "inflation only: 100% bonded w/ 100% inflation -> 100%",
			totalSupply:  sdk.NewInt(42),
			totalBonded:  sdk.NewInt(42),
			inflation:    sdkmath.LegacyOneDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyZeroDec(),
			expectedRate: sdkmath.LegacyOneDec(),
		},
		{
			name:         "inflation only: 100% community tax -> 0%",
			totalSupply:  sdk.NewInt(123),
			totalBonded:  sdkmath.NewInt(45),
			inflation:    sdkmath.LegacyMustNewDecFromStr("0.853"),
			communityTax: sdkmath.LegacyOneDec(),
			perSecReward: sdkmath.LegacyZeroDec(),
			expectedRate: sdkmath.LegacyZeroDec(),
		},
		{
			name:         "inflation only: Oct 2023 case",
			totalSupply:  sdk.NewInt(857570000e6),
			totalBonded:  sdk.NewInt(127680000e6),
			inflation:    sdkmath.LegacyMustNewDecFromStr("0.595"),
			communityTax: sdkmath.LegacyMustNewDecFromStr("0.9495"),
			perSecReward: sdkmath.LegacyZeroDec(),
			// expect 20.18% staking reward
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.201815746984649122"), // verified manually
		},
		{
			name:         "inflation only: low inflation",
			totalSupply:  sdk.NewInt(857570000e6),
			totalBonded:  sdk.NewInt(127680000e6),
			inflation:    sdkmath.LegacyMustNewDecFromStr("0.0000000001"),
			communityTax: sdkmath.LegacyMustNewDecFromStr("0.9495"),
			perSecReward: sdkmath.LegacyZeroDec(),
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.000000000033918612"), // verified manually, rounded would be 0.000000000033918613
		},
		{
			name:         "inflation only: absurdly high inflation",
			totalSupply:  sdk.NewInt(857570000e6),
			totalBonded:  sdk.NewInt(127680000e6),
			inflation:    sdkmath.LegacyNewDecFromBigInt(hugeInflation), // 2^205. a higher exponent than this overflows.
			communityTax: sdkmath.LegacyMustNewDecFromStr("0.9495"),
			perSecReward: sdkmath.LegacyZeroDec(),
			// https://www.wolframalpha.com/input?i=%282%5E205%29+*+%281+-+0.9495%29+*+%28857570000e6+%2F127680000e6%29
			expectedRate: sdkmath.LegacyMustNewDecFromStr("17441635052648297161685283657196753398188161373334495592570113.113824561403508771"), // verified manually, would round up
		},
		//
		//
		// rewards-only
		//
		//
		{
			name:         "rps only: no bonded tokens -> 0%",
			totalSupply:  sdk.NewInt(42),
			totalBonded:  sdkmath.ZeroInt(),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyMustNewDecFromStr("1234567.123456"),
			expectedRate: sdkmath.LegacyZeroDec(),
		},
		{
			name:         "rps only: rps = total bonded / seconds in year -> basically 100%",
			totalSupply:  sdk.NewInt(12345),
			totalBonded:  sdkmath.NewInt(1234),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyNewDec(1234).Quo(sdkmath.LegacyNewDec(keeper.SecondsPerYear)),
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.999999999999987228"), // <-- for 6-decimal token, this is negligible rounding
		},
		{
			name:         "rps only: 10M kava / year rewards",
			totalSupply:  sdk.NewInt(870950000e6),
			totalBonded:  sdkmath.NewInt(130380000e6),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyMustNewDecFromStr("317097.919837645865043125"), // 10 million kava per year
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.076698880196349133"),      // verified manually
		},
		{
			name:         "rps only: 25M kava / year rewards",
			totalSupply:  sdk.NewInt(870950000e6),
			totalBonded:  sdkmath.NewInt(130380000e6),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyMustNewDecFromStr("792744.799594114662607813"), // 25 million kava per year
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.191747200490872833"),      // verified manually
		},
		{
			name:         "rps only: too much kava / year rewards",
			totalSupply:  sdk.NewInt(870950000e6),
			totalBonded:  sdkmath.NewInt(130380000e6),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyNewDecFromBigInt(hugeRewardsPerSec), // 2^230. a higher exponent than this overflows.
			// https://www.wolframalpha.com/input?i=%28%28365+*+24+*+3600%29+%2F+130380000e6%29+*+%282%5E230%29
			expectedRate: sdkmath.LegacyMustNewDecFromStr("417344440850566075319340506352140425426634017001007267992800590.431305795858260469"), // verified manually
		},
		{
			name:         "rps only: low kava / year rewards",
			totalSupply:  sdk.NewInt(870950000e6),
			totalBonded:  sdkmath.NewInt(130380000e6),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyMustNewDecFromStr("0.1"),
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.000000024187758858"), // verified manually, rounded would be 0.000000024187758859
		},
		{
			name:         "rps only: 1 ukava / year rewards",
			totalSupply:  sdk.NewInt(870950000e6),
			totalBonded:  sdkmath.NewInt(130380000e6),
			inflation:    sdkmath.LegacyZeroDec(),
			communityTax: sdkmath.LegacyZeroDec(),
			perSecReward: sdkmath.LegacyMustNewDecFromStr("0.000000031709791984"), // 1 ukava per year
			expectedRate: sdkmath.LegacyMustNewDecFromStr("0.000000000000007669"), // verified manually, rounded would be 0.000000000000007670
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			rewardRate := keeper.CalculateStakingAnnualPercentage(
				tc.totalSupply,
				tc.totalBonded,
				tc.inflation,
				tc.communityTax,
				tc.perSecReward)
			require.Equal(t, tc.expectedRate, rewardRate)
		})
	}
}

View File

@ -28,9 +28,8 @@ func (suite *Suite) SetupTest() {
tApp := app.NewTestApp()
ctx := tApp.NewContext(true, tmproto.Header{Height: 1, Time: tmtime.Now()})
tApp.InitializeFromGenesisStates()
suite.App = tApp.InitializeFromGenesisStates()
suite.App = tApp
suite.Ctx = ctx
suite.Keeper = tApp.GetCommunityKeeper()
communityPoolAddress := tApp.GetAccountKeeper().GetModuleAddress(types.ModuleAccountName)

View File

@ -1,8 +1,10 @@
package types
import (
sdkmath "cosmossdk.io/math"
sdk "github.com/cosmos/cosmos-sdk/types"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
)
// AccountKeeper defines the contract required for account APIs.
@ -17,6 +19,8 @@ type BankKeeper interface {
SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error
SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error
GetSupply(ctx sdk.Context, denom string) sdk.Coin
}
// CdpKeeper defines the contract needed to be fulfilled for cdp dependencies.
@ -36,4 +40,15 @@ type DistributionKeeper interface {
DistributeFromFeePool(ctx sdk.Context, amount sdk.Coins, receiveAddr sdk.AccAddress) error
FundCommunityPool(ctx sdk.Context, amount sdk.Coins, sender sdk.AccAddress) error
GetFeePoolCommunityCoins(ctx sdk.Context) sdk.DecCoins
GetCommunityTax(ctx sdk.Context) sdk.Dec
}
// MintKeeper defines the contract needed for the mint module dependencies:
// read access to the current minter state (used for inflation data).
type MintKeeper interface {
	GetMinter(ctx sdk.Context) (minter minttypes.Minter)
}
// StakingKeeper defines the expected interface for the staking module keeper.
// The bond denom and total bonded amount are inputs to annualized staking
// reward calculations.
type StakingKeeper interface {
	// BondDenom returns the denom used for staking.
	BondDenom(ctx sdk.Context) string
	// TotalBondedTokens returns the total amount of bonded tokens across all validators.
	TotalBondedTokens(ctx sdk.Context) sdkmath.Int
}

View File

@ -5,7 +5,9 @@ package types
import (
context "context"
cosmossdk_io_math "cosmossdk.io/math"
fmt "fmt"
_ "github.com/cosmos/cosmos-proto"
github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types"
types "github.com/cosmos/cosmos-sdk/types"
_ "github.com/gogo/protobuf/gogoproto"
@ -197,11 +199,89 @@ func (m *QueryTotalBalanceResponse) GetPool() github_com_cosmos_cosmos_sdk_types
return nil
}
// QueryAnnualizedRewardsRequest defines the request type for querying the annualized rewards.
type QueryAnnualizedRewardsRequest struct {
}
func (m *QueryAnnualizedRewardsRequest) Reset() { *m = QueryAnnualizedRewardsRequest{} }
func (m *QueryAnnualizedRewardsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryAnnualizedRewardsRequest) ProtoMessage() {}
func (*QueryAnnualizedRewardsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_f236f06c43149273, []int{4}
}
func (m *QueryAnnualizedRewardsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryAnnualizedRewardsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryAnnualizedRewardsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryAnnualizedRewardsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryAnnualizedRewardsRequest.Merge(m, src)
}
func (m *QueryAnnualizedRewardsRequest) XXX_Size() int {
return m.Size()
}
func (m *QueryAnnualizedRewardsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_QueryAnnualizedRewardsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_QueryAnnualizedRewardsRequest proto.InternalMessageInfo
// QueryAnnualizedRewardsResponse defines the response type for querying the annualized rewards.
type QueryAnnualizedRewardsResponse struct {
// staking_rewards is the calculated annualized staking rewards percentage rate
StakingRewards cosmossdk_io_math.LegacyDec `protobuf:"bytes,1,opt,name=staking_rewards,json=stakingRewards,proto3,customtype=cosmossdk.io/math.LegacyDec" json:"staking_rewards"`
}
func (m *QueryAnnualizedRewardsResponse) Reset() { *m = QueryAnnualizedRewardsResponse{} }
func (m *QueryAnnualizedRewardsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryAnnualizedRewardsResponse) ProtoMessage() {}
func (*QueryAnnualizedRewardsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_f236f06c43149273, []int{5}
}
func (m *QueryAnnualizedRewardsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryAnnualizedRewardsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryAnnualizedRewardsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryAnnualizedRewardsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryAnnualizedRewardsResponse.Merge(m, src)
}
func (m *QueryAnnualizedRewardsResponse) XXX_Size() int {
return m.Size()
}
func (m *QueryAnnualizedRewardsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_QueryAnnualizedRewardsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_QueryAnnualizedRewardsResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*QueryBalanceRequest)(nil), "kava.community.v1beta1.QueryBalanceRequest")
proto.RegisterType((*QueryBalanceResponse)(nil), "kava.community.v1beta1.QueryBalanceResponse")
proto.RegisterType((*QueryTotalBalanceRequest)(nil), "kava.community.v1beta1.QueryTotalBalanceRequest")
proto.RegisterType((*QueryTotalBalanceResponse)(nil), "kava.community.v1beta1.QueryTotalBalanceResponse")
proto.RegisterType((*QueryAnnualizedRewardsRequest)(nil), "kava.community.v1beta1.QueryAnnualizedRewardsRequest")
proto.RegisterType((*QueryAnnualizedRewardsResponse)(nil), "kava.community.v1beta1.QueryAnnualizedRewardsResponse")
}
func init() {
@ -209,33 +289,41 @@ func init() {
}
var fileDescriptor_f236f06c43149273 = []byte{
// 411 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x4f, 0x8e, 0xd3, 0x30,
0x14, 0xc6, 0x93, 0x42, 0x41, 0x32, 0xac, 0x42, 0x41, 0x6d, 0x54, 0xa5, 0x10, 0x09, 0xb5, 0x52,
0xa9, 0xdd, 0x3f, 0x37, 0x28, 0x70, 0x00, 0x2a, 0x56, 0x6c, 0x90, 0x13, 0xac, 0x10, 0x35, 0xf5,
0x4b, 0x6b, 0xa7, 0x22, 0xdb, 0xee, 0x91, 0x90, 0xb8, 0x01, 0x4b, 0xce, 0xc0, 0x01, 0xba, 0xac,
0xc4, 0x66, 0x56, 0x33, 0xa3, 0x76, 0x0e, 0x32, 0xb2, 0xe3, 0xa9, 0x3a, 0xa3, 0x74, 0xd4, 0x59,
0xd9, 0xb2, 0xdf, 0xf7, 0x7d, 0x3f, 0xbf, 0x67, 0xe4, 0x4f, 0xe9, 0x92, 0x92, 0x10, 0x66, 0xb3,
0x8c, 0xc7, 0x32, 0x27, 0xcb, 0x41, 0xc0, 0x24, 0x1d, 0x90, 0x79, 0xc6, 0x16, 0x39, 0x4e, 0x17,
0x20, 0xc1, 0x79, 0xa5, 0x6a, 0xf0, 0xbe, 0x06, 0x9b, 0x1a, 0xd7, 0x0b, 0x41, 0xcc, 0x40, 0x90,
0x80, 0x0a, 0xb6, 0x17, 0x86, 0x10, 0xf3, 0x42, 0xe7, 0xd6, 0x22, 0x88, 0x40, 0x6f, 0x89, 0xda,
0x99, 0xd3, 0x66, 0x04, 0x10, 0x25, 0x8c, 0xd0, 0x34, 0x26, 0x94, 0x73, 0x90, 0x54, 0xc6, 0xc0,
0x45, 0x71, 0xeb, 0xbf, 0x44, 0x2f, 0x3e, 0xa9, 0xe8, 0x31, 0x4d, 0x28, 0x0f, 0xd9, 0x84, 0xcd,
0x33, 0x26, 0xa4, 0x9f, 0xa3, 0xda, 0xed, 0x63, 0x91, 0x02, 0x17, 0xcc, 0xa1, 0xa8, 0xaa, 0x02,
0x45, 0xdd, 0x7e, 0xfd, 0xa8, 0xf3, 0x6c, 0xd8, 0xc0, 0x05, 0x12, 0x56, 0x48, 0x37, 0x9c, 0xf8,
0x3d, 0xc4, 0x7c, 0xdc, 0x5f, 0x9f, 0xb7, 0xac, 0xbf, 0x17, 0xad, 0x4e, 0x14, 0xcb, 0xef, 0x59,
0xa0, 0x9e, 0x43, 0x0c, 0x7f, 0xb1, 0xf4, 0xc4, 0xb7, 0x29, 0x91, 0x79, 0xca, 0x84, 0x16, 0x88,
0x49, 0xe1, 0xec, 0xbb, 0xa8, 0xae, 0xa3, 0x3f, 0x83, 0xa4, 0xc9, 0x1d, 0xac, 0x95, 0x8d, 0x1a,
0x25, 0x97, 0x06, 0x8e, 0xa1, 0xc7, 0x29, 0x40, 0x62, 0xd8, 0x9a, 0xa5, 0x6c, 0x1f, 0x58, 0xa8,
0xf1, 0x46, 0x06, 0xaf, 0x7b, 0x02, 0x9e, 0xd1, 0x88, 0x89, 0xb6, 0x1f, 0xfe, 0xab, 0xa0, 0xaa,
0x86, 0x70, 0x7e, 0xda, 0xe8, 0xa9, 0x81, 0x70, 0xba, 0xb8, 0x7c, 0x6a, 0xb8, 0xa4, 0xbd, 0xee,
0xbb, 0xd3, 0x8a, 0x8b, 0x77, 0xf9, 0xed, 0xd5, 0xff, 0xab, 0xdf, 0x95, 0x37, 0x4e, 0x8b, 0x1c,
0xf9, 0x3c, 0x81, 0x61, 0xf8, 0x63, 0xa3, 0xe7, 0x87, 0x9d, 0x71, 0xfa, 0xf7, 0xe6, 0x94, 0x74,
0xd8, 0x1d, 0x3c, 0x40, 0x61, 0xf0, 0x7a, 0x1a, 0xaf, 0xed, 0xbc, 0x3d, 0x86, 0x27, 0x95, 0xea,
0xab, 0x81, 0x1c, 0x7f, 0x5c, 0x6f, 0x3d, 0x7b, 0xb3, 0xf5, 0xec, 0xcb, 0xad, 0x67, 0xff, 0xda,
0x79, 0xd6, 0x66, 0xe7, 0x59, 0x67, 0x3b, 0xcf, 0xfa, 0x72, 0x38, 0x0b, 0x65, 0xd5, 0x4b, 0x68,
0x20, 0x0a, 0xd3, 0x1f, 0x07, 0xb6, 0x7a, 0x28, 0xc1, 0x13, 0xfd, 0x7f, 0x47, 0xd7, 0x01, 0x00,
0x00, 0xff, 0xff, 0x67, 0x9d, 0xae, 0xd2, 0x51, 0x03, 0x00, 0x00,
// 537 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x31, 0x6f, 0xd3, 0x40,
0x14, 0xc7, 0x73, 0x94, 0x82, 0x38, 0x10, 0x08, 0x53, 0x50, 0x62, 0x8a, 0x5d, 0x2c, 0xa1, 0x46,
0xb4, 0xf1, 0x35, 0xa9, 0x60, 0x27, 0x84, 0x8d, 0x05, 0x8b, 0xa9, 0x4b, 0x74, 0x76, 0x4e, 0xae,
0x15, 0xe7, 0xce, 0xcd, 0x9d, 0x0b, 0x46, 0x4c, 0xdd, 0x91, 0x90, 0xf8, 0x06, 0x8c, 0xcc, 0x88,
0xcf, 0xd0, 0xb1, 0x82, 0x05, 0x31, 0x14, 0x94, 0xf0, 0x15, 0xd8, 0x91, 0xcf, 0x2f, 0x51, 0xa0,
0x76, 0xd5, 0x4e, 0xf6, 0xdd, 0x7b, 0xff, 0xf7, 0x7e, 0xcf, 0xef, 0x2f, 0x63, 0x67, 0x48, 0xf7,
0x29, 0x09, 0xc4, 0x68, 0x94, 0xf2, 0x48, 0x65, 0x64, 0xbf, 0xed, 0x33, 0x45, 0xdb, 0x64, 0x2f,
0x65, 0xe3, 0xcc, 0x4d, 0xc6, 0x42, 0x09, 0xe3, 0x4e, 0x9e, 0xe3, 0xce, 0x73, 0x5c, 0xc8, 0x31,
0xad, 0x40, 0xc8, 0x91, 0x90, 0xc4, 0xa7, 0x92, 0xcd, 0x85, 0x81, 0x88, 0x78, 0xa1, 0x33, 0x1b,
0x45, 0xbc, 0xaf, 0x4f, 0xa4, 0x38, 0x40, 0x68, 0x25, 0x14, 0xa1, 0x28, 0xee, 0xf3, 0x37, 0xb8,
0x5d, 0x0d, 0x85, 0x08, 0x63, 0x46, 0x68, 0x12, 0x11, 0xca, 0xb9, 0x50, 0x54, 0x45, 0x82, 0x83,
0xc6, 0xb9, 0x8d, 0x6f, 0xbd, 0xc8, 0xa9, 0xba, 0x34, 0xa6, 0x3c, 0x60, 0x1e, 0xdb, 0x4b, 0x99,
0x54, 0x4e, 0x86, 0x57, 0xfe, 0xbd, 0x96, 0x89, 0xe0, 0x92, 0x19, 0x14, 0x2f, 0xe7, 0x2c, 0xb2,
0x8e, 0xd6, 0x96, 0x9a, 0x57, 0x3b, 0x0d, 0x17, 0x00, 0x72, 0xda, 0xd9, 0x08, 0xee, 0x53, 0x11,
0xf1, 0xee, 0xd6, 0xe1, 0xb1, 0x5d, 0xfb, 0xf4, 0xd3, 0x6e, 0x86, 0x91, 0xda, 0x4d, 0xfd, 0x7c,
0x52, 0xa0, 0x85, 0x47, 0x4b, 0x0e, 0x86, 0x44, 0x65, 0x09, 0x93, 0x5a, 0x20, 0xbd, 0xa2, 0xb2,
0x63, 0xe2, 0xba, 0x6e, 0xfd, 0x52, 0x28, 0x1a, 0xff, 0x87, 0x75, 0x80, 0x70, 0xa3, 0x24, 0x08,
0x70, 0x0c, 0x5f, 0x4c, 0x84, 0x88, 0x81, 0x6d, 0xb5, 0x94, 0xad, 0xc7, 0x02, 0x8d, 0xb7, 0x0d,
0x78, 0x1b, 0x67, 0xc0, 0x03, 0x8d, 0xf4, 0x74, 0x79, 0xc7, 0xc6, 0xf7, 0x34, 0xc3, 0x13, 0xce,
0x53, 0x1a, 0x47, 0x6f, 0xd8, 0xc0, 0x63, 0xaf, 0xe8, 0x78, 0x20, 0x67, 0x94, 0x6f, 0xb1, 0x55,
0x95, 0x00, 0xa4, 0x3b, 0xf8, 0x86, 0x54, 0x74, 0x18, 0xf1, 0xb0, 0x3f, 0x2e, 0x42, 0x75, 0xb4,
0x86, 0x9a, 0x57, 0xba, 0xed, 0x1c, 0xeb, 0xc7, 0xb1, 0x7d, 0xb7, 0x80, 0x90, 0x83, 0xa1, 0x1b,
0x09, 0x32, 0xa2, 0x6a, 0xd7, 0x7d, 0xce, 0x42, 0x1a, 0x64, 0x3d, 0x16, 0x7c, 0xfd, 0xdc, 0xc2,
0x30, 0x5a, 0x8f, 0x05, 0xde, 0x75, 0xa8, 0x04, 0x3d, 0x3a, 0x7f, 0x96, 0xf0, 0xb2, 0x6e, 0x6f,
0xbc, 0x43, 0xf8, 0x32, 0x7c, 0x23, 0x63, 0xc3, 0x2d, 0xf7, 0x9b, 0x5b, 0xb2, 0x7d, 0x73, 0xf3,
0x6c, 0xc9, 0xc5, 0x30, 0xce, 0xfa, 0xc1, 0xb7, 0xdf, 0x1f, 0x2e, 0xdc, 0x37, 0x6c, 0x52, 0x61,
0x7b, 0x1f, 0x18, 0x3e, 0x22, 0x7c, 0x6d, 0x71, 0x71, 0xc6, 0xd6, 0xa9, 0x7d, 0x4a, 0x0c, 0x60,
0xb6, 0xcf, 0xa1, 0x00, 0xbc, 0x96, 0xc6, 0x5b, 0x37, 0x1e, 0x54, 0xe1, 0xa9, 0x5c, 0xd5, 0x9f,
0x41, 0x7e, 0x41, 0xf8, 0xe6, 0x89, 0xc5, 0x19, 0x8f, 0x4e, 0xed, 0x5b, 0xe5, 0x04, 0xf3, 0xf1,
0x79, 0x65, 0xc0, 0xdc, 0xd1, 0xcc, 0x9b, 0xc6, 0xc3, 0x2a, 0x66, 0x3a, 0x97, 0xce, 0x0c, 0xd4,
0x7d, 0x76, 0x38, 0xb1, 0xd0, 0xd1, 0xc4, 0x42, 0xbf, 0x26, 0x16, 0x7a, 0x3f, 0xb5, 0x6a, 0x47,
0x53, 0xab, 0xf6, 0x7d, 0x6a, 0xd5, 0x76, 0x16, 0x3d, 0x9e, 0xd7, 0x6b, 0xc5, 0xd4, 0x97, 0x45,
0xe5, 0xd7, 0x0b, 0xb5, 0xb5, 0xd9, 0xfd, 0x4b, 0xfa, 0xbf, 0xb0, 0xfd, 0x37, 0x00, 0x00, 0xff,
0xff, 0x67, 0x42, 0x2a, 0xb9, 0xc4, 0x04, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -255,6 +343,9 @@ type QueryClient interface {
// TotalBalance queries the balance of all coins, including x/distribution,
// x/community, and supplied balances.
TotalBalance(ctx context.Context, in *QueryTotalBalanceRequest, opts ...grpc.CallOption) (*QueryTotalBalanceResponse, error)
// AnnualizedRewards calculates and returns the current annualized reward percentages,
// like staking rewards, for the chain.
AnnualizedRewards(ctx context.Context, in *QueryAnnualizedRewardsRequest, opts ...grpc.CallOption) (*QueryAnnualizedRewardsResponse, error)
}
type queryClient struct {
@ -283,6 +374,15 @@ func (c *queryClient) TotalBalance(ctx context.Context, in *QueryTotalBalanceReq
return out, nil
}
func (c *queryClient) AnnualizedRewards(ctx context.Context, in *QueryAnnualizedRewardsRequest, opts ...grpc.CallOption) (*QueryAnnualizedRewardsResponse, error) {
out := new(QueryAnnualizedRewardsResponse)
err := c.cc.Invoke(ctx, "/kava.community.v1beta1.Query/AnnualizedRewards", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// QueryServer is the server API for Query service.
type QueryServer interface {
// Balance queries the balance of all coins of x/community module.
@ -290,6 +390,9 @@ type QueryServer interface {
// TotalBalance queries the balance of all coins, including x/distribution,
// x/community, and supplied balances.
TotalBalance(context.Context, *QueryTotalBalanceRequest) (*QueryTotalBalanceResponse, error)
// AnnualizedRewards calculates and returns the current annualized reward percentages,
// like staking rewards, for the chain.
AnnualizedRewards(context.Context, *QueryAnnualizedRewardsRequest) (*QueryAnnualizedRewardsResponse, error)
}
// UnimplementedQueryServer can be embedded to have forward compatible implementations.
@ -302,6 +405,9 @@ func (*UnimplementedQueryServer) Balance(ctx context.Context, req *QueryBalanceR
func (*UnimplementedQueryServer) TotalBalance(ctx context.Context, req *QueryTotalBalanceRequest) (*QueryTotalBalanceResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method TotalBalance not implemented")
}
func (*UnimplementedQueryServer) AnnualizedRewards(ctx context.Context, req *QueryAnnualizedRewardsRequest) (*QueryAnnualizedRewardsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AnnualizedRewards not implemented")
}
func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
s.RegisterService(&_Query_serviceDesc, srv)
@ -343,6 +449,24 @@ func _Query_TotalBalance_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
func _Query_AnnualizedRewards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryAnnualizedRewardsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).AnnualizedRewards(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/kava.community.v1beta1.Query/AnnualizedRewards",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).AnnualizedRewards(ctx, req.(*QueryAnnualizedRewardsRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Query_serviceDesc = grpc.ServiceDesc{
ServiceName: "kava.community.v1beta1.Query",
HandlerType: (*QueryServer)(nil),
@ -355,6 +479,10 @@ var _Query_serviceDesc = grpc.ServiceDesc{
MethodName: "TotalBalance",
Handler: _Query_TotalBalance_Handler,
},
{
MethodName: "AnnualizedRewards",
Handler: _Query_AnnualizedRewards_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "kava/community/v1beta1/query.proto",
@ -480,6 +608,62 @@ func (m *QueryTotalBalanceResponse) MarshalToSizedBuffer(dAtA []byte) (int, erro
return len(dAtA) - i, nil
}
func (m *QueryAnnualizedRewardsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryAnnualizedRewardsRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryAnnualizedRewardsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
return len(dAtA) - i, nil
}
func (m *QueryAnnualizedRewardsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryAnnualizedRewardsResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryAnnualizedRewardsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size := m.StakingRewards.Size()
i -= size
if _, err := m.StakingRewards.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintQuery(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
offset -= sovQuery(v)
base := offset
@ -539,6 +723,26 @@ func (m *QueryTotalBalanceResponse) Size() (n int) {
return n
}
func (m *QueryAnnualizedRewardsRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
return n
}
func (m *QueryAnnualizedRewardsResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.StakingRewards.Size()
n += 1 + l + sovQuery(uint64(l))
return n
}
func sovQuery(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@ -813,6 +1017,140 @@ func (m *QueryTotalBalanceResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *QueryAnnualizedRewardsRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QueryAnnualizedRewardsRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QueryAnnualizedRewardsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthQuery
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *QueryAnnualizedRewardsResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QueryAnnualizedRewardsResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QueryAnnualizedRewardsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StakingRewards", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthQuery
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthQuery
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.StakingRewards.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthQuery
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipQuery(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0

View File

@ -69,6 +69,24 @@ func local_request_Query_TotalBalance_0(ctx context.Context, marshaler runtime.M
}
// request_Query_AnnualizedRewards_0 is the grpc-gateway forwarder for the
// AnnualizedRewards query over a gRPC client connection. The request message
// is empty because the query takes no parameters; header/trailer metadata from
// the call is captured and returned. NOTE: grpc-gateway generated code —
// regenerate from the proto rather than hand-editing.
func request_Query_AnnualizedRewards_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAnnualizedRewardsRequest
	var metadata runtime.ServerMetadata

	msg, err := client.AnnualizedRewards(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err
}
// local_request_Query_AnnualizedRewards_0 is the in-process variant of the
// AnnualizedRewards forwarder: it calls the QueryServer implementation
// directly instead of going through a gRPC connection, so no header/trailer
// metadata is collected. NOTE: grpc-gateway generated code — regenerate from
// the proto rather than hand-editing.
func local_request_Query_AnnualizedRewards_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq QueryAnnualizedRewardsRequest
	var metadata runtime.ServerMetadata

	msg, err := server.AnnualizedRewards(ctx, &protoReq)
	return msg, metadata, err
}
// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
// UnaryRPC :call QueryServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@ -121,6 +139,29 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
})
mux.Handle("GET", pattern_Query_AnnualizedRewards_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Query_AnnualizedRewards_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Query_AnnualizedRewards_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@ -202,6 +243,26 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
})
mux.Handle("GET", pattern_Query_AnnualizedRewards_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Query_AnnualizedRewards_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Query_AnnualizedRewards_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@ -209,10 +270,14 @@ var (
pattern_Query_Balance_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kava", "community", "v1beta1", "balance"}, "", runtime.AssumeColonVerbOpt(false)))
pattern_Query_TotalBalance_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kava", "community", "v1beta1", "total_balance"}, "", runtime.AssumeColonVerbOpt(false)))
pattern_Query_AnnualizedRewards_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kava", "community", "v1beta1", "annualized_rewards"}, "", runtime.AssumeColonVerbOpt(false)))
)
var (
forward_Query_Balance_0 = runtime.ForwardResponseMessage
forward_Query_TotalBalance_0 = runtime.ForwardResponseMessage
forward_Query_AnnualizedRewards_0 = runtime.ForwardResponseMessage
)

View File

@ -1,11 +1,17 @@
package hard
import (
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/hard/keeper"
"github.com/kava-labs/kava/x/hard/types"
)
// BeginBlocker updates the hard module's interest rates at the start of every
// block by applying accumulated interest rate updates.
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
	// Report how long this module's begin-block processing takes to telemetry.
	defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
	k.ApplyInterestRateUpdates(ctx)
}

View File

@ -2,14 +2,19 @@ package incentive
import (
"fmt"
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/incentive/keeper"
"github.com/kava-labs/kava/x/incentive/types"
)
// BeginBlocker runs at the start of every block
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
params := k.GetParams(ctx)
for _, rp := range params.USDXMintingRewardPeriods {

View File

@ -1,12 +1,18 @@
package issuance
import (
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/issuance/keeper"
"github.com/kava-labs/kava/x/issuance/types"
)
// BeginBlocker iterates over each asset and seizes coins from blocked addresses by returning them to the asset owner
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
err := k.SeizeCoinsForBlockableAssets(ctx)
if err != nil {
panic(err)

View File

@ -1,12 +1,18 @@
package kavadist
import (
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/kavadist/keeper"
"github.com/kava-labs/kava/x/kavadist/types"
)
func BeginBlocker(ctx sdk.Context, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker)
err := k.MintPeriodInflation(ctx)
if err != nil {
panic(err)

12
x/metrics/abci.go Normal file
View File

@ -0,0 +1,12 @@
package metrics
import (
"github.com/kava-labs/kava/x/metrics/types"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// BeginBlocker publishes metrics at the start of each block. Currently this
// sets the LatestBlockHeight gauge to the height from the block context.
func BeginBlocker(ctx sdk.Context, metrics *types.Metrics) {
	currentHeight := float64(ctx.BlockHeight())
	metrics.LatestBlockHeight.Set(currentHeight)
}

45
x/metrics/abci_test.go Normal file
View File

@ -0,0 +1,45 @@
package metrics_test
import (
"testing"
kitmetrics "github.com/go-kit/kit/metrics"
"github.com/stretchr/testify/require"
sdk "github.com/cosmos/cosmos-sdk/types"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/x/metrics"
"github.com/kava-labs/kava/x/metrics/types"
)
// MockGauge is a test double for a go-kit metrics Gauge; it records the most
// recent value passed to Set so tests can assert on it.
type MockGauge struct {
	value float64
}

// With ignores label values and returns the gauge itself.
func (mg *MockGauge) With(labelValues ...string) kitmetrics.Gauge { return mg }

// Set records the value for later inspection.
func (mg *MockGauge) Set(value float64) { mg.value = value }

// Add is a no-op, present only to satisfy the Gauge interface.
func (*MockGauge) Add(_ float64) {}
// ctxWithHeight creates an sdk.Context from a freshly initialized test app
// whose block header carries the given height.
func ctxWithHeight(height int64) sdk.Context {
	tApp := app.NewTestApp()
	tApp.InitializeFromGenesisStates()
	return tApp.NewContext(false, tmproto.Header{Height: height})
}
// TestBeginBlockEmitsLatestHeight verifies that BeginBlocker publishes the
// context's current block height to the LatestBlockHeight gauge.
func TestBeginBlockEmitsLatestHeight(t *testing.T) {
	gauge := MockGauge{}
	myMetrics := &types.Metrics{
		LatestBlockHeight: &gauge,
	}

	// Each call must overwrite the gauge with the height of the new context.
	for _, height := range []int64{1, 100, 17e6} {
		metrics.BeginBlocker(ctxWithHeight(height), myMetrics)
		require.EqualValues(t, height, gauge.value)
	}
}

125
x/metrics/module.go Normal file
View File

@ -0,0 +1,125 @@
package metrics
import (
"encoding/json"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/spf13/cobra"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/module"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/kava-labs/kava/x/metrics/types"
)
// Compile-time assertions that AppModule and AppModuleBasic implement the
// cosmos-sdk module interfaces.
var (
	_ module.AppModule      = AppModule{}
	_ module.AppModuleBasic = AppModuleBasic{}
)
// AppModuleBasic implements module.AppModuleBasic for x/metrics. The module
// is stateless and has no messages or genesis state, so most methods are
// no-ops.
type AppModuleBasic struct{}

// Name returns the module name.
func (AppModuleBasic) Name() string {
	return types.ModuleName
}

// RegisterLegacyAminoCodec register module codec
// Deprecated: unused but necessary to fulfill AppModuleBasic interface
func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {}
// DefaultGenesis returns the default genesis state: an empty JSON object,
// since the stateless metrics module has nothing to configure.
func (AppModuleBasic) DefaultGenesis(_ codec.JSONCodec) json.RawMessage {
	return json.RawMessage(`{}`)
}
// ValidateGenesis module validate genesis; always succeeds because the module
// carries no genesis state to validate.
func (AppModuleBasic) ValidateGenesis(_ codec.JSONCodec, _ client.TxEncodingConfig, _ json.RawMessage) error {
	return nil
}

// RegisterInterfaces implements InterfaceModule.RegisterInterfaces; no-op as
// the module defines no protobuf interfaces.
func (a AppModuleBasic) RegisterInterfaces(registry codectypes.InterfaceRegistry) {}

// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module.
// No-op: the module exposes no queries.
func (a AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) {}

// GetTxCmd returns the root tx command for the module; nil because the module
// has no transactions.
func (AppModuleBasic) GetTxCmd() *cobra.Command {
	return nil
}

// GetQueryCmd returns no root query command for the module.
func (AppModuleBasic) GetQueryCmd() *cobra.Command {
	return nil
}
//____________________________________________________________________________

// AppModule implements module.AppModule and holds the gauges that BeginBlock
// publishes each block.
type AppModule struct {
	AppModuleBasic

	// metrics holds the gauges updated every block; constructed once at app wiring.
	metrics *types.Metrics
}

// NewAppModule creates a new AppModule object, building the metrics set from
// the given telemetry options.
func NewAppModule(telemetryOpts types.TelemetryOptions) AppModule {
	return AppModule{
		AppModuleBasic: AppModuleBasic{},
		metrics:        types.NewMetrics(telemetryOpts),
	}
}

// Name returns the module name.
func (am AppModule) Name() string {
	return am.AppModuleBasic.Name()
}
// RegisterInvariants register module invariants; none, as the module is stateless.
func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {}

// Route module message route name
// Deprecated: unused but necessary to fulfill AppModule interface
func (am AppModule) Route() sdk.Route { return sdk.Route{} }

// QuerierRoute module querier route name
// Deprecated: unused but necessary to fulfill AppModule interface
func (AppModule) QuerierRoute() string { return types.ModuleName }

// LegacyQuerierHandler returns no sdk.Querier.
// Deprecated: unused but necessary to fulfill AppModule interface
func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier {
	return nil
}

// ConsensusVersion implements AppModule/ConsensusVersion.
func (AppModule) ConsensusVersion() uint64 { return 1 }

// RegisterServices registers module services; no-op, no services are exposed.
func (am AppModule) RegisterServices(cfg module.Configurator) {}
// InitGenesis module init-genesis. The genesis message is ignored and no
// validator updates are produced, as the module holds no state.
func (am AppModule) InitGenesis(ctx sdk.Context, _ codec.JSONCodec, _ json.RawMessage) []abci.ValidatorUpdate {
	return []abci.ValidatorUpdate{}
}
// ExportGenesis module export genesis. Returns nil because the module has no
// state to export.
func (am AppModule) ExportGenesis(_ sdk.Context, cdc codec.JSONCodec) json.RawMessage {
	return nil
}
// BeginBlock module begin-block. Delegates to BeginBlocker, which updates the
// module's gauges from the current block context.
func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) {
	BeginBlocker(ctx, am.metrics)
}
// EndBlock module end-block. No-op: returns no validator updates.
func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate {
	return []abci.ValidatorUpdate{}
}

36
x/metrics/spec/README.md Normal file
View File

@ -0,0 +1,36 @@
<!--
order: 0
title: "Metrics Overview"
parent:
title: "metrics"
-->
# `metrics`
## Abstract
`x/metrics` is a stateless module that does not affect consensus. It captures chain metrics and emits them when the `instrumentation.prometheus` option is enabled in `config.toml`.
## Precision
The metrics emitted by `x/metrics` are `float64`s. They use `github.com/go-kit/kit/metrics` Prometheus gauges. Cosmos-sdk's `telemetry` package was not used because, at the time of writing, it only supports `float32`s and so does not maintain accurate representations of ints larger than ~16.8M. With `float64`s, integers may be accurately represented up to ~9e15.
## Metrics
The following metrics are defined:
* `cometbft_blocksync_latest_block_height` - this emulates the blocksync `latest_block_height` metric in CometBFT v0.38+. The `cometbft` namespace comes from the `instrumentation.namespace` config.toml value.
## Metric Labels
All metrics emitted have the labels defined in app.toml's `telemetry.global-labels` field. This is the same field used by cosmos-sdk's `telemetry` package.
Example:
```toml
# app.toml
[telemetry]
global-labels = [
["chain_id", "kava_2222-10"],
["my_label", "my_value"],
]
```

6
x/metrics/types/keys.go Normal file
View File

@ -0,0 +1,6 @@
package types
const (
	// ModuleName is the name of the module
	ModuleName = "metrics"
)

View File

@ -0,0 +1,89 @@
package types
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/discard"
prometheus "github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
"github.com/spf13/cast"
servertypes "github.com/cosmos/cosmos-sdk/server/types"
)
// TelemetryOptions defines the app configurations for the x/metrics module
type TelemetryOptions struct {
	// CometBFT config value for instrumentation.prometheus (config.toml).
	// When false, NewMetrics returns no-op gauges.
	PrometheusEnabled bool
	// Namespace for CometBFT metrics. Used to emulate CometBFT metrics.
	CometBFTMetricsNamespace string
	// A list of keys and values used as labels on all metrics, laid out as
	// alternating pairs: [label1, value1, label2, value2, ...].
	GlobalLabelsAndValues []string
}
// TelemetryOptionsFromAppOpts creates the TelemetryOptions from server AppOptions.
// When instrumentation.prometheus is disabled, a zero-value options struct is
// returned (with a non-nil, empty label slice).
func TelemetryOptionsFromAppOpts(appOpts servertypes.AppOptions) TelemetryOptions {
	prometheusEnabled := cast.ToBool(appOpts.Get("instrumentation.prometheus"))
	if !prometheusEnabled {
		return TelemetryOptions{
			GlobalLabelsAndValues: []string{},
		}
	}
	// parse the app.toml global-labels into a slice of alternating label & value strings
	// the value is expected to be a list of [label, value] tuples.
	rawLabels := cast.ToSlice(appOpts.Get("telemetry.global-labels"))
	globalLabelsAndValues := make([]string, 0, len(rawLabels)*2)
	for _, gl := range rawLabels {
		l := cast.ToStringSlice(gl)
		// Skip malformed entries: indexing l[0]/l[1] on a tuple with fewer
		// than two elements would panic with index out of range.
		if len(l) < 2 {
			continue
		}
		globalLabelsAndValues = append(globalLabelsAndValues, l[0], l[1])
	}
	return TelemetryOptions{
		PrometheusEnabled:        true,
		CometBFTMetricsNamespace: cast.ToString(appOpts.Get("instrumentation.namespace")),
		GlobalLabelsAndValues:    globalLabelsAndValues,
	}
}
// Metrics contains metrics exposed by this module.
// They use go-kit metrics like CometBFT as opposed to using cosmos-sdk telemetry
// because the sdk's telemetry only supports float32s, whereas go-kit prometheus
// metrics correctly handle float64s (and thus a larger number of int64s)
type Metrics struct {
	// The height of the latest block.
	// This gauge exactly emulates the default blocksync metric in CometBFT v0.38+.
	// It should be removed when kava has been updated to CometBFT v0.38+.
	// see https://github.com/cometbft/cometbft/blob/v0.38.0-rc3/blocksync/metrics.gen.go
	LatestBlockHeight metrics.Gauge
}
// NewMetrics creates a new Metrics object based on whether or not prometheus instrumentation is enabled.
func NewMetrics(opts TelemetryOptions) *Metrics {
	if !opts.PrometheusEnabled {
		// Instrumentation is off: hand back do-nothing gauges.
		return NoopMetrics()
	}
	return PrometheusMetrics(opts)
}
// PrometheusMetrics returns the gauges for when prometheus instrumentation is enabled.
// GlobalLabelsAndValues is expected to alternate label names and values; the
// label names (even indices) are registered on the gauge, and the full
// alternating slice is applied via With.
func PrometheusMetrics(opts TelemetryOptions) *Metrics {
	// Pre-size: one label name per (label, value) pair.
	labels := make([]string, 0, len(opts.GlobalLabelsAndValues)/2)
	for i := 0; i < len(opts.GlobalLabelsAndValues); i += 2 {
		labels = append(labels, opts.GlobalLabelsAndValues[i])
	}
	return &Metrics{
		LatestBlockHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: opts.CometBFTMetricsNamespace,
			Subsystem: "blocksync",
			Name:      "latest_block_height",
			Help:      "The height of the latest block.",
		}, labels).With(opts.GlobalLabelsAndValues...),
	}
}
// NoopMetrics are a do-nothing placeholder used when prometheus instrumentation is not enabled.
func NoopMetrics() *Metrics {
	// discard gauges accept writes and drop them.
	return &Metrics{
		LatestBlockHeight: discard.NewGauge(),
	}
}

View File

@ -0,0 +1,72 @@
package types_test
import (
"testing"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/prometheus"
"github.com/kava-labs/kava/x/metrics/types"
"github.com/stretchr/testify/require"
)
// isPrometheusGauge reports whether g is backed by a real prometheus gauge
// (as opposed to a discard/no-op gauge).
func isPrometheusGauge(g metrics.Gauge) bool {
	switch g.(type) {
	case *prometheus.Gauge:
		return true
	default:
		return false
	}
}
var (
	// disabledOpts: prometheus off; NewMetrics should yield no-op gauges.
	disabledOpts = types.TelemetryOptions{
		PrometheusEnabled: false,
	}
	// enabledOpts: prometheus on with two label/value pairs in the
	// alternating [label, value, label, value] layout.
	enabledOpts = types.TelemetryOptions{
		PrometheusEnabled:        true,
		CometBFTMetricsNamespace: "cometbft",
		GlobalLabelsAndValues:    []string{"label1", "value1", "label2", "value2"},
	}
)
// TestNewMetrics_DisabledVsEnabled checks that NewMetrics returns no-op gauges
// when prometheus is disabled and real prometheus gauges when enabled.
func TestNewMetrics_DisabledVsEnabled(t *testing.T) {
	myMetrics := types.NewMetrics(disabledOpts)
	require.False(t, isPrometheusGauge(myMetrics.LatestBlockHeight))
	myMetrics = types.NewMetrics(enabledOpts)
	require.True(t, isPrometheusGauge(myMetrics.LatestBlockHeight))
}
// MockAppOpts is a map-backed stub of servertypes.AppOptions for tests.
type MockAppOpts struct {
	store map[string]interface{}
}

// Get returns the stored value for key (nil if absent), satisfying the
// AppOptions interface.
func (mao *MockAppOpts) Get(key string) interface{} {
	return mao.store[key]
}
// TestTelemetryOptionsFromAppOpts verifies option parsing from app/config
// values: the disabled short-circuit, the enabled namespace passthrough, and
// flattening of [label, value] tuples into the alternating slice.
func TestTelemetryOptionsFromAppOpts(t *testing.T) {
	appOpts := MockAppOpts{store: make(map[string]interface{})}
	// test disabled functionality
	appOpts.store["instrumentation.prometheus"] = false
	opts := types.TelemetryOptionsFromAppOpts(&appOpts)
	require.False(t, opts.PrometheusEnabled)
	// test enabled functionality
	appOpts.store["instrumentation.prometheus"] = true
	appOpts.store["instrumentation.namespace"] = "magic"
	appOpts.store["telemetry.global-labels"] = []interface{}{}
	opts = types.TelemetryOptionsFromAppOpts(&appOpts)
	require.True(t, opts.PrometheusEnabled)
	require.Equal(t, "magic", opts.CometBFTMetricsNamespace)
	require.Len(t, opts.GlobalLabelsAndValues, 0)
	// two [label, value] tuples flatten to a 4-element alternating slice
	appOpts.store["telemetry.global-labels"] = []interface{}{
		[]interface{}{"label1", "value1"},
		[]interface{}{"label2", "value2"},
	}
	opts = types.TelemetryOptionsFromAppOpts(&appOpts)
	require.True(t, opts.PrometheusEnabled)
	require.Equal(t, "magic", opts.CometBFTMetricsNamespace)
	require.Len(t, opts.GlobalLabelsAndValues, 4)
	require.Equal(t, enabledOpts.GlobalLabelsAndValues, opts.GlobalLabelsAndValues)
}

View File

@ -2,7 +2,9 @@ package pricefeed
import (
"errors"
"time"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/kava-labs/kava/x/pricefeed/keeper"
"github.com/kava-labs/kava/x/pricefeed/types"
@ -10,6 +12,8 @@ import (
// EndBlocker updates the current pricefeed
func EndBlocker(ctx sdk.Context, k keeper.Keeper) {
defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyEndBlocker)
// Update the current price of each asset.
for _, market := range k.GetMarkets(ctx) {
if !market.Active {