diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..e21f08cba75 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,103 @@ +# Copyright the Hyperledger Fabric contributors. All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 + +name: Release +on: + workflow_dispatch: + inputs: + release: + description: 'Fabric Release, e.g. 2.4.7' + required: true + type: string + two_digit_release: + description: 'Fabric Two Digit Release, e.g. 2.4' + required: true + type: string + commit_hash: + description: 'Commit hash, e.g. df9c661a192f8cf11376d9d643a0021f1a76c34b' + required: true + type: string + +env: + GO_VER: 1.18.7 + +permissions: + contents: read + +jobs: + build-binaries: + name: Build Fabric Binaries + strategy: + matrix: + include: + - image: ubuntu-22.04 + target: linux + arch: amd64 + - image: macos-11 + target: darwin + arch: amd64 + - image: windows-2022 + target: windows + arch: amd64 + runs-on: ubuntu-22.04 + steps: + - name: Install Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VER }} + - name: Checkout Fabric Code + uses: actions/checkout@v3 + - name: Compile Binary and Create Tarball + run: ./ci/scripts/create_binary_package.sh + env: + TARGET: ${{ matrix.target }}-${{ matrix.arch }} + RELEASE: ${{ inputs.release }} + - name: Publish Release Artifact + uses: actions/upload-artifact@v3 + with: + name: hyperledger-fabric-${{ matrix.target }}-${{ matrix.arch }}-${{ inputs.release }}.tar.gz + path: release/${{ matrix.target }}-${{ matrix.arch }}/hyperledger-fabric-${{ matrix.target }}-${{ matrix.arch }}-${{ inputs.release }}.tar.gz + build-and-push-docker-images: + name: Build and Push Fabric Docker Images + runs-on: ubuntu-22.04 + steps: + - name: Run APT Clean + run: sudo apt clean + - name: Run Apt Update + run: sudo apt update + - name: Install Dependencies + run: sudo apt install -y gcc haveged libtool make + - name: Install Go + uses: actions/setup-go@v3 + 
with: + go-version: ${{ env.GO_VER }} + - name: Checkout Fabric Code + uses: actions/checkout@v3 + - name: Publish Docker Images + run: ./ci/scripts/publish_docker.sh + env: + RELEASE: ${{ inputs.release }} + TWO_DIGIT_RELEASE: ${{ inputs.two_digit_release }} + DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }} + DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + create-release: + name: Create GitHub Release + needs: [ build-binaries, build-and-push-docker-images ] + runs-on: ubuntu-22.04 + permissions: + contents: write + steps: + - name: Checkout Fabric Code + uses: actions/checkout@v3 + - name: Download Artifacts + id: download + uses: actions/download-artifact@v3 + - name: Release Fabric Version + uses: ncipollo/release-action@v1 + with: + artifacts: "*.tar.gz/*.tar.gz" + bodyFile: release_notes/v${{ inputs.release }}.md + commit: ${{ inputs.commit_hash }} + tag: v${{ inputs.release }} + token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/trigger.yml b/.github/workflows/trigger.yml deleted file mode 100644 index 2f654d53506..00000000000 --- a/.github/workflows/trigger.yml +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright the Hyperledger Fabric contributors. All rights reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 - -on: - issue_comment: - types: [created] -name: Automatically Trigger Azure Pipeline -jobs: - trigger: - name: TriggerAZP - if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/ci-run') - runs-on: ubuntu-latest - steps: - - name: Trigger Build - run: | - org=$(jq -r ".repository.owner.login" "${GITHUB_EVENT_PATH}") - pr_number=$(jq -r ".issue.number" "${GITHUB_EVENT_PATH}") - project=$(jq -r ".repository.name" "${GITHUB_EVENT_PATH}") - repo=$(jq -r ".repository.full_name" "${GITHUB_EVENT_PATH}") - - comment_url="https://api.github.com/repos/${repo}/issues/${pr_number}/comments" - pr_url="https://api.github.com/repos/${repo}/pulls/${pr_number}" - - pr_resp=$(curl "${pr_url}") - sha=$(echo "${pr_resp}" | jq -r ".head.sha") - - az extension add --name azure-devops - echo ${AZP_TOKEN} | az devops login --organization "https://dev.azure.com/${org}" - runs=$(az pipelines build list --project ${project} | jq -c ".[] | select(.sourceVersion | contains(\"${sha}\"))" | jq -r .status | grep -v completed | wc -l) - if [[ $runs -eq 0 ]]; then - az pipelines build queue --branch refs/pull/${pr_number}/merge --commit-id ${sha} --project ${project} --definition-name Fabric-Pull-Request - curl -s -H "Authorization: token ${GITHUB_TOKEN}" -X POST -d '{"body": "AZP build triggered!"}' "${comment_url}" - else - curl -s -H "Authorization: token ${GITHUB_TOKEN}" -X POST -d '{"body": "AZP build already running!"}' "${comment_url}" - fi - env: - AZP_TOKEN: ${{ secrets.AZP_TOKEN }} - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/verify-build.yml b/.github/workflows/verify-build.yml new file mode 100644 index 00000000000..d4d57979433 --- /dev/null +++ b/.github/workflows/verify-build.yml @@ -0,0 +1,120 @@ +# Copyright the Hyperledger Fabric contributors. All rights reserved. 
+# +# SPDX-License-Identifier: Apache-2.0 + +name: Verify Build +on: + push: + branches: ["**"] + pull_request: + branches: ["**"] + workflow_dispatch: + +env: + GOPATH: /opt/go + PATH: /opt/go/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin + GO_VER: 1.16.7 + +jobs: + basic-checks: + name: Basic Checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - run: make basic-checks + name: Run Basic Checks + unit-tests: + name: Unit Tests + needs: basic-checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - run: ci/scripts/setup_hsm.sh + name: Install SoftHSM + - run: make unit-test + name: Run Unit Tests + integration-tests-1: + name: Integration Tests 1 + needs: basic-checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - run: ci/scripts/setup_hsm.sh + name: Install SoftHSM + - run: make integration-test INTEGRATION_TEST_SUITE="configtx gossip raft" + name: Run Integration Tests 1 + integration-tests-2: + name: Integration Tests 2 + needs: basic-checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - run: ci/scripts/setup_hsm.sh + name: Install SoftHSM + - run: make integration-test INTEGRATION_TEST_SUITE="idemix devmode nwo pvtdata" + name: Run Integration Tests 2 + integration-tests-3: + name: Integration Tests 3 + needs: basic-checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - 
run: ci/scripts/setup_hsm.sh + name: Install SoftHSM + - run: make integration-test INTEGRATION_TEST_SUITE="configtxlator pkcs11 pluggable sbe msp discovery" + name: Run Integration Tests 3 + integration-tests-4: + name: Integration Tests 4 + needs: basic-checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - run: ci/scripts/setup_hsm.sh + name: Install SoftHSM + - run: make integration-test INTEGRATION_TEST_SUITE="e2e ledger" + name: Run Integration Tests 4 + integration-tests-5: + name: Integration Tests 5 + needs: basic-checks + runs-on: ubuntu-20.04 + steps: + - uses: actions/setup-go@v3 + name: Install Go + with: + go-version: ${{ env.GO_VER }} + - uses: actions/checkout@v3 + name: Checkout Fabric Code + - run: ci/scripts/setup_hsm.sh + name: Install SoftHSM + - run: make integration-test INTEGRATION_TEST_SUITE="gateway lifecycle" + name: Run Integration Tests 5 diff --git a/Makefile b/Makefile index 63d4ffc8f6d..d63779b7b56 100644 --- a/Makefile +++ b/Makefile @@ -45,8 +45,8 @@ # - unit-test - runs the go-test based unit tests # - verify - runs unit tests for only the changed package tree -ALPINE_VER ?= 3.12 -BASE_VERSION = 2.3.0 +ALPINE_VER ?= 3.14 +BASE_VERSION = 2.3.3 # 3rd party image version # These versions are also set in the runners in ./integration/runners/ @@ -77,7 +77,7 @@ METADATA_VAR += CommitSHA=$(EXTRA_VERSION) METADATA_VAR += BaseDockerLabel=$(BASE_DOCKER_LABEL) METADATA_VAR += DockerNamespace=$(DOCKER_NS) -GO_VER = 1.14.12 +GO_VER = 1.16.7 GO_TAGS ?= RELEASE_EXES = orderer $(TOOLS_EXES) @@ -140,7 +140,7 @@ check-go-version: .PHONY: integration-test integration-test: integration-test-prereqs - ./scripts/run-integration-tests.sh + ./scripts/run-integration-tests.sh $(INTEGRATION_TEST_SUITE) .PHONY: integration-test-prereqs integration-test-prereqs: gotool.ginkgo baseos-docker ccenv-docker docker-thirdparty @@ 
-221,6 +221,7 @@ $(BUILD_DIR)/images/ccenv/$(DUMMY): BUILD_CONTEXT=images/ccenv $(BUILD_DIR)/images/baseos/$(DUMMY): BUILD_CONTEXT=images/baseos $(BUILD_DIR)/images/peer/$(DUMMY): BUILD_ARGS=--build-arg GO_TAGS=${GO_TAGS} $(BUILD_DIR)/images/orderer/$(DUMMY): BUILD_ARGS=--build-arg GO_TAGS=${GO_TAGS} +$(BUILD_DIR)/images/tools/$(DUMMY): BUILD_ARGS=--build-arg GO_TAGS=${GO_TAGS} $(BUILD_DIR)/images/%/$(DUMMY): @echo "Building Docker image $(DOCKER_NS)/fabric-$*" diff --git a/README.md b/README.md index 66c5edfcea5..7f9a22d32fe 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,7 @@ Follow the release discussion on the [#fabric-release](https://chat.hyperledger. Please visit our online documentation for information on getting started using and developing with the fabric, SDK and chaincode: +- [v2.3](http://hyperledger-fabric.readthedocs.io/en/release-2.3/) - [v2.2](http://hyperledger-fabric.readthedocs.io/en/release-2.2/) - [v2.1](http://hyperledger-fabric.readthedocs.io/en/release-2.1/) - [v2.0](http://hyperledger-fabric.readthedocs.io/en/release-2.0/) diff --git a/bccsp/factory/pkcs11factory.go b/bccsp/factory/pkcs11factory.go index bb6fc054596..6388a2d65cd 100644 --- a/bccsp/factory/pkcs11factory.go +++ b/bccsp/factory/pkcs11factory.go @@ -9,6 +9,8 @@ SPDX-License-Identifier: Apache-2.0 package factory import ( + "encoding/hex" + "github.com/hyperledger/fabric/bccsp" "github.com/hyperledger/fabric/bccsp/pkcs11" "github.com/hyperledger/fabric/bccsp/sw" @@ -35,8 +37,27 @@ func (f *PKCS11Factory) Get(config *FactoryOpts) (bccsp.BCCSP, error) { return nil, errors.New("Invalid config. 
It must not be nil.") } - p11Opts := config.PKCS11 + p11Opts := *config.PKCS11 ks := sw.NewDummyKeyStore() + mapper := skiMapper(p11Opts) + + return pkcs11.New(p11Opts, ks, pkcs11.WithKeyMapper(mapper)) +} - return pkcs11.New(*p11Opts, ks) +func skiMapper(p11Opts pkcs11.PKCS11Opts) func([]byte) []byte { + keyMap := map[string]string{} + for _, k := range p11Opts.KeyIDs { + keyMap[k.SKI] = k.ID + } + + return func(ski []byte) []byte { + keyID := hex.EncodeToString(ski) + if id, ok := keyMap[keyID]; ok { + return []byte(id) + } + if p11Opts.AltID != "" { + return []byte(p11Opts.AltID) + } + return ski + } } diff --git a/bccsp/factory/pkcs11factory_test.go b/bccsp/factory/pkcs11factory_test.go index 675d43badd9..679cdab4960 100644 --- a/bccsp/factory/pkcs11factory_test.go +++ b/bccsp/factory/pkcs11factory_test.go @@ -9,6 +9,8 @@ SPDX-License-Identifier: Apache-2.0 package factory import ( + "crypto/sha256" + "encoding/hex" "testing" "github.com/hyperledger/fabric/bccsp/pkcs11" @@ -38,46 +40,13 @@ func TestPKCS11FactoryGetInvalidArgs(t *testing.T) { func TestPKCS11FactoryGet(t *testing.T) { f := &PKCS11Factory{} - lib, pin, label := pkcs11.FindPKCS11Lib() opts := &FactoryOpts{ - PKCS11: &pkcs11.PKCS11Opts{ - Security: 256, - Hash: "SHA2", - Library: lib, - Pin: pin, - Label: label, - }, + PKCS11: defaultOptions(), } csp, err := f.Get(opts) require.NoError(t, err) require.NotNil(t, csp) - - opts = &FactoryOpts{ - PKCS11: &pkcs11.PKCS11Opts{ - Security: 256, - Hash: "SHA2", - Library: lib, - Pin: pin, - Label: label, - }, - } - csp, err = f.Get(opts) - require.NoError(t, err) - require.NotNil(t, csp) - - opts = &FactoryOpts{ - PKCS11: &pkcs11.PKCS11Opts{ - Security: 256, - Hash: "SHA2", - Library: lib, - Pin: pin, - Label: label, - }, - } - csp, err = f.Get(opts) - require.NoError(t, err) - require.NotNil(t, csp) } func TestPKCS11FactoryGetEmptyKeyStorePath(t *testing.T) { @@ -110,3 +79,42 @@ func TestPKCS11FactoryGetEmptyKeyStorePath(t *testing.T) { require.NoError(t, 
err) require.NotNil(t, csp) } + +func TestSKIMapper(t *testing.T) { + inputSKI := sha256.New().Sum([]byte("some-ski")) + tests := []struct { + name string + altID string + keyIDs map[string]string + expected []byte + }{ + {name: "DefaultBehavior", expected: inputSKI}, + {name: "AltIDOnly", altID: "alternate-ID", expected: []byte("alternate-ID")}, + {name: "MapEntry", keyIDs: map[string]string{hex.EncodeToString(inputSKI): "mapped-id"}, expected: []byte("mapped-id")}, + {name: "AltIDAsDefault", altID: "alternate-ID", keyIDs: map[string]string{"another-ski": "another-id"}, expected: []byte("alternate-ID")}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + options := defaultOptions() + options.AltID = tt.altID + for k, v := range tt.keyIDs { + options.KeyIDs = append(options.KeyIDs, pkcs11.KeyIDMapping{SKI: k, ID: v}) + } + + mapper := skiMapper(*options) + result := mapper(inputSKI) + require.Equal(t, tt.expected, result, "got %x, want %x", result, tt.expected) + }) + } +} + +func defaultOptions() *pkcs11.PKCS11Opts { + lib, pin, label := pkcs11.FindPKCS11Lib() + return &pkcs11.PKCS11Opts{ + Security: 256, + Hash: "SHA2", + Library: lib, + Pin: pin, + Label: label, + } +} diff --git a/bccsp/pkcs11/conf.go b/bccsp/pkcs11/conf.go index d4bff5a25f2..a6604ebe7fb 100644 --- a/bccsp/pkcs11/conf.go +++ b/bccsp/pkcs11/conf.go @@ -21,13 +21,22 @@ type PKCS11Opts struct { Hash string `json:"hash"` // PKCS11 options - Library string `json:"library"` - Label string `json:"label"` - Pin string `json:"pin"` - SoftwareVerify bool `json:"softwareverify,omitempty"` - Immutable bool `json:"immutable,omitempty"` + Library string `json:"library"` + Label string `json:"label"` + Pin string `json:"pin"` + SoftwareVerify bool `json:"softwareverify,omitempty"` + Immutable bool `json:"immutable,omitempty"` + AltID string `json:"altid,omitempty"` + KeyIDs []KeyIDMapping `json:"keyids,omitempty" mapstructure:"keyids"` sessionCacheSize int createSessionRetries int 
createSessionRetryDelay time.Duration } + +// A KeyIDMapping associates the CKA_ID attribute of a cryptoki object with a +// subject key identifer. +type KeyIDMapping struct { + SKI string `json:"ski,omitempty"` + ID string `json:"id,omitempty"` +} diff --git a/bccsp/pkcs11/pkcs11.go b/bccsp/pkcs11/pkcs11.go index aea87688592..173c6bc12a7 100644 --- a/bccsp/pkcs11/pkcs11.go +++ b/bccsp/pkcs11/pkcs11.go @@ -41,6 +41,7 @@ type Provider struct { softVerify bool immutable bool + getKeyIDForSKI func(ski []byte) []byte createSessionRetries int createSessionRetryDelay time.Duration @@ -56,6 +57,19 @@ type Provider struct { // Ensure we satisfy the BCCSP interfaces. var _ bccsp.BCCSP = (*Provider)(nil) +// An Option is used to configure the Provider. +type Option func(p *Provider) error + +// WithKeyMapper returns an option that configures the Provider to use the +// provided function to map a subject key identifier to a cryptoki CKA_ID +// identifer. +func WithKeyMapper(mapper func([]byte) []byte) Option { + return func(p *Provider) error { + p.getKeyIDForSKI = mapper + return nil + } +} + // New returns a new instance of a BCCSP that uses PKCS#11 standard interfaces // to generate and use elliptic curve key pairs for signing and verification using // curves that satisfy the requested security level from opts. @@ -63,7 +77,7 @@ var _ bccsp.BCCSP = (*Provider)(nil) // All other cryptographic functions are delegated to a software based BCCSP // implementation that is configured to use the security level and hashing // familly from opts and the key store that is provided. 
-func New(opts PKCS11Opts, keyStore bccsp.KeyStore) (*Provider, error) { +func New(opts PKCS11Opts, keyStore bccsp.KeyStore, options ...Option) (*Provider, error) { curve, err := curveForSecurityLevel(opts.Security) if err != nil { return nil, errors.Wrapf(err, "Failed initializing configuration") @@ -92,6 +106,7 @@ func New(opts PKCS11Opts, keyStore bccsp.KeyStore) (*Provider, error) { csp := &Provider{ BCCSP: swCSP, curve: curve, + getKeyIDForSKI: func(ski []byte) []byte { return ski }, createSessionRetries: opts.createSessionRetries, createSessionRetryDelay: opts.createSessionRetryDelay, sessPool: sessPool, @@ -102,6 +117,12 @@ func New(opts PKCS11Opts, keyStore bccsp.KeyStore) (*Provider, error) { immutable: opts.Immutable, } + for _, o := range options { + if err := o(csp); err != nil { + return nil, err + } + } + return csp.initialize(opts) } @@ -556,7 +577,7 @@ func (csp *Provider) generateECKey(curve asn1.ObjectIdentifier, ephemeral bool) return nil, nil, fmt.Errorf("P11: Private Key copy failed with error [%s]. Please contact your HSM vendor", prvCopyerror) } prvKeyDestroyError := csp.ctx.DestroyObject(session, prv) - if pubKeyDestroyError != nil { + if prvKeyDestroyError != nil { return nil, nil, fmt.Errorf("P11: Private Key destroy failed with error [%s]. 
Please contact your HSM vendor", prvKeyDestroyError) } } @@ -697,7 +718,7 @@ func (csp *Provider) findKeyPairFromSKI(session pkcs11.SessionHandle, ski []byte template := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_CLASS, ktype), - pkcs11.NewAttribute(pkcs11.CKA_ID, ski), + pkcs11.NewAttribute(pkcs11.CKA_ID, csp.getKeyIDForSKI(ski)), } if err := csp.ctx.FindObjectsInit(session, template); err != nil { return 0, err diff --git a/bccsp/pkcs11/pkcs11_test.go b/bccsp/pkcs11/pkcs11_test.go index 4ff898a475d..63a190680c0 100644 --- a/bccsp/pkcs11/pkcs11_test.go +++ b/bccsp/pkcs11/pkcs11_test.go @@ -49,9 +49,9 @@ func newKeyStore(t *testing.T) (bccsp.KeyStore, func()) { return ks, func() { os.RemoveAll(tempDir) } } -func newProvider(t *testing.T, opts PKCS11Opts) (*Provider, func()) { +func newProvider(t *testing.T, opts PKCS11Opts, options ...Option) (*Provider, func()) { ks, ksCleanup := newKeyStore(t) - csp, err := New(opts, ks) + csp, err := New(opts, ks, options...) require.NoError(t, err) cleanup := func() { @@ -334,6 +334,74 @@ func TestECDSASign(t *testing.T) { }) } +type mapper struct { + input []byte + result []byte +} + +func (m *mapper) skiToID(ski []byte) []byte { + m.input = ski + return m.result +} + +func TestKeyMapper(t *testing.T) { + mapper := &mapper{} + csp, cleanup := newProvider(t, defaultOptions(), WithKeyMapper(mapper.skiToID)) + defer cleanup() + + k, err := csp.KeyGen(&bccsp.ECDSAKeyGenOpts{Temporary: false}) + require.NoError(t, err) + + digest, err := csp.Hash([]byte("Hello World"), &bccsp.SHAOpts{}) + require.NoError(t, err) + + sess, err := csp.getSession() + require.NoError(t, err, "failed to get session") + defer csp.returnSession(sess) + + newID := []byte("mapped-id") + updateKeyIdentifier(t, csp.ctx, sess, pkcs11.CKO_PUBLIC_KEY, k.SKI(), newID) + updateKeyIdentifier(t, csp.ctx, sess, pkcs11.CKO_PRIVATE_KEY, k.SKI(), newID) + + t.Run("ToMissingID", func(t *testing.T) { + csp.clearCaches() + mapper.result = k.SKI() + _, err := 
csp.Sign(k, digest, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "Private key not found") + require.Equal(t, k.SKI(), mapper.input, "expected mapper to receive ski %x, got %x", k.SKI(), mapper.input) + }) + t.Run("ToNewID", func(t *testing.T) { + csp.clearCaches() + mapper.result = newID + signature, err := csp.Sign(k, digest, nil) + require.NoError(t, err) + require.NotEmpty(t, signature, "signature must not be empty") + require.Equal(t, k.SKI(), mapper.input, "expected mapper to receive ski %x, got %x", k.SKI(), mapper.input) + }) +} + +func updateKeyIdentifier(t *testing.T, pctx *pkcs11.Ctx, sess pkcs11.SessionHandle, class uint, currentID, newID []byte) { + pkt := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + pkcs11.NewAttribute(pkcs11.CKA_ID, currentID), + } + err := pctx.FindObjectsInit(sess, pkt) + require.NoError(t, err) + + objs, _, err := pctx.FindObjects(sess, 1) + require.NoError(t, err) + require.Len(t, objs, 1) + + err = pctx.FindObjectsFinal(sess) + require.NoError(t, err) + + err = pctx.SetAttributeValue(sess, objs[0], []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_ID, newID), + }) + require.NoError(t, err) +} + func TestECDSAVerify(t *testing.T) { csp, cleanup := newProvider(t, defaultOptions()) defer cleanup() diff --git a/ci/azure-pipelines-merge.yml b/ci/azure-pipelines-merge.yml deleted file mode 100644 index ab932c65226..00000000000 --- a/ci/azure-pipelines-merge.yml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright the Hyperledger Fabric contributors. All rights reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 - -name: $(SourceBranchName)-$(Date:yyyyMMdd)$(Rev:.rrr) -trigger: -- master -- release-2.* -pr: none - -variables: - GOPATH: $(Agent.BuildDirectory)/go - PATH: $(Agent.BuildDirectory)/go/bin:/usr/local/go/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin - GOVER: 1.14.12 - -jobs: - - job: UnitTests - pool: - vmImage: ubuntu-18.04 - steps: - - template: install_deps.yml - - checkout: self - path: 'go/src/github.com/hyperledger/fabric' - displayName: Checkout Fabric Code - - script: ./ci/scripts/setup_hsm.sh - displayName: Install SoftHSM - - script: make unit-test - displayName: Run Unit Tests diff --git a/ci/azure-pipelines-release.yml b/ci/azure-pipelines-release.yml deleted file mode 100644 index df17b0b2db6..00000000000 --- a/ci/azure-pipelines-release.yml +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright the Hyperledger Fabric contributors. All rights reserved. -# -# SPDX-License-Identifier: Apache-2.0 - -name: RELEASE-$(Date:yyyyMMdd)$(Rev:.rrr) -trigger: none -pr: none - -variables: - - group: credentials - - name: GOPATH - value: $(Agent.BuildDirectory)/go - - name: GOVER - value: 1.14.12 - -stages: - - stage: BuildBinaries - dependsOn: [] - displayName: "Build Fabric Binaries" - jobs: - - job: Build - pool: - vmImage: ubuntu-18.04 - container: golang:$(GOVER) - strategy: - matrix: - Linux-amd64: - TARGET: linux-amd64 - MacOS-amd64: - TARGET: darwin-amd64 - Windows-amd64: - TARGET: windows-amd64 - steps: - - checkout: self - path: 'go/src/github.com/hyperledger/fabric' - displayName: Checkout Fabric Code - - script: ./ci/scripts/create_binary_package.sh - displayName: Compile Binary and Create Tarball - - publish: release/$(TARGET)/hyperledger-fabric-$(TARGET)-$(RELEASE).tar.gz - artifact: hyperledger-fabric-$(TARGET)-$(RELEASE).tar.gz - displayName: Publish Release Artifact - - - stage: BuildAndPushDockerImages - dependsOn: [] - displayName: "Build and Push Fabric Docker Images" - jobs: - - job: Docker - 
pool: - vmImage: ubuntu-18.04 - steps: - - template: install_deps.yml - - checkout: self - path: 'go/src/github.com/hyperledger/fabric' - displayName: Checkout Fabric Code - - script: ./ci/scripts/publish_docker.sh - env: - DOCKER_PASSWORD: $(DockerHub-Password) - DOCKER_USERNAME: $(DockerHub-Username) - displayName: Publish Docker Images - - - stage: DraftRelease - displayName: "Draft GitHub Release" - dependsOn: - - BuildBinaries - - BuildAndPushDockerImages - jobs: - - job: Release - pool: - vmImage: ubuntu-18.04 - steps: - - download: current - patterns: '*.tar.gz' - displayName: Download Artifacts - - checkout: self - - task: GitHubRelease@0 - inputs: - action: create - addChangeLog: true - assets: $(Pipeline.Workspace)/*amd64*/* - compareWith: lastFullRelease - gitHubConnection: fabric-release - isDraft: true - releaseNotesFile: release_notes/v$(RELEASE).md - repositoryName: $(Build.Repository.Name) - releaseNotesSource: file - tag: v$(RELEASE) - tagSource: manual - title: v$(RELEASE) - displayName: Draft Release of Fabric diff --git a/ci/azure-pipelines.yml b/ci/azure-pipelines.yml deleted file mode 100644 index cef05ca2026..00000000000 --- a/ci/azure-pipelines.yml +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright the Hyperledger Fabric contributors. All rights reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 - -name: $(SourceBranchName)-$(Date:yyyyMMdd)$(Rev:.rrr) -trigger: none -pr: -- master -- release-2.* - -variables: - GOPATH: $(Agent.BuildDirectory)/go - PATH: $(Agent.BuildDirectory)/go/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin - GOVER: 1.14.12 - -stages: - - stage: VerifyBuild - dependsOn: [] - jobs: - - job: Checks - pool: - vmImage: ubuntu-18.04 - steps: - - template: install_deps.yml - - checkout: self - path: 'fabric' - displayName: Checkout Fabric Code - - script: make basic-checks native - displayName: Run Basic Checks - - script: ./ci/scripts/evaluate_commits.sh - name: SetJobTriggers - - - stage: UnitTests - dependsOn: VerifyBuild - jobs: - - job: UnitTests - condition: eq(stageDependencies.VerifyBuild.Checks.outputs['SetJobTriggers.runTests'], 'true') - pool: - vmImage: ubuntu-18.04 - steps: - - template: install_deps.yml - - checkout: self - path: 'fabric' - displayName: Checkout Fabric Code - - script: ./ci/scripts/setup_hsm.sh - displayName: Install SoftHSM - - script: make unit-test - displayName: Run Unit Tests - - - stage: IntegrationTests - dependsOn: VerifyBuild - jobs: - - job: IntegrationTests - condition: eq(stageDependencies.VerifyBuild.Checks.outputs['SetJobTriggers.runTests'], 'true') - pool: - vmImage: ubuntu-18.04 - strategy: - parallel: 5 - timeoutInMinutes: 90 - steps: - - template: install_deps.yml - - checkout: self - path: 'fabric' - displayName: Checkout Fabric Code - - script: ./ci/scripts/setup_hsm.sh - displayName: Install SoftHSM - - script: make integration-test - displayName: Run Integration Tests diff --git a/ci/install_deps.yml b/ci/install_deps.yml deleted file mode 100644 index aa00b94c3ca..00000000000 --- a/ci/install_deps.yml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright the Hyperledger Fabric contributors. All rights reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 - -steps: - - script: | - sudo apt-get clean - sudo apt-get update - sudo apt-get install -y gcc make - echo "vsts hard nofile 65535" | sudo tee -a /etc/security/limits.conf - echo "vsts soft nofile 65535" | sudo tee -a /etc/security/limits.conf - displayName: Install Dependencies - - task: GoTool@0 - inputs: - version: $(GOVER) - goPath: $(GOPATH) - displayName: Install GoLang diff --git a/cmd/osnadmin/.gitignore b/cmd/osnadmin/.gitignore new file mode 100644 index 00000000000..e5bf9168178 --- /dev/null +++ b/cmd/osnadmin/.gitignore @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: Apache-2.0 + +osnadmin diff --git a/cmd/osnadmin/main.go b/cmd/osnadmin/main.go index 9d331946370..4f1a5688f14 100644 --- a/cmd/osnadmin/main.go +++ b/cmd/osnadmin/main.go @@ -49,16 +49,19 @@ func executeForArgs(args []string) (output string, exit int, err error) { channel := app.Command("channel", "Channel actions") join := channel.Command("join", "Join an Ordering Service Node (OSN) to a channel. If the channel does not yet exist, it will be created.") - joinChannelID := join.Flag("channel-id", "Channel ID").Short('c').Required().String() + joinChannelID := join.Flag("channelID", "Channel ID").Short('c').Required().String() configBlockPath := join.Flag("config-block", "Path to the file containing an up-to-date config block for the channel").Short('b').Required().String() - list := channel.Command("list", "List channel information for an Ordering Service Node (OSN). If the channel-id flag is set, more detailed information will be provided for that channel.") - listChannelID := list.Flag("channel-id", "Channel ID").Short('c').String() + list := channel.Command("list", "List channel information for an Ordering Service Node (OSN). 
If the channelID flag is set, more detailed information will be provided for that channel.") + listChannelID := list.Flag("channelID", "Channel ID").Short('c').String() remove := channel.Command("remove", "Remove an Ordering Service Node (OSN) from a channel.") - removeChannelID := remove.Flag("channel-id", "Channel ID").Short('c').Required().String() + removeChannelID := remove.Flag("channelID", "Channel ID").Short('c').Required().String() - command := kingpin.MustParse(app.Parse(args)) + command, err := app.Parse(args) + if err != nil { + return "", 1, err + } // // flag validation @@ -173,7 +176,7 @@ func validateBlockChannelID(blockBytes []byte, channelID string) error { // quick sanity check that the orderer admin is joining // the channel they think they're joining. if channelID != blockChannelID { - return fmt.Errorf("specified --channel-id %s does not match channel ID %s in config block", channelID, blockChannelID) + return fmt.Errorf("specified --channelID %s does not match channel ID %s in config block", channelID, blockChannelID) } return nil diff --git a/cmd/osnadmin/main_test.go b/cmd/osnadmin/main_test.go index 40840c858fd..0adf79576ac 100644 --- a/cmd/osnadmin/main_test.go +++ b/cmd/osnadmin/main_test.go @@ -162,7 +162,7 @@ var _ = Describe("osnadmin", func() { "channel", "list", "--orderer-address", ordererURL, - "--channel-id", "tell-me-your-secrets", + "--channelID", "tell-me-your-secrets", "--ca-file", ordererCACert, "--client-cert", clientCert, "--client-key", clientKey, @@ -188,7 +188,7 @@ var _ = Describe("osnadmin", func() { "channel", "list", "--orderer-address", ordererURL, - "--channel-id", "tell-me-your-secrets", + "--channelID", "tell-me-your-secrets", "--ca-file", ordererCACert, "--client-cert", clientCert, "--client-key", clientKey, @@ -240,7 +240,7 @@ var _ = Describe("osnadmin", func() { "channel", "list", "--orderer-address", ordererURL, - "--channel-id", "tell-me-your-secrets", + "--channelID", "tell-me-your-secrets", } output, 
exit, err := executeForArgs(args) Expect(err).NotTo(HaveOccurred()) @@ -264,7 +264,7 @@ var _ = Describe("osnadmin", func() { "channel", "remove", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--ca-file", ordererCACert, "--client-cert", clientCert, "--client-key", clientKey, @@ -286,7 +286,7 @@ var _ = Describe("osnadmin", func() { "remove", "--ca-file", ordererCACert, "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--client-cert", clientCert, "--client-key", clientKey, } @@ -308,7 +308,7 @@ var _ = Describe("osnadmin", func() { "channel", "remove", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, } output, exit, err := executeForArgs(args) Expect(err).NotTo(HaveOccurred()) @@ -343,7 +343,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--config-block", blockPath, "--ca-file", ordererCACert, "--client-cert", clientCert, @@ -370,7 +370,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--config-block", blockPath, "--ca-file", ordererCACert, "--client-cert", clientCert, @@ -382,7 +382,7 @@ var _ = Describe("osnadmin", func() { }) }) - Context("when the --channel-id does not match the channel ID in the block", func() { + Context("when the --channelID does not match the channel ID in the block", func() { BeforeEach(func() { channelID = "not-the-channel-youre-looking-for" }) @@ -392,7 +392,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--config-block", blockPath, "--ca-file", ordererCACert, "--client-cert", clientCert, @@ -400,7 +400,7 @@ var _ = Describe("osnadmin", func() { } output, exit, err := executeForArgs(args) - 
checkFlagError(output, exit, err, "specified --channel-id not-the-channel-youre-looking-for does not match channel ID testing123 in config block") + checkFlagError(output, exit, err, "specified --channelID not-the-channel-youre-looking-for does not match channel ID testing123 in config block") }) }) @@ -430,7 +430,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--config-block", blockPath, "--ca-file", ordererCACert, "--client-cert", clientCert, @@ -457,7 +457,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--config-block", blockPath, "--ca-file", ordererCACert, "--client-cert", clientCert, @@ -481,7 +481,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--config-block", blockPath, } output, exit, err := executeForArgs(args) @@ -498,7 +498,7 @@ var _ = Describe("osnadmin", func() { }) Describe("Flags", func() { - It("accepts short versions of the --orderer-address, --channel-id, and --config-block flags", func() { + It("accepts short versions of the --orderer-address, --channelID, and --config-block flags", func() { configBlock := blockWithGroups( map[string]*cb.ConfigGroup{ "Application": {}, @@ -534,6 +534,18 @@ var _ = Describe("osnadmin", func() { checkOutput(output, exit, err, 201, expectedOutput) }) + Context("when an unknown flag is used", func() { + It("returns an error for long flags", func() { + _, _, err := executeForArgs([]string{"channel", "list", "--bad-flag"}) + Expect(err).To(MatchError("unknown long flag '--bad-flag'")) + }) + + It("returns an error for short flags", func() { + _, _, err := executeForArgs([]string{"channel", "list", "-z"}) + Expect(err).To(MatchError("unknown short flag '-z'")) + }) + }) + Context("when the ca cert cannot be 
read", func() { BeforeEach(func() { ordererCACert = "not-the-ca-cert-youre-looking-for" @@ -544,7 +556,7 @@ var _ = Describe("osnadmin", func() { "channel", "list", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--ca-file", ordererCACert, "--client-cert", clientCert, "--client-key", clientKey, @@ -564,7 +576,7 @@ var _ = Describe("osnadmin", func() { "channel", "remove", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--ca-file", ordererCACert, "--client-cert", clientCert, "--client-key", clientKey, @@ -605,7 +617,7 @@ var _ = Describe("osnadmin", func() { "channel", "join", "--orderer-address", ordererURL, - "--channel-id", channelID, + "--channelID", channelID, "--ca-file", ordererCACert, "--client-cert", clientCert, "--client-key", clientKey, diff --git a/common/crypto/tlsgen/ca.go b/common/crypto/tlsgen/ca.go index 8e9d29fdc22..7ee89d34dc6 100644 --- a/common/crypto/tlsgen/ca.go +++ b/common/crypto/tlsgen/ca.go @@ -40,7 +40,10 @@ type CA interface { // with a given custom SAN. // The certificate is signed by the CA. // Returns nil, error in case of failure - NewServerCertKeyPair(host string) (*CertKeyPair, error) + NewServerCertKeyPair(hosts ...string) (*CertKeyPair, error) + + // Signer returns a crypto.Signer that signs with the CA's private key. 
+ Signer() crypto.Signer } type ca struct { @@ -50,7 +53,7 @@ type ca struct { func NewCA() (CA, error) { c := &ca{} var err error - c.caCert, err = newCertKeyPair(true, false, "", nil, nil) + c.caCert, err = newCertKeyPair(true, false, nil, nil) if err != nil { return nil, err } @@ -60,7 +63,7 @@ func NewCA() (CA, error) { func (c *ca) NewIntermediateCA() (CA, error) { intermediateCA := &ca{} var err error - intermediateCA.caCert, err = newCertKeyPair(true, false, "", c.caCert.Signer, c.caCert.TLSCert) + intermediateCA.caCert, err = newCertKeyPair(true, false, c.caCert.Signer, c.caCert.TLSCert) if err != nil { return nil, err } @@ -76,16 +79,21 @@ func (c *ca) CertBytes() []byte { // or nil, error in case of failure // The certificate is signed by the CA and is used as a client TLS certificate func (c *ca) NewClientCertKeyPair() (*CertKeyPair, error) { - return newCertKeyPair(false, false, "", c.caCert.Signer, c.caCert.TLSCert) + return newCertKeyPair(false, false, c.caCert.Signer, c.caCert.TLSCert) } // newServerCertKeyPair returns a certificate and private key pair and nil, // or nil, error in case of failure // The certificate is signed by the CA and is used as a server TLS certificate -func (c *ca) NewServerCertKeyPair(host string) (*CertKeyPair, error) { - keypair, err := newCertKeyPair(false, true, host, c.caCert.Signer, c.caCert.TLSCert) +func (c *ca) NewServerCertKeyPair(hosts ...string) (*CertKeyPair, error) { + keypair, err := newCertKeyPair(false, true, c.caCert.Signer, c.caCert.TLSCert, hosts...) if err != nil { return nil, err } return keypair, nil } + +// Signer returns a crypto.Signer that signs with the CA's private key. 
+func (c *ca) Signer() crypto.Signer { + return c.caCert.Signer +} diff --git a/common/crypto/tlsgen/ca_test.go b/common/crypto/tlsgen/ca_test.go index 400bd069fe8..1cdeef93fd1 100644 --- a/common/crypto/tlsgen/ca_test.go +++ b/common/crypto/tlsgen/ca_test.go @@ -82,3 +82,9 @@ func TestTLSCA(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "context deadline exceeded") } + +func TestTLSCASigner(t *testing.T) { + tlsCA, err := NewCA() + require.NoError(t, err) + require.Equal(t, tlsCA.(*ca).caCert.Signer, tlsCA.Signer()) +} diff --git a/common/crypto/tlsgen/key.go b/common/crypto/tlsgen/key.go index a57b0bcd999..ae1ea107bc9 100644 --- a/common/crypto/tlsgen/key.go +++ b/common/crypto/tlsgen/key.go @@ -11,6 +11,7 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "crypto/sha256" "crypto/x509" "crypto/x509/pkix" "encoding/pem" @@ -47,7 +48,7 @@ func newCertTemplate() (x509.Certificate, error) { }, nil } -func newCertKeyPair(isCA bool, isServer bool, host string, certSigner crypto.Signer, parent *x509.Certificate) (*CertKeyPair, error) { +func newCertKeyPair(isCA bool, isServer bool, certSigner crypto.Signer, parent *x509.Certificate, hosts ...string) (*CertKeyPair, error) { privateKey, privBytes, err := newPrivKey() if err != nil { return nil, err @@ -74,12 +75,15 @@ func newCertKeyPair(isCA bool, isServer bool, host string, certSigner crypto.Sig if isServer { template.NotAfter = tenYearsFromNow template.ExtKeyUsage = append(template.ExtKeyUsage, x509.ExtKeyUsageServerAuth) - if ip := net.ParseIP(host); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else { - template.DNSNames = append(template.DNSNames, host) + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, host) + } } } + template.SubjectKeyId = computeSKI(&privateKey.PublicKey) // If no parent cert, it's a self signed 
cert if parent == nil || certSigner == nil { parent = &template @@ -111,3 +115,10 @@ func newCertKeyPair(isCA bool, isServer bool, host string, certSigner crypto.Sig func encodePEM(keyType string, data []byte) []byte { return pem.EncodeToMemory(&pem.Block{Type: keyType, Bytes: data}) } + +// RFC 7093, Section 2, Method 4 +func computeSKI(key *ecdsa.PublicKey) []byte { + raw := elliptic.Marshal(key.Curve, key.X, key.Y) + hash := sha256.Sum256(raw) + return hash[:] +} diff --git a/common/crypto/tlsgen/key_test.go b/common/crypto/tlsgen/key_test.go index ccbe5e7a928..d3dc44711b4 100644 --- a/common/crypto/tlsgen/key_test.go +++ b/common/crypto/tlsgen/key_test.go @@ -16,7 +16,7 @@ import ( ) func TestLoadCert(t *testing.T) { - pair, err := newCertKeyPair(false, false, "", nil, nil) + pair, err := newCertKeyPair(false, false, nil, nil) require.NoError(t, err) require.NotNil(t, pair) tlsCertPair, err := tls.X509KeyPair(pair.Cert, pair.Key) diff --git a/common/deliver/acl.go b/common/deliver/acl.go index 6f704a60b91..5a2469efb87 100644 --- a/common/deliver/acl.go +++ b/common/deliver/acl.go @@ -57,7 +57,7 @@ type SessionAccessControl struct { // changes. 
func (ac *SessionAccessControl) Evaluate() error { if !ac.sessionEndTime.IsZero() && time.Now().After(ac.sessionEndTime) { - return errors.Errorf("client identity expired %v before", time.Since(ac.sessionEndTime)) + return errors.Errorf("deliver client identity expired %v before", time.Since(ac.sessionEndTime)) } policyCheckNeeded := !ac.usedAtLeastOnce diff --git a/common/deliver/acl_test.go b/common/deliver/acl_test.go index d7204d4b578..33f529155a1 100644 --- a/common/deliver/acl_test.go +++ b/common/deliver/acl_test.go @@ -116,7 +116,7 @@ var _ = Describe("SessionAccessControl", func() { err = sac.Evaluate() Expect(err).NotTo(HaveOccurred()) - Eventually(sac.Evaluate).Should(MatchError(ContainSubstring("client identity expired"))) + Eventually(sac.Evaluate).Should(MatchError(ContainSubstring("deliver client identity expired"))) }) }) diff --git a/common/deliver/mock/block_reader.go b/common/deliver/mock/block_reader.go index 63b7e6a6e2b..5f71afe7061 100644 --- a/common/deliver/mock/block_reader.go +++ b/common/deliver/mock/block_reader.go @@ -4,6 +4,7 @@ package mock import ( "sync" + "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/orderer" "github.com/hyperledger/fabric/common/ledger/blockledger" ) @@ -32,6 +33,19 @@ type BlockReader struct { result1 blockledger.Iterator result2 uint64 } + RetrieveBlockByNumberStub func(uint64) (*common.Block, error) + retrieveBlockByNumberMutex sync.RWMutex + retrieveBlockByNumberArgsForCall []struct { + arg1 uint64 + } + retrieveBlockByNumberReturns struct { + result1 *common.Block + result2 error + } + retrieveBlockByNumberReturnsOnCall map[int]struct { + result1 *common.Block + result2 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -41,15 +55,16 @@ func (fake *BlockReader) Height() uint64 { ret, specificReturn := fake.heightReturnsOnCall[len(fake.heightArgsForCall)] fake.heightArgsForCall = append(fake.heightArgsForCall, struct { }{}) + stub := 
fake.HeightStub + fakeReturns := fake.heightReturns fake.recordInvocation("Height", []interface{}{}) fake.heightMutex.Unlock() - if fake.HeightStub != nil { - return fake.HeightStub() + if stub != nil { + return stub() } if specificReturn { return ret.result1 } - fakeReturns := fake.heightReturns return fakeReturns.result1 } @@ -94,15 +109,16 @@ func (fake *BlockReader) Iterator(arg1 *orderer.SeekPosition) (blockledger.Itera fake.iteratorArgsForCall = append(fake.iteratorArgsForCall, struct { arg1 *orderer.SeekPosition }{arg1}) + stub := fake.IteratorStub + fakeReturns := fake.iteratorReturns fake.recordInvocation("Iterator", []interface{}{arg1}) fake.iteratorMutex.Unlock() - if fake.IteratorStub != nil { - return fake.IteratorStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.iteratorReturns return fakeReturns.result1, fakeReturns.result2 } @@ -151,6 +167,70 @@ func (fake *BlockReader) IteratorReturnsOnCall(i int, result1 blockledger.Iterat }{result1, result2} } +func (fake *BlockReader) RetrieveBlockByNumber(arg1 uint64) (*common.Block, error) { + fake.retrieveBlockByNumberMutex.Lock() + ret, specificReturn := fake.retrieveBlockByNumberReturnsOnCall[len(fake.retrieveBlockByNumberArgsForCall)] + fake.retrieveBlockByNumberArgsForCall = append(fake.retrieveBlockByNumberArgsForCall, struct { + arg1 uint64 + }{arg1}) + stub := fake.RetrieveBlockByNumberStub + fakeReturns := fake.retrieveBlockByNumberReturns + fake.recordInvocation("RetrieveBlockByNumber", []interface{}{arg1}) + fake.retrieveBlockByNumberMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *BlockReader) RetrieveBlockByNumberCallCount() int { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + return len(fake.retrieveBlockByNumberArgsForCall) +} + +func 
(fake *BlockReader) RetrieveBlockByNumberCalls(stub func(uint64) (*common.Block, error)) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = stub +} + +func (fake *BlockReader) RetrieveBlockByNumberArgsForCall(i int) uint64 { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + argsForCall := fake.retrieveBlockByNumberArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *BlockReader) RetrieveBlockByNumberReturns(result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + fake.retrieveBlockByNumberReturns = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + +func (fake *BlockReader) RetrieveBlockByNumberReturnsOnCall(i int, result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + if fake.retrieveBlockByNumberReturnsOnCall == nil { + fake.retrieveBlockByNumberReturnsOnCall = make(map[int]struct { + result1 *common.Block + result2 error + }) + } + fake.retrieveBlockByNumberReturnsOnCall[i] = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + func (fake *BlockReader) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -158,6 +238,8 @@ func (fake *BlockReader) Invocations() map[string][][]interface{} { defer fake.heightMutex.RUnlock() fake.iteratorMutex.RLock() defer fake.iteratorMutex.RUnlock() + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/common/fabhttp/fabhttp_suite_test.go b/common/fabhttp/fabhttp_suite_test.go index 
c2775e2fafe..e530de4441f 100644 --- a/common/fabhttp/fabhttp_suite_test.go +++ b/common/fabhttp/fabhttp_suite_test.go @@ -48,7 +48,7 @@ func generateCertificates(tempDir string) { Expect(err).NotTo(HaveOccurred()) } -func newHTTPClient(tlsDir string, withClientCert bool) *http.Client { +func newHTTPClient(tlsDir string, withClientCert bool, tlsOpts ...func(config *tls.Config)) *http.Client { clientCertPool := x509.NewCertPool() caCert, err := ioutil.ReadFile(filepath.Join(tlsDir, "server-ca.pem")) Expect(err).NotTo(HaveOccurred()) @@ -66,6 +66,10 @@ func newHTTPClient(tlsDir string, withClientCert bool) *http.Client { tlsClientConfig.Certificates = []tls.Certificate{clientCert} } + for _, opt := range tlsOpts { + opt(tlsClientConfig) + } + return &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsClientConfig, diff --git a/common/fabhttp/server_test.go b/common/fabhttp/server_test.go index 5d2248e578d..b8dcac1cc8d 100644 --- a/common/fabhttp/server_test.go +++ b/common/fabhttp/server_test.go @@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0 package fabhttp_test import ( + "crypto/tls" "fmt" "io/ioutil" "net" @@ -67,6 +68,27 @@ var _ = Describe("Server", func() { } }) + When("trying to connect with an old TLS version", func() { + BeforeEach(func() { + tlsOpts := []func(config *tls.Config){func(config *tls.Config) { + config.MaxVersion = tls.VersionTLS11 + config.ClientAuth = tls.RequireAndVerifyClientCert + }} + + client = newHTTPClient(tempDir, true, tlsOpts...) 
+ }) + + It("does not answer clients using an older TLS version than 1.2", func() { + server.RegisterHandler(AdditionalTestApiPath, &fakes.Handler{Code: http.StatusOK, Text: "secure"}, options.TLS.Enabled) + err := server.Start() + Expect(err).NotTo(HaveOccurred()) + + addApiURL := fmt.Sprintf("https://%s%s", server.Addr(), AdditionalTestApiPath) + _, err = client.Get(addApiURL) + Expect(err.Error()).To(ContainSubstring("tls: protocol version not supported")) + }) + }) + It("does not host a secure endpoint for additional APIs by default", func() { err := server.Start() Expect(err).NotTo(HaveOccurred()) diff --git a/common/fabhttp/tls.go b/common/fabhttp/tls.go index dd426bcdcd7..9e17c776d9b 100644 --- a/common/fabhttp/tls.go +++ b/common/fabhttp/tls.go @@ -39,6 +39,7 @@ func (t TLS) Config() (*tls.Config, error) { caCertPool.AppendCertsFromPEM(caPem) } tlsConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, Certificates: []tls.Certificate{cert}, CipherSuites: comm.DefaultTLSCipherSuites, ClientCAs: caCertPool, diff --git a/common/fabhttp/tls_test.go b/common/fabhttp/tls_test.go index f5103339e77..f6efe689ebf 100644 --- a/common/fabhttp/tls_test.go +++ b/common/fabhttp/tls_test.go @@ -59,7 +59,13 @@ var _ = Describe("TLS", func() { tlsConfig, err := httpTLS.Config() Expect(err).NotTo(HaveOccurred()) + + // https://go-review.googlesource.com/c/go/+/229917 + Expect(tlsConfig.ClientCAs.Subjects()).To(Equal(clientCAPool.Subjects())) + tlsConfig.ClientCAs = nil + Expect(tlsConfig).To(Equal(&tls.Config{ + MinVersion: tls.VersionTLS12, Certificates: []tls.Certificate{cert}, CipherSuites: []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, @@ -69,7 +75,6 @@ var _ = Describe("TLS", func() { tls.TLS_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, }, - ClientCAs: clientCAPool, ClientAuth: tls.RequireAndVerifyClientCert, })) }) diff --git a/common/graph/choose.go b/common/graph/choose.go index 08a6ce0dc49..d7df824868f 100644 --- a/common/graph/choose.go +++ 
b/common/graph/choose.go @@ -59,7 +59,14 @@ func choose(n int, targetAmount int, i int, currentSubGroup []int, subGroups *or return } // We either pick the current element - choose(n, targetAmount, i+1, append(currentSubGroup, i), subGroups) + choose(n, targetAmount, i+1, concatInts(currentSubGroup, i), subGroups) // Or don't pick it choose(n, targetAmount, i+1, currentSubGroup, subGroups) } + +func concatInts(a []int, elements ...int) []int { + var res []int + res = append(res, a...) + res = append(res, elements...) + return res +} diff --git a/common/graph/choose_test.go b/common/graph/choose_test.go index 33326f455bf..108697268df 100644 --- a/common/graph/choose_test.go +++ b/common/graph/choose_test.go @@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0 package graph import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -24,3 +25,32 @@ func TestCombinationsExceed(t *testing.T) { // N < K returns false require.False(t, CombinationsExceed(20, 30, 0)) } + +func TestChooseKoutOfN(t *testing.T) { + expectedSets := indiceSets{ + &indiceSet{[]int{0, 1, 2, 3}}, + &indiceSet{[]int{0, 1, 2, 4}}, + &indiceSet{[]int{0, 1, 2, 5}}, + &indiceSet{[]int{0, 1, 3, 4}}, + &indiceSet{[]int{0, 1, 3, 5}}, + &indiceSet{[]int{0, 1, 4, 5}}, + &indiceSet{[]int{0, 2, 3, 4}}, + &indiceSet{[]int{0, 2, 3, 5}}, + &indiceSet{[]int{0, 2, 4, 5}}, + &indiceSet{[]int{0, 3, 4, 5}}, + &indiceSet{[]int{1, 2, 3, 4}}, + &indiceSet{[]int{1, 2, 3, 5}}, + &indiceSet{[]int{1, 2, 4, 5}}, + &indiceSet{[]int{1, 3, 4, 5}}, + &indiceSet{[]int{2, 3, 4, 5}}, + } + require.Equal(t, indiceSetsToStrings(expectedSets), indiceSetsToStrings(chooseKoutOfN(6, 4))) +} + +func indiceSetsToStrings(sets indiceSets) []string { + var res []string + for _, set := range sets { + res = append(res, fmt.Sprintf("%v", set.indices)) + } + return res +} diff --git a/common/ledger/blockledger/fileledger/impl.go b/common/ledger/blockledger/fileledger/impl.go index f6044de865b..910104461bf 100644 --- 
a/common/ledger/blockledger/fileledger/impl.go +++ b/common/ledger/blockledger/fileledger/impl.go @@ -29,6 +29,7 @@ type FileLedgerBlockStore interface { GetBlockchainInfo() (*cb.BlockchainInfo, error) RetrieveBlocks(startBlockNumber uint64) (ledger.ResultsIterator, error) Shutdown() + RetrieveBlockByNumber(blockNum uint64) (*cb.Block, error) } // NewFileLedger creates a new FileLedger for interaction with the ledger @@ -93,6 +94,7 @@ func (fl *FileLedger) Iterator(startPosition *ab.SeekPosition) (blockledger.Iter iterator, err := fl.blockStore.RetrieveBlocks(startingBlockNumber) if err != nil { + logger.Warnw("Failed to initialize block iterator", "blockNum", startingBlockNumber, "error", err) return &blockledger.NotFoundErrorIterator{}, 0 } @@ -117,3 +119,7 @@ func (fl *FileLedger) Append(block *cb.Block) error { } return err } + +func (fl *FileLedger) RetrieveBlockByNumber(blockNumber uint64) (*cb.Block, error) { + return fl.blockStore.RetrieveBlockByNumber(blockNumber) +} diff --git a/common/ledger/blockledger/fileledger/mock/file_ledger_block_store.go b/common/ledger/blockledger/fileledger/mock/file_ledger_block_store.go index 1018249dc6b..bdd2dcc70fd 100644 --- a/common/ledger/blockledger/fileledger/mock/file_ledger_block_store.go +++ b/common/ledger/blockledger/fileledger/mock/file_ledger_block_store.go @@ -32,6 +32,19 @@ type FileLedgerBlockStore struct { result1 *common.BlockchainInfo result2 error } + RetrieveBlockByNumberStub func(uint64) (*common.Block, error) + retrieveBlockByNumberMutex sync.RWMutex + retrieveBlockByNumberArgsForCall []struct { + arg1 uint64 + } + retrieveBlockByNumberReturns struct { + result1 *common.Block + result2 error + } + retrieveBlockByNumberReturnsOnCall map[int]struct { + result1 *common.Block + result2 error + } RetrieveBlocksStub func(uint64) (ledger.ResultsIterator, error) retrieveBlocksMutex sync.RWMutex retrieveBlocksArgsForCall []struct { @@ -59,15 +72,16 @@ func (fake *FileLedgerBlockStore) AddBlock(arg1 
*common.Block) error { fake.addBlockArgsForCall = append(fake.addBlockArgsForCall, struct { arg1 *common.Block }{arg1}) + stub := fake.AddBlockStub + fakeReturns := fake.addBlockReturns fake.recordInvocation("AddBlock", []interface{}{arg1}) fake.addBlockMutex.Unlock() - if fake.AddBlockStub != nil { - return fake.AddBlockStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1 } - fakeReturns := fake.addBlockReturns return fakeReturns.result1 } @@ -118,15 +132,16 @@ func (fake *FileLedgerBlockStore) GetBlockchainInfo() (*common.BlockchainInfo, e ret, specificReturn := fake.getBlockchainInfoReturnsOnCall[len(fake.getBlockchainInfoArgsForCall)] fake.getBlockchainInfoArgsForCall = append(fake.getBlockchainInfoArgsForCall, struct { }{}) + stub := fake.GetBlockchainInfoStub + fakeReturns := fake.getBlockchainInfoReturns fake.recordInvocation("GetBlockchainInfo", []interface{}{}) fake.getBlockchainInfoMutex.Unlock() - if fake.GetBlockchainInfoStub != nil { - return fake.GetBlockchainInfoStub() + if stub != nil { + return stub() } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.getBlockchainInfoReturns return fakeReturns.result1, fakeReturns.result2 } @@ -168,21 +183,86 @@ func (fake *FileLedgerBlockStore) GetBlockchainInfoReturnsOnCall(i int, result1 }{result1, result2} } +func (fake *FileLedgerBlockStore) RetrieveBlockByNumber(arg1 uint64) (*common.Block, error) { + fake.retrieveBlockByNumberMutex.Lock() + ret, specificReturn := fake.retrieveBlockByNumberReturnsOnCall[len(fake.retrieveBlockByNumberArgsForCall)] + fake.retrieveBlockByNumberArgsForCall = append(fake.retrieveBlockByNumberArgsForCall, struct { + arg1 uint64 + }{arg1}) + stub := fake.RetrieveBlockByNumberStub + fakeReturns := fake.retrieveBlockByNumberReturns + fake.recordInvocation("RetrieveBlockByNumber", []interface{}{arg1}) + fake.retrieveBlockByNumberMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return 
ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FileLedgerBlockStore) RetrieveBlockByNumberCallCount() int { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + return len(fake.retrieveBlockByNumberArgsForCall) +} + +func (fake *FileLedgerBlockStore) RetrieveBlockByNumberCalls(stub func(uint64) (*common.Block, error)) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = stub +} + +func (fake *FileLedgerBlockStore) RetrieveBlockByNumberArgsForCall(i int) uint64 { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + argsForCall := fake.retrieveBlockByNumberArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FileLedgerBlockStore) RetrieveBlockByNumberReturns(result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + fake.retrieveBlockByNumberReturns = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + +func (fake *FileLedgerBlockStore) RetrieveBlockByNumberReturnsOnCall(i int, result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + if fake.retrieveBlockByNumberReturnsOnCall == nil { + fake.retrieveBlockByNumberReturnsOnCall = make(map[int]struct { + result1 *common.Block + result2 error + }) + } + fake.retrieveBlockByNumberReturnsOnCall[i] = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + func (fake *FileLedgerBlockStore) RetrieveBlocks(arg1 uint64) (ledger.ResultsIterator, error) { fake.retrieveBlocksMutex.Lock() ret, specificReturn := fake.retrieveBlocksReturnsOnCall[len(fake.retrieveBlocksArgsForCall)] fake.retrieveBlocksArgsForCall = append(fake.retrieveBlocksArgsForCall, struct { 
arg1 uint64 }{arg1}) + stub := fake.RetrieveBlocksStub + fakeReturns := fake.retrieveBlocksReturns fake.recordInvocation("RetrieveBlocks", []interface{}{arg1}) fake.retrieveBlocksMutex.Unlock() - if fake.RetrieveBlocksStub != nil { - return fake.RetrieveBlocksStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.retrieveBlocksReturns return fakeReturns.result1, fakeReturns.result2 } @@ -235,9 +315,10 @@ func (fake *FileLedgerBlockStore) Shutdown() { fake.shutdownMutex.Lock() fake.shutdownArgsForCall = append(fake.shutdownArgsForCall, struct { }{}) + stub := fake.ShutdownStub fake.recordInvocation("Shutdown", []interface{}{}) fake.shutdownMutex.Unlock() - if fake.ShutdownStub != nil { + if stub != nil { fake.ShutdownStub() } } @@ -261,6 +342,8 @@ func (fake *FileLedgerBlockStore) Invocations() map[string][][]interface{} { defer fake.addBlockMutex.RUnlock() fake.getBlockchainInfoMutex.RLock() defer fake.getBlockchainInfoMutex.RUnlock() + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() fake.retrieveBlocksMutex.RLock() defer fake.retrieveBlocksMutex.RUnlock() fake.shutdownMutex.RLock() diff --git a/common/ledger/blockledger/ledger.go b/common/ledger/blockledger/ledger.go index f3f81e1fa1c..1a734aad374 100644 --- a/common/ledger/blockledger/ledger.go +++ b/common/ledger/blockledger/ledger.go @@ -43,6 +43,8 @@ type Reader interface { Iterator(startType *ab.SeekPosition) (Iterator, uint64) // Height returns the number of blocks on the ledger Height() uint64 + // retrieve blockByNumber + RetrieveBlockByNumber(blockNumber uint64) (*cb.Block, error) } // Writer allows the caller to modify the ledger diff --git a/common/ledger/blockledger/util.go b/common/ledger/blockledger/util.go index 3edc736fc3f..98c03339cf4 100644 --- a/common/ledger/blockledger/util.go +++ b/common/ledger/blockledger/util.go @@ -10,9 +10,12 @@ import ( "github.com/golang/protobuf/proto" cb 
"github.com/hyperledger/fabric-protos-go/common" ab "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/protoutil" ) +var logger = flogging.MustGetLogger("common.ledger.blockledger.util") + var closedChan chan struct{} func init() { @@ -95,3 +98,8 @@ func GetBlock(rl Reader, index uint64) *cb.Block { } return block } + +func GetBlockByNumber(rl Reader, blockNum uint64) (*cb.Block, error) { + logger.Debugw("Retrieving block", "blockNum", blockNum) + return rl.RetrieveBlockByNumber(blockNum) +} diff --git a/common/viperutil/config_util.go b/common/viperutil/config_util.go index 30c30a986fe..4c1178986ed 100644 --- a/common/viperutil/config_util.go +++ b/common/viperutil/config_util.go @@ -449,3 +449,19 @@ func (c *ConfigParser) EnhancedExactUnmarshal(output interface{}) error { } return decoder.Decode(leafKeys) } + +// YamlStringToStructHook is a hook for viper(viper.Unmarshal(*,*, here)), it is able to parse a string of minified yaml into a slice of structs +func YamlStringToStructHook(m interface{}) func(rf reflect.Kind, rt reflect.Kind, data interface{}) (interface{}, error) { + return func(rf reflect.Kind, rt reflect.Kind, data interface{}) (interface{}, error) { + if rf != reflect.String || rt != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return m, nil + } + + return m, yaml.UnmarshalStrict([]byte(raw), &m) + } +} diff --git a/core/chaincode/config_test.go b/core/chaincode/config_test.go index dae7b66253f..70cf4fb193b 100644 --- a/core/chaincode/config_test.go +++ b/core/chaincode/config_test.go @@ -36,6 +36,7 @@ var _ = Describe("Config", func() { viper.Set("chaincode.logging.format", "test-chaincode-logging-format") viper.Set("chaincode.logging.level", "warning") viper.Set("chaincode.logging.shim", "warning") + viper.Set("chaincode.system.somecc", true) config := chaincode.GlobalConfig() Expect(config.TLSEnabled).To(BeTrue()) @@ -46,6 +47,7 @@ 
var _ = Describe("Config", func() { Expect(config.LogFormat).To(Equal("test-chaincode-logging-format")) Expect(config.LogLevel).To(Equal("warn")) Expect(config.ShimLogLevel).To(Equal("warn")) + Expect(config.SCCAllowlist).To(Equal(map[string]bool{"somecc": true})) }) Context("when an invalid keepalive is configured", func() { diff --git a/core/chaincode/platforms/golang/list_test.go b/core/chaincode/platforms/golang/list_test.go index 5ea0cfbbbca..a76c9d26c39 100644 --- a/core/chaincode/platforms/golang/list_test.go +++ b/core/chaincode/platforms/golang/list_test.go @@ -122,5 +122,6 @@ func Test_listModuleInfoFailure(t *testing.T) { require.NoError(t, err, "failed to change to temporary directory") _, err = listModuleInfo() - require.EqualError(t, err, "'go list' failed with: go: cannot find main module; see 'go help modules': exit status 1") + require.ErrorContains(t, err, "'go list' failed with: go: ") + require.ErrorContains(t, err, "see 'go help modules': exit status 1") } diff --git a/core/chaincode/platforms/golang/platform.go b/core/chaincode/platforms/golang/platform.go index de9f417bdc5..21945c157bf 100644 --- a/core/chaincode/platforms/golang/platform.go +++ b/core/chaincode/platforms/golang/platform.go @@ -219,7 +219,7 @@ elif [ -f "/chaincode/input/src/%[2]s/go.mod" ]; then cd /chaincode/input/src/%[2]s GO111MODULE=on go build -v -mod=readonly %[1]s -o /chaincode/output/chaincode . else - GOPATH=/chaincode/input:$GOPATH go build -v %[1]s -o /chaincode/output/chaincode %[2]s + GO111MODULE=off GOPATH=/chaincode/input:$GOPATH go build -v %[1]s -o /chaincode/output/chaincode %[2]s fi echo Done! 
` diff --git a/core/chaincode/platforms/golang/platform_test.go b/core/chaincode/platforms/golang/platform_test.go index fb9d245e165..f420a4cc4f0 100644 --- a/core/chaincode/platforms/golang/platform_test.go +++ b/core/chaincode/platforms/golang/platform_test.go @@ -378,7 +378,7 @@ elif [ -f "/chaincode/input/src/the-path/go.mod" ]; then cd /chaincode/input/src/the-path GO111MODULE=on go build -v -mod=readonly -ldflags "-linkmode external -extldflags '-static'" -o /chaincode/output/chaincode . else - GOPATH=/chaincode/input:$GOPATH go build -v -ldflags "-linkmode external -extldflags '-static'" -o /chaincode/output/chaincode the-path + GO111MODULE=off GOPATH=/chaincode/input:$GOPATH go build -v -ldflags "-linkmode external -extldflags '-static'" -o /chaincode/output/chaincode the-path fi echo Done! `, @@ -419,7 +419,7 @@ elif [ -f "/chaincode/input/src/the-path/go.mod" ]; then cd /chaincode/input/src/the-path GO111MODULE=on go build -v -mod=readonly -ldflags "-linkmode external -extldflags '-static'" -o /chaincode/output/chaincode . else - GOPATH=/chaincode/input:$GOPATH go build -v -ldflags "-linkmode external -extldflags '-static'" -o /chaincode/output/chaincode the-path + GO111MODULE=off GOPATH=/chaincode/input:$GOPATH go build -v -ldflags "-linkmode external -extldflags '-static'" -o /chaincode/output/chaincode the-path fi echo Done! 
`, diff --git a/core/chaincode/platforms/util/utils.go b/core/chaincode/platforms/util/utils.go index 1d0562463c1..0e020dc2e4e 100644 --- a/core/chaincode/platforms/util/utils.go +++ b/core/chaincode/platforms/util/utils.go @@ -29,6 +29,10 @@ type DockerBuildOptions struct { OutputStream io.Writer } +func (dbo DockerBuildOptions) String() string { + return fmt.Sprintf("Image=%s Env=%s Cmd=%s)", dbo.Image, dbo.Env, dbo.Cmd) +} + //------------------------------------------------------------------------------------------- // DockerBuild //------------------------------------------------------------------------------------------- @@ -60,7 +64,7 @@ func DockerBuild(opts DockerBuildOptions, client *docker.Client) error { } } - logger.Debugf("Attempting build with image %s", opts.Image) + logger.Debugf("Attempting build with options: %s", opts) //----------------------------------------------------------------------------------- // Ensure the image exists locally, or pull it from a registry if it doesn't @@ -145,6 +149,7 @@ func DockerBuild(opts DockerBuildOptions, client *docker.Client) error { } if retval > 0 { + logger.Errorf("Docker build failed using options: %s", opts) return fmt.Errorf("Error returned from build: %d \"%s\"", retval, stdout.String()) } diff --git a/core/chaincode/platforms/util/utils_test.go b/core/chaincode/platforms/util/utils_test.go index 7c7d414eb99..f29eca3d9d9 100644 --- a/core/chaincode/platforms/util/utils_test.go +++ b/core/chaincode/platforms/util/utils_test.go @@ -105,3 +105,15 @@ func TestTwoDigitVersion(t *testing.T) { actual = twoDigitVersion(version) require.Equal(t, expected, actual, `Error parsing two digit version. 
Expected "%s", got "%s"`, expected, actual) } + +func TestDockerBuildOptions(t *testing.T) { + buildOptions := DockerBuildOptions{ + Image: "imageName", + Cmd: "theCommand", + Env: []string{"ENV_VARIABLE"}, + } + + actualBuildOptionsString := buildOptions.String() + expectedBuildOptionsString := "Image=imageName Env=[ENV_VARIABLE] Cmd=theCommand)" + require.Equal(t, expectedBuildOptionsString, actualBuildOptionsString, `Expected "%s", got "%s"`, expectedBuildOptionsString, actualBuildOptionsString) +} diff --git a/core/deliverservice/config.go b/core/deliverservice/config.go index ad0c1440d16..046e07b772f 100644 --- a/core/deliverservice/config.go +++ b/core/deliverservice/config.go @@ -29,6 +29,8 @@ const ( type DeliverServiceConfig struct { // PeerTLSEnabled enables/disables Peer TLS. PeerTLSEnabled bool + // BlockGossipEnabled enables block forwarding via gossip + BlockGossipEnabled bool // ReConnectBackoffThreshold sets the delivery service maximal delay between consencutive retries. 
ReConnectBackoffThreshold time.Duration // ReconnectTotalTimeThreshold sets the total time the delivery service may spend in reconnection attempts @@ -95,6 +97,13 @@ func LoadOverridesMap() (map[string]*orderers.Endpoint, error) { } func (c *DeliverServiceConfig) loadDeliverServiceConfig() { + enabledKey := "peer.deliveryclient.blockGossipEnabled" + enabledConfigOptionMissing := !viper.IsSet(enabledKey) + if enabledConfigOptionMissing { + logger.Infof("peer.deliveryclient.blockGossipEnabled is not set, defaulting to true.") + } + c.BlockGossipEnabled = enabledConfigOptionMissing || viper.GetBool(enabledKey) + c.PeerTLSEnabled = viper.GetBool("peer.tls.enabled") c.ReConnectBackoffThreshold = viper.GetDuration("peer.deliveryclient.reConnectBackoffThreshold") diff --git a/core/deliverservice/config_test.go b/core/deliverservice/config_test.go index 3d64d7e5197..b3658bb83a6 100644 --- a/core/deliverservice/config_test.go +++ b/core/deliverservice/config_test.go @@ -92,6 +92,7 @@ func TestGlobalConfig(t *testing.T) { coreConfig := deliverservice.GlobalConfig() expectedConfig := &deliverservice.DeliverServiceConfig{ + BlockGossipEnabled: true, PeerTLSEnabled: true, ReConnectBackoffThreshold: 25 * time.Second, ReconnectTotalTimeThreshold: 20 * time.Second, @@ -118,6 +119,7 @@ func TestGlobalConfigDefault(t *testing.T) { coreConfig := deliverservice.GlobalConfig() expectedConfig := &deliverservice.DeliverServiceConfig{ + BlockGossipEnabled: true, PeerTLSEnabled: false, ReConnectBackoffThreshold: deliverservice.DefaultReConnectBackoffThreshold, ReconnectTotalTimeThreshold: deliverservice.DefaultReConnectTotalTimeThreshold, diff --git a/core/deliverservice/deliveryclient.go b/core/deliverservice/deliveryclient.go index 88356448d70..41b5f6b4c6c 100644 --- a/core/deliverservice/deliveryclient.go +++ b/core/deliverservice/deliveryclient.go @@ -128,15 +128,16 @@ func (d *deliverServiceImpl) StartDeliverForChannel(chainID string, ledgerInfo b Dialer: DialerAdapter{ Client: 
d.conf.DeliverGRPCClient, }, - Orderers: d.conf.OrdererSource, - DoneC: make(chan struct{}), - Signer: d.conf.Signer, - DeliverStreamer: DeliverAdapter{}, - Logger: flogging.MustGetLogger("peer.blocksprovider").With("channel", chainID), - MaxRetryDelay: d.conf.DeliverServiceConfig.ReConnectBackoffThreshold, - MaxRetryDuration: d.conf.DeliverServiceConfig.ReconnectTotalTimeThreshold, - InitialRetryDelay: 100 * time.Millisecond, - YieldLeadership: !d.conf.IsStaticLeader, + Orderers: d.conf.OrdererSource, + DoneC: make(chan struct{}), + Signer: d.conf.Signer, + DeliverStreamer: DeliverAdapter{}, + Logger: flogging.MustGetLogger("peer.blocksprovider").With("channel", chainID), + MaxRetryDelay: d.conf.DeliverServiceConfig.ReConnectBackoffThreshold, + MaxRetryDuration: d.conf.DeliverServiceConfig.ReconnectTotalTimeThreshold, + BlockGossipDisabled: !d.conf.DeliverServiceConfig.BlockGossipEnabled, + InitialRetryDelay: 100 * time.Millisecond, + YieldLeadership: !d.conf.IsStaticLeader, } if d.conf.DeliverGRPCClient.MutualTLSRequired() { diff --git a/core/endorser/endorser.go b/core/endorser/endorser.go index 075854508ed..a7a857b1bad 100644 --- a/core/endorser/endorser.go +++ b/core/endorser/endorser.go @@ -339,6 +339,7 @@ func (e *Endorser) ProcessProposal(ctx context.Context, signedProp *pb.SignedPro pResp, err := e.ProcessProposalSuccessfullyOrError(up) if err != nil { + endorserLogger.Warnw("Failed to invoke chaincode", "channel", up.ChannelHeader.ChannelId, "chaincode", up.ChaincodeName, "error", err.Error()) return &pb.ProposalResponse{Response: &pb.Response{Status: 500, Message: err.Error()}}, nil } diff --git a/core/handlers/auth/filter/expiration.go b/core/handlers/auth/filter/expiration.go index d201803140f..71f1ef524ba 100644 --- a/core/handlers/auth/filter/expiration.go +++ b/core/handlers/auth/filter/expiration.go @@ -48,7 +48,7 @@ func validateProposal(signedProp *peer.SignedProposal) error { } expirationTime := crypto.ExpiresAt(sh.Creator) if 
!expirationTime.IsZero() && time.Now().After(expirationTime) { - return errors.New("identity expired") + return errors.New("proposal client identity expired") } return nil } diff --git a/core/handlers/auth/filter/expiration_test.go b/core/handlers/auth/filter/expiration_test.go index 477b17f547a..ddefff67bca 100644 --- a/core/handlers/auth/filter/expiration_test.go +++ b/core/handlers/auth/filter/expiration_test.go @@ -95,7 +95,7 @@ func TestExpirationCheckFilter(t *testing.T) { // Scenario I: Expired x509 identity sp := createValidSignedProposal(t, createX509Identity(t, "expiredCert.pem")) _, err := auth.ProcessProposal(context.Background(), sp) - require.Equal(t, err.Error(), "identity expired") + require.Equal(t, err.Error(), "proposal client identity expired") require.False(t, nextEndorser.invoked) // Scenario II: Not expired x509 identity diff --git a/core/ledger/kvledger/history/db_test.go b/core/ledger/kvledger/history/db_test.go index aa4fe5e2363..6b7fc5f7259 100644 --- a/core/ledger/kvledger/history/db_test.go +++ b/core/ledger/kvledger/history/db_test.go @@ -13,8 +13,11 @@ import ( "strconv" "testing" + "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/ledger/queryresult" + "github.com/hyperledger/fabric-protos-go/ledger/rwset" + "github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset" "github.com/hyperledger/fabric-protos-go/peer" configtxtest "github.com/hyperledger/fabric/common/configtx/test" "github.com/hyperledger/fabric/common/flogging" @@ -502,6 +505,61 @@ func TestDrop(t *testing.T) { require.EqualError(t, env.testHistoryDBProvider.Drop("ledger2"), "internal leveldb error while obtaining db iterator: leveldb: closed") } +// TestHistoryWithKVWriteOfNilValue - See FAB-18386 for details +func TestHistoryWithKVWriteOfNilValue(t *testing.T) { + env := newTestHistoryEnv(t) + defer env.cleanup() + provider := env.testBlockStorageEnv.provider + store, err := 
provider.Open("ledger1") + require.NoError(t, err) + defer store.Shutdown() + + bg, gb := testutil.NewBlockGenerator(t, "ledger1", false) + + kvRWSet := &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + // explicitly set IsDelete to false while the value to nil. As this will never be generated by simulation + {Key: "key1", IsDelete: false, Value: nil}, + }, + } + kvRWsetBytes, err := proto.Marshal(kvRWSet) + require.NoError(t, err) + + txRWSet := &rwset.TxReadWriteSet{ + NsRwset: []*rwset.NsReadWriteSet{ + { + Namespace: "ns1", + Rwset: kvRWsetBytes, + }, + }, + } + + txRWSetBytes, err := proto.Marshal(txRWSet) + require.NoError(t, err) + + block1 := bg.NextBlockWithTxid([][]byte{txRWSetBytes}, []string{"txid1"}) + + historydb := env.testHistoryDBProvider.GetDBHandle("ledger1") + require.NoError(t, store.AddBlock(gb)) + require.NoError(t, historydb.Commit(gb)) + require.NoError(t, store.AddBlock(block1)) + require.NoError(t, historydb.Commit(block1)) + + historydbQE, err := historydb.NewQueryExecutor(store) + require.NoError(t, err) + itr, err := historydbQE.GetHistoryForKey("ns1", "key1") + require.NoError(t, err) + kmod, err := itr.Next() + require.NoError(t, err) + keyModification := kmod.(*queryresult.KeyModification) + // despite IsDelete set to "false" in the write-set, historydb results should set this to "true" + require.True(t, keyModification.IsDelete) + + kmod, err = itr.Next() + require.NoError(t, err) + require.Nil(t, kmod) +} + // verify history results func testutilVerifyResults(t *testing.T, hqe ledger.HistoryQueryExecutor, ns, key string, expectedVals []string) { itr, err := hqe.GetHistoryForKey(ns, key) diff --git a/core/ledger/kvledger/history/query_executer.go b/core/ledger/kvledger/history/query_executer.go index 4e5168275db..202f4cbe989 100644 --- a/core/ledger/kvledger/history/query_executer.go +++ b/core/ledger/kvledger/history/query_executer.go @@ -135,7 +135,7 @@ func getKeyModificationFromTran(tranEnvelope *common.Envelope, namespace 
string, for _, kvWrite := range nsRWSet.KvRwSet.Writes { if kvWrite.Key == key { return &queryresult.KeyModification{TxId: txID, Value: kvWrite.Value, - Timestamp: timestamp, IsDelete: kvWrite.IsDelete}, nil + Timestamp: timestamp, IsDelete: rwsetutil.IsKVWriteDelete(kvWrite)}, nil } } // end keys loop logger.Debugf("key [%s] not found in namespace [%s]'s writeset", key, namespace) diff --git a/core/ledger/kvledger/kv_ledger_provider.go b/core/ledger/kvledger/kv_ledger_provider.go index 2c197661de0..ae28e2ff7a2 100644 --- a/core/ledger/kvledger/kv_ledger_provider.go +++ b/core/ledger/kvledger/kv_ledger_provider.go @@ -251,10 +251,10 @@ func (p *Provider) initSnapshotDir() error { return errors.Wrapf(err, "error while deleting the dir: %s", inProgressSnapshotsPath) } if err := os.MkdirAll(inProgressSnapshotsPath, 0755); err != nil { - return errors.Wrapf(err, "error while creating the dir: %s", inProgressSnapshotsPath) + return errors.Wrapf(err, "error while creating the dir: %s, ensure peer has write access to configured ledger.snapshots.rootDir directory", inProgressSnapshotsPath) } if err := os.MkdirAll(completedSnapshotsPath, 0755); err != nil { - return errors.Wrapf(err, "error while creating the dir: %s", completedSnapshotsPath) + return errors.Wrapf(err, "error while creating the dir: %s, ensure peer has write access to configured ledger.snapshots.rootDir directory", completedSnapshotsPath) } return fileutil.SyncDir(snapshotsRootDir) } diff --git a/core/ledger/kvledger/snapshot_mgmt.go b/core/ledger/kvledger/snapshot_mgmt.go index a6fa09fb3e8..d11133a626c 100644 --- a/core/ledger/kvledger/snapshot_mgmt.go +++ b/core/ledger/kvledger/snapshot_mgmt.go @@ -7,6 +7,7 @@ SPDX-License-Identifier: Apache-2.0 package kvledger import ( + "fmt" "math" "os" "sync" @@ -48,6 +49,11 @@ type event struct { typ eventType blockNumber uint64 } + +func (e *event) String() string { + return fmt.Sprintf("{type=%s, blockNumber=%d}", e.typ, e.blockNumber) +} + type requestResponse 
struct { err error } @@ -105,7 +111,11 @@ func (l *kvLedger) processSnapshotMgmtEvents(lastCommittedBlockNumber uint64) { for { e := <-events - logger.Debugw("Event received", "channelID", l.ledgerID, "type", e.typ, "blockNumber", e.blockNumber, "snapshotInProgress=", snapshotInProgress) + logger.Debugw("Event received", + "channelID", l.ledgerID, "event", e, "snapshotInProgress", snapshotInProgress, + "lastCommittedBlockNumber", lastCommittedBlockNumber, "committerStatus", committerStatus, + ) + switch e.typ { case commitStart: committerStatus = blocked @@ -149,7 +159,7 @@ func (l *kvLedger) processSnapshotMgmtEvents(lastCommittedBlockNumber uint64) { case requestAdd: leastAcceptableBlockNum := lastCommittedBlockNumber if committerStatus != idle { - leastAcceptableBlockNum = +1 + leastAcceptableBlockNum++ } requestedBlockNum := e.blockNumber diff --git a/core/ledger/kvledger/snapshot_mgmt_test.go b/core/ledger/kvledger/snapshot_mgmt_test.go index d83e1422b8b..e610ef429b1 100644 --- a/core/ledger/kvledger/snapshot_mgmt_test.go +++ b/core/ledger/kvledger/snapshot_mgmt_test.go @@ -230,6 +230,38 @@ func TestSnapshotRequests(t *testing.T) { require.Eventually(t, requestsUpdated, time.Minute, 100*time.Millisecond) } +func TestSnapshotMgmtConcurrency(t *testing.T) { + conf, cleanup := testConfig(t) + defer cleanup() + provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{}) + defer provider.Close() + + ledgerID := "testsnapshotmgmtconcurrency" + bg, gb := testutil.NewBlockGenerator(t, ledgerID, false) + gbHash := protoutil.BlockHeaderHash(gb.Header) + l, err := provider.CreateFromGenesisBlock(gb) + require.NoError(t, err) + kvledger := l.(*kvLedger) + defer kvledger.Close() + + testutilCommitBlocks(t, l, bg, 5, gbHash) + + // Artificially, send event to background goroutine to indicate that commit for block 6 has started + // and then submit snapshot request for block 0, while not sending the event for commit done for block 6 + 
kvledger.snapshotMgr.events <- &event{typ: commitStart, blockNumber: 6} + <-kvledger.snapshotMgr.commitProceed + + require.NoError(t, kvledger.SubmitSnapshotRequest(0)) + require.Eventually(t, + func() bool { + r, err := kvledger.snapshotMgr.snapshotRequestBookkeeper.smallestRequest() + require.NoError(t, err) + return r == 6 + }, + 10*time.Millisecond, 1*time.Millisecond, + ) +} + func TestSnapshotMgrShutdown(t *testing.T) { conf, cleanup := testConfig(t) defer cleanup() diff --git a/core/ledger/kvledger/tests/client.go b/core/ledger/kvledger/tests/client.go index 7992e73ef74..26de31578a8 100644 --- a/core/ledger/kvledger/tests/client.go +++ b/core/ledger/kvledger/tests/client.go @@ -51,6 +51,10 @@ func (c *client) simulateDataTx(txid string, simulationLogic func(s *simulator)) return txAndPvtdata } +func (c *client) submitHandCraftedTx(txAndPvtdata *txAndPvtdata) { + c.simulatedTrans = append(c.simulatedTrans, txAndPvtdata) +} + func (c *client) addPostOrderTx(txid string, customTxType common.HeaderType) *txAndPvtdata { if txid == "" { txid = util.GenerateUUID() diff --git a/core/ledger/kvledger/tests/nilvalue_no_delete_marker_test.go b/core/ledger/kvledger/tests/nilvalue_no_delete_marker_test.go new file mode 100644 index 00000000000..8fb4e87e823 --- /dev/null +++ b/core/ledger/kvledger/tests/nilvalue_no_delete_marker_test.go @@ -0,0 +1,148 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package tests + +import ( + "testing" + + "github.com/golang/protobuf/proto" + "github.com/hyperledger/fabric-protos-go/ledger/rwset" + "github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset" + "github.com/hyperledger/fabric-protos-go/peer" + "github.com/hyperledger/fabric/common/util" + "github.com/stretchr/testify/require" +) + +// TestNilValNoDeleteMarker tests for a special writeset which carries a nil value and yet the delete marker is set to false. +// This kind of write-set gets produced in previous versions. 
See FAB-18386 for more details. +func TestNilValNoDeleteMarker(t *testing.T) { + env := newEnv(t) + defer env.cleanup() + env.initLedgerMgmt() + + testLedger := env.createTestLedgerFromGenesisBlk("test-ledger") + testLedger.simulateDeployTx("cc1", []*collConf{ + { + name: "coll1", + }, + }) + testLedger.cutBlockAndCommitLegacy() + + testLedger.simulateDataTx("txid1", func(s *simulator) { + s.setState("cc1", "pubKey1", "pubValue1") + s.setPvtdata("cc1", "coll1", "pvtKey1", "pvtValue1") + s.setPvtdata("cc1", "coll1", "pvtKey2", "pvtValue2") + }) + testLedger.cutBlockAndCommitLegacy() + + testLedger.verifyPubState("cc1", "pubKey1", "pubValue1") + testLedger.verifyPvtdataHashState("cc1", "coll1", "pvtKey1", util.ComputeSHA256([]byte("pvtValue1"))) + testLedger.verifyPvtdataHashState("cc1", "coll1", "pvtKey2", util.ComputeSHA256([]byte("pvtValue2"))) + testLedger.verifyPvtState("cc1", "coll1", "pvtKey1", "pvtValue1") + testLedger.verifyPvtState("cc1", "coll1", "pvtKey2", "pvtValue2") + + // Handcraft writeset that includes a delete for each of the above keys. We handcraft here because the one generated by the simulator + // will always have IsDelete flag true and we want to test when this flag is set to false and the actual value is nil. See FAB-18386 for + // more details. 
+ pubWrites := &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + { + Key: "pubKey1", + IsDelete: false, + Value: nil, + }, + }, + } + + hashedWrites := &kvrwset.HashedRWSet{ + HashedWrites: []*kvrwset.KVWriteHash{ + { + KeyHash: util.ComputeSHA256([]byte("pvtKey1")), + IsDelete: false, + ValueHash: nil, + }, + { + KeyHash: util.ComputeSHA256([]byte("pvtKey2")), + IsDelete: false, + ValueHash: util.ComputeSHA256([]byte{}), + }, + }, + } + + pvtWrites := &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + { + Key: "pvtKey1", + IsDelete: false, + }, + { + Key: "pvtKey2", + IsDelete: false, + }, + }, + } + + pubWritesBytes, err := proto.Marshal(pubWrites) + require.NoError(t, err) + + hashedWritesBytes, err := proto.Marshal(hashedWrites) + require.NoError(t, err) + + pvtWritesBytes, err := proto.Marshal(pvtWrites) + require.NoError(t, err) + + pubRwset := &rwset.TxReadWriteSet{ + DataModel: rwset.TxReadWriteSet_KV, + NsRwset: []*rwset.NsReadWriteSet{ + { + Namespace: "cc1", + Rwset: pubWritesBytes, + CollectionHashedRwset: []*rwset.CollectionHashedReadWriteSet{ + { + CollectionName: "coll1", + HashedRwset: hashedWritesBytes, + PvtRwsetHash: util.ComputeSHA256(pvtWritesBytes), + }, + }, + }, + }, + } + pubRwsetBytes, err := proto.Marshal(pubRwset) + require.NoError(t, err) + envelope, err := constructTransaction("txid2", pubRwsetBytes) + require.NoError(t, err) + + txAndPvtdata := &txAndPvtdata{ + Txid: "txid2", + Envelope: envelope, + Pvtws: &rwset.TxPvtReadWriteSet{ + DataModel: rwset.TxReadWriteSet_KV, + NsPvtRwset: []*rwset.NsPvtReadWriteSet{ + { + Namespace: "cc1", + CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{ + { + CollectionName: "coll1", + Rwset: pvtWritesBytes, + }, + }, + }, + }, + }, + } + + testLedger.submitHandCraftedTx(txAndPvtdata) + testLedger.cutBlockAndCommitLegacy() + + testLedger.verifyTxValidationCode("txid2", peer.TxValidationCode_VALID) + testLedger.verifyPubState("cc1", "pubKey1", "") + testLedger.verifyPvtdataHashState("cc1", "coll1", 
"pvtKey1", nil) + testLedger.verifyPvtdataHashState("cc1", "coll1", "pvtKey2", nil) + testLedger.verifyPvtState("cc1", "coll1", "pvtKey1", "") + testLedger.verifyPvtState("cc1", "coll1", "pvtKey2", "") + testLedger.verifyHistory("cc1", "pubKey1", []string{"", "pubValue1"}) +} diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/db.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/db.go index 41aa4c246c0..d3944e11896 100644 --- a/core/ledger/kvledger/txmgmt/privacyenabledstate/db.go +++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/db.go @@ -420,27 +420,30 @@ type indexInfo struct { const ( // Example for chaincode indexes: - // "META-INF/statedb/couchdb/indexes/indexColorSortName.json" + // "META-INF/statedb/couchdb/indexes" chaincodeIndexDirDepth = 3 + // Example for collection scoped indexes: - // "META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexCollMarbles.json" + // "META-INF/statedb/couchdb/collections/collectionMarbles/indexes" collectionDirDepth = 3 collectionNameDepth = 4 collectionIndexDirDepth = 5 ) +// Note previous functions will have ensured that the path starts +// with 'META-INF/statedb' and does not have leading or trailing +// path deliminators. 
func getIndexInfo(indexPath string) *indexInfo { indexInfo := &indexInfo{} - dirsDepth := strings.Split(indexPath, "/") + pathParts := strings.Split(indexPath, "/") + pathDepth := len(pathParts) + switch { - case len(dirsDepth) > chaincodeIndexDirDepth && - dirsDepth[chaincodeIndexDirDepth] == "indexes": + case pathDepth > chaincodeIndexDirDepth && pathParts[chaincodeIndexDirDepth] == "indexes": indexInfo.hasIndexForChaincode = true - case len(dirsDepth) > collectionDirDepth && - dirsDepth[collectionDirDepth] == "collections" && - dirsDepth[collectionIndexDirDepth] == "indexes": + case pathDepth > collectionIndexDirDepth && pathParts[collectionDirDepth] == "collections" && pathParts[collectionIndexDirDepth] == "indexes": indexInfo.hasIndexForCollection = true - indexInfo.collectionName = dirsDepth[collectionNameDepth] + indexInfo.collectionName = pathParts[collectionNameDepth] } return indexInfo } diff --git a/core/ledger/kvledger/txmgmt/privacyenabledstate/db_test.go b/core/ledger/kvledger/txmgmt/privacyenabledstate/db_test.go index a30b7247320..866d1058324 100644 --- a/core/ledger/kvledger/txmgmt/privacyenabledstate/db_test.go +++ b/core/ledger/kvledger/txmgmt/privacyenabledstate/db_test.go @@ -49,7 +49,7 @@ func TestHealthCheckRegister(t *testing.T) { } func TestGetIndexInfo(t *testing.T) { - chaincodeIndexPath := "META-INF/statedb/couchdb/indexes/indexColorSortName.json" + chaincodeIndexPath := "META-INF/statedb/couchdb/indexes" actualIndexInfo := getIndexInfo(chaincodeIndexPath) expectedIndexInfo := &indexInfo{ hasIndexForChaincode: true, @@ -58,7 +58,7 @@ func TestGetIndexInfo(t *testing.T) { } require.Equal(t, expectedIndexInfo, actualIndexInfo) - collectionIndexPath := "META-INF/statedb/couchdb/collections/collectionMarbles/indexes/indexCollMarbles.json" + collectionIndexPath := "META-INF/statedb/couchdb/collections/collectionMarbles/indexes" actualIndexInfo = getIndexInfo(collectionIndexPath) expectedIndexInfo = &indexInfo{ hasIndexForChaincode: false, @@ 
-67,7 +67,7 @@ func TestGetIndexInfo(t *testing.T) { } require.Equal(t, expectedIndexInfo, actualIndexInfo) - incorrectChaincodeIndexPath := "META-INF/statedb/couchdb/indexColorSortName.json" + incorrectChaincodeIndexPath := "META-INF/statedb/couchdb" actualIndexInfo = getIndexInfo(incorrectChaincodeIndexPath) expectedIndexInfo = &indexInfo{ hasIndexForChaincode: false, @@ -76,11 +76,15 @@ func TestGetIndexInfo(t *testing.T) { } require.Equal(t, expectedIndexInfo, actualIndexInfo) - incorrectCollectionIndexPath := "META-INF/statedb/couchdb/collections/indexes/indexCollMarbles.json" + incorrectCollectionIndexPath := "META-INF/statedb/couchdb/collections/indexes" actualIndexInfo = getIndexInfo(incorrectCollectionIndexPath) require.Equal(t, expectedIndexInfo, actualIndexInfo) - incorrectIndexPath := "META-INF/statedb/" + incorrectCollectionIndexPath = "META-INF/statedb/couchdb/collections" + actualIndexInfo = getIndexInfo(incorrectCollectionIndexPath) + require.Equal(t, expectedIndexInfo, actualIndexInfo) + + incorrectIndexPath := "META-INF/statedb" actualIndexInfo = getIndexInfo(incorrectIndexPath) require.Equal(t, expectedIndexInfo, actualIndexInfo) } diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go index b6b2105558c..70ff8f52ad5 100644 --- a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_builder_test.go @@ -357,3 +357,71 @@ func serializeTestProtoMsg(t *testing.T, protoMsg proto.Message) []byte { require.NoError(t, err) return msgBytes } + +func TestNilOrZeroLengthByteArrayValueConvertedToDelete(t *testing.T) { + t.Run("public_writeset", func(t *testing.T) { + rwsetBuilder := NewRWSetBuilder() + rwsetBuilder.AddToWriteSet("ns", "key1", nil) + rwsetBuilder.AddToWriteSet("ns", "key2", []byte{}) + + simulationResults, err := rwsetBuilder.GetTxSimulationResults() + require.NoError(t, err) + pubRWSet := &kvrwset.KVRWSet{} + 
require.NoError( + t, + proto.Unmarshal(simulationResults.PubSimulationResults.NsRwset[0].Rwset, pubRWSet), + ) + require.True(t, proto.Equal( + &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + {Key: "key1", IsDelete: true}, + {Key: "key2", IsDelete: true}, + }, + }, + pubRWSet, + )) + }) + + t.Run("pvtdata_and_hashes_writesets", func(t *testing.T) { + rwsetBuilder := NewRWSetBuilder() + rwsetBuilder.AddToPvtAndHashedWriteSet("ns", "coll", "key1", nil) + rwsetBuilder.AddToPvtAndHashedWriteSet("ns", "coll", "key2", []byte{}) + + simulationResults, err := rwsetBuilder.GetTxSimulationResults() + require.NoError(t, err) + + t.Run("hashed_writeset", func(t *testing.T) { + hashedRWSet := &kvrwset.HashedRWSet{} + require.NoError( + t, + proto.Unmarshal(simulationResults.PubSimulationResults.NsRwset[0].CollectionHashedRwset[0].HashedRwset, hashedRWSet), + ) + require.True(t, proto.Equal( + &kvrwset.HashedRWSet{ + HashedWrites: []*kvrwset.KVWriteHash{ + {KeyHash: util.ComputeStringHash("key1"), IsDelete: true}, + {KeyHash: util.ComputeStringHash("key2"), IsDelete: true}, + }, + }, + hashedRWSet, + )) + }) + + t.Run("pvtdata_writeset", func(t *testing.T) { + pvtWSet := &kvrwset.KVRWSet{} + require.NoError( + t, + proto.Unmarshal(simulationResults.PvtSimulationResults.NsPvtRwset[0].CollectionPvtRwset[0].Rwset, pvtWSet), + ) + require.True(t, proto.Equal( + &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + {Key: "key1", IsDelete: true}, + {Key: "key2", IsDelete: true}, + }, + }, + pvtWSet, + )) + }) + }) +} diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go index ae9cfc9d0d0..aa5888c5385 100644 --- a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util.go @@ -17,6 +17,8 @@ limitations under the License. 
package rwsetutil import ( + "bytes" + "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/ledger/rwset" "github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset" @@ -343,7 +345,7 @@ func newProtoVersion(height *version.Height) *kvrwset.Version { } func newKVWrite(key string, value []byte) *kvrwset.KVWrite { - return &kvrwset.KVWrite{Key: key, IsDelete: value == nil, Value: value} + return &kvrwset.KVWrite{Key: key, IsDelete: len(value) == 0, Value: value} } func newPvtKVReadHash(key string, version *version.Height) *kvrwset.KVReadHash { @@ -359,3 +361,17 @@ func newPvtKVWriteAndHash(key string, value []byte) (*kvrwset.KVWrite, *kvrwset. } return kvWrite, &kvrwset.KVWriteHash{KeyHash: keyHash, IsDelete: kvWrite.IsDelete, ValueHash: valueHash} } + +// IsKVWriteDelete returns true if the kvWrite indicates a delete operation. See FAB-18386 for details. +func IsKVWriteDelete(kvWrite *kvrwset.KVWrite) bool { + return kvWrite.IsDelete || len(kvWrite.Value) == 0 +} + +var ( + hashOfZeroLengthByteArray = util.ComputeHash([]byte{}) +) + +// IsKVWriteHashDelete returns true if the kvWriteHash indicates a delete operation. See FAB-18386 for details. 
+func IsKVWriteHashDelete(kvWriteHash *kvrwset.KVWriteHash) bool { + return kvWriteHash.IsDelete || len(kvWriteHash.ValueHash) == 0 || bytes.Equal(hashOfZeroLengthByteArray, kvWriteHash.ValueHash) +} diff --git a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go index 9f948befe8c..59641bf3933 100644 --- a/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go +++ b/core/ledger/kvledger/txmgmt/rwsetutil/rwset_proto_util_test.go @@ -23,6 +23,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset" "github.com/hyperledger/fabric/core/ledger/internal/version" + "github.com/hyperledger/fabric/core/ledger/util" "github.com/kr/pretty" "github.com/stretchr/testify/require" ) @@ -241,3 +242,33 @@ func TestVersionConversion(t *testing.T) { require.Nil(t, newProtoVersion(nil)) require.Equal(t, protoVer, newProtoVersion(internalVer)) } + +func TestIsDelete(t *testing.T) { + t.Run("kvWrite", func(t *testing.T) { + kvWritesToBeInterpretedAsDelete := []*kvrwset.KVWrite{ + {Value: nil, IsDelete: true}, + {Value: nil, IsDelete: false}, + {Value: []byte{}, IsDelete: true}, + {Value: []byte{}, IsDelete: false}, + } + + for _, k := range kvWritesToBeInterpretedAsDelete { + require.True(t, IsKVWriteDelete(k)) + } + }) + + t.Run("kvhashwrite", func(t *testing.T) { + kvHashesWritesToBeInterpretedAsDelete := []*kvrwset.KVWriteHash{ + {ValueHash: nil, IsDelete: true}, + {ValueHash: nil, IsDelete: false}, + {ValueHash: []byte{}, IsDelete: true}, + {ValueHash: []byte{}, IsDelete: false}, + {ValueHash: util.ComputeHash([]byte{}), IsDelete: true}, + {ValueHash: util.ComputeHash([]byte{}), IsDelete: false}, + } + + for _, k := range kvHashesWritesToBeInterpretedAsDelete { + require.True(t, IsKVWriteHashDelete(k)) + } + }) +} diff --git a/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go b/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go index 
f1976b57cbc..8758a2daefa 100644 --- a/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go +++ b/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go @@ -324,7 +324,7 @@ func (uniquePvtData uniquePvtDataMap) updateUsingPvtWrite(pvtWrite *kvrwset.KVWr uniquePvtData[hashedCompositeKey] = &privacyenabledstate.PvtKVWrite{ Key: pvtWrite.Key, - IsDelete: pvtWrite.IsDelete, + IsDelete: rwsetutil.IsKVWriteDelete(pvtWrite), Value: pvtWrite.Value, Version: ver, } diff --git a/core/ledger/kvledger/txmgmt/txmgr/txmgr_test.go b/core/ledger/kvledger/txmgmt/txmgr/txmgr_test.go index 976ca98d47f..d717c7ae28c 100644 --- a/core/ledger/kvledger/txmgmt/txmgr/txmgr_test.go +++ b/core/ledger/kvledger/txmgmt/txmgr/txmgr_test.go @@ -14,7 +14,10 @@ import ( "strings" "testing" + "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/ledger/queryresult" + "github.com/hyperledger/fabric-protos-go/ledger/rwset" + "github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset" "github.com/hyperledger/fabric/common/ledger/testutil" "github.com/hyperledger/fabric/core/ledger" "github.com/hyperledger/fabric/core/ledger/internal/version" @@ -922,6 +925,31 @@ func TestConstructUniquePvtData(t *testing.T) { // ns1-coll1-key1 should be accepted pvtDataBlk3Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{v3}) + pvtDataBlk3Tx2WriteSetBytes, err := proto.Marshal( + &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + {Key: "key5", IsDelete: false, Value: nil}, + }, + }, + ) + require.NoError(t, err) + pvtDataBlk3Tx2 := &ledger.TxPvtData{ + SeqInBlock: 2, + WriteSet: &rwset.TxPvtReadWriteSet{ + NsPvtRwset: []*rwset.NsPvtReadWriteSet{ + { + Namespace: "ns1", + CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{ + { + CollectionName: "coll1", + Rwset: pvtDataBlk3Tx2WriteSetBytes, + }, + }, + }, + }, + }, + } + blocksPvtData := map[uint64][]*ledger.TxPvtData{ 1: { pvtDataBlk1Tx1, @@ -934,6 +962,7 @@ func TestConstructUniquePvtData(t *testing.T) { }, 3: { 
pvtDataBlk3Tx1, + pvtDataBlk3Tx2, }, } @@ -949,11 +978,15 @@ func TestConstructUniquePvtData(t *testing.T) { hashedCompositeKeyNs1Coll1Key1 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key1"))} pvtKVWriteNs1Coll1Key1 := &privacyenabledstate.PvtKVWrite{Key: "key1", IsDelete: false, Value: v3, Version: version.NewHeight(3, 1)} + hashedCompositeKeyNs1Coll1Key5 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key5"))} + pvtKVWriteNs1Coll1Key5 := &privacyenabledstate.PvtKVWrite{Key: "key5", IsDelete: true, Value: nil, Version: version.NewHeight(3, 2)} + expectedUniquePvtData := uniquePvtDataMap{ hashedCompositeKeyNs1Coll2Key3: pvtKVWriteNs1Coll2Key3, hashedCompositeKeyNs1Coll2Key4: pvtKVWriteNs1Coll2Key4, hashedCompositeKeyNs1Coll1Key2: pvtKVWriteNs1Coll1Key2, hashedCompositeKeyNs1Coll1Key1: pvtKVWriteNs1Coll1Key1, + hashedCompositeKeyNs1Coll1Key5: pvtKVWriteNs1Coll1Key5, } uniquePvtData, err := constructUniquePvtData(blocksPvtData) diff --git a/core/ledger/kvledger/txmgmt/validation/batch_preparer.go b/core/ledger/kvledger/txmgmt/validation/batch_preparer.go index ee27bcefe0f..98de8d041b5 100644 --- a/core/ledger/kvledger/txmgmt/validation/batch_preparer.go +++ b/core/ledger/kvledger/txmgmt/validation/batch_preparer.go @@ -340,7 +340,7 @@ func addPvtRWSetToPvtUpdateBatch(pvtRWSet *rwsetutil.TxPvtRwSet, pvtUpdateBatch for _, ns := range pvtRWSet.NsPvtRwSet { for _, coll := range ns.CollPvtRwSets { for _, kvwrite := range coll.KvRwSet.Writes { - if !kvwrite.IsDelete { + if !rwsetutil.IsKVWriteDelete(kvwrite) { pvtUpdateBatch.Put(ns.NameSpace, coll.CollectionName, kvwrite.Key, kvwrite.Value, ver) } else { pvtUpdateBatch.Delete(ns.NameSpace, coll.CollectionName, kvwrite.Key, ver) diff --git a/core/ledger/kvledger/txmgmt/validation/tx_ops.go b/core/ledger/kvledger/txmgmt/validation/tx_ops.go index 
a37ca7f37c6..d1f7af80a71 100644 --- a/core/ledger/kvledger/txmgmt/validation/tx_ops.go +++ b/core/ledger/kvledger/txmgmt/validation/tx_ops.go @@ -74,7 +74,7 @@ func (txops txOps) applyTxRwset(rwset *rwsetutil.TxRwSet) error { &kvrwset.KVWrite{ Key: string(hashedWrite.KeyHash), Value: hashedWrite.ValueHash, - IsDelete: hashedWrite.IsDelete, + IsDelete: rwsetutil.IsKVWriteHashDelete(hashedWrite), }, ) } @@ -96,7 +96,7 @@ func (txops txOps) applyTxRwset(rwset *rwsetutil.TxRwSet) error { // applyKVWrite records upsertion/deletion of a kvwrite func (txops txOps) applyKVWrite(ns, coll string, kvWrite *kvrwset.KVWrite) { - if kvWrite.IsDelete { + if rwsetutil.IsKVWriteDelete(kvWrite) { txops.delete(compositeKey{ns, coll, kvWrite.Key}) } else { txops.upsert(compositeKey{ns, coll, kvWrite.Key}, kvWrite.Value) diff --git a/core/ledger/kvledger/txmgmt/validation/tx_ops_test.go b/core/ledger/kvledger/txmgmt/validation/tx_ops_test.go index 7e1f37250f7..6cf3ad09bed 100644 --- a/core/ledger/kvledger/txmgmt/validation/tx_ops_test.go +++ b/core/ledger/kvledger/txmgmt/validation/tx_ops_test.go @@ -368,6 +368,96 @@ func TestTxOpsPreparationPvtdataHashes(t *testing.T) { require.Equal(t, ck4ExpectedKeyOps, txOps[ck4Hash]) } +// TestInterpretNilValueKVWritesAsDelete - See FAB-18386 +func TestInterpretNilValueKVWritesAsDelete(t *testing.T) { + testcases := []struct { + name string + rwset *rwsetutil.TxRwSet + compositeKeysToVerify []compositeKey + }{ + { + name: "public_keys_writes", + rwset: &rwsetutil.TxRwSet{ + NsRwSets: []*rwsetutil.NsRwSet{ + { + NameSpace: "ns1", + KvRwSet: &kvrwset.KVRWSet{ + Writes: []*kvrwset.KVWrite{ + { + Key: "key1", + IsDelete: true, + }, + { + Key: "key2", + IsDelete: false, + Value: []byte{}, + }, + }, + }, + }, + }, + }, + compositeKeysToVerify: []compositeKey{ + {ns: "ns1", key: "key1"}, + {ns: "ns1", key: "key2"}, + }, + }, + { + name: "private_keys_hashes_writes", + rwset: &rwsetutil.TxRwSet{ + NsRwSets: []*rwsetutil.NsRwSet{ + { + NameSpace: "ns1", + 
KvRwSet: &kvrwset.KVRWSet{}, + CollHashedRwSets: []*rwsetutil.CollHashedRwSet{ + { + CollectionName: "coll1", + HashedRwSet: &kvrwset.HashedRWSet{ + HashedWrites: []*kvrwset.KVWriteHash{ + { + KeyHash: util.ComputeStringHash("key1"), + IsDelete: true, + }, + { + KeyHash: util.ComputeStringHash("key2"), + IsDelete: false, + }, + { + KeyHash: util.ComputeStringHash("key3"), + IsDelete: false, + ValueHash: util.ComputeHash([]byte{}), + }, + }, + }, + }, + }, + }, + }, + }, + compositeKeysToVerify: []compositeKey{ + {ns: "ns1", coll: "coll1", key: string(util.ComputeStringHash("key1"))}, + {ns: "ns1", coll: "coll1", key: string(util.ComputeStringHash("key2"))}, + {ns: "ns1", coll: "coll1", key: string(util.ComputeStringHash("key3"))}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + txOps := txOps{} + err := txOps.applyTxRwset(tc.rwset) + require.NoError(t, err) + + for _, keyToVerify := range tc.compositeKeysToVerify { + require.Equal(t, + &keyOps{flag: keyDelete}, + txOps[keyToVerify], + ) + } + }) + } +} + func testutilBuildRwset(t *testing.T, kvWrites map[compositeKey][]byte, metadataWrites map[compositeKey]map[string][]byte) *rwsetutil.TxRwSet { diff --git a/core/peer/config.go b/core/peer/config.go index ba66a7d935a..4d3144098e5 100644 --- a/core/peer/config.go +++ b/core/peer/config.go @@ -28,6 +28,7 @@ import ( "runtime" "time" + "github.com/hyperledger/fabric/common/viperutil" "github.com/hyperledger/fabric/core/config" "github.com/hyperledger/fabric/internal/pkg/comm" "github.com/pkg/errors" @@ -276,10 +277,12 @@ func (c *Config) load() error { c.ChaincodePull = viper.GetBool("chaincode.pull") var externalBuilders []ExternalBuilder - err = viper.UnmarshalKey("chaincode.externalBuilders", &externalBuilders) + + err = viper.UnmarshalKey("chaincode.externalBuilders", &externalBuilders, viper.DecodeHook(viperutil.YamlStringToStructHook(externalBuilders))) if err != nil { return err } + c.ExternalBuilders = externalBuilders 
for builderIndex, builder := range c.ExternalBuilders { if builder.Path == "" { @@ -407,6 +410,16 @@ func GetServerConfig() (comm.ServerConfig, error) { if viper.IsSet("peer.keepalive.minInterval") { serverConfig.KaOpts.ServerMinInterval = viper.GetDuration("peer.keepalive.minInterval") } + + serverConfig.MaxRecvMsgSize = comm.DefaultMaxRecvMsgSize + serverConfig.MaxSendMsgSize = comm.DefaultMaxSendMsgSize + + if viper.IsSet("peer.maxRecvMsgSize") { + serverConfig.MaxRecvMsgSize = int(viper.GetInt32("peer.maxRecvMsgSize")) + } + if viper.IsSet("peer.maxSendMsgSize") { + serverConfig.MaxSendMsgSize = int(viper.GetInt32("peer.maxSendMsgSize")) + } return serverConfig, nil } diff --git a/core/peer/config_test.go b/core/peer/config_test.go index 6db5da4784a..11e00f348a9 100644 --- a/core/peer/config_test.go +++ b/core/peer/config_test.go @@ -7,6 +7,7 @@ package peer import ( "crypto/tls" + "io/ioutil" "net" "os" "path/filepath" @@ -14,6 +15,7 @@ import ( "testing" "time" + "github.com/hyperledger/fabric/common/crypto/tlsgen" "github.com/hyperledger/fabric/internal/pkg/comm" "github.com/spf13/viper" "github.com/stretchr/testify/require" @@ -94,10 +96,15 @@ func TestPeerAddress(t *testing.T) { } func TestGetServerConfig(t *testing.T) { + tempdir, err := ioutil.TempDir("", "peer-clientcert") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + // good config without TLS viper.Set("peer.tls.enabled", false) viper.Set("peer.connectiontimeout", "7s") - sc, _ := GetServerConfig() + sc, err := GetServerConfig() + require.NoError(t, err) require.Equal(t, false, sc.SecOpts.UseTLS, "ServerConfig.SecOpts.UseTLS should be false") require.Equal(t, sc.ConnectionTimeout, 7*time.Second, "ServerConfig.ConnectionTimeout should be 7 seconds") @@ -114,27 +121,56 @@ func TestGetServerConfig(t *testing.T) { require.Equal(t, time.Duration(2)*time.Minute, sc.KaOpts.ServerMinInterval, "ServerConfig.KaOpts.ServerMinInterval should be set to 2 min") // good config with TLS + org1CA, err := 
tlsgen.NewCA() + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org1-ca-cert.pem"), org1CA.CertBytes(), 0o644) + require.NoError(t, err) + org2CA, err := tlsgen.NewCA() + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org2-ca-cert.pem"), org2CA.CertBytes(), 0o644) + require.NoError(t, err) + + org1ServerKP, err := org1CA.NewServerCertKeyPair("localhost") + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org1-server1-cert.pem"), org1ServerKP.Cert, 0o644) + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org1-server1-key.pem"), org1ServerKP.Key, 0o600) + require.NoError(t, err) + viper.Set("peer.tls.enabled", true) - viper.Set("peer.tls.cert.file", filepath.Join("testdata", "Org1-server1-cert.pem")) - viper.Set("peer.tls.key.file", filepath.Join("testdata", "Org1-server1-key.pem")) - viper.Set("peer.tls.rootcert.file", filepath.Join("testdata", "Org1-cert.pem")) - sc, _ = GetServerConfig() + viper.Set("peer.tls.cert.file", filepath.Join(tempdir, "org1-server1-cert.pem")) + viper.Set("peer.tls.key.file", filepath.Join(tempdir, "org1-server1-key.pem")) + viper.Set("peer.tls.rootcert.file", filepath.Join(tempdir, "org1-ca-cert.pem")) + + sc, err = GetServerConfig() + require.NoError(t, err, "failed to build server config") require.Equal(t, true, sc.SecOpts.UseTLS, "ServerConfig.SecOpts.UseTLS should be true") require.Equal(t, false, sc.SecOpts.RequireClientCert, "ServerConfig.SecOpts.RequireClientCert should be false") viper.Set("peer.tls.clientAuthRequired", true) viper.Set("peer.tls.clientRootCAs.files", []string{ - filepath.Join("testdata", "Org1-cert.pem"), - filepath.Join("testdata", "Org2-cert.pem"), + filepath.Join(tempdir, "org1-ca-cert.pem"), + filepath.Join(tempdir, "org2-ca-cert.pem"), }) sc, _ = GetServerConfig() require.Equal(t, true, sc.SecOpts.RequireClientCert, "ServerConfig.SecOpts.RequireClientCert should be true") require.Equal(t, 2, 
len(sc.SecOpts.ClientRootCAs), "ServerConfig.SecOpts.ClientRootCAs should contain 2 entries") + // GRPC max message size options + require.Equal(t, comm.DefaultMaxRecvMsgSize, sc.MaxRecvMsgSize, "ServerConfig.MaxRecvMsgSize should be set to default value %v", comm.DefaultMaxRecvMsgSize) + require.Equal(t, comm.DefaultMaxSendMsgSize, sc.MaxSendMsgSize, "ServerConfig.MaxSendMsgSize should be set to default value %v", comm.DefaultMaxSendMsgSize) + viper.Set("peer.maxRecvMsgSize", "1024") + viper.Set("peer.maxSendMsgSize", "1024") + sc, _ = GetServerConfig() + require.Equal(t, 1024, sc.MaxRecvMsgSize, "ServerConfig.MaxRecvMsgSize should be set to custom value 1024") + require.Equal(t, 1024, sc.MaxSendMsgSize, "ServerConfig.MaxSendMsgSize should be set to custom value 1024") + // bad config with TLS - viper.Set("peer.tls.rootcert.file", filepath.Join("testdata", "Org11-cert.pem")) - _, err := GetServerConfig() + viper.Set("peer.tls.rootcert.file", "non-existent-file.pem") + _, err = GetServerConfig() require.Error(t, err, "GetServerConfig should return error with bad root cert path") - viper.Set("peer.tls.cert.file", filepath.Join("testdata", "Org11-cert.pem")) + + viper.Set("peer.tls.rootcert.file", filepath.Join(tempdir, "org1-ca-cert.pem")) + viper.Set("peer.tls.cert.file", "non-existent-file.pem") _, err = GetServerConfig() require.Error(t, err, "GetServerConfig should return error with bad tls cert path") @@ -144,22 +180,35 @@ func TestGetServerConfig(t *testing.T) { } func TestGetClientCertificate(t *testing.T) { + tempdir, err := ioutil.TempDir("", "peer-clientcert") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + ca, err := tlsgen.NewCA() + require.NoError(t, err) + kp, err := ca.NewServerCertKeyPair("localhost") + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "server1-cert.pem"), kp.Cert, 0o644) + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "server1-key.pem"), kp.Key, 0o600) + require.NoError(t, 
err) + viper.Set("peer.tls.key.file", "") viper.Set("peer.tls.cert.file", "") viper.Set("peer.tls.clientKey.file", "") viper.Set("peer.tls.clientCert.file", "") // neither client nor server key pairs set - expect error - _, err := GetClientCertificate() + _, err = GetClientCertificate() require.Error(t, err) viper.Set("peer.tls.key.file", "") - viper.Set("peer.tls.cert.file", filepath.Join("testdata", "Org1-server1-cert.pem")) + viper.Set("peer.tls.cert.file", filepath.Join(tempdir, "server1-cert.pem")) // missing server key file - expect error _, err = GetClientCertificate() require.Error(t, err) - viper.Set("peer.tls.key.file", filepath.Join("testdata", "Org1-server1-key.pem")) + viper.Set("peer.tls.key.file", filepath.Join(tempdir, "server1-key.pem")) viper.Set("peer.tls.cert.file", "") // missing server cert file - expect error _, err = GetClientCertificate() @@ -167,29 +216,29 @@ func TestGetClientCertificate(t *testing.T) { // set server TLS settings to ensure we get the client TLS settings // when they are set properly - viper.Set("peer.tls.key.file", filepath.Join("testdata", "Org1-server1-key.pem")) - viper.Set("peer.tls.cert.file", filepath.Join("testdata", "Org1-server1-cert.pem")) + viper.Set("peer.tls.key.file", filepath.Join(tempdir, "server1-key.pem")) + viper.Set("peer.tls.cert.file", filepath.Join(tempdir, "server1-cert.pem")) // peer.tls.clientCert.file not set - expect error - viper.Set("peer.tls.clientKey.file", filepath.Join("testdata", "Org2-server1-key.pem")) + viper.Set("peer.tls.clientKey.file", filepath.Join(tempdir, "server1-key.pem")) _, err = GetClientCertificate() require.Error(t, err) // peer.tls.clientKey.file not set - expect error viper.Set("peer.tls.clientKey.file", "") - viper.Set("peer.tls.clientCert.file", filepath.Join("testdata", "Org2-server1-cert.pem")) + viper.Set("peer.tls.clientCert.file", filepath.Join(tempdir, "server1-cert.pem")) _, err = GetClientCertificate() require.Error(t, err) // client auth required and 
clientKey/clientCert set expected, err := tls.LoadX509KeyPair( - filepath.Join("testdata", "Org2-server1-cert.pem"), - filepath.Join("testdata", "Org2-server1-key.pem"), + filepath.Join(tempdir, "server1-cert.pem"), + filepath.Join(tempdir, "server1-key.pem"), ) if err != nil { t.Fatalf("Failed to load test certificate (%s)", err) } - viper.Set("peer.tls.clientKey.file", filepath.Join("testdata", "Org2-server1-key.pem")) + viper.Set("peer.tls.clientKey.file", filepath.Join(tempdir, "server1-key.pem")) cert, err := GetClientCertificate() require.NoError(t, err) require.Equal(t, expected, cert) @@ -199,12 +248,10 @@ func TestGetClientCertificate(t *testing.T) { viper.Set("peer.tls.clientKey.file", "") viper.Set("peer.tls.clientCert.file", "") expected, err = tls.LoadX509KeyPair( - filepath.Join("testdata", "Org1-server1-cert.pem"), - filepath.Join("testdata", "Org1-server1-key.pem"), + filepath.Join(tempdir, "server1-cert.pem"), + filepath.Join(tempdir, "server1-key.pem"), ) - if err != nil { - t.Fatalf("Failed to load test certificate (%s)", err) - } + require.NoError(t, err, "failed to load test certificate") cert, err = GetClientCertificate() require.NoError(t, err) require.Equal(t, expected, cert) @@ -407,6 +454,26 @@ func TestPropagateEnvironment(t *testing.T) { require.Equal(t, expectedConfig, coreConfig) } +func TestExternalBuilderConfigAsEnvVar(t *testing.T) { + defer viper.Reset() + viper.Set("peer.address", "localhost:8080") + viper.Set("chaincode.externalBuilders", "[{name: relative, path: relative/plugin_dir, propagateEnvironment: [ENVVAR_NAME_TO_PROPAGATE_FROM_PEER, GOPROXY]}, {name: absolute, path: /absolute/plugin_dir}]") + coreConfig, err := GlobalConfig() + require.NoError(t, err) + + require.Equal(t, []ExternalBuilder{ + { + Path: "relative/plugin_dir", + Name: "relative", + PropagateEnvironment: []string{"ENVVAR_NAME_TO_PROPAGATE_FROM_PEER", "GOPROXY"}, + }, + { + Path: "/absolute/plugin_dir", + Name: "absolute", + }, + }, 
coreConfig.ExternalBuilders) +} + func TestMissingExternalBuilderPath(t *testing.T) { defer viper.Reset() viper.Set("peer.address", "localhost:8080") diff --git a/core/peer/deliverevents_test.go b/core/peer/deliverevents_test.go index 74e5a5b3b2b..7b9c6430c14 100644 --- a/core/peer/deliverevents_test.go +++ b/core/peer/deliverevents_test.go @@ -96,6 +96,11 @@ func (m *mockReader) Height() uint64 { return args.Get(0).(uint64) } +func (m *mockReader) RetrieveBlockByNumber(blockNum uint64) (*common.Block, error) { + args := m.Called() + return args.Get(0).(*common.Block), args.Error(1) +} + // mockChainSupport type mockChainSupport struct { mock.Mock diff --git a/core/peer/peer.go b/core/peer/peer.go index 64f1c4493d1..de82667d493 100644 --- a/core/peer/peer.go +++ b/core/peer/peer.go @@ -545,3 +545,7 @@ func (p *Peer) Initialize( p.initChannel(cid) } } + +func (flbs fileLedgerBlockStore) RetrieveBlockByNumber(blockNum uint64) (*common.Block, error) { + return flbs.GetBlockByNumber(blockNum) +} diff --git a/core/peer/peer_test.go b/core/peer/peer_test.go index feb5142912d..3679ff9ace0 100644 --- a/core/peer/peer_test.go +++ b/core/peer/peer_test.go @@ -20,6 +20,7 @@ import ( pb "github.com/hyperledger/fabric-protos-go/peer" "github.com/hyperledger/fabric/bccsp/sw" configtxtest "github.com/hyperledger/fabric/common/configtx/test" + "github.com/hyperledger/fabric/common/crypto/tlsgen" "github.com/hyperledger/fabric/common/metrics/disabled" "github.com/hyperledger/fabric/core/committer/txvalidator/plugin" "github.com/hyperledger/fabric/core/deliverservice" @@ -67,8 +68,8 @@ func NewTestPeer(t *testing.T) (*Peer, func()) { defaultDeliverClientDialOpts, grpc.WithBlock(), grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize), - grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize), + grpc.MaxCallRecvMsgSize(comm.DefaultMaxRecvMsgSize), + grpc.MaxCallSendMsgSize(comm.DefaultMaxSendMsgSize), ), ) defaultDeliverClientDialOpts = append( @@ -124,27 +125,23 @@ func 
TestInitialize(t *testing.T) { peerInstance, cleanup := NewTestPeer(t) defer cleanup() - org1CA, err := ioutil.ReadFile(filepath.Join("testdata", "Org1-cert.pem")) + org1CA, err := tlsgen.NewCA() require.NoError(t, err) - org1Server1Key, err := ioutil.ReadFile(filepath.Join("testdata", "Org1-server1-key.pem")) - require.NoError(t, err) - org1Server1Cert, err := ioutil.ReadFile(filepath.Join("testdata", "Org1-server1-cert.pem")) + org1Server1KeyPair, err := org1CA.NewServerCertKeyPair("localhost", "127.0.0.1", "::1") require.NoError(t, err) + serverConfig := comm.ServerConfig{ SecOpts: comm.SecureOptions{ UseTLS: true, - Certificate: org1Server1Cert, - Key: org1Server1Key, - ServerRootCAs: [][]byte{org1CA}, + Certificate: org1Server1KeyPair.Cert, + Key: org1Server1KeyPair.Key, + ServerRootCAs: [][]byte{org1CA.CertBytes()}, RequireClientCert: true, }, } server, err := comm.NewGRPCServer("localhost:0", serverConfig) - if err != nil { - t.Fatalf("NewGRPCServer failed with error [%s]", err) - return - } + require.NoError(t, err, "failed to create gRPC server") peerInstance.Initialize( nil, diff --git a/core/peer/pkg_test.go b/core/peer/pkg_test.go index 815d9ecc717..d42751d8aed 100644 --- a/core/peer/pkg_test.go +++ b/core/peer/pkg_test.go @@ -11,31 +11,27 @@ import ( "crypto/tls" "crypto/x509" "errors" - "io/ioutil" "net" - "path/filepath" "testing" "time" - "github.com/golang/protobuf/proto" cb "github.com/hyperledger/fabric-protos-go/common" mspproto "github.com/hyperledger/fabric-protos-go/msp" pb "github.com/hyperledger/fabric-protos-go/peer" configtxtest "github.com/hyperledger/fabric/common/configtx/test" + "github.com/hyperledger/fabric/common/crypto/tlsgen" "github.com/hyperledger/fabric/core/ledger/mock" "github.com/hyperledger/fabric/core/peer" "github.com/hyperledger/fabric/internal/pkg/comm" "github.com/hyperledger/fabric/internal/pkg/comm/testpb" "github.com/hyperledger/fabric/internal/pkg/txflags" "github.com/hyperledger/fabric/msp" + 
"github.com/hyperledger/fabric/protoutil" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) -// default timeout for grpc connections -var timeout = time.Second * 1 - // test server to be registered with the GRPCServer type testServiceServer struct{} @@ -58,9 +54,10 @@ func createCertPool(rootCAs [][]byte) (*x509.CertPool, error) { func invokeEmptyCall(address string, dialOptions []grpc.DialOption) (*testpb.Empty, error) { //add DialOptions dialOptions = append(dialOptions, grpc.WithBlock()) - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - //create GRPC client conn + + // create GRPC client conn clientConn, err := grpc.DialContext(ctx, address, dialOptions...) if err != nil { return nil, err @@ -80,9 +77,7 @@ func invokeEmptyCall(address string, dialOptions []grpc.DialOption) (*testpb.Emp } // helper function to build an MSPConfig given root certs -func createMSPConfig(rootCerts, tlsRootCerts, tlsIntermediateCerts [][]byte, - mspID string) (*mspproto.MSPConfig, error) { - +func createMSPConfig(mspID string, rootCerts, tlsRootCerts, tlsIntermediateCerts [][]byte) (*mspproto.MSPConfig, error) { fmspconf := &mspproto.FabricMSPConfig{ RootCerts: rootCerts, TlsRootCerts: tlsRootCerts, @@ -102,12 +97,10 @@ func createMSPConfig(rootCerts, tlsRootCerts, tlsIntermediateCerts [][]byte, }, } - fmpsjs, err := proto.Marshal(fmspconf) - if err != nil { - return nil, err - } - mspconf := &mspproto.MSPConfig{Config: fmpsjs, Type: int32(msp.FABRIC)} - return mspconf, nil + return &mspproto.MSPConfig{ + Config: protoutil.MarshalOrPanic(fmspconf), + Type: int32(msp.FABRIC), + }, nil } func createConfigBlock(channelID string, appMSPConf, ordererMSPConf *mspproto.MSPConfig, @@ -124,40 +117,34 @@ func createConfigBlock(channelID string, appMSPConf, ordererMSPConf *mspproto.MS } func TestUpdateRootsFromConfigBlock(t *testing.T) { - 
// load test certs from testdata - org1CA, err := ioutil.ReadFile(filepath.Join("testdata", "Org1-cert.pem")) - require.NoError(t, err) - org1Server1Key, err := ioutil.ReadFile(filepath.Join("testdata", "Org1-server1-key.pem")) + org1CA, err := tlsgen.NewCA() require.NoError(t, err) - org1Server1Cert, err := ioutil.ReadFile(filepath.Join("testdata", "Org1-server1-cert.pem")) + org1Server1KeyPair, err := org1CA.NewServerCertKeyPair("localhost", "127.0.0.1", "::1") require.NoError(t, err) - org2CA, err := ioutil.ReadFile(filepath.Join("testdata", "Org2-cert.pem")) - require.NoError(t, err) - org2Server1Key, err := ioutil.ReadFile(filepath.Join("testdata", "Org2-server1-key.pem")) - require.NoError(t, err) - org2Server1Cert, err := ioutil.ReadFile(filepath.Join("testdata", "Org2-server1-cert.pem")) - require.NoError(t, err) - org2IntermediateCA, err := ioutil.ReadFile(filepath.Join("testdata", "Org2-child1-cert.pem")) + + org2CA, err := tlsgen.NewCA() require.NoError(t, err) - org2IntermediateServer1Key, err := ioutil.ReadFile(filepath.Join("testdata", "Org2-child1-server1-key.pem")) + org2Server1KeyPair, err := org2CA.NewServerCertKeyPair("localhost", "127.0.0.1", "::1") require.NoError(t, err) - org2IntermediateServer1Cert, err := ioutil.ReadFile(filepath.Join("testdata", "Org2-child1-server1-cert.pem")) + + org2IntermediateCA, err := org2CA.NewIntermediateCA() require.NoError(t, err) - ordererOrgCA, err := ioutil.ReadFile(filepath.Join("testdata", "Org3-cert.pem")) + org2IntermediateServer1KeyPair, err := org2IntermediateCA.NewServerCertKeyPair("localhost", "127.0.0.1", "::1") require.NoError(t, err) - ordererOrgServer1Key, err := ioutil.ReadFile(filepath.Join("testdata", "Org3-server1-key.pem")) + + ordererOrgCA, err := tlsgen.NewCA() require.NoError(t, err) - ordererOrgServer1Cert, err := ioutil.ReadFile(filepath.Join("testdata", "Org3-server1-cert.pem")) + ordererOrgServer1KeyPair, err := ordererOrgCA.NewServerCertKeyPair("localhost", "127.0.0.1", "::1") 
require.NoError(t, err) // create test MSPConfigs - org1MSPConf, err := createMSPConfig([][]byte{org2CA}, [][]byte{org1CA}, [][]byte{}, "Org1MSP") + org1MSPConf, err := createMSPConfig("Org1MSP", [][]byte{org2CA.CertBytes()}, [][]byte{org1CA.CertBytes()}, [][]byte{}) require.NoError(t, err) - org2MSPConf, err := createMSPConfig([][]byte{org1CA}, [][]byte{org2CA}, [][]byte{}, "Org2MSP") + org2MSPConf, err := createMSPConfig("Org2MSP", [][]byte{org1CA.CertBytes()}, [][]byte{org2CA.CertBytes()}, [][]byte{}) require.NoError(t, err) - org2IntermediateMSPConf, err := createMSPConfig([][]byte{org1CA}, [][]byte{org2CA}, [][]byte{org2IntermediateCA}, "Org2IntermediateMSP") + org2IntermediateMSPConf, err := createMSPConfig("Org2IntermediateMSP", [][]byte{org1CA.CertBytes()}, [][]byte{org2CA.CertBytes()}, [][]byte{org2IntermediateCA.CertBytes()}) require.NoError(t, err) - ordererOrgMSPConf, err := createMSPConfig([][]byte{org1CA}, [][]byte{ordererOrgCA}, [][]byte{}, "OrdererOrgMSP") + ordererOrgMSPConf, err := createMSPConfig("OrdererOrgMSP", [][]byte{org1CA.CertBytes()}, [][]byte{ordererOrgCA.CertBytes()}, [][]byte{}) require.NoError(t, err) // create test channel create blocks @@ -171,9 +158,9 @@ func TestUpdateRootsFromConfigBlock(t *testing.T) { serverConfig := comm.ServerConfig{ SecOpts: comm.SecureOptions{ UseTLS: true, - Certificate: org1Server1Cert, - Key: org1Server1Key, - ServerRootCAs: [][]byte{org1CA}, + Certificate: org1Server1KeyPair.Cert, + Key: org1Server1KeyPair.Key, + ServerRootCAs: [][]byte{org1CA.CertBytes()}, RequireClientCert: true, }, } @@ -184,17 +171,15 @@ func TestUpdateRootsFromConfigBlock(t *testing.T) { createChannel := func(t *testing.T, cid string, block *cb.Block) { err = peerInstance.CreateChannel(cid, block, &mock.DeployedChaincodeInfoProvider{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create config block (%s)", err) - } + require.NoError(t, err, "failed to create channel from block") t.Logf("Channel %s MSPIDs: (%s)", cid, 
peerInstance.GetMSPIDs(cid)) } - org1CertPool, err := createCertPool([][]byte{org1CA}) + org1CertPool, err := createCertPool([][]byte{org1CA.CertBytes()}) require.NoError(t, err) // use server cert as client cert - org1ClientCert, err := tls.X509KeyPair(org1Server1Cert, org1Server1Key) + org1ClientCert, err := tls.X509KeyPair(org1Server1KeyPair.Cert, org1Server1KeyPair.Key) require.NoError(t, err) org1Creds := credentials.NewTLS(&tls.Config{ @@ -202,21 +187,21 @@ func TestUpdateRootsFromConfigBlock(t *testing.T) { RootCAs: org1CertPool, }) - org2ClientCert, err := tls.X509KeyPair(org2Server1Cert, org2Server1Key) + org2ClientCert, err := tls.X509KeyPair(org2Server1KeyPair.Cert, org2Server1KeyPair.Key) require.NoError(t, err) org2Creds := credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{org2ClientCert}, RootCAs: org1CertPool, }) - org2IntermediateClientCert, err := tls.X509KeyPair(org2IntermediateServer1Cert, org2IntermediateServer1Key) + org2IntermediateClientCert, err := tls.X509KeyPair(org2IntermediateServer1KeyPair.Cert, org2IntermediateServer1KeyPair.Key) require.NoError(t, err) org2IntermediateCreds := credentials.NewTLS(&tls.Config{ Certificates: []tls.Certificate{org2IntermediateClientCert}, RootCAs: org1CertPool, }) - ordererOrgClientCert, err := tls.X509KeyPair(ordererOrgServer1Cert, ordererOrgServer1Key) + ordererOrgClientCert, err := tls.X509KeyPair(ordererOrgServer1KeyPair.Cert, ordererOrgServer1KeyPair.Key) require.NoError(t, err) ordererOrgCreds := credentials.NewTLS(&tls.Config{ @@ -274,18 +259,13 @@ func TestUpdateRootsFromConfigBlock(t *testing.T) { for _, test := range tests { test := test t.Run(test.name, func(t *testing.T) { - t.Logf("Running test %s ...", test.name) server, err := comm.NewGRPCServer("localhost:0", test.serverConfig) - if err != nil { - t.Fatalf("NewGRPCServer failed with error [%s]", err) - return - } + require.NoError(t, err, "failed to create gRPC server") + require.NotNil(t, server) 
peerInstance.SetServer(server) peerInstance.ServerConfig = test.serverConfig - require.NoError(t, err, "NewGRPCServer should not have returned an error") - require.NotNil(t, server, "NewGRPCServer should have created a server") // register a GRPC test service testpb.RegisterTestServiceServer(server.Server(), &testServiceServer{}) go server.Start() @@ -293,12 +273,8 @@ func TestUpdateRootsFromConfigBlock(t *testing.T) { // extract dynamic listen port _, port, err := net.SplitHostPort(server.Listener().Addr().String()) - if err != nil { - t.Fatal(err) - } - t.Logf("listenAddress: %s", server.Listener().Addr()) + require.NoError(t, err, "unable to extract listener port") testAddress := "localhost:" + port - t.Logf("testAddress: %s", testAddress) // invoke the EmptyCall service with good options but should fail // until channel is created and root CAs are updated diff --git a/core/peer/testdata/Org1-cert.pem b/core/peer/testdata/Org1-cert.pem deleted file mode 100644 index 5b6923bb878..00000000000 --- a/core/peer/testdata/Org1-cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB8TCCAZegAwIBAgIQU59imQ+xl+FmwuiFyUgFezAKBggqhkjOPQQDAjBYMQsw -CQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZy -YW5jaXNjbzENMAsGA1UEChMET3JnMTENMAsGA1UEAxMET3JnMTAeFw0xNzA1MDgw -OTMwMzRaFw0yNzA1MDYwOTMwMzRaMFgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpD -YWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRPcmcx -MQ0wCwYDVQQDEwRPcmcxMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFkpP6EqE -87ghFi25UWLvgPatxDiYKYaVSPvpo/XDJ0+9uUmK/C2r5Bvvxx1t8eTROwN77tEK -r+jbJIxX3ZYQMKNDMEEwDgYDVR0PAQH/BAQDAgGmMA8GA1UdJQQIMAYGBFUdJQAw -DwYDVR0TAQH/BAUwAwEB/zANBgNVHQ4EBgQEAQIDBDAKBggqhkjOPQQDAgNIADBF -AiEA1Xkrpq+wrmfVVuY12dJfMQlSx+v0Q3cYce9BE1i2mioCIAzqyduK/lHPI81b -nWiU9JF9dRQ69dEV9dxd/gzamfFU ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org1-server1-cert.pem b/core/peer/testdata/Org1-server1-cert.pem deleted file mode 100644 index 169d8269812..00000000000 --- 
a/core/peer/testdata/Org1-server1-cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICCjCCAbGgAwIBAgIQOcq9Om9VwUe9hGN0TTGw1DAKBggqhkjOPQQDAjBYMQsw -CQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZy -YW5jaXNjbzENMAsGA1UEChMET3JnMTENMAsGA1UEAxMET3JnMTAeFw0xNzA1MDgw -OTMwMzRaFw0yNzA1MDYwOTMwMzRaMGUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpD -YWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRUwEwYDVQQKEwxPcmcx -LXNlcnZlcjExEjAQBgNVBAMTCWxvY2FsaG9zdDBZMBMGByqGSM49AgEGCCqGSM49 -AwEHA0IABAm+2CZhbmsnA+HKQynXKz7fVZvvwlv/DdNg3Mdg7lIcP2z0b07/eAZ5 -0chdJNcjNAd/QAj/mmGG4dObeo4oTKGjUDBOMA4GA1UdDwEB/wQEAwIFoDAdBgNV -HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAPBgNVHSME -CDAGgAQBAgMEMAoGCCqGSM49BAMCA0cAMEQCIG55RvN4Boa0WS9UcIb/tI2YrAT8 -EZd/oNnZYlbxxyvdAiB6sU9xAn4oYIW9xtrrOISv3YRg8rkCEATsagQfH8SiLg== ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org1-server1-key.pem b/core/peer/testdata/Org1-server1-key.pem deleted file mode 100644 index ddd8ce52f16..00000000000 --- a/core/peer/testdata/Org1-server1-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEICfXQtVmdQAlp/l9umWJqCXNTDurmciDNmGHPpxHwUK/oAoGCCqGSM49 -AwEHoUQDQgAECb7YJmFuaycD4cpDKdcrPt9Vm+/CW/8N02Dcx2DuUhw/bPRvTv94 -BnnRyF0k1yM0B39ACP+aYYbh05t6jihMoQ== ------END EC PRIVATE KEY----- diff --git a/core/peer/testdata/Org2-cert.pem b/core/peer/testdata/Org2-cert.pem deleted file mode 100644 index 106252261fe..00000000000 --- a/core/peer/testdata/Org2-cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB8jCCAZigAwIBAgIRANxd4D3sY0656NqOh8Rha0AwCgYIKoZIzj0EAwIwWDEL -MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xDTALBgNVBAoTBE9yZzIxDTALBgNVBAMTBE9yZzIwHhcNMTcwNTA4 -MDkzMDM0WhcNMjcwNTA2MDkzMDM0WjBYMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMET3Jn -MjENMAsGA1UEAxMET3JnMjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABDYy+qzS 
-J/8CMfhpBFhUhhz+7up4+lwjBWDSS01koszNh8camHTA8vS4ZsN+DZ2DRsSmRZgs -tG2oogLLIdh6Z1CjQzBBMA4GA1UdDwEB/wQEAwIBpjAPBgNVHSUECDAGBgRVHSUA -MA8GA1UdEwEB/wQFMAMBAf8wDQYDVR0OBAYEBAECAwQwCgYIKoZIzj0EAwIDSAAw -RQIgWnMmH0yxAjub3qfzxQioHKQ8+WvUjAXm0ejId9Q+rDICIQDr30UCPj+SXzOb -Cu4psMMBfLujKoiBNdLE1KEpt8lN1g== ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org2-child1-cert.pem b/core/peer/testdata/Org2-child1-cert.pem deleted file mode 100644 index 0feba1a64b6..00000000000 --- a/core/peer/testdata/Org2-child1-cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICETCCAbagAwIBAgIQNpgoASE9fi0ooZVKcnwnZzAKBggqhkjOPQQDAjBYMQsw -CQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZy -YW5jaXNjbzENMAsGA1UEChMET3JnMjENMAsGA1UEAxMET3JnMjAeFw0xNzA1MDgw -OTMwMzRaFw0yNzA1MDYwOTMwMzRaMGYxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpD -YWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtPcmcy -LWNoaWxkMTEUMBIGA1UEAxMLT3JnMi1jaGlsZDEwWTATBgcqhkjOPQIBBggqhkjO -PQMBBwNCAARTBJ8/o1tpHPwuixYDgRwcrzAru0cWJJhE6KWHAa0vBCG4nl0zjjRS -og+iAuUcY4Z/gJoHol6dKSHk9h5jrqtEo1QwUjAOBgNVHQ8BAf8EBAMCAaYwDwYD -VR0lBAgwBgYEVR0lADAPBgNVHRMBAf8EBTADAQH/MA0GA1UdDgQGBAQBAgMEMA8G -A1UdIwQIMAaABAECAwQwCgYIKoZIzj0EAwIDSQAwRgIhAIkPzk7ORV/WhfG7QY/6 -/OJg4++ftz2SZc44NIuogMArAiEAqbnpnmmHnzo2Qc6gnliCegpGnJ18RUT/jZlj -1qXHcvg= ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org2-child1-key.pem b/core/peer/testdata/Org2-child1-key.pem deleted file mode 100644 index 7b59a5ccd93..00000000000 --- a/core/peer/testdata/Org2-child1-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEILECltESx3k5sQRtCt5rQEAo9cvTDyPxjv2UT092SY2NoAoGCCqGSM49 -AwEHoUQDQgAEUwSfP6NbaRz8LosWA4EcHK8wK7tHFiSYROilhwGtLwQhuJ5dM440 -UqIPogLlHGOGf4CaB6JenSkh5PYeY66rRA== ------END EC PRIVATE KEY----- diff --git a/core/peer/testdata/Org2-child1-server1-cert.pem b/core/peer/testdata/Org2-child1-server1-cert.pem deleted file mode 100644 index a406cc1de52..00000000000 --- 
a/core/peer/testdata/Org2-child1-server1-cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIDCCAcegAwIBAgIRAO3pYorhuGiPnJJphdKeQwAwCgYIKoZIzj0EAwIwZjEL -MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC09yZzItY2hpbGQxMRQwEgYDVQQDEwtPcmcyLWNo -aWxkMTAeFw0xNzA1MDgwOTMwMzRaFw0yNzA1MDYwOTMwMzRaMGwxCzAJBgNVBAYT -AlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2Nv -MRwwGgYDVQQKExNPcmcyLWNoaWxkMS1zZXJ2ZXIxMRIwEAYDVQQDEwlsb2NhbGhv -c3QwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASaWwX7EMRpgVQ4Jasr5GmskiCq -SP1VZA0LjiOSSVjUsCQR73Wuvx+LzRx7xLccSy3w9bAJOh32tTLqt+6XtXNlo1Aw -TjAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC -MAwGA1UdEwEB/wQCMAAwDwYDVR0jBAgwBoAEAQIDBDAKBggqhkjOPQQDAgNHADBE -AiBxvv8tzyNwzQOQhP6MmSZ4zJGtFgX7nfUqjEYA8N9qBAIgCsKiCgLQrLwg3mld -DKAU4r/3+400yzXPgD+fQ3T6u8k= ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org2-child1-server1-key.pem b/core/peer/testdata/Org2-child1-server1-key.pem deleted file mode 100644 index 156f7816d27..00000000000 --- a/core/peer/testdata/Org2-child1-server1-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIPOWGbAnyV/ubULozdRQKt+kMPrz5l3LVxz80uYpufjQoAoGCCqGSM49 -AwEHoUQDQgAEmlsF+xDEaYFUOCWrK+RprJIgqkj9VWQNC44jkklY1LAkEe91rr8f -i80ce8S3HEst8PWwCTod9rUy6rful7VzZQ== ------END EC PRIVATE KEY----- diff --git a/core/peer/testdata/Org2-server1-cert.pem b/core/peer/testdata/Org2-server1-cert.pem deleted file mode 100644 index e8623bfaf30..00000000000 --- a/core/peer/testdata/Org2-server1-cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICCzCCAbKgAwIBAgIRAII9kVh6i4X9wdur6UepocUwCgYIKoZIzj0EAwIwWDEL -MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xDTALBgNVBAoTBE9yZzIxDTALBgNVBAMTBE9yZzIwHhcNMTcwNTA4 -MDkzMDM0WhcNMjcwNTA2MDkzMDM0WjBlMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEVMBMGA1UEChMMT3Jn 
-Mi1zZXJ2ZXIxMRIwEAYDVQQDEwlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjO -PQMBBwNCAAT+6mAyGB0VBaGCSiMHLKASS3/q8hUbBYXmjo11mgDMIkkGwyqRvSUI -5EsMb6XRS4UlH8Xt1NsA+9m4Vj1KWl5ro1AwTjAOBgNVHQ8BAf8EBAMCBaAwHQYD -VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDwYDVR0j -BAgwBoAEAQIDBDAKBggqhkjOPQQDAgNHADBEAiBq7AvYBrh7S1dtU+kPmX1fGUX0 -pvBC+ngwNwboxhfD+wIgXy0t7sff31QGb56CsszSTarOMZOK8hsNoOtvnqgAX6Q= ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org2-server1-key.pem b/core/peer/testdata/Org2-server1-key.pem deleted file mode 100644 index f0ea1f9b1c7..00000000000 --- a/core/peer/testdata/Org2-server1-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIHpb9jJemQ/0ODY4gM1fN+42SQ3+fAoU5vbiWFbFZ4i7oAoGCCqGSM49 -AwEHoUQDQgAE/upgMhgdFQWhgkojByygEkt/6vIVGwWF5o6NdZoAzCJJBsMqkb0l -CORLDG+l0UuFJR/F7dTbAPvZuFY9Slpeaw== ------END EC PRIVATE KEY----- diff --git a/core/peer/testdata/Org3-cert.pem b/core/peer/testdata/Org3-cert.pem deleted file mode 100644 index a48e1a7f6f1..00000000000 --- a/core/peer/testdata/Org3-cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIB8TCCAZegAwIBAgIQWRj024bZNzkNfYVJzZNi1jAKBggqhkjOPQQDAjBYMQsw -CQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZy -YW5jaXNjbzENMAsGA1UEChMET3JnMzENMAsGA1UEAxMET3JnMzAeFw0xNzA1MDgw -OTMwMzRaFw0yNzA1MDYwOTMwMzRaMFgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpD -YWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRPcmcz -MQ0wCwYDVQQDEwRPcmczMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2GUkjUoa -BXjNL0gKTWEEpXyCTrkoHSvsbxHkSKmNT8VEZ24Vqi+SbBcMW7IckMHt+k7ofrrj -c9hyReUSFE1RDaNDMEEwDgYDVR0PAQH/BAQDAgGmMA8GA1UdJQQIMAYGBFUdJQAw -DwYDVR0TAQH/BAUwAwEB/zANBgNVHQ4EBgQEAQIDBDAKBggqhkjOPQQDAgNIADBF -AiEAs7Y179Bhufjj/FcEph65BOiZRxf1o1ggPPcoS2KxWlICIH0gSlbfIFovbOSp -0iSYRkv2NHQ2W9ZGb+KEhIB76Fkb ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org3-server1-cert.pem b/core/peer/testdata/Org3-server1-cert.pem deleted file mode 100644 index 4895ac96fe2..00000000000 --- 
a/core/peer/testdata/Org3-server1-cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICDTCCAbKgAwIBAgIRAPIConsgRjgkCZ98EpK+B2gwCgYIKoZIzj0EAwIwWDEL -MAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xDTALBgNVBAoTBE9yZzMxDTALBgNVBAMTBE9yZzMwHhcNMTcwNTA4 -MDkzMDM0WhcNMjcwNTA2MDkzMDM0WjBlMQswCQYDVQQGEwJVUzETMBEGA1UECBMK -Q2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEVMBMGA1UEChMMT3Jn -My1zZXJ2ZXIxMRIwEAYDVQQDEwlsb2NhbGhvc3QwWTATBgcqhkjOPQIBBggqhkjO -PQMBBwNCAARfWHB6mV/8JHCGMcFO88qIIsKjOj4R3zdSuqVdef36DYPQsrfm/RCl -Ck0SMEEaOcgTRieDsVFBmglUVtA1bhlxo1AwTjAOBgNVHQ8BAf8EBAMCBaAwHQYD -VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDwYDVR0j -BAgwBoAEAQIDBDAKBggqhkjOPQQDAgNJADBGAiEA6GCe3Y1xSyBFsl1NSj104Agt -tka0e1pYiFVRyhc2VsICIQCX+KvCtJG52+Us5QiMj3JDRT9v4Awt3SyIYgLvdoiW -7w== ------END CERTIFICATE----- diff --git a/core/peer/testdata/Org3-server1-key.pem b/core/peer/testdata/Org3-server1-key.pem deleted file mode 100644 index ccad2689f21..00000000000 --- a/core/peer/testdata/Org3-server1-key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIGTkwJcw9zftKoRk4Qo/74MFO3a+3Wu/E2s58uVDeudgoAoGCCqGSM49 -AwEHoUQDQgAEX1hweplf/CRwhjHBTvPKiCLCozo+Ed83UrqlXXn9+g2D0LK35v0Q -pQpNEjBBGjnIE0Yng7FRQZoJVFbQNW4ZcQ== ------END EC PRIVATE KEY----- diff --git a/core/peer/testdata/generate.go b/core/peer/testdata/generate.go deleted file mode 100644 index 73e0923bdd9..00000000000 --- a/core/peer/testdata/generate.go +++ /dev/null @@ -1,12 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -// +build ignore - -//go:generate -command gencerts go run github.com/hyperledger/fabric/core/comm/testdata/certs -//go:generate gencerts -orgs 3 -child-orgs 1 -servers 1 -clients 0 - -package testdata diff --git a/core/scc/cscc/configure_test.go b/core/scc/cscc/configure_test.go index a085ffd4434..ba4aa1073a7 100644 --- a/core/scc/cscc/configure_test.go +++ b/core/scc/cscc/configure_test.go @@ -586,7 +586,7 @@ func newPeerConfiger(t *testing.T, ledgerMgr *ledgermgmt.LedgerMgr, grpcServer * defaultDeliverClientDialOpts = append( defaultDeliverClientDialOpts, grpc.WithBlock(), - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize), grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(comm.DefaultMaxRecvMsgSize), grpc.MaxCallSendMsgSize(comm.DefaultMaxSendMsgSize)), ) defaultDeliverClientDialOpts = append( defaultDeliverClientDialOpts, diff --git a/core/scc/lscc/deployedcc_infoprovider.go b/core/scc/lscc/deployedcc_infoprovider.go index 144aceaa850..8970539bfc4 100644 --- a/core/scc/lscc/deployedcc_infoprovider.go +++ b/core/scc/lscc/deployedcc_infoprovider.go @@ -14,6 +14,7 @@ import ( "github.com/hyperledger/fabric/core/common/ccprovider" "github.com/hyperledger/fabric/core/common/privdata" "github.com/hyperledger/fabric/core/ledger" + "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" "github.com/pkg/errors" ) @@ -37,7 +38,7 @@ func (p *DeployedCCInfoProvider) UpdatedChaincodes(stateUpdates map[string][]*kv updatedCCNames := map[string]bool{} for _, kvWrite := range lsccUpdates { - if kvWrite.IsDelete { + if rwsetutil.IsKVWriteDelete(kvWrite) { // lscc namespace is not expected to have deletes continue } diff --git a/docs/source/CONTRIBUTING.rst b/docs/source/CONTRIBUTING.rst index df81d2f277d..593cdf33c25 100644 --- a/docs/source/CONTRIBUTING.rst +++ b/docs/source/CONTRIBUTING.rst @@ -19,10 +19,6 @@ As a user: - `Making 
Feature/Enhancement Proposals`_ - `Reporting bugs`_ -- Help test an upcoming Epic on the - `release roadmap `_. - Contact the Epic assignee via the Jira work item or on - `RocketChat `_. As a writer or information developer: @@ -49,14 +45,14 @@ Jump to `Contributing documentation`_ to get started on your journey. As a developer: - If you only have a little time, consider picking up a - `"help-wanted" `_ task, + `"good first issue" `_ task, see `Fixing issues and working stories`_. - If you can commit to full-time development, either propose a new feature (see `Making Feature/Enhancement Proposals`_) and bring a team to implement it, or join one of the teams working on an existing Epic. If you see an Epic that interests you on the - `release roadmap `_, - contact the Epic assignee via the Jira work item or on `RocketChat `__. + `GitHub epic backlog `_, + contact the Epic assignee via the GitHub issue. Getting a Linux Foundation account ---------------------------------- @@ -65,7 +61,6 @@ In order to participate in the development of the Hyperledger Fabric project, you will need a Linux Foundation account. Once you have a LF ID you will be able to access all the Hyperledger community tools, including -`Jira issue management `__, `RocketChat `__, and the `Wiki `__ (for editing, only). @@ -86,8 +81,7 @@ already have one. 5. Verify that your browser displays the message ``You have successfully validated your e-mail address``. -6. Access `Jira issue management `__, or - `RocketChat `__. +6. Access `RocketChat `__ to confirm access. Contributing documentation -------------------------- @@ -152,7 +146,7 @@ Releases ~~~~~~~~ Fabric provides a release approximately once every four months with new features and improvements. -New feature work is merged to the Fabric master branch on `Github `__. +New feature work is merged to the Fabric main branch on `GitHub `__. 
Releases branches are created prior to each release so that the code can stabilize while new features continue to get merged to the master branch. Important fixes will also be backported to the most recent LTS (long-term support) release branch, @@ -167,8 +161,8 @@ Minor improvements can be implemented and reviewed via the normal `GitHub pull r This process is intended to provide a consistent and controlled path for major changes to Fabric and other official project components, so that all stakeholders can be confident about the direction in which Fabric is evolving. -To propose a new feature, first, check -`JIRA `__ and the `Fabric RFC repository `__ to be sure that there isn't already an open (or recently closed) proposal for the same functionality. If there isn't, follow `the RFC process `__ to make a proposal. +To propose a new feature, first, check the +`GitHub issues backlog `__ and the `Fabric RFC repository `__ to be sure that there isn't already an open (or recently closed) proposal for the same functionality. If there isn't, follow `the RFC process `__ to make a proposal. Contributor meeting ~~~~~~~~~~~~~~~~~~~ @@ -188,18 +182,18 @@ maintainers meeting for consideration, feedback and acceptance. Release roadmap ~~~~~~~~~~~~~~~ -The Fabric release roadmap of epics is maintained in -`JIRA `__. +The Fabric release roadmap is managed as a list of +`GitHub issues with Epic label `__. Communications ~~~~~~~~~~~~~~ We use `RocketChat `__ for communication and Google Hangouts™ for screen sharing between developers. Our -development planning and prioritization is done in -`JIRA `__, and we take longer running -discussions/decisions to the `mailing -list `__. +development planning and prioritization is done using a +`GitHub Issues ZenHub board `__, and we take longer running +discussions/decisions to the `Fabric contributor meeting `__ +or `mailing list `__. 
Contribution guide ------------------ @@ -230,8 +224,8 @@ Reporting bugs ~~~~~~~~~~~~~~ If you are a user and you have found a bug, please submit an issue using -`JIRA `__. -Before you create a new JIRA issue, please try to search the existing items to +`GitHub Issues `__. +Before you create a new GitHub issue, please try to search the existing issues to be sure no one else has previously reported it. If it has been previously reported, then you might add a comment that you also are interested in seeing the defect fixed. @@ -241,7 +235,7 @@ the defect fixed. If it has not been previously reported, you may either submit a PR with a well documented commit message describing the defect and the fix, or you -may create a new JIRA. Please try to provide +may create a new GitHub issue. Please try to provide sufficient information for someone else to reproduce the issue. One of the project's maintainers should respond to your issue within 24 hours. If not, please bump the issue with a comment and request that it be @@ -253,18 +247,18 @@ and so on... Submitting your fix ~~~~~~~~~~~~~~~~~~~ -If you just submitted a JIRA for a bug you've discovered, and would like to -provide a fix, we would welcome that gladly! Please assign the JIRA issue to +If you just submitted a GitHub issue for a bug you've discovered, and would like to +provide a fix, we would welcome that gladly! Please assign the GitHub issue to yourself, then submit a pull request (PR). Please refer to :doc:`github/github` for a detailed workflow. Fixing issues and working stories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Fabric issues and bugs are managed in `JIRA `__. +Fabric issues and bugs are managed in `GitHub issues `__. Review the list of issues and find something that interests you. You could also check the -`"help-wanted" `__ +`"good first issue" `__ list. It is wise to start with something relatively straight forward and achievable, and that no one is already assigned. 
If no one is assigned, then assign the issue to yourself. Please be considerate and rescind the @@ -272,9 +266,9 @@ assignment if you cannot finish in a reasonable time, or add a comment saying that you are still actively working the issue if you need a little more time. -While Jira tracks a backlog of known issues that could be worked in the future, -if you intend to immediately work on a change that does not yet have a corresponding Jira issue, -you can submit a pull request to `Github `__ without linking to an existing Jira issue. +While GitHub issues tracks a backlog of known issues that could be worked in the future, +if you intend to immediately work on a change that does not yet have a corresponding issue, +you can submit a pull request to `Github `__ without linking to an existing issue. Reviewing submitted Pull Requests (PRs) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -321,7 +315,7 @@ hours (3 days), it will be flagged to the #fabric-pr-review channel daily until it receives a review comment(s). This policy applies to all official Fabric projects (fabric, fabric-ca, -fabric-samples, fabric-test, fabric-sdk-node, fabric-sdk-java, fabric-gateway-java, +fabric-samples, fabric-test, fabric-sdk-node, fabric-sdk-java, fabric-sdk-go, fabric-gateway-java, fabric-chaincode-node, fabric-chaincode-java, fabric-chaincode-evm, fabric-baseimage, and fabric-amcl). @@ -339,12 +333,13 @@ What makes a good pull request? regression, it is much easier to identify the culprit commit than if we have some composite change that impacts more of the code. -- If there is a corresponding Jira issue or bug, include a link to the - Jira issue in the PR summary and commit message. - Why? Because the maintainer that merges the PR will need to close - any corresponding Jira issue. - Also, in many cases, there will be additional discussion around - a proposed change or bug in Jira. 
+- If there is a corresponding GitHub issue, include a link to the + GitHub issue in the PR summary and commit message. + Why? Because there will often be additional discussion around + a proposed change or bug in the GitHub issue. + Additionally, if you use syntax like "Resolves #" + in the PR summary and commit message, the GitHub issue will + automatically be closed when the PR is merged. - Include unit and integration tests (or changes to existing tests) with every change. This does not mean just happy path testing, diff --git a/docs/source/Fabric-FAQ.rst b/docs/source/Fabric-FAQ.rst index 8503dc8f6fd..e4864c5ae8e 100644 --- a/docs/source/Fabric-FAQ.rst +++ b/docs/source/Fabric-FAQ.rst @@ -56,7 +56,12 @@ Security & Access Control Do the orderers see the transaction data? :Answer: - No, the orderers only order transactions, they do not open the transactions. + Orderers receive endorsed transactions that are submitted from application + clients. The endorsed payload contains the chaincode execution results + including the ReadSet and WriteSet information. The orderers only validate + the submitter's identity and order transactions, they do not open the + endorsed transactions. + If you do not want the data to go through the orderers at all, then utilize the private data feature of Fabric. Alternatively, you can hash or encrypt the data in the client application before calling chaincode. If you encrypt @@ -197,33 +202,6 @@ Ordering Service .. -:Question: - **What is the orderer system channel?** - -:Answer: - The orderer system channel (sometimes called ordering system channel) is the - channel the orderer is initially bootstrapped with. It is used to orchestrate - channel creation. The orderer system channel defines consortia and the initial - configuration for new channels. 
At channel creation time, the organization - definition in the consortium, the ``/Channel`` group's values and policies, as - well as the ``/Channel/Orderer`` group's values and policies, are all combined - to form the new initial channel definition. - -.. - -:Question: - **If I update my application channel, should I update my orderer system - channel?** - -:Answer: - Once an application channel is created, it is managed independently of any - other channel (including the orderer system channel). Depending on the - modification, the change may or may not be desirable to port to other - channels. In general, MSP changes should be synchronized across all channels, - while policy changes are more likely to be specific to a particular channel. - -.. - :Question: **Can I have an organization act both in an ordering and application role?** diff --git a/docs/source/_templates/footer.html b/docs/source/_templates/footer.html index 9dcf981285b..3c07bc26938 100644 --- a/docs/source/_templates/footer.html +++ b/docs/source/_templates/footer.html @@ -18,7 +18,7 @@ {%- if hasdoc('copyright') %} {% trans path=pathto('copyright'), copyright=copyright|e %}© Copyright {{ copyright }}.{% endtrans %} {%- else %} - {% trans copyright=copyright|e %}© Copyright Hyperledger 2020.{% endtrans %} + {% trans copyright=copyright|e %}© Copyright Hyperledger 2020-2022.{% endtrans %} {%- endif %} {%- endif %}
diff --git a/docs/source/advice_for_writers.md b/docs/source/advice_for_writers.md index bfbf058345a..f8f3933e910 100644 --- a/docs/source/advice_for_writers.md +++ b/docs/source/advice_for_writers.md @@ -17,7 +17,7 @@ In this topic, we're going to cover: Before you make a documentation change, you might like to connect with other people working on the Fabric documentation. Your [Linux Foundation -account](./contributing.html#getting-a-linux-foundation-account) will give you +account](./CONTRIBUTING.html#getting-a-linux-foundation-account) will give you access to many different resources to help you connect with other contributors to the documentation. @@ -59,7 +59,7 @@ wiki](https://wiki.hyperledger.org/display/fabric/Documentation+Working+Group). Each of the international languages has a welcoming workgroup that you are encouraged to join. View the [list of international -workgroups](https://wiki.hyperledger.org/display/fabric/International+groups). +workgroups](https://wiki.hyperledger.org/display/I18N/International+groups). See what your favorite workgroup is doing, and get connected with them. Each workgroup has a list of members and their contact information. @@ -67,7 +67,7 @@ Each workgroup has a list of members and their contact information. Hyperledger Fabric has many other collaboration mechanisms such as mailing lists, contributor meetings and maintainer meetings. Find out about these and -more [here](./contributing.html). +more [here](./CONTRIBUTING.html). Good luck getting started and thanks for your contribution. diff --git a/docs/source/blockchain.rst b/docs/source/blockchain.rst index 8bf2109a815..c3faf71c362 100644 --- a/docs/source/blockchain.rst +++ b/docs/source/blockchain.rst @@ -181,8 +181,7 @@ interact with the ledger. In most cases, chaincode interacts only with the database component of the ledger, the world state (querying it, for example), and not the transaction log. -Chaincode can be implemented in several programming languages. 
Currently, Go and -Node are supported. +Chaincode can be implemented in several programming languages. Currently, Go, Node.js, and Java chaincode are supported. **Privacy** diff --git a/docs/source/capabilities_concept.md b/docs/source/capabilities_concept.md index 4420c9f1602..7b31ba7eccf 100644 --- a/docs/source/capabilities_concept.md +++ b/docs/source/capabilities_concept.md @@ -6,7 +6,7 @@ Because Fabric is a distributed system that will usually involve multiple organizations, it is possible (and typical) that different versions of Fabric code will exist on different nodes within the network as well as on the channels in that network. Fabric allows this --- it is not necessary for every peer and ordering node to be at the same version level. In fact, supporting different version levels is what enables rolling upgrades of Fabric nodes. -What **is** important is that networks and channels process things in the same way, creating deterministic results for things like channel configuration updates and chaincode invocations. Without deterministic results, one peer on a channel might invalidate a transaction while another peer may validate it. +What **is** important is that networks and channels process things in the same way, creating deterministic results for things like channel configuration updates and chaincode invocations. Without deterministic results, one peer on a channel might invalidate a transaction while another peer validates it. To that end, Fabric defines levels of what are called "capabilities". These capabilities, which are defined in the configuration of each channel, ensure determinism by defining a level at which behaviors produce consistent results. As you'll see, these capabilities have versions which are closely related to node binary versions. Capabilities enable nodes running at different version levels to behave in a compatible and consistent way given the channel configuration at a specific block height. 
You will also see that capabilities exist in many parts of the configuration tree, defined along the lines of administration for particular tasks. @@ -14,12 +14,10 @@ As you'll see, sometimes it is necessary to update your channel to a new capabil ## Node versions and capability versions -If you're familiar with Hyperledger Fabric, you're aware that it follows a typical versioning pattern: v1.1, v1.2.1, v2.0, etc. These versions refer to releases and their related binary versions. - -Capabilities follow the same versioning convention. There are v1.1 capabilities and v1.2 capabilities and 2.0 capabilities and so on. But it's important to note a few distinctions. +If you're familiar with Hyperledger Fabric, you're aware that it follows a typical versioning pattern: v1.1, v1.2.1, v2.0, etc. These versions refer to releases and their related binary versions. Capabilities follow the same versioning convention. There are v1.1 capabilities and v1.2 capabilities and 2.0 capabilities and so on. But it's important to note a few distinctions. * **There is not necessarily a new capability level with each release**. - The need to establish a new capability is determined on a case by case basis and relies chiefly on the backwards compatibility of new features and older binary versions. Adding Raft ordering services in v1.4.1, for example, did not change the way either transactions or ordering service functions were handled and thus did not require the establishment of any new capabilities. [Private Data](./private-data/private-data.html), on the other hand, could not be handled by peers before v1.2, requiring the establishment of a v1.2 capability level. Because not every release contains a new feature (or a bug fix) that changes the way transactions are processed, certain releases will not require any new capabilities (for example, v1.4) while others will only have new capabilities at particular levels (such as v1.2 and v1.3). 
We'll discuss the "levels" of capabilities and where they reside in the configuration tree later. + The need to establish a new capability is determined on a case by case basis and relies chiefly on the backwards compatibility of new features and older binary versions. Allowing channels to be created [without the use of a system channel](./create_channel/create_channel_participation.html), a new feature in v2.3, did not change the way either transactions or ordering service functions were handled and thus did not require the establishment of any new capabilities. [Private Data](./private-data/private-data.html), on the other hand, could not be handled by peers before v1.2, requiring the establishment of a v1.2 capability level. Because not every release contains a new feature (or a bug fix) that changes the way transactions are processed, certain releases will not require any new capabilities (for example, v1.4) while others will only have new capabilities at particular levels (such as v1.2 and v1.3). We'll discuss the "levels" of capabilities and where they reside in the configuration tree later. * **Nodes must be at least at the level of certain capabilities in a channel**. When a peer joins a channel, it reads all of the blocks in the ledger sequentially, starting with the genesis block of the channel and continuing through the transaction blocks and any subsequent configuration blocks. If a node, for example a peer, attempts to read a block containing an update to a capability it doesn't understand (for example, a v1.4.x peer trying to read a block containing a v2.0 application capability), **the peer will crash**. This crashing behavior is intentional, as a v1.4.x peer should not attempt validate or commit any transactions past this point. Before joining a channel, **make sure the node is at least the Fabric version (binary) level of the capabilities specified in the channel config relevant to the node**. 
We'll discuss which capabilities are relevant to which nodes later. However, because no user wants their nodes to crash, it is strongly recommended to update all nodes to the required level (preferably, to the latest release) before attempting to update capabilities. This is in line with the default Fabric recommendation to **always** be at the latest binary and capability levels. @@ -36,11 +34,13 @@ As we discussed earlier, there is not a single capability level encompassing an * **Channel**: This grouping encompasses tasks that are **jointly administered** by the peer organizations and the ordering service. For example, this is the capability that defines the level at which channel configuration updates, which are initiated by peer organizations and orchestrated by the ordering service, are processed. On a practical level, **this grouping defines the minimum level for all of the binaries in a channel, as both ordering nodes and peers must be at least at the binary level corresponding to this capability in order to process the capability**. -The **orderer** and **channel** capabilities of a channel are inherited by default from the ordering system channel, where modifying them are the exclusive purview of ordering service admins. As a result, peer organizations should inspect the genesis block of a channel prior to joining their peers to that channel. Although the channel capability is administered by the orderers in the orderer system channel (just as the consortium membership is), it is typical and expected that the ordering admins will coordinate with the consortium admins to ensure that the channel capability is only upgraded when the consortium is ready for it. +While it is possible to create an ordering service using a legacy process in which a "system channel" administered by the ordering service is created before any application channels are created, the recommended path is to create a channel without using a system channel. 
+ +If you do use the [legacy system channel process](./create_channel/create_channel_test_net.html), the **orderer** and **channel** capabilities of a channel are inherited by default from the ordering system channel, where modifying them are the exclusive purview of ordering service admins. As a result, peer organizations should inspect the genesis block of a channel prior to joining their peers to that channel. Although the channel capability is administered by the orderers in the orderer system channel (just as the consortium membership is), it is typical and expected that the ordering admins will coordinate with the consortium admins to ensure that the channel capability is only upgraded when the consortium is ready for it. Note: because the ordering system channel does not define an **application** capability, this capability must be specified in the channel profile when creating the genesis block for the channel. -Because the ordering system channel does not define an **application** capability, this capability must be specified in the channel profile when creating the genesis block for the channel. +If you use the recommended process to [Create a channel](./create_channel/create_channel_participation.html), all these capability levels are specified in the application channel genesis block that is created. -**Take caution** when specifying or modifying an application capability. Because the ordering service does not validate that the capability level exists, it will allow a channel to be created (or modified) to contain, for example, a v1.8 application capability even if no such capability exists. Any peer attempting to read a configuration block with this capability would, as we have shown, crash, and even if it was possible to modify the channel once again to a valid capability level, it would not matter, as no peer would be able to get past the block with the invalid v1.8 capability. +**Take caution** when modifying an application capability. 
Because the ordering service does not validate that the capability level exists, it will allow a channel to be modified to contain, for example, a v1.8 application capability even if no such capability exists. Any peer attempting to read a configuration block with this capability would, as we have shown, crash, and even if it was possible to modify the channel once again to a valid capability level, it would not matter, as no peer would be able to get past the block with the invalid v1.8 capability. For a full look at the current valid orderer, application, and channel capabilities check out a [sample `configtx.yaml` file](http://github.com/hyperledger/fabric/blob/{BRANCH}/sampleconfig/configtx.yaml), which lists them in the "Capabilities" section. diff --git a/docs/source/capability_requirements.rst b/docs/source/capability_requirements.rst index 6be355fd1d9..f4b44370805 100644 --- a/docs/source/capability_requirements.rst +++ b/docs/source/capability_requirements.rst @@ -1,6 +1,14 @@ Defining capability requirements ================================ +.. note:: This topic describes a network that does not use a "system channel", a + channel that the ordering service is bootstrapped with and the ordering + service exclusively controls. Since the release of v2.3, using a system + channel is now considered the legacy process as compared to the process + to :doc:`create_channel_participation`. For a version of this topic that + includes information about the system channel, check out + `Capability requirements `_ from the v2.2 documentation. + As discussed in :doc:`capabilities_concept`, capability requirements are defined per channel in the channel configuration (found in the channel’s most recent configuration block). The channel configuration contains three locations, each @@ -27,11 +35,6 @@ reconfiguration). .. note:: For more information about how to update a channel configuration, check out :doc:`config_update`. 
-Because new channels copy the configuration of the ordering system channel by -default, new channels will automatically be configured to work with the orderer -and channel capabilities of the ordering system channel and the application -capabilities specified by the channel creation transaction. - Capabilities in an Initial Configuration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -42,8 +45,5 @@ for each capability type (Channel, Orderer, and Application). Note that there is a ``Capabilities`` section defined at the root level (for the channel capabilities), and at the Orderer level (for orderer capabilities). -When defining the orderer system channel there is no Application section, as those -capabilities are defined during the creation of an application channel. - .. Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ diff --git a/docs/source/certs_management.md b/docs/source/certs_management.md new file mode 100644 index 00000000000..15af03fab6d --- /dev/null +++ b/docs/source/certs_management.md @@ -0,0 +1,381 @@ +# Certificates Management Guide + +**Audience**: Hyperledger Fabric network admins + +This guide provides overview information and details for a network administrator to manage certificates (certs) in Hyperledger Fabric. 
+ +## Prerequisites and Resources + +The following Fabric documentation resources on identities, Membership Service Providers (MSPs) and Certificate Authorities (CAs) provide context for understanding certificate management: + +* [Identity](./identity/identity.html#identity) +* [MSP Overview](./membership/membership.html) +* [MSP Configuration](./msp.html) +* [Registration and Enrollment](https://hyperledger-fabric-ca.readthedocs.io/en/latest/deployguide/use_CA.html#overview-of-registration-and-enrollment) +* [Registering an Identity](https://hyperledger-fabric-ca.readthedocs.io/en/latest/deployguide/use_CA.html#register-an-identity) +* [Enrolling an Identity](https://hyperledger-fabric-ca.readthedocs.io/en/latest/deployguide/use_CA.html#enroll-an-identity) + + +## Key Concepts + +**Registration** – A username and password pair, stored in the Certificate Authority (CA). This registration is created by a CA admin user, has no expiration, and contains any required roles and attributes. + +**Enrollment** – A public/private key pair and an X.509 certificate issued by the organization's Certificate Authority (CA). The certificate encodes roles, attributes, and metadata, which represent an identity in a Fabric network. An enrollment is associated with a CA registration by username and password. + +**Identity** - A public certificate and its private key used for encryption. The public certificate is the X.509 certificate issued by the CA, while the private key is stored out of band, on a secure storage. + +**TLS** - A public Transport Layer Security (TLS) Certificate that authorizes client and node communications. On Fabric, registration and enrollment are the same for X.509 Certificates and TLS Certificates. + + +## Certificate Types + +Hyperledger Fabric implements two types of certificates: 1) **Enrollment** Certificates for identities and 2) **TLS** Certificates for node and client communications. 
+ +### Enrollment Certificates + +Enrollment Certificates are classed into four types: + +* **Admin** +* **Peer** +* **Orderer** +* **Client** + +Each Enrollment Certificate type has a specific role: + +**Admin:** X.509 Certificates used to authenticate admin identities, which are required to make changes to Fabric configurations. + +**Peer:** X.509 Certificates used to enroll peer nodes, located physically on the node or mapped to the node. For a Fabric peer node to start, it must have a valid Enrollment Certificate with the required attributes. + +**Orderer:** X.509 Certificates used to enroll orderer nodes, located physically on the node or mapped to the node. For a Fabric orderer node to start, it must have a valid Enrollment Certificate with the required attributes. + +**Client:** X.509 Certificates that allow signed requests to be passed from clients to Fabric nodes. Client certs define the identities of client applications submitting transactions to a Fabric network. + + +### TLS Certificates + +TLS Certificates allow Fabric nodes and clients to sign and encrypt communications. A valid TLS Certificate is required for any channel communication. + +### Certificate Expiration + +Enrollment and TLS Certificates are assigned an expiration date by the issuing Certificate Authority (CA). Expiration dates must be monitored, and certificates must be re-enrolled before expiration. The most important certificate parameter is the **Not After** element, which indicates its expiration date. + + +## Certificates and Locations + +Organization CAs supply X.509 Enrollment Certificates for identities and the TLS CAs supply TLS Certificates for securing node and client communications. + + +### Organization CA Certificates + +Organization CA Root Certificates and Organization CA Admin Certificates provide authorization to interact with the certificate authority for the organization, as described below. 
+ +#### Organization CA Root Certificate + +**Description**: Public Certificate that permits verification of all certificates issued by the Organization CA. Organization CA Root Certificates are self-signed certificates if creating a new Certificate Authority (CA), or provided by an external CA. + +**Location**: Stored on disk in the Organization CA directory (ca-cert.pem), and copied into the channel configuration to verify identifies for the organization. + +**Impact if expired**: A new Organization CA Root Certificate must be issued. Organization CA Root Certificates are valid for 15 years. + + +#### Organization CA Admin Certificate + +**Description**: Certificate used when making admin requests to the Organization CA. + +**Location**: Dependent on implementation: + +
**Note**: Each identity has a local **msp** directory structure which contains its certificate in the **signcerts** directory and its private key in the **keystore** directory. For details on the **msp** directory, refer to [MSP Structure](https://hyperledger-fabric.readthedocs.io/en/latest/membership/membership.html#msp-structure).
+ +
+msp
+ ├── IssuerPublicKey
+ ├── IssuerRevocationPublicKey
+ ├── cacerts
+ │   └── localhost-7053.pem
+ ├── keystore
+ │   └── key.pem
+ ├── signcerts
+ │   └── cert.pem
+ └── user
+
+ +**Impact if expired**: The Organization Administrator cannot register new identities with the CA, but transaction traffic does not stop. + +[Reference - Enroll Orderer Org CA Admin](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-orderer-org-s-ca-admin) + + +### TLS CA Certificates + +TLS CA Root Certificates and TLS CA Admin Certificates provide authorization to interact with the certificate authority for the TLS, as described below. + + +#### TLS CA Root Certificate + +**Description**: Public certificate that permits verification of all certificates issued by the TLS CA. TLS CA Root Certificates are self-signed certificates if creating a new Certificate Authority (CA), or provided by an external CA. + +**Location**: Stored on disk in the TLS CA directory (ca-cert.pem), and copied into the channel configuration to verify TLS Certificates for the organization. + +**Impact if expired**: A new TLS CA Root Certificate must be issued. TLS CA Root Certificates are valid for 15 years. + + +#### TLS CA Admin Certificate + +**Description**: Certificate used for admin requests to the TLS CA. + +**Location**: Dependent on implementation: + +
+msp
+ ├── IssuerPublicKey
+ ├── IssuerRevocationPublicKey
+ ├── cacerts
+ │   └── localhost-7053.pem
+ ├── keystore
+ │   └── key.pem
+ ├── signcerts
+ │   └── cert.pem
+ └── user
+
+ +**Impact if expired**: The Fabric Administrator will no longer be able to register TLS certificates in the TLS CA for nodes in the network. + +[Reference - Enroll TLS CA Admin](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-tls-ca-s-admin) + + +### Peer Certificates + +A Peer Enrollment Certificate and a Peer TLS Certificate are issued for each peer in an organization. + +#### Peer Enrollment Certificate + +**Description**: Authenticates the identity of the peer node when endorsing transactions. + +**Location**: Dependent on implementation: + +
+org1ca
+└── peer1
+    ├── msp
+    │    ├── admincerts
+    │    │   └── cert.pem
+    │    ├── cacerts
+    │    │   └── localhost-7053.pem
+    │    ├── keystore
+    │    │   └── key.pem
+    │    ├── signcerts
+    │    │   └── cert.pem
+    │    └── user
+    └── tls
+
+ +**Impact if expired**: Production outage. Peers do not start without a valid Enrollment Certificate. + +[Reference - Enroll peer](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-peer1) + + +#### Peer TLS Certificate + +**Description**: Authenticates node component communication on the channel. + +**Location**: Dependent on implementation: + +
+org1ca/
+└── peer1
+    ├── msp
+    └── tls
+        ├── cacerts
+        ├── keystore
+        │   └── key.pem
+        ├── signcerts
+        │   └── cert.pem
+        ├── tlscacerts
+        │   └── tls-localhost-7053.pem
+        └── user
+
+ +**Impact if expired**: Production outage. No communication to the peer is possible. + +[Reference - Enroll peer](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-peer1) + + +### Orderer Certificates + +Orderer Enrollment Certificates and Orderer TLS Certificates are issued for each ordering service node in an organization. + +#### Orderer Enrollment Certificate + +**Description**: The public key that the orderer uses to sign blocks. + +**Location**: Dependent on implementation: + +
+ └── orderer1
+     ├── msp
+     │   ├── admincerts
+     │   │   └── cert.pem
+     │   ├── cacerts
+     │   │   └── localhost-7053.pem
+     │   ├── keystore
+     │   │   └── key.pem
+     │   ├── signcerts
+     │   │   └── cert.pem
+     │   └── user
+     └── tls
+
+ +**Impact if expired**: Production outage. Orderers do not start without a valid Enrollment Certificate. + +[Reference - Enroll orderer](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-orderer) + + +#### Orderer TLS Certificate + +**Description**: TLS Certificate for the ordering node communication. + +**Location**: Dependent on implementation: + +
+ordererca/
+└── orderer1
+    ├── msp
+    └── tls
+        ├── cacerts
+        ├── keystore
+        │   └── key.pem
+        ├── signcerts
+        │   └── cert.pem
+        ├── tlscacerts
+        │   └── tls-localhost-7053.pem
+        └── user
+  
+ +**Impact if expired**: Production outage. Ordering nodes are no longer allowed to participate in cluster. + +[Reference - Enroll orderer](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-orderer) + + +### Admin Certificates + +Ordering Service Organization Channel Admin Certificates and Peer Service Organization Channel Admin Certificates are issued for each organization. + +#### Ordering Service Organization Channel Admin Certificate + +**Description**: Certificate for an organization administrator to manage ordering service and channel updates. + +**Location**: Dependent on implementation: + +
+ordererca/
+└── ordereradmin
+└── msp
+    ├── admincerts
+    │   └── cert.pem
+    ├── cacerts
+    │   └── localhost-7053.pem
+    ├── keystore
+    │   └── key.pem
+    ├── signcerts
+    │   └── cert.pem
+    └── user
+
+ +**Impact if expired**: Transactions can continue to work successfully. Cannot modify channels from a client application or manage the orderer from the console. + +[Reference - Enroll Org Admin](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-org0-s-admin) + + +#### Peer Service Organization Channel Admin Certificate + +**Description** - Certificate for an organization administrator to manage a peer, including channel and chaincode services. + +**Location** - Dependent on implementation: + +
+org1ca/
+└── org1admin
+└── msp
+    ├── admincerts
+    │   └── cert.pem
+    ├── cacerts
+    │   └── localhost-7053.pem
+    ├── keystore
+    │   └── key.pem
+    ├── signcerts
+    │   └── cert.pem
+    └── user
+
+ +**Impact if expired**: Transactions can continue to work successfully. Cannot install new smart contracts from a client application or manage the peer from the console. + +[Reference - Enroll Org Admin](https://hyperledger-fabric-ca.readthedocs.io/en/latest/operations_guide.html#enroll-org1-s-admin) + + +### Client Certificates + +**Description**: Two types of Client Certificates are issued for each organization: + +1. **Organization Enrollment Certificate** - Authenticates the client identity for interactions with peers and orderers. +2. **TLS Certificate** - Authenticates client communications, and only required if mutual TLS is configured. + +Client Certificates expire after one year, using the Hyperledger Fabric CA default settings. Client Certificates can be re-enrolled using either command line Hyperledger Fabric CA utilities or the Fabric CA client SDK. + +**Impact if expired**: Client Certificates must be re-enrolled before expiration or the client application will not be able to interact with the Fabric nodes. + +[Reference - Re-enroll user](https://hyperledger.github.io/fabric-sdk-node/release-2.2/FabricCAClient.html#reenroll__anchor) + + +### Certificate Decoding + +X.509 Certificates are created by an enrollment of the certificate, based on its registration. The X.509 Certificate contains metadata describing its purpose and identifying the parent CA. The cert expiration is specified in the **Not After** field. 
+ +The certificate details can be decoded using the OpenSSL utility: + +``` +# openssl x509 -in cert.pem -text -noout +``` + +The following example shows a decoded certificate: + +``` +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 47:4d:5d:f6:db:92:6b:54:98:8d:9c:44:0c:ad:b6:77:c5:de:d2:ed + Signature Algorithm: ecdsa-with-SHA256 + Issuer: C = US, ST = North Carolina, O = Hyperledger, OU = Fabric, CN = orderer1ca + Validity + Not Before: Feb 4 14:55:00 2022 GMT + Not After : Feb 4 15:51:00 2023 GMT + Subject: C = US, ST = North Carolina, O = Hyperledger, OU = orderer, CN = orderer1 + Subject Public Key Info: + Public Key Algorithm: id-ecPublicKey + Public-Key: (256 bit) + pub: + 04:29:ec:d5:53:3e:03:9d:64:a4:a4:28:a5:fe:12: + e2:f0:dd:e4:ee:b9:3f:3e:01:b2:3a:d4:68:b1:b2: + 4f:82:1a:3a:33:db:92:6d:10:c9:c2:3b:3d:fc:7a: + f0:fa:cc:8b:44:e8:03:cb:a1:6e:eb:b3:6c:05:a2: + f8:fc:3c:af:24 + ASN1 OID: prime256v1 + NIST CURVE: P-256 + X509v3 extensions: + X509v3 Key Usage: critical + Digital Signature + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Subject Key Identifier: + 63:97:F5:CA:BB:B7:4B:26:84:D9:65:40:E3:43:14:A4:7B:EE:79:FF + X509v3 Authority Key Identifier: + keyid:BA:2A:F8:EA:A5:7D:DF:1D:0F:CF:47:37:41:82:03:7E:04:61:D0:D8 + X509v3 Subject Alternative Name: + DNS:server1.testorg.com + 1.2.3.4.5.6.7.8.1: + {"attrs":{"hf.Affiliation":"","hf.EnrollmentID":"orderer1","hf.Type":"orderer"}} + Signature Algorithm: ecdsa-with-SHA256 + 30:45:02:21:00:e1:93:f6:3c:08:f2:b9:fb:06:c9:02:d0:cf: + e1:a6:23:a3:05:78:10:d9:41:2c:1e:2c:91:80:fd:52:ad:62: + 9c:02:20:51:33:42:5e:a0:8a:2a:ec:f5:83:46:f0:99:6a:7e: + eb:a8:97:1f:30:99:9d:ae:8d:ef:36:07:da:bb:67:ed:80 +``` diff --git a/docs/source/channel_update_tutorial.rst b/docs/source/channel_update_tutorial.rst index c03c7c8e0f1..e348573a213 100644 --- a/docs/source/channel_update_tutorial.rst +++ b/docs/source/channel_update_tutorial.rst @@ -39,19 +39,18 @@ previous environments: ./network.sh down You can now 
use the script to bring up the test network with one channel named -``mychannel``: +``channel1``: .. code:: bash - ./network.sh up createChannel + ./network.sh up createChannel -c channel1 If the command was successful, you can see the following message printed in your logs: .. code:: bash - ========= Channel successfully joined =========== - + Channel 'channel1' joined Now that you have a clean version of the test network running on your machine, we can start the process of adding a new org to the channel we created. First, we are @@ -68,7 +67,7 @@ the following commands: .. code:: bash cd addOrg3 - ./addOrg3.sh up + ./addOrg3.sh up -c channel1 The output here is well worth reading. You'll see the Org3 crypto material being generated, the Org3 organization definition being created, and then the channel @@ -78,7 +77,7 @@ If everything goes well, you'll get this message: .. code:: bash - ========= Finished adding Org3 to your test network! ========= + Org3 peer successfully added to network Now that we have confirmed we can add Org3 to our channel, we can go through the steps to update the channel configuration that the script completed behind the @@ -93,15 +92,14 @@ material for all organizations: .. code:: bash - cd addOrg3 - ./addOrg3.sh down + cd .. + ./network.sh down After the network is brought down, bring it back up again: .. code:: bash - cd .. - ./network.sh up createChannel + ./network.sh up createChannel -c channel1 This will bring your network back to the same state it was in before you executed the ``addOrg3.sh`` script. @@ -158,69 +156,34 @@ Bring up Org3 components ~~~~~~~~~~~~~~~~~~~~~~~~ After we have created the Org3 certificate material, we can now bring up the -Org3 peer. From the ``addOrg3`` directory, issue the following command: +Org3 peer. From the ``addOrg3`` directory, issue the following command if you are using docker: .. 
code:: bash - docker-compose -f docker/docker-compose-org3.yaml up -d + docker-compose -f compose/compose-org3.yaml -f compose/docker/docker-compose-org3.yaml up -d + +If you are using podman change the second file argument to `compose/podman/docker-compose-org3.yaml` -If the command is successful, you will see the creation of the Org3 peer and -an instance of the Fabric tools container named Org3CLI: +If the command is successful, you will see the creation of the Org3 peer: .. code:: bash Creating peer0.org3.example.com ... done - Creating Org3cli ... done This Docker Compose file has been configured to bridge across our initial network, -so that the Org3 peer and Org3CLI resolve with the existing peers and ordering -node of the test network. We will use the Org3CLI container to communicate with -the network and issue the peer commands that will add Org3 to the channel. - - -Prepare the CLI Environment -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The update process makes use of the configuration translator tool -- configtxlator. -This tool provides a stateless REST API independent of the SDK. Additionally it -provides a CLI tool that can be used to simplify configuration tasks in Fabric -networks. The tool allows for the easy conversion between different equivalent -data representations/formats (in this case, between protobufs and JSON). -Additionally, the tool can compute a configuration update transaction based on -the differences between two channel configurations. - -Use the following command to exec into the Org3CLI container: - -.. code:: bash +so that the Org3 peer resolves with the existing peers and ordering +node of the test network. - docker exec -it Org3cli bash - -This container has been mounted with the ``organizations`` folder, giving us -access to the crypto material and TLS certificates for all organizations and the -Orderer Org. We can use environment variables to operate the Org3CLI container -as the admin of Org1, Org2, or Org3. 
First, we need to set the environment -variables for the orderer TLS certificate and the channel name: - -.. code:: bash - - export ORDERER_CA=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem - export CHANNEL_NAME=mychannel - -Check to make sure the variables have been properly set: - -.. code:: bash - - echo $ORDERER_CA && echo $CHANNEL_NAME - -.. note:: If for any reason you need to restart the Org3CLI container, you will also need to - re-export the two environment variables -- ``ORDERER_CA`` and ``CHANNEL_NAME``. +.. note:: the `./addOrg3.sh up` command uses a `fabric-tools` CLI container to perform + the channel configuration update process demonstrated below. This is to avoid the + `jq` dependency requirement for first-time users. However, it is recommended to + follow the process below directly on your local machine instead of using the unnecessary + CLI container. Fetch the Configuration ~~~~~~~~~~~~~~~~~~~~~~~ -Now we have the Org3CLI container with our two key environment variables -- ``ORDERER_CA`` -and ``CHANNEL_NAME`` exported. Let's go fetch the most recent config block for the -channel -- ``mychannel``. +Let's go fetch the most recent config block for the channel -- ``channel1``. The reason why we have to pull the latest version of the config is because channel config elements are versioned. Versioning is important for several reasons. It prevents @@ -230,6 +193,11 @@ want to remove an Org from your channel, for example, after a new Org has been a versioning will help prevent you from removing both Orgs, instead of just the Org you want to remove). +Navigate back to the ``test-network`` directory. + +.. code:: bash + cd .. + Because Org3 is not yet a member of the channel, we need to operate as the admin of another organization to fetch the channel config. 
Because Org1 is a member of the channel, the Org1 admin has permission to fetch the channel config from the ordering service. @@ -239,16 +207,20 @@ Issue the following commands to operate as the Org1 admin. # you can issue all of these commands at once + export PATH=${PWD}/../bin:$PATH + export FABRIC_CFG_PATH=${PWD}/../config/ + export CORE_PEER_TLS_ENABLED=true export CORE_PEER_LOCALMSPID="Org1MSP" - export CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt - export CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp - export CORE_PEER_ADDRESS=peer0.org1.example.com:7051 + export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt + export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp + export CORE_PEER_ADDRESS=localhost:7051 We can now issue the command to fetch the latest config block: .. code:: bash - peer channel fetch config config_block.pb -o orderer.example.com:7050 -c $CHANNEL_NAME --tls --cafile $ORDERER_CA + peer channel fetch config channel-artifacts/config_block.pb -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem + This command saves the binary protobuf channel configuration block to ``config_block.pb``. Note that the choice of name and file extension is arbitrary. @@ -260,9 +232,9 @@ displayed in your logs: .. 
code:: bash - 2017-11-07 17:17:57.383 UTC [channelCmd] readBlock -> DEBU 011 Received block: 2 + 2021-01-07 18:46:33.687 UTC [cli.common] readBlock -> INFO 004 Received block: 2 -This is telling us that the most recent configuration block for ``mychannel`` is +This is telling us that the most recent configuration block for ``channel1`` is actually block 2, **NOT** the genesis block. By default, the ``peer channel fetch config`` command returns the most **recent** configuration block for the targeted channel, which in this case is the third block. This is because the test network script, ``network.sh``, defined anchor @@ -276,15 +248,23 @@ transactions. As a result, we have the following configuration sequence: Convert the Configuration to JSON and Trim It Down ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The channel configuration block was stored in the ``channel-artifacts`` folder to keep +the update process separate from other artifacts. Change into the ``channel-artifacts`` +folder to complete the next steps: + +.. code:: bash + cd channel-artifacts + Now we will make use of the ``configtxlator`` tool to decode this channel configuration block into JSON format (which can be read and modified by humans). We also must strip away all of the headers, metadata, creator signatures, and so on that are irrelevant to the change we want to make. We accomplish this by -means of the ``jq`` tool: +means of the ``jq`` tool (you will need to install the `jq tool <https://stedolan.github.io/jq/download/>`_ on your local machine): .. code:: bash - configtxlator proto_decode --input config_block.pb --type common.Block | jq .data.data[0].payload.data.config > config.json + configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json + jq .data.data[0].payload.data.config config_block.json > config.json This command leaves us with a trimmed down JSON object -- ``config.json`` -- which will serve as the baseline for our config update. 
@@ -308,12 +288,13 @@ We'll use the ``jq`` tool once more to append the Org3 configuration definition .. code:: bash - jq -s '.[0] * {"channel_group":{"groups":{"Application":{"groups": {"Org3MSP":.[1]}}}}}' config.json ./organizations/peerOrganizations/org3.example.com/org3.json > modified_config.json + jq -s '.[0] * {"channel_group":{"groups":{"Application":{"groups": {"Org3MSP":.[1]}}}}}' config.json ../organizations/peerOrganizations/org3.example.com/org3.json > modified_config.json -Now, within the Org3CLI container we have two JSON files of interest -- ``config.json`` -and ``modified_config.json``. The initial file contains only Org1 and Org2 material, -whereas the "modified" file contains all three Orgs. At this point it's simply -a matter of re-encoding these two JSON files and calculating the delta. +Now we have two JSON files of interest -- ``config.json`` and +``modified_config.json``. The initial file contains only Org1 and Org2 +material, whereas the "modified" file contains all three Orgs. At this +point it's simply a matter of re-encoding these two JSON files and calculating +the delta. First, translate ``config.json`` back into a protobuf called ``config.pb``: @@ -332,7 +313,7 @@ protobufs. This command will output a new protobuf binary named ``org3_update.pb .. code:: bash - configtxlator compute_update --channel_id $CHANNEL_NAME --original config.pb --updated modified_config.pb --output org3_update.pb + configtxlator compute_update --channel_id channel1 --original config.pb --updated modified_config.pb --output org3_update.pb This new proto -- ``org3_update.pb`` -- contains the Org3 definitions and high level pointers to the Org1 and Org2 material. We are able to forgo the extensive @@ -345,7 +326,7 @@ let's decode this object into editable JSON format and call it ``org3_update.jso .. code:: bash - configtxlator proto_decode --input org3_update.pb --type common.ConfigUpdate | jq . 
> org3_update.json + configtxlator proto_decode --input org3_update.pb --type common.ConfigUpdate --output org3_update.json Now, we have a decoded update file -- ``org3_update.json`` -- that we need to wrap in an envelope message. This step will give us back the header field that we stripped away @@ -353,7 +334,7 @@ earlier. We'll name this file ``org3_update_in_envelope.json``: .. code:: bash - echo '{"payload":{"header":{"channel_header":{"channel_id":"'$CHANNEL_NAME'", "type":2}},"data":{"config_update":'$(cat org3_update.json)'}}}' | jq . > org3_update_in_envelope.json + echo '{"payload":{"header":{"channel_header":{"channel_id":"'channel1'", "type":2}},"data":{"config_update":'$(cat org3_update.json)'}}}' | jq . > org3_update_in_envelope.json Using our properly formed JSON -- ``org3_update_in_envelope.json`` -- we will leverage the ``configtxlator`` tool one last time and convert it into the @@ -369,22 +350,20 @@ Sign and Submit the Config Update Almost done! -We now have a protobuf binary -- ``org3_update_in_envelope.pb`` -- within the -Org3CLI container. However, we need signatures from the requisite Admin users -before the config can be written to the ledger. The modification policy (mod_policy) -for our channel Application group is set to the default of "MAJORITY", which means that -we need a majority of existing org admins to sign it. Because we have only two orgs -- -Org1 and Org2 -- and the majority of two is two, we need both of them to sign. Without -both signatures, the ordering service will reject the transaction for failing to -fulfill the policy. - -First, let's sign this update proto as Org1. Remember that we exported the -necessary environment variables to operate the Org3CLI container as the Org1 admin. +We now have a protobuf binary -- ``org3_update_in_envelope.pb``. However, we need signatures from the requisite Admin users before the config can be written to the ledger. 
The modification policy (mod_policy) for our channel Application group is set to the default of "MAJORITY", which means that we need a majority of existing org admins to sign it. Because we have only two orgs -- Org1 and Org2 -- and the majority of two is two, we need both of them to sign. Without both signatures, the ordering service will reject the transaction for failing to fulfill the policy. + +First, let's sign this update proto as Org1. Navigate back to the ``test-network`` +directory: + +.. code:: bash + cd .. + +Remember that we exported the necessary environment variables to operate as the Org1 admin. As a result, the following ``peer channel signconfigtx`` command will sign the update as Org1. .. code:: bash - peer channel signconfigtx -f org3_update_in_envelope.pb + peer channel signconfigtx -f channel-artifacts/org3_update_in_envelope.pb The final step is to switch the container's identity to reflect the Org2 Admin user. We do this by exporting four environment variables specific to the Org2 MSP. @@ -401,10 +380,11 @@ Export the Org2 environment variables: # you can issue all of these commands at once + export CORE_PEER_TLS_ENABLED=true export CORE_PEER_LOCALMSPID="Org2MSP" - export CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt - export CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp - export CORE_PEER_ADDRESS=peer0.org2.example.com:9051 + export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt + export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp + export CORE_PEER_ADDRESS=localhost:9051 Lastly, we will issue the ``peer channel update`` command. 
The Org2 Admin signature will be attached to this call so there is no need to manually sign the protobuf a @@ -413,28 +393,26 @@ second time: .. note:: The upcoming update call to the ordering service will undergo a series of systematic signature and policy checks. As such you may find it useful to stream and inspect the ordering node's logs. You can issue a - ``docker logs -f orderer.example.com`` command from a terminal outside - the Org3CLI container to display them. + ``docker logs -f orderer.example.com`` command to display them. Send the update call: .. code:: bash - peer channel update -f org3_update_in_envelope.pb -c $CHANNEL_NAME -o orderer.example.com:7050 --tls --cafile $ORDERER_CA + peer channel update -f channel-artifacts/org3_update_in_envelope.pb -c channel1 -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem You should see a message similar to the following if your update has been submitted successfully: .. code:: bash - 2020-01-09 21:30:45.791 UTC [channelCmd] update -> INFO 002 Successfully submitted channel update + 2021-01-07 18:51:48.015 UTC [channelCmd] update -> INFO 002 Successfully submitted channel update The successful channel update call returns a new block -- block 3 -- to all of the peers on the channel. If you remember, blocks 0-2 are the initial channel configurations. Block 3 serves as the most recent channel configuration with Org3 now defined on the channel. -You can inspect the logs for ``peer0.org1.example.com`` by navigating to a terminal -outside the Org3CLI container and issuing the following command: +You can inspect the logs for ``peer0.org1.example.com`` by issuing the following command: .. 
code:: bash @@ -445,25 +423,25 @@ Join Org3 to the Channel ~~~~~~~~~~~~~~~~~~~~~~~~ At this point, the channel configuration has been updated to include our new -organization -- Org3 -- meaning that peers attached to it can now join ``mychannel``. +organization -- Org3 -- meaning that peers attached to it can now join ``channel1``. -Inside the Org3CLI container, export the following environment variables to operate -as the Org3 Admin: +Export the following environment variables to operate as the Org3 Admin: .. code:: bash # you can issue all of these commands at once + export CORE_PEER_TLS_ENABLED=true export CORE_PEER_LOCALMSPID="Org3MSP" - export CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt - export CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org3.example.com/users/Admin@org3.example.com/msp - export CORE_PEER_ADDRESS=peer0.org3.example.com:11051 + export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt + export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org3.example.com/users/Admin@org3.example.com/msp + export CORE_PEER_ADDRESS=localhost:11051 -Org3 peers can join ``mychannel`` by either the genesis block or a snapshot that is created +Org3 peers can join ``channel1`` by either the genesis block or a snapshot that is created after Org3 has joined the channel. To join by the genesis block, send a call to the ordering service asking for the genesis block of -``mychannel``. As a result of the successful channel update, the ordering service +``channel1``. As a result of the successful channel update, the ordering service will verify that Org3 can pull the genesis block and join the channel. 
If Org3 had not been successfully appended to the channel config, the ordering service would reject this request. @@ -475,7 +453,7 @@ Use the ``peer channel fetch`` command to retrieve this block: .. code:: bash - peer channel fetch 0 mychannel.block -o orderer.example.com:7050 -c $CHANNEL_NAME --tls --cafile $ORDERER_CA + peer channel fetch 0 channel-artifacts/channel1.block -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem Notice, that we are passing a ``0`` to indicate that we want the first block on the channel's ledger; the genesis block. If we simply passed the @@ -483,17 +461,17 @@ the channel's ledger; the genesis block. If we simply passed the updated config with Org3 defined. However, we can't begin our ledger with a downstream block -- we must start with block 0. -If successful, the command returned the genesis block to a file named ``mychannel.block``. +If successful, the command returned the genesis block to a file named ``channel1.block``. We can now use this block to join the peer to the channel. Issue the ``peer channel join`` command and pass in the genesis block to join the Org3 peer to the channel: .. code:: bash - peer channel join -b mychannel.block + peer channel join -b channel-artifacts/channel1.block To join by a snapshot, follow the instruction in `Taking a snapshot `__ -to take a snapshot on an existing peer. The snapshot should be taken after Org3 has been added to ``mychannel`` +to take a snapshot on an existing peer. The snapshot should be taken after Org3 has been added to ``channel1`` to ensure that the snapshot contains the updated channel configuration including Org3. Locate the snapshot directory, copy it to the filesystem of the new Org3 peer, and issue the ``peer channel joinbysnapshot`` command using the path to the snapshot on your file system. 
@@ -553,7 +531,7 @@ configure the peer to use leader election: Install, define, and invoke chaincode ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -We can confirm that Org3 is a member of ``mychannel`` by installing and invoking +We can confirm that Org3 is a member of ``channel1`` by installing and invoking a chaincode on the channel. If the existing channel members have already committed a chaincode definition to the channel, a new organization can start using the chaincode by approving the chaincode definition. @@ -564,27 +542,25 @@ chaincode by approving the chaincode definition. `Adding an org to a channel tutorial `__. Before we install a chaincode as Org3, we can use the ``./network.sh`` script to -deploy the Basic chaincode on the channel. Open a new terminal outside the -Org3CLI container and navigate to the ``test-network`` directory. You can then use +deploy the Basic chaincode on the channel. Open a new terminal and navigate to the ``test-network`` directory. You can then use the ``test-network`` script to deploy the Basic chaincode: .. code:: bash cd fabric-samples/test-network - ./network.sh deployCC -ccn basic -ccl go + ./network.sh deployCC -ccn basic -ccp ../asset-transfer-basic/chaincode-go/ -ccl go -c channel1 The script will install the Basic chaincode on the Org1 and Org2 peers, approve the chaincode definition for Org1 and Org2, and then commit the chaincode definition to the channel. Once the chaincode definition has been committed to the channel, the Basic chaincode is initialized and invoked to put initial data on the ledger. The commands below assume that we are still using the channel -``mychannel``. +``channel1``. After the chaincode has been to deployed we can use the following steps to use -invoke Basic chaincode as Org3. These steps can be completed from the -``test-network`` directory, without having to exec into Org3CLI container. 
Copy -and paste the following environment variables in your terminal in order to interact -with the network as the Org3 admin: +invoke Basic chaincode as Org3. Copy and paste the following environment +variables in your terminal in order to interact with the network as the Org3 +admin: .. code:: bash @@ -646,7 +622,7 @@ for Org3: # use the --package-id flag to provide the package identifier # use the --init-required flag to request the ``Init`` function be invoked to initialize the chaincode - peer lifecycle chaincode approveformyorg -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem --channelID mychannel --name basic --version 1.0 --package-id $CC_PACKAGE_ID --sequence 1 + peer lifecycle chaincode approveformyorg -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem --channelID channel1 --name basic --version 1.0 --package-id $CC_PACKAGE_ID --sequence 1 You can use the ``peer lifecycle chaincode querycommitted`` command to check if @@ -656,13 +632,13 @@ channel. .. code:: bash # use the --name flag to select the chaincode whose definition you want to query - peer lifecycle chaincode querycommitted --channelID mychannel --name basic --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem + peer lifecycle chaincode querycommitted --channelID channel1 --name basic --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem A successful command will return information about the committed definition: .. 
code:: bash - Committed chaincode definition for chaincode 'basic' on channel 'mychannel': + Committed chaincode definition for chaincode 'basic' on channel 'channel1': Version: 1.0, Sequence: 1, Endorsement Plugin: escc, Validation Plugin: vscc, Approvals: [Org1MSP: true, Org2MSP: true, Org3MSP: true] Org3 can use the basic chaincode after it approves the chaincode definition @@ -678,13 +654,13 @@ and the new Org3 peer so that the endorsement policy is satisfied. .. code:: bash - peer chaincode invoke -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -C mychannel -n basic --peerAddresses localhost:9051 --tlsRootCertFiles ${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt --peerAddresses localhost:11051 --tlsRootCertFiles ${PWD}/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt -c '{"function":"InitLedger","Args":[]}' + peer chaincode invoke -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -C channel1 -n basic --peerAddresses localhost:9051 --tlsRootCertFiles ${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt --peerAddresses localhost:11051 --tlsRootCertFiles ${PWD}/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt -c '{"function":"InitLedger","Args":[]}' You can query the chaincode to ensure that the Org3 peer committed the data. .. code:: bash - peer chaincode query -C mychannel -n basic -c '{"Args":["GetAllAssets"]}' + peer chaincode query -C channel1 -n basic -c '{"Args":["GetAllAssets"]}' You should see the initial list of assets that were added to the ledger as a response. 
@@ -713,42 +689,34 @@ directly discover an Org3 peer. In this section, we will make a channel configuration update to define an Org3 anchor peer. The process will be similar to the previous configuration update, therefore we'll go faster this time. -If you don't have it open, exec back into the Org3CLI container: - -.. code:: bash - - docker exec -it Org3cli bash - -Export the $ORDERER_CA and $CHANNEL_NAME variables if they are not already set: - -.. code:: bash - - export ORDERER_CA=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem - export CHANNEL_NAME=mychannel - As before, we will fetch the latest channel configuration to get started. -Inside the Org3CLI container, fetch the most recent config block for the channel, -using the ``peer channel fetch`` command. +Fetch the most recent config block for the channel, using the ``peer channel fetch`` command. .. code:: bash - peer channel fetch config config_block.pb -o orderer.example.com:7050 -c $CHANNEL_NAME --tls --cafile $ORDERER_CA + peer channel fetch config channel-artifacts/config_block.pb -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem After fetching the config block we will want to convert it into JSON format. To do this we will use the configtxlator tool, as done previously when adding Org3 to the -channel. When converting it we need to remove all the headers, metadata, and signatures -that are not required to update Org3 to include an anchor peer by using the jq +channel. First, change into the ``channel-artifacts`` folder: + +.. 
code:: bash + cd channel-artifacts + +When converting it we need to remove all the headers, metadata, and signatures +that are not required to update Org3 to include an anchor peer by using the ``jq`` tool. This information will be reincorporated later before we proceed to update the channel configuration. .. code:: bash - configtxlator proto_decode --input config_block.pb --type common.Block | jq .data.data[0].payload.data.config > config.json + configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json + jq .data.data[0].payload.data.config config_block.json > config.json The ``config.json`` is the now trimmed JSON representing the latest channel configuration that we will update. -Using the jq tool again, we will update the configuration JSON with the Org3 anchor peer we +Using the ``jq`` tool again, we will update the configuration JSON with the Org3 anchor peer we want to add. .. code:: bash @@ -775,24 +743,24 @@ Calculate the delta between the two protobuf formatted configurations. .. code:: bash - configtxlator compute_update --channel_id $CHANNEL_NAME --original config.pb --updated modified_anchor_config.pb --output anchor_update.pb + configtxlator compute_update --channel_id channel1 --original config.pb --updated modified_anchor_config.pb --output anchor_update.pb Now that we have the desired update to the channel we must wrap it in an envelope message so that it can be properly read. To do this we must first convert the protobuf back into a JSON that can be wrapped. -We will use the configtxlator command again to convert ``anchor_update.pb`` into ``anchor_update.json`` +We will use the ``configtxlator`` command again to convert ``anchor_update.pb`` into ``anchor_update.json`` .. code:: bash - configtxlator proto_decode --input anchor_update.pb --type common.ConfigUpdate | jq . 
> anchor_update.json + configtxlator proto_decode --input anchor_update.pb --type common.ConfigUpdate --output anchor_update.json Next we will wrap the update in an envelope message, restoring the previously stripped away header, outputting it to ``anchor_update_in_envelope.json`` .. code:: bash - echo '{"payload":{"header":{"channel_header":{"channel_id":"'$CHANNEL_NAME'", "type":2}},"data":{"config_update":'$(cat anchor_update.json)'}}}' | jq . > anchor_update_in_envelope.json + echo '{"payload":{"header":{"channel_header":{"channel_id":"channel1", "type":2}},"data":{"config_update":'$(cat anchor_update.json)'}}}' | jq . > anchor_update_in_envelope.json Now that we have reincorporated the envelope we need to convert it to a protobuf so it can be properly signed and submitted to the orderer for the update. @@ -801,8 +769,15 @@ to a protobuf so it can be properly signed and submitted to the orderer for the configtxlator proto_encode --input anchor_update_in_envelope.json --type common.Envelope --output anchor_update_in_envelope.pb -Now that the update has been properly formatted it is time to sign off and submit it. Since this -is only an update to Org3 we only need to have Org3 sign off on the update. Run the following +Now that the update has been properly formatted it is time to sign off and submit it. + +Navigate back to the ``test-network`` directory: + +.. code:: bash + cd .. + + +Since this is only an update to Org3 we only need to have Org3 sign off on the update. Run the following commands to make sure that we are operating as the Org3 admin: .. 
code:: bash @@ -810,16 +785,16 @@ commands to make sure that we are operating as the Org3 admin: # you can issue all of these commands at once export CORE_PEER_LOCALMSPID="Org3MSP" - export CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt - export CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/organizations/peerOrganizations/org3.example.com/users/Admin@org3.example.com/msp - export CORE_PEER_ADDRESS=peer0.org3.example.com:11051 + export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/tls/ca.crt + export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org3.example.com/users/Admin@org3.example.com/msp + export CORE_PEER_ADDRESS=localhost:11051 We can now just use the ``peer channel update`` command to sign the update as the Org3 admin before submitting it to the orderer. .. code:: bash - peer channel update -f anchor_update_in_envelope.pb -c $CHANNEL_NAME -o orderer.example.com:7050 --tls --cafile $ORDERER_CA + peer channel update -f channel-artifacts/anchor_update_in_envelope.pb -c channel1 -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem The orderer receives the config update request and cuts a block with the updated configuration. As peers receive the block, they will process the configuration updates. @@ -834,9 +809,9 @@ that the configuration update has been successfully applied! .. 
code:: bash - 2019-06-12 17:08:57.924 UTC [gossip.gossip] learnAnchorPeers -> INFO 89a Learning about the configured anchor peers of Org1MSP for channel mychannel : [{peer0.org1.example.com 7051}] - 2019-06-12 17:08:57.926 UTC [gossip.gossip] learnAnchorPeers -> INFO 89b Learning about the configured anchor peers of Org2MSP for channel mychannel : [{peer0.org2.example.com 9051}] - 2019-06-12 17:08:57.926 UTC [gossip.gossip] learnAnchorPeers -> INFO 89c Learning about the configured anchor peers of Org3MSP for channel mychannel : [{peer0.org3.example.com 11051}] + 2021-01-07 19:07:01.244 UTC [gossip.gossip] learnAnchorPeers -> INFO 05a Learning about the configured anchor peers of Org1MSP for channel channel1: [{peer0.org1.example.com 7051}] + 2021-01-07 19:07:01.243 UTC [gossip.gossip] learnAnchorPeers -> INFO 05b Learning about the configured anchor peers of Org2MSP for channel channel1: [{peer0.org2.example.com 9051}] + 2021-01-07 19:07:01.244 UTC [gossip.gossip] learnAnchorPeers -> INFO 05c Learning about the configured anchor peers of Org3MSP for channel channel1: [{peer0.org3.example.com 11051}] Congratulations, you have now made two configuration updates --- one to add Org3 to the channel, and a second to define an anchor peer for Org3. diff --git a/docs/source/commands/osnadminchannel.md b/docs/source/commands/osnadminchannel.md index 31cef838872..0b77d0764a8 100644 --- a/docs/source/commands/osnadminchannel.md +++ b/docs/source/commands/osnadminchannel.md @@ -27,7 +27,7 @@ Channel actions Flags: --help Show context-sensitive help (also try --help-long and --help-man). 
- -o, --orderer-address=ORDERER-ADDRESS + -o, --orderer-address=ORDERER-ADDRESS Admin endpoint of the OSN --ca-file=CA-FILE Path to file containing PEM-encoded TLS CA certificate(s) for the OSN @@ -39,23 +39,23 @@ Flags: OSN Subcommands: - channel join --channel-id=CHANNEL-ID --config-block=CONFIG-BLOCK + channel join --channelID=CHANNELID --config-block=CONFIG-BLOCK Join an Ordering Service Node (OSN) to a channel. If the channel does not yet exist, it will be created. channel list [] List channel information for an Ordering Service Node (OSN). If the - channel-id flag is set, more detailed information will be provided for that + channelID flag is set, more detailed information will be provided for that channel. - channel remove --channel-id=CHANNEL-ID + channel remove --channelID=CHANNELID Remove an Ordering Service Node (OSN) from a channel. ``` ## osnadmin channel join ``` -usage: osnadmin channel join --channel-id=CHANNEL-ID --config-block=CONFIG-BLOCK +usage: osnadmin channel join --channelID=CHANNELID --config-block=CONFIG-BLOCK Join an Ordering Service Node (OSN) to a channel. If the channel does not yet exist, it will be created. @@ -63,7 +63,7 @@ exist, it will be created. Flags: --help Show context-sensitive help (also try --help-long and --help-man). - -o, --orderer-address=ORDERER-ADDRESS + -o, --orderer-address=ORDERER-ADDRESS Admin endpoint of the OSN --ca-file=CA-FILE Path to file containing PEM-encoded TLS CA certificate(s) for the OSN @@ -73,8 +73,8 @@ Flags: --client-key=CLIENT-KEY Path to file containing PEM-encoded private key to use for mutual TLS communication with the OSN - -c, --channel-id=CHANNEL-ID Channel ID - -b, --config-block=CONFIG-BLOCK + -c, --channelID=CHANNELID Channel ID + -b, --config-block=CONFIG-BLOCK Path to the file containing an up-to-date config block for the channel ``` @@ -84,13 +84,13 @@ Flags: ``` usage: osnadmin channel list [] -List channel information for an Ordering Service Node (OSN). 
If the channel-id +List channel information for an Ordering Service Node (OSN). If the channelID flag is set, more detailed information will be provided for that channel. Flags: --help Show context-sensitive help (also try --help-long and --help-man). - -o, --orderer-address=ORDERER-ADDRESS + -o, --orderer-address=ORDERER-ADDRESS Admin endpoint of the OSN --ca-file=CA-FILE Path to file containing PEM-encoded TLS CA certificate(s) for the OSN @@ -100,20 +100,20 @@ Flags: --client-key=CLIENT-KEY Path to file containing PEM-encoded private key to use for mutual TLS communication with the OSN - -c, --channel-id=CHANNEL-ID Channel ID + -c, --channelID=CHANNELID Channel ID ``` ## osnadmin channel remove ``` -usage: osnadmin channel remove --channel-id=CHANNEL-ID +usage: osnadmin channel remove --channelID=CHANNELID Remove an Ordering Service Node (OSN) from a channel. Flags: --help Show context-sensitive help (also try --help-long and --help-man). - -o, --orderer-address=ORDERER-ADDRESS + -o, --orderer-address=ORDERER-ADDRESS Admin endpoint of the OSN --ca-file=CA-FILE Path to file containing PEM-encoded TLS CA certificate(s) for the OSN @@ -123,7 +123,7 @@ Flags: --client-key=CLIENT-KEY Path to file containing PEM-encoded private key to use for mutual TLS communication with the OSN - -c, --channel-id=CHANNEL-ID Channel ID + -c, --channelID=CHANNELID Channel ID ``` ## Example Usage @@ -132,13 +132,13 @@ Flags: Here's an example of the `osnadmin channel join` command. -* Create and join a sample channel `mychannel` defined by the application channel genesis +* Create and join a sample channel `mychannel` defined by the application channel genesis block contained in file `mychannel-genesis-block.pb`. Use the orderer admin endpoint at `orderer.example.com:9443`. 
``` - osnadmin channel join -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channel-id mychannel --config-block mychannel-genesis-block.pb + osnadmin channel join -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channelID mychannel --config-block mychannel-genesis-block.pb Status: 201 { @@ -162,7 +162,7 @@ Here are some examples of the `osnadmin channel list` command. system channel (if one exists) and all of the application channels. ``` - osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY + osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY Status: 200 { @@ -177,12 +177,12 @@ Here are some examples of the `osnadmin channel list` command. ``` - Status 200 and the list of channels are returned. + Status 200 and the list of channels are returned. -* Using the `--channel-id` flag to list more details for `mychannel`. +* Using the `--channelID` flag to list more details for `mychannel`. ``` - osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channel-id mychannel + osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channelID mychannel Status: 200 { @@ -195,7 +195,7 @@ Here are some examples of the `osnadmin channel list` command. ``` - Status 200 and the details of the channels are returned. + Status 200 and the details of the channels are returned. ### osnadmin channel remove example @@ -204,11 +204,11 @@ Here's an example of the `osnadmin channel remove` command. * Removing channel `mychannel` from the orderer at `orderer.example.com:9443`. 
``` - osnadmin channel remove -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channel-id mychannel + osnadmin channel remove -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channelID mychannel Status: 204 ``` - Status 204 is returned upon successful removal of a channel. + Status 204 is returned upon successful removal of a channel. Creative Commons License
This work is licensed under a Creative Commons Attribution 4.0 International License. diff --git a/docs/source/commands/peerchaincode.md b/docs/source/commands/peerchaincode.md index 9e554bfc4c3..c4a9942f8d3 100644 --- a/docs/source/commands/peerchaincode.md +++ b/docs/source/commands/peerchaincode.md @@ -23,6 +23,18 @@ different chaincode operations that are relevant to a peer. For example, use the the `peer chaincode query` subcommand option to query a chaincode for the current value on a peer's ledger. +Some subcommands take flag `--ctor`, of which the value must be a JSON string +that has either key 'Args' or 'Function' and 'Args'. These keys are +case-insensitive. + +If the JSON string only has the Args key, the key value is an array, where the +first array element is the target function to call, and the subsequent elements +are arguments of the function. If the JSON string has both 'Function' and +'Args', the value of Function is the target function to call, and the value of +Args is an array of arguments of the function. For instance, +`{"Args":["GetAllAssets"]}` is equivalent to +`{"Function":"GetAllAssets", "Args":[]}`. + Each peer chaincode subcommand is described together with its options in its own section in this topic. diff --git a/docs/source/commands/peerlifecycle.md b/docs/source/commands/peerlifecycle.md index ec48e73a2bc..132f83e7311 100644 --- a/docs/source/commands/peerlifecycle.md +++ b/docs/source/commands/peerlifecycle.md @@ -595,7 +595,7 @@ also outputs which organizations have approved the chaincode definition. If an organization has approved the chaincode definition specified in the command, the command will return a value of true. You can use this command to learn whether enough channel members have approved a chaincode definition to meet the -`Application/Channel/Endorsement` policy (a majority by default) before the +`/Channel/Application/Endorsement` policy (a majority by default) before the definition can be committed to a channel. 
* Here is an example of the `peer lifecycle chaincode checkcommitreadiness` command, diff --git a/docs/source/config_update.md b/docs/source/config_update.md index f26c85abcf5..c4ca4fa4773 100644 --- a/docs/source/config_update.md +++ b/docs/source/config_update.md @@ -2,6 +2,8 @@ *Audience: network administrators, node administrators* +Note: this topic describes the process for updating a channel on a network that does not have a system channel. For a version of this topic that includes information about the system channel, check out [Updating a channel configuration](https://hyperledger-fabric.readthedocs.io/en/release-2.2/config_update.html). + ## What is a channel configuration? Like many complex systems, Hyperledger Fabric networks are comprised of both **structure** and a number related of **processes**. @@ -9,7 +11,7 @@ Like many complex systems, Hyperledger Fabric networks are comprised of both **s * **Structure**: encompassing users (like admins), organizations, peers, ordering nodes, CAs, smart contracts, and applications. * **Process**: the way these structures interact. Most important of these are [Policies](./policies/policies.html), the rules that govern which users can do what, and under what conditions. -Information identifying the structure of blockchain networks and the processes governing how structures interact are contained in **channel configurations**. These configurations are collectively decided upon by the members of channels and are contained in blocks that are committed to the ledger of a channel. Channel configurations can be built using a tool called `configtxgen`, which uses a `configtx.yaml` file as its input. You can look at a [sample `configtx.yaml` file here](http://github.com/hyperledger/fabric/blob/release-2.0/sampleconfig/configtx.yaml). +Information identifying the structure of blockchain networks and the processes governing how structures interact are contained in **channel configurations**. 
These configurations are collectively decided upon by the members of channels and are contained in blocks that are committed to the ledger of a channel. Channel configurations can be built using a tool called `configtxgen`, which uses a `configtx.yaml` file as its input. You can look at a [sample `configtx.yaml` file here](http://github.com/hyperledger/fabric/blob/{BRANCH}/sampleconfig/configtx.yaml). Because configurations are contained in blocks (the first of these is known as the genesis block with the latest representing the current configuration of the channel), the process for updating a channel configuration (changing the structure by adding members, for example, or processes by modifying channel policies) is known as a **configuration update transaction**. @@ -33,11 +35,9 @@ In this section, we'll look a sample channel configuration and show the configur To see what the configuration file of an application channel looks like after it has been pulled and scoped, click **Click here to see the config** below. For ease of readability, it might be helpful to put this config into a viewer that supports JSON folding, like atom or Visual Studio. -Note: for simplicity, we are only showing an application channel configuration here. The configuration of the orderer system channel is very similar, but not identical, to the configuration of an application channel. However, the same basic rules and structure apply, as do the commands to pull and edit a configuration, as you can see in our topic on [Updating the capability level of a channel](./updating_capabilities.html). -
- **Click here to see the config**. Note that this is the configuration of an application channel, not the orderer system channel. + **Click here to see the config** ``` { @@ -880,8 +880,6 @@ A config might look intimidating in this form, but once you study it you’ll se For example, let's take a look at the config with a few of the tabs closed. -Note that this is the configuration of an application channel, not the orderer system channel. - ![Sample config simplified](./images/sample_config.png) The structure of the config should now be more obvious. You can see the config groupings: `Channel`, `Application`, and `Orderer`, and the configuration parameters related to each config grouping (we'll talk more about these in the next section), but also where the MSPs representing organizations are. Note that the `Channel` config grouping is below the `Orderer` group config values. @@ -906,7 +904,7 @@ Governs the configuration parameters unique to application channels (for example #### `Channel/Orderer` -Governs configuration parameters unique to the ordering service or the orderer system channel, requires a majority of the ordering organizations’ admins (by default there is only one ordering organization, though more can be added, for example when multiple organizations contribute nodes to the ordering service). +Governs configuration parameters unique to the ordering service and requires a majority of the ordering organizations’ admins (by default there is only one ordering organization, though more can be added, for example when multiple organizations contribute nodes to the ordering service). * **Batch size**. These parameters dictate the number and size of transactions in a block. No block will appear larger than `absolute_max_bytes` large or with more than `max_message_count` transactions inside the block. 
If it is possible to construct a block under `preferred_max_bytes`, then a block will be cut prematurely, and transactions larger than this size will appear in their own block. @@ -930,14 +928,6 @@ Governs configuration parameters that both the peer orgs and the ordering servic * **Hashing algorithm**. The algorithm used for computing the hash values encoded into the blocks of the blockchain. In particular, this affects the data hash, and the previous block hash fields of the block. Note, this field currently only has one valid value (`SHA256`) and should not be changed. -#### System channel configuration parameters - -Certain configuration values are unique to the orderer system channel. - -* **Channel creation policy.** Defines the policy value which will be set as the mod_policy for the Application group of new channels for the consortium it is defined in. The signature set attached to the channel creation request will be checked against the instantiation of this policy in the new channel to ensure that the channel creation is authorized. Note that this config value is only set in the orderer system channel. - -* **Channel restrictions.** Only editable in the orderer system channel. The total number of channels the orderer is willing to allocate may be specified as `max_count`. This is primarily useful in pre-production environments with weak consortium `ChannelCreation` policies. - ## Editing a config Updating a channel configuration is a three step operation that's conceptually simple: @@ -951,13 +941,13 @@ However, as you'll see, this conceptual simplicity is wrapped in a somewhat conv We have two tutorials that deal specifically with editing a channel configuration to achieve a specific end: * [Adding an Org to a Channel](./channel_update_tutorial.html): shows the process for adding an additional organization to an existing channel. -* [Updating channel capabilities](./updating_a_channel.html): shows how to update channel capabilities. 
+* [Updating channel capabilities](./updating_capabilities.html): shows how to update channel capabilities. In this topic, we'll show the process of editing a channel configuration independent of the end goal of the configuration update. ### Set environment variables for your config update -Before you attempt to use the sample commands, make sure to export the following environment variables, which will depend on the way you have structured your deployment. Note that the channel name, `CH_NAME` will have to be set for every channel being updated, as channel configuration updates only apply to the configuration of the channel being updated (with the exception of the ordering system channel, whose configuration is copied into the configuration of application channels by default). +Before you attempt to use the sample commands, make sure to export the following environment variables, which will depend on the way you have structured your deployment. Note that the channel name, `CH_NAME` will have to be set for every channel being updated, as channel configuration updates only apply to the configuration of the channel being updated. * `CH_NAME`: the name of the channel being updated. * `TLS_ROOT_CA`: the path to the root CA cert of the TLS CA of the organization proposing the update. @@ -1004,7 +994,7 @@ At this point, you have two options of how you want to modify the config. 1. Open ``modified_config.json`` using the text editor of your choice and make edits. Online tutorials exist that describe how to copy a file from a container that does not have an editor, edit it, and add it back to the container. 2. Use ``jq`` to apply edits to the config. -Whether you choose to edit the config manually or using `jq` depends on your use case. Because `jq` is concise and scriptable (an advantage when the same configuration update will be made to multiple channels), it's the recommend method for performing a channel update. 
For an example on how `jq` can be used, check out [Updating channel capabilities](./updating_a_channel.html#Create-a-capabilities-config-file), which shows multiple `jq` commands leveraging a capabilities config file called `capabilities.json`. If you are updating something other than the capabilities in your channel, you will have to modify your `jq` command and JSON file accordingly. +Whether you choose to edit the config manually or using `jq` depends on your use case. Because `jq` is concise and scriptable (an advantage when the same configuration update will be made to multiple channels), it's the recommend method for performing a channel update. For an example on how `jq` can be used, check out [Updating channel capabilities](./updating_capabilities.html#Create-a-capabilities-config-file), which shows multiple `jq` commands leveraging a capabilities config file called `capabilities.json`. If you are updating something other than the capabilities in your channel, you will have to modify your `jq` command and JSON file accordingly. For more information about the content and structure of a channel configuration, check out our [sample channel config](#Sample-channel-configuration) above. diff --git a/docs/source/configtx.rst b/docs/source/configtx.rst index 29412ea3fc5..fe8ff2bcdb2 100644 --- a/docs/source/configtx.rst +++ b/docs/source/configtx.rst @@ -1,6 +1,13 @@ Channel Configuration (configtx) ================================ +.. note:: This topic describes how channels are configured when the network has + not been bootstrapped using a system channel genesis block. For + information about the structure of configurations, including the + configuration of the system channel, check out + `Channel Configuration (configtx) `_ + from the v2.2 documentation. + Shared configuration for a Hyperledger Fabric blockchain network is stored in a collection configuration transactions, one per channel. 
Each configuration transaction is usually referred to by the shorter name @@ -333,88 +340,8 @@ with different names. }, } -Orderer system channel configuration ------------------------------------- - -The ordering system channel needs to define ordering parameters, and -consortiums for creating channels. There must be exactly one ordering -system channel for an ordering service, and it is the first channel to -be created (or more accurately bootstrapped). It is recommended never to -define an Application section inside of the ordering system channel -genesis configuration, but may be done for testing. Note that any member -with read access to the ordering system channel may see all channel -creations, so this channel's access should be restricted. - -The ordering parameters are defined as the following subset of config: - -:: - - &ConfigGroup{ - Groups: map { - "Orderer":&ConfigGroup{ - Groups:map { - {{org_name}}:&ConfigGroup{ - Values:map{ - "MSP":msp.MSPConfig, - }, - }, - }, - - Values:map { - "ConsensusType":orderer.ConsensusType, - "BatchSize":orderer.BatchSize, - "BatchTimeout":orderer.BatchTimeout, - "KafkaBrokers":orderer.KafkaBrokers, - }, - }, - }, - -Each organization participating in ordering has a group element under -the ``Orderer`` group. This group defines a single parameter ``MSP`` -which contains the cryptographic identity information for that -organization. The ``Values`` of the ``Orderer`` group determine how the -ordering nodes function. They exist per channel, so -``orderer.BatchTimeout`` for instance may be specified differently on -one channel than another. - -At startup, the orderer is faced with a filesystem which contains -information for many channels. The orderer identifies the system channel -by identifying the channel with the consortiums group defined. The -consortiums group has the following structure. 
- -:: - - &ConfigGroup{ - Groups: map { - "Consortiums":&ConfigGroup{ - Groups:map { - {{consortium_name}}:&ConfigGroup{ - Groups:map { - {{org_name}}:&ConfigGroup{ - Values:map{ - "MSP":msp.MSPConfig, - }, - }, - }, - Values:map { - "ChannelCreationPolicy":common.Policy, - } - }, - }, - }, - }, - }, - -Note that each consortium defines a set of members, just like the -organizational members for the ordering orgs. Each consortium also -defines a ``ChannelCreationPolicy``. This is a policy which is applied -to authorize channel creation requests. Typically, this value will be -set to an ``ImplicitMetaPolicy`` requiring that the new members of the -channel sign to authorize the channel creation. More details about -channel creation follow later in this document. - -Application channel configuration ---------------------------------- +Channel configuration +--------------------- Application configuration is for channels which are designed for application type transactions. It is defined as follows: @@ -442,48 +369,10 @@ information, each org additionally encodes a list of ``AnchorPeers``. This list allows the peers of different organizations to contact each other for peer gossip networking. -The application channel encodes a copy of the orderer orgs and consensus -options to allow for deterministic updating of these parameters, so the -same ``Orderer`` section from the orderer system channel configuration -is included. However from an application perspective this may be largely -ignored. - Channel creation ---------------- -When the orderer receives a ``CONFIG_UPDATE`` for a channel which does -not exist, the orderer assumes that this must be a channel creation -request and performs the following. - -1. The orderer identifies the consortium which the channel creation - request is to be performed for. It does this by looking at the - ``Consortium`` value of the top level group. -2. 
The orderer verifies that the organizations included in the - ``Application`` group are a subset of the organizations included in - the corresponding consortium and that the ``ApplicationGroup`` is set - to ``version`` ``1``. -3. The orderer verifies that if the consortium has members, that the new - channel also has application members (creation consortiums and - channels with no members is useful for testing only). -4. The orderer creates a template configuration by taking the - ``Orderer`` group from the ordering system channel, and creating an - ``Application`` group with the newly specified members and specifying - its ``mod_policy`` to be the ``ChannelCreationPolicy`` as specified - in the consortium config. Note that the policy is evaluated in the - context of the new configuration, so a policy requiring ``ALL`` - members, would require signatures from all the new channel members, - not all the members of the consortium. -5. The orderer then applies the ``CONFIG_UPDATE`` as an update to this - template configuration. Because the ``CONFIG_UPDATE`` applies - modifications to the ``Application`` group (its ``version`` is - ``1``), the config code validates these updates against the - ``ChannelCreationPolicy``. If the channel creation contains any other - modifications, such as to an individual org's anchor peers, the - corresponding mod policy for the element will be invoked. -6. The new ``CONFIG`` transaction with the new channel config is wrapped - and sent for ordering on the ordering system channel. After ordering, - the channel is created. +For information about how to create a channel, check out :doc:`create_channel/create_channel_participation`. .. 
Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ - diff --git a/docs/source/couchdb_as_state_database.rst b/docs/source/couchdb_as_state_database.rst index ec1d5350f28..5375df47b85 100644 --- a/docs/source/couchdb_as_state_database.rst +++ b/docs/source/couchdb_as_state_database.rst @@ -8,7 +8,7 @@ The current options for the peer state database are LevelDB and CouchDB. LevelDB key-value state database embedded in the peer process. CouchDB is an alternative external state database. Like the LevelDB key-value store, CouchDB can store any binary data that is modeled in chaincode (CouchDB attachments are used internally for non-JSON data). As a document object store, -CouchDB allows you to store data in JSON format, issue rich queries against your data, +CouchDB allows you to store data in JSON format, issue JSON queries against your data, and use indexes to support your queries. Both LevelDB and CouchDB support core chaincode operations such as getting and setting a key @@ -18,7 +18,7 @@ key of ``owner,asset_id`` can be used to query all assets owned by a certain ent queries can be used for read-only queries against the ledger, as well as in transactions that update the ledger. -Modeling your data in JSON allows you to issue rich queries against the values of your data, +Modeling your data in JSON allows you to issue JSON queries against the values of your data, instead of only being able to query the keys. This makes it easier for your applications and chaincode to read the data stored on the blockchain ledger. Using CouchDB can help you meet auditing and reporting requirements for many use cases that are not supported by LevelDB. If you use @@ -27,10 +27,8 @@ Using indexes makes queries more flexible and efficient and enables you to query datasets from chaincode. 
CouchDB runs as a separate database process alongside the peer, therefore there are additional -considerations in terms of setup, management, and operations. You may consider starting with the -default embedded LevelDB, and move to CouchDB if you require the additional complex rich queries. -It is a good practice to model asset data as JSON, so that you have the option to perform -complex rich queries if needed in the future. +considerations in terms of setup, management, and operations. It is a good practice to model +asset data as JSON, so that you have the option to perform complex JSON queries if needed in the future. .. note:: The key for a CouchDB JSON document can only contain valid UTF-8 strings and cannot begin with an underscore ("_"). Whether you are using CouchDB or LevelDB, you should avoid using @@ -42,34 +40,50 @@ complex rich queries if needed in the future. - ``Any field beginning with an underscore, "_"`` - ``~version`` + Because of these data incompatibilities between LevelDB and CouchDB, the database choice + must be finalized prior to deploying a production peer. The database cannot be converted at a + later time. + Using CouchDB from Chaincode ---------------------------- +Reading and writing JSON data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When writing JSON data values to CouchDB (e.g. using ``PutState``) and reading +JSON back in later chaincode requests (e.g. using ``GetState``), the format of the JSON and +the order of the JSON fields are not guaranteed, based on the JSON specification. Your chaincode +should therefore unmarshall the JSON before working with the data. Similarly, when marshaling +JSON, utilize a library that guarantees deterministic results, so that proposed chaincode writes +and responses to clients will be identical across endorsing peers (note that Go ``json.Marshal()`` +does in fact sort keys deterministically, but in other languages you may need to utilize a canonical +JSON library). 
+ Chaincode queries ~~~~~~~~~~~~~~~~~ Most of the `chaincode shim APIs `__ can be utilized with either LevelDB or CouchDB state database, e.g. ``GetState``, ``PutState``, ``GetStateByRange``, ``GetStateByPartialCompositeKey``. Additionally when you utilize CouchDB as -the state database and model assets as JSON in chaincode, you can perform rich queries against -the JSON in the state database by using the ``GetQueryResult`` API and passing a CouchDB query string. +the state database and model assets as JSON in chaincode, you can perform JSON queries against +the data in the state database by using the ``GetQueryResult`` API and passing a CouchDB query string. The query string follows the `CouchDB JSON query syntax `__. -The `marbles02 fabric sample `__ -demonstrates use of CouchDB queries from chaincode. It includes a ``queryMarblesByOwner()`` function +The `asset transfer Fabric sample `__ +demonstrates use of CouchDB queries from chaincode. It includes a ``queryAssetsByOwner()`` function that demonstrates parameterized queries by passing an owner id into chaincode. It then queries the -state data for JSON documents matching the docType of “marble” and the owner id using the JSON query +state data for JSON documents matching the docType of "asset" and the owner id using the JSON query syntax: .. code:: bash - {"selector":{"docType":"marble","owner":}} + {"selector":{"docType":"asset","owner":}} -The responses to rich queries are useful for understanding the data on the ledger. However, -there is no guarantee that the result set for a rich query will be stable between -the chaincode execution and commit time. As a result, you should not use a rich query and +The responses to JSON queries are useful for understanding the data on the ledger. However, +there is no guarantee that the result set for a JSON query will be stable between +the chaincode execution and commit time. 
As a result, you should not use a JSON query and update the channel ledger in a single transaction. For example, if you perform a -rich query for all assets owned by Alice and transfer them to Bob, a new asset may +JSON query for all assets owned by Alice and transfer them to Bob, a new asset may be assigned to Alice by another transaction between chaincode execution time and commit time. @@ -79,9 +93,9 @@ and commit time. CouchDB pagination ^^^^^^^^^^^^^^^^^^ -Fabric supports paging of query results for rich queries and range based queries. +Fabric supports paging of query results for JSON queries and key range based queries. APIs supporting pagination allow the use of page size and bookmarks to be used for -both range and rich queries. To support efficient pagination, the Fabric +both key range and JSON queries. To support efficient pagination, the Fabric pagination APIs must be used. Specifically, the CouchDB ``limit`` keyword will not be honored in CouchDB queries since Fabric itself manages the pagination of query results and implicitly sets the pageSize limit that is passed to CouchDB. @@ -118,7 +132,7 @@ any JSON query with a sort. Indexes enable you to query data from chaincode when a large amount of data on your ledger. Indexes can be packaged alongside chaincode in a ``/META-INF/statedb/couchdb/indexes`` directory. Each index must be defined in its own text file with extension ``*.json`` with the index definition formatted in JSON -following the `CouchDB index JSON syntax `__. +following the `CouchDB index JSON syntax `__. For example, to support the above marble query, a sample index on the ``docType`` and ``owner`` fields is provided: @@ -126,7 +140,7 @@ fields is provided: {"index":{"fields":["docType","owner"]},"ddoc":"indexOwnerDoc", "name":"indexOwner","type":"json"} -The sample index can be found `here `__. +The sample index can be found `here `__. 
Any index in the chaincode’s ``META-INF/statedb/couchdb/indexes`` directory will be packaged up with the chaincode for deployment. The index will be deployed @@ -160,7 +174,7 @@ CouchDB Configuration CouchDB is enabled as the state database by changing the ``stateDatabase`` configuration option from goleveldb to CouchDB. Additionally, the ``couchDBAddress`` needs to configured to point to the CouchDB to be used by the peer. The username and password properties should be populated with -an admin username and password if CouchDB is configured with a username and password. Additional +an admin username and password. Additional options are provided in the ``couchDBConfig`` section and are documented in place. Changes to the *core.yaml* will be effective immediately after restarting the peer. @@ -221,7 +235,7 @@ variables using Docker Compose scripting. For CouchDB installations outside of the docker images supplied with Fabric, the `local.ini file of that installation -`__ +`__ must be edited to set the admin username and password. Docker compose scripts only set the username and password at the creation of diff --git a/docs/source/couchdb_tutorial.rst b/docs/source/couchdb_tutorial.rst index 7b95adfc2b7..f2c331c782a 100644 --- a/docs/source/couchdb_tutorial.rst +++ b/docs/source/couchdb_tutorial.rst @@ -311,7 +311,7 @@ Run the following command to deploy the smart contract to `mychannel`: .. code:: bash - ./network.sh deployCC -ccn ledger -ccep "OR('Org1MSP.peer','Org2MSP.peer')" + ./network.sh deployCC -ccn ledger -ccp ../asset-transfer-ledger-queries/chaincode-go/ -ccl go -ccep "OR('Org1MSP.peer','Org2MSP.peer')" Note that we are using the `-ccep` flag to deploy the smart contract with an endorsement policy of `"OR('Org1MSP.peer','Org2MSP.peer')"`. This allows either organization to create an asset without @@ -517,7 +517,7 @@ previous example. A query that does not include all fields in the index will have to scan the full database instead. 
For example, the query below searches for the owner, without -specifying the type of item owned. Since the ownerIndexDoc contains both +specifying the type of item owned. Since the indexOwnerDoc contains both the ``owner`` and ``docType`` fields, this query will not be able to use the index. diff --git a/docs/source/create_channel/create_channel.md b/docs/source/create_channel/create_channel.md deleted file mode 100644 index 8034b71d343..00000000000 --- a/docs/source/create_channel/create_channel.md +++ /dev/null @@ -1,396 +0,0 @@ -# Create a channel with a system channel (legacy) - -You can use this tutorial to learn how to create new channels using the [configtxgen](../commands/configtxgen.html) CLI tool and then use the [peer channel](../commands/peerchannel.html) commands to join a channel with your peers. While this tutorial will leverage the Fabric test network to create the new channel, the steps in this tutorial can also be used by network operators in a production environment. - -Fabric v2.3 introduces the capability to create a channel without requiring a system channel, removing an extra layer of administration from the process. Check out the [Create a channel without a system channel](create_channel_participation.html) tutorial to learn more about the steps required. - -In the process of creating the channel, this tutorial will take you through the following steps and concepts: - -- [Setting up the configtxgen tool](#setting-up-the-configtxgen-tool) -- [Using the configtx.yaml file](#the-configtx-yaml-file) -- [The orderer system channel](#the-orderer-system-channel) -- [Creating an application channel](#creating-an-application-channel) -- [Joining peers to the channel](#join-peers-to-the-channel) -- [Setting anchor peers](#set-anchor-peers) - -## Setting up the configtxgen tool - -Channels are created by building a channel creation transaction and submitting the transaction to the ordering service. 
The channel creation transaction specifies the initial configuration of the channel and is used by the ordering service to write the channel genesis block. While it is possible to build the channel creation transaction file manually, it is easier to use the [configtxgen](../commands/configtxgen.html) tool. The tool works by reading a `configtx.yaml` file that defines the configuration of your channel, and then writing the relevant information into the channel creation transaction. Before we discuss the `configtx.yaml` file in the next section, we can get started by downloading and setting up the `configtxgen` tool. - -You can download the `configtxgen` binaries by following the steps to [install the samples, binaries and Docker images](../install.html). `configtxgen` will be downloaded to the `bin` folder of your local clone of the `fabric-samples` repository along with other Fabric tools. - -For the purposes of this tutorial, we will want to operate from the `test-network` directory inside `fabric-samples`. Navigate to that directory using the following command: -``` -cd fabric-samples/test-network -``` -We will operate from the `test-network` directory for the remainder of the tutorial. Use the following command to add the configtxgen tool to your CLI path: -``` -export PATH=${PWD}/../bin:$PATH -``` - -In order to use `configtxgen`, you need to set the `FABRIC_CFG_PATH` environment variable to the path of the directory that contains your local copy of the `configtx.yaml` file. For this tutorial, we will reference the `configtx.yaml` used to setup the Fabric test network in the `configtx` folder: -``` -export FABRIC_CFG_PATH=${PWD}/configtx -``` - -You can check that you are able to use the tool by printing the `configtxgen` help text: -``` -configtxgen --help -``` - - -## The configtx.yaml file - -The `configtx.yaml` file specifies the **channel configuration** of new channels. 
The information that is required to build the channel configuration is specified in a readable and editable form in the `configtx.yaml` file. The `configtxgen` tool uses the channel profiles defined in the `configtx.yaml` file to create the channel configuration and write it to the [protobuf format](https://developers.google.com/protocol-buffers) that can be read by Fabric. - -You can find the `configtx.yaml` file that is used to deploy the test network in the `configtx` folder in the `test-network` directory. The file contains the following information that we will use to create our new channel: - -- **Organizations:** The organizations that can become members of your channel. Each organization has a reference to the cryptographic material that is used to build the [channel MSP](../membership/membership.html). -- **Ordering service:** Which ordering nodes will form the ordering service of the network, and the consensus method they will use to agree to a common order of transactions. The file also contains the organizations that will become the ordering service administrators. -- **Channel policies** Different sections of the file work together to define the policies that will govern how organizations interact with the channel and which organizations need to approve channel updates. For the purposes of this tutorial, we will use the default policies used by Fabric. -- **Channel profiles** Each channel profile references information from other sections of the `configtx.yaml` file to build a channel configuration. The profiles are used to create the genesis block of the orderer system channel and the channels that will be used by peer organizations. To distinguish them from the system channel, the channels used by peer organizations are often referred to as application channels. - - The `configtxgen` tool uses the `configtx.yaml` file to create a complete genesis block for the system channel. 
As a result, the system channel profile needs to specify the full system channel configuration. The channel profile used to create the channel creation transaction only needs to contain the additional configuration information required to create an application channel. - -You can visit the [Using configtx.yaml to create a channel genesis block](create_channel_config.html) tutorial to learn more about this file. For now, we will return to the operational aspects of creating the channel, though we will reference parts of this file in future steps. - -## Start the network - -We will use a running instance of the Fabric test network to create the new channel. For the sake of this tutorial, we want to operate from a known initial state. The following command will kill any active containers and remove any previously generated artifacts. Make sure that you are still operating from the `test-network` directory of your local clone of `fabric-samples`. -``` -./network.sh down -``` -You can then use the following command to start the test network: -``` -./network.sh up -``` -This command will create a Fabric network with the two peer organizations and the single ordering organization defined in the `configtx.yaml` file. The peer organizations will operate one peer each, while the ordering service administrator will operate a single ordering node. When you run the command, the script will print out logs of the nodes being created: -``` -Creating network "net_test" with the default driver -Creating volume "net_orderer.example.com" with default driver -Creating volume "net_peer0.org1.example.com" with default driver -Creating volume "net_peer0.org2.example.com" with default driver -Creating orderer.example.com ... done -Creating peer0.org2.example.com ... done -Creating peer0.org1.example.com ... 
done -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -8d0c74b9d6af hyperledger/fabric-orderer:latest "orderer" 4 seconds ago Up Less than a second 0.0.0.0:7050->7050/tcp orderer.example.com -ea1cf82b5b99 hyperledger/fabric-peer:latest "peer node start" 4 seconds ago Up Less than a second 0.0.0.0:7051->7051/tcp peer0.org1.example.com -cd8d9b23cb56 hyperledger/fabric-peer:latest "peer node start" 4 seconds ago Up 1 second 7051/tcp, 0.0.0.0:9051->9051/tcp peer0.org2.example.com -``` - -Our instance of the test network was deployed without creating an application channel. However, the test network script creates the system channel when you issue the `./network.sh up` command. Under the covers, the script uses the `configtxgen` tool and the `configtx.yaml` file to build the genesis block of the system channel. Because the system channel is used to create other channels, we need to take some time to understand the orderer system channel before we can create an application channel. - -## The orderer system channel - -The first channel that is created in a Fabric network is the system channel. The system channel defines the set of ordering nodes that form the ordering service and the set of organizations that serve as ordering service administrators. - -The system channel also includes the organizations that are members of blockchain [consortium](../glossary.html#consortium). -The consortium is a set of peer organizations that belong to the system channel, but are not administrators of the ordering service. Consortium members have the ability to create new channels and include other consortium organizations as channel members. - -The genesis block of the system channel is required to deploy a new ordering service. The test network script already created the system channel genesis block when you issued the `./network.sh up` command. 
The genesis block was used to deploy the single ordering node, which used the block to create the system channel and form the ordering service of the network. If you examine the output of the `./network.sh` script, you can find the command that created the genesis block in your logs: -``` -configtxgen -profile TwoOrgsOrdererGenesis -channelID system-channel -outputBlock ./system-genesis-block/genesis.block -``` - -The `configtxgen` tool used the `TwoOrgsOrdererGenesis` channel profile from `configtx.yaml` to write the genesis block and store it in the `system-genesis-block` folder. You can see the `TwoOrgsOrdererGenesis` profile below: -```yaml -TwoOrgsOrdererGenesis: - <<: *ChannelDefaults - Orderer: - <<: *OrdererDefaults - Organizations: - - *OrdererOrg - Capabilities: - <<: *OrdererCapabilities - Consortiums: - SampleConsortium: - Organizations: - - *Org1 - - *Org2 -``` - -The `Orderer:` section of the profile creates the single node Raft ordering service used by the test network, with the `OrdererOrg` as the ordering service administrator. The `Consortiums` section of the profile creates a consortium of peer organizations named `SampleConsortium:`. Both peer organizations, Org1 and Org2, are members of the consortium. As a result, we can include both organizations in new channels created by the test network. If we wanted to add another organization as a channel member without adding that organization to the consortium, we would first need to create the channel with Org1 and Org2, and then add the other organization by [updating the channel configuration](../channel_update_tutorial.html). - -## Creating an application channel - -Now that we have deployed the nodes of the network and created the orderer system channel using the `network.sh` script, we can start the process of creating a new channel for our peer organizations. We have already set the environment variables that are required to use the `configtxgen` tool. 
Run the following command to create a channel creation transaction for `channel1`: -``` -configtxgen -profile TwoOrgsChannel -outputCreateChannelTx ./channel-artifacts/channel1.tx -channelID channel1 -``` - -The `-channelID` will be the name of the future channel. Channel names must be all lower case, less than 250 characters long and match the regular expression ``[a-z][a-z0-9.-]*``. The command uses the `-profile` flag to reference the `TwoOrgsChannel:` profile from `configtx.yaml` that is used by the test network to create application channels: -```yaml -TwoOrgsChannel: - Consortium: SampleConsortium - <<: *ChannelDefaults - Application: - <<: *ApplicationDefaults - Organizations: - - *Org1 - - *Org2 - Capabilities: - <<: *ApplicationCapabilities -``` - -The profile references the name of the `SampleConsortium` from the system channel, and includes both peer organizations from the consortium as channel members. Because the system channel is used as a template to create the application channel, the ordering nodes defined in the system channel become the default [consenter set](../glossary.html#consenter-set) of the new channel, while the administrators of the ordering service become the orderer administrators of the channel. Ordering nodes and ordering organizations can be added or removed from the consenter set using channel updates. 
- -If the command is successful, you will see logs of `configtxgen` loading the `configtx.yaml` file and printing a channel creation transaction: -``` -2020-03-11 16:37:12.695 EDT [common.tools.configtxgen] main -> INFO 001 Loading configuration -2020-03-11 16:37:12.738 EDT [common.tools.configtxgen.localconfig] Load -> INFO 002 Loaded configuration: /Usrs/fabric-samples/test-network/configtx/configtx.yaml -2020-03-11 16:37:12.740 EDT [common.tools.configtxgen] doOutputChannelCreateTx -> INFO 003 Generating new channel configtx -2020-03-11 16:37:12.789 EDT [common.tools.configtxgen] doOutputChannelCreateTx -> INFO 004 Writing new channel tx -``` - -We can use the `peer` CLI to submit the channel creation transaction to the ordering service. To use the `peer` CLI, we need to set the `FABRIC_CFG_PATH` to the `core.yaml` file located in the `fabric-samples/config` directory. Set the `FABRIC_CFG_PATH` environment variable by running the following command: -``` -export FABRIC_CFG_PATH=$PWD/../config/ -``` - -Before the ordering service creates the channel, the ordering service will check the permission of the identity that submitted the request. By default, only admin identities of organizations that belong to the system channel consortium can create a new channel. 
Issue the commands below to operate the `peer` CLI as the admin user from Org1: -``` -export CORE_PEER_TLS_ENABLED=true -export CORE_PEER_LOCALMSPID="Org1MSP" -export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt -export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp -export CORE_PEER_ADDRESS=localhost:7051 -``` - -You can now create the channel by using the following command: -``` -peer channel create -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 -f ./channel-artifacts/channel1.tx --outputBlock ./channel-artifacts/channel1.block --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -``` - -The command above provides the path to the channel creation transaction file using the `-f` flag and uses the `-c` flag to specify the channel name. The `-o` flag is used to select the ordering node that will be used to create the channel. The `--cafile` is the path to the TLS certificate of the ordering node. When you run the `peer channel create` command, the `peer` CLI will generate the following response: -``` -2020-03-06 17:33:49.322 EST [channelCmd] InitCmdFactory -> INFO 00b Endorser and orderer connections initialized -2020-03-06 17:33:49.550 EST [cli.common] readBlock -> INFO 00c Received block: 0 -``` -Because we are using a Raft ordering service, you may get some status unavailable messages that you can safely ignore. The command will return the genesis block of the new channel to the location specified by the `--outputBlock` flag. - -## Join peers to the channel - -After the channel has been created, we can join the channel with our peers. 
Organizations that are members of the channel can fetch the channel genesis block from the ordering service using the [peer channel fetch](../commands/peerchannel.html#peer-channel-fetch) command. The organization can then use the genesis block to join the peer to the channel using the [peer channel join](../commands/peerchannel.html#peer-channel-join) command. Once the peer is joined to the channel, the peer will build the blockchain ledger by retrieving the other blocks on the channel from the ordering service. - -Since we are already operating the `peer` CLI as the Org1 admin, let's join the Org1 peer to the channel. Since Org1 submitted the channel creation transaction, we already have the channel genesis block on our file system. Join the Org1 peer to the channel using the command below. -``` -peer channel join -b ./channel-artifacts/channel1.block -``` - -The `CORE_PEER_ADDRESS` environment variable has been set to target ``peer0.org1.example.com``. A successful command will generate a response from ``peer0.org1.example.com`` joining the channel: -``` -2020-03-06 17:49:09.903 EST [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized -2020-03-06 17:49:10.060 EST [channelCmd] executeJoin -> INFO 002 Successfully submitted proposal to join channel -``` - -You can verify that the peer has joined the channel using the [peer channel getinfo](../commands/peerchannel.html#peer-channel-getinfo) command: -``` -peer channel getinfo -c channel1 -``` -The command will list the block height of the channel and the hash of the most recent block. Because the genesis block is the only block on the channel, the height of the channel will be 1: -``` -2020-03-13 10:50:06.978 EDT [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized -Blockchain info: {"height":1,"currentBlockHash":"kvtQYYEL2tz0kDCNttPFNC4e6HVUFOGMTIDxZ+DeNQM="} -``` - -We can now join the Org2 peer to the channel. 
Set the following environment variables to operate the `peer` CLI as the Org2 admin. The environment variables will also set the Org2 peer, ``peer0.org2.example.com``, as the target peer. -``` -export CORE_PEER_TLS_ENABLED=true -export CORE_PEER_LOCALMSPID="Org2MSP" -export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt -export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp -export CORE_PEER_ADDRESS=localhost:9051 -``` - -While we still have the channel genesis block on our file system, in a more realistic scenario, Org2 would have to fetch the block from the ordering service. As an example, we will use the `peer channel fetch` command to get the genesis block for Org2: -``` -peer channel fetch 0 ./channel-artifacts/channel_org2.block -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -``` - -The command uses `0` to specify that it needs to fetch the genesis block that is required to join the channel. If the command is successful, you should see the following output: -``` -2020-03-13 11:32:06.309 EDT [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized -2020-03-13 11:32:06.336 EDT [cli.common] readBlock -> INFO 002 Received block: 0 -``` - -The command returns the channel genesis block and names it `channel_org2.block` to distinguish it from the block pulled by org1. You can now use the block to join the Org2 peer to the channel: -``` -peer channel join -b ./channel-artifacts/channel_org2.block -``` - -## Set anchor peers - -After an organization has joined their peers to the channel, they should select at least one of their peers to become an anchor peer. 
[Anchor peers](../gossip.html#anchor-peers) are required in order to take advantage of features such as private data and service discovery. Each organization should set multiple anchor peers on a channel for redundancy. For more information about gossip and anchor peers, see the [Gossip data dissemination protocol](../gossip.html). - -The endpoint information of the anchor peers of each organization is included in the channel configuration. Each channel member can specify their anchor peers by updating the channel. We will use the [configtxlator](../commands/configtxlator.html) tool to update the channel configuration and select an anchor peer for Org1 and Org2. The process for setting an anchor peer is similar to the steps that are required to make other channel updates and provides an introduction to how to use `configtxlator` to [update a channel configuration](../config_update.html). You will also need to install the [jq tool](https://stedolan.github.io/jq/) on your local machine. - -We will start by selecting an anchor peer as Org1. The first step is to pull the most recent channel configuration block using the `peer channel fetch` command. 
Set the following environment variables to operate the `peer` CLI as the Org1 admin: -``` -export FABRIC_CFG_PATH=$PWD/../config/ -export CORE_PEER_TLS_ENABLED=true -export CORE_PEER_LOCALMSPID="Org1MSP" -export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt -export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp -export CORE_PEER_ADDRESS=localhost:7051 -``` - -You can use the following command to fetch the channel configuration: -``` -peer channel fetch config channel-artifacts/config_block.pb -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -``` -Because the most recent channel configuration block is the channel genesis block, you will see the command return block 0 from the channel. -``` -2020-04-15 20:41:56.595 EDT [channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized -2020-04-15 20:41:56.603 EDT [cli.common] readBlock -> INFO 002 Received block: 0 -2020-04-15 20:41:56.603 EDT [channelCmd] fetch -> INFO 003 Retrieving last config block: 0 -2020-04-15 20:41:56.608 EDT [cli.common] readBlock -> INFO 004 Received block: 0 -``` - -The channel configuration block was stored in the `channel-artifacts` folder to keep the update process separate from other artifacts. Change into the `channel-artifacts` folder to complete the next steps: -``` -cd channel-artifacts -``` -We can now start using the `configtxlator` tool to start working with the channel configuration. The first step is to decode the block from protobuf into a JSON object that can be read and edited. We also strip away the unnecessary block data, leaving only the channel configuration. 
- -``` -configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json -jq .data.data[0].payload.data.config config_block.json > config.json -``` - -These commands convert the channel configuration block into a streamlined JSON, `config.json`, that will serve as the baseline for our update. Because we don't want to edit this file directly, we will make a copy that we can edit. We will use the original channel config in a future step. -``` -cp config.json config_copy.json -``` - -You can use the `jq` tool to add the Org1 anchor peer to the channel configuration. -``` -jq '.channel_group.groups.Application.groups.Org1MSP.values += {"AnchorPeers":{"mod_policy": "Admins","value":{"anchor_peers": [{"host": "peer0.org1.example.com","port": 7051}]},"version": "0"}}' config_copy.json > modified_config.json -``` - -After this step, we have an updated version of the channel configuration in JSON format in the `modified_config.json` file. We can now convert both the original and modified channel configurations back into protobuf format and calculate the difference between them. -``` -configtxlator proto_encode --input config.json --type common.Config --output config.pb -configtxlator proto_encode --input modified_config.json --type common.Config --output modified_config.pb -configtxlator compute_update --channel_id channel1 --original config.pb --updated modified_config.pb --output config_update.pb -``` - -The new protobuf named `config_update.pb` contains the anchor peer update that we need to apply to the channel configuration. We can wrap the configuration update in a transaction envelope to create the channel configuration update transaction. - -``` -configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate --output config_update.json -echo '{"payload":{"header":{"channel_header":{"channel_id":"channel1", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . 
> config_update_in_envelope.json -configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope --output config_update_in_envelope.pb -``` - -We can now use the final artifact, `config_update_in_envelope.pb`, that can be used to update the channel. Navigate back to the `test-network` directory: -``` -cd .. -``` - -We can add the anchor peer by providing the new channel configuration to the `peer channel update` command. Because we are updating a section of the channel configuration that only affects Org1, other channel members do not need to approve the channel update. -``` -peer channel update -f channel-artifacts/config_update_in_envelope.pb -c channel1 -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -``` - -When the channel update is successful, you should see the following response: -``` -2020-01-09 21:30:45.791 UTC [channelCmd] update -> INFO 002 Successfully submitted channel update -``` - -We can set the anchor peers for Org2. Because we are going through the process a second time, we will go through the steps more quickly. 
Set the environment variables to operate the `peer` CLI as the Org2 admin: -``` -export CORE_PEER_TLS_ENABLED=true -export CORE_PEER_LOCALMSPID="Org2MSP" -export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt -export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp -export CORE_PEER_ADDRESS=localhost:9051 -``` - -Pull the latest channel configuration block, which is now the second block on the channel: -``` -peer channel fetch config channel-artifacts/config_block.pb -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -``` - -Navigate back to the `channel-artifacts` directory: -``` -cd channel-artifacts -``` - -You can then decode and copy the configuration block. -``` -configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json -jq .data.data[0].payload.data.config config_block.json > config.json -cp config.json config_copy.json -``` - -Add the Org2 peer that is joined to the channel as the anchor peer in the channel configuration: -``` -jq '.channel_group.groups.Application.groups.Org2MSP.values += {"AnchorPeers":{"mod_policy": "Admins","value":{"anchor_peers": [{"host": "peer0.org2.example.com","port": 9051}]},"version": "0"}}' config_copy.json > modified_config.json -``` - -We can now convert both the original and updated channel configurations back into protobuf format and calculate the difference between them. 
-``` -configtxlator proto_encode --input config.json --type common.Config --output config.pb -configtxlator proto_encode --input modified_config.json --type common.Config --output modified_config.pb -configtxlator compute_update --channel_id channel1 --original config.pb --updated modified_config.pb --output config_update.pb -``` - -Wrap the configuration update in a transaction envelope to create the channel configuration update transaction: -``` -configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate --output config_update.json -echo '{"payload":{"header":{"channel_header":{"channel_id":"channel1", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . > config_update_in_envelope.json -configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope --output config_update_in_envelope.pb -``` - -Navigate back to the `test-network` directory. -``` -cd .. -``` - -Update the channel and set the Org2 anchor peer by issuing the following command: -``` -peer channel update -f channel-artifacts/config_update_in_envelope.pb -c channel1 -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -``` - -You can confirm that the channel has been updated successfully by running the `peer channel info` command: -``` -peer channel getinfo -c channel1 -``` -Now that the channel has been updated by adding two channel configuration blocks to the channel genesis block, the height of the channel will have grown to three: -``` -Blockchain info: {"height":3,"currentBlockHash":"eBpwWKTNUgnXGpaY2ojF4xeP3bWdjlPHuxiPCTIMxTk=","previousBlockHash":"DpJ8Yvkg79XHXNfdgneDb0jjQlXLb/wxuNypbfHMjas="} -``` - -## Deploy a chaincode to the new channel - -We can confirm that the channel was created successfully by deploying a chaincode to the channel. 
We can use the `network.sh` script to deploy the Basic asset transfer chaincode to any test network channel. Deploy a chaincode to our new channel using the following command: -``` -./network.sh deployCC --ccn basic -c channel1 --cci InitLedger -``` - -After you run the command, you should see the chaincode being deployed to the channel in your logs. The chaincode is invoked to add data to the channel ledger. - -``` -2020-08-18 09:23:53.741 EDT [chaincodeCmd] chaincodeInvokeOrQuery -> INFO 001 Chaincode invoke successful. result: status:200 -===================== Invoke transaction successful on peer0.org1 peer0.org2 on channel 'channel1' ===================== -``` - -We can confirm the data was added with the below query. - -``` -peer chaincode query -C channel1 -n basic -c '{"Args":["getAllAssets"]}' -``` - -After you run the query, you should see the assets that were added to the channel ledger. -``` -[{"ID":"asset1","color":"blue","size":5,"owner":"Tomoko","appraisedValue":300}, -{"ID":"asset2","color":"red","size":5,"owner":"Brad","appraisedValue":400}, -{"ID":"asset3","color":"green","size":10,"owner":"Jin Soo","appraisedValue":500}, -{"ID":"asset4","color":"yellow","size":10,"owner":"Max","appraisedValue":600}, -{"ID":"asset5","color":"black","size":15,"owner":"Adriana","appraisedValue":700}, -{"ID":"asset6","color":"white","size":15,"owner":"Michel","appraisedValue":800}] -``` - - diff --git a/docs/source/create_channel/create_channel_config.md b/docs/source/create_channel/create_channel_config.md index 16428b2dd30..9423cc7adf2 100644 --- a/docs/source/create_channel/create_channel_config.md +++ b/docs/source/create_channel/create_channel_config.md @@ -17,7 +17,7 @@ You can use this tutorial to learn how to use the `configtx.yaml` file to build Because different sections of the file work together to create the policies that govern the channel, we will discuss channel policies in [their own tutorial](channel_policies.html). 
-Building off of the [Creating a channel tutorial](create_channel.html), we will use the `configtx.yaml` file that is used to deploy the Fabric test network as an example. Open a command terminal on your local machine and navigate to the `test-network` directory in your local clone of the Fabric samples: +Building off of the [Creating a channel tutorial](create_channel_participation.html), we will use the `configtx.yaml` file that is used to deploy the Fabric test network as an example. Open a command terminal on your local machine and navigate to the `test-network` directory in your local clone of the Fabric samples: ``` cd fabric-samples/test-network ``` @@ -58,14 +58,6 @@ You can see the part of `configtx.yaml` that defines Org1 of the test network be Endorsement: Type: Signature Rule: "OR('Org1MSP.peer')" - - # leave this flag set to true. - AnchorPeers: - # AnchorPeers defines the location of peers which can be used - # for cross org gossip communication. Note, this value is only - # encoded in the genesis block in the Application section context - - Host: peer0.org1.example.com - Port: 7051 ``` - The `Name` field is an informal name used to identify the organization. @@ -82,8 +74,6 @@ You can see the part of `configtx.yaml` that defines Org1 of the test network be - The `Policies` section is used to define a set of signature policies that reference the channel member. We will discuss these policies in more detail when we discuss [channel policies](channel_policies.html). - - The `AnchorPeers` field lists the anchor peers for an organization. Anchor peers are required in order to take advantage of features such as private data and service discovery. It is recommended that organizations select at least one anchor peer. 
While an organization can select their anchor peers on the channel for the first time using the `configtxgen` tool, it is recommended that each organization set anchor peers by using the `configtxlator` tool to [update the channel configuration](create_channel.html#set-anchor-peers). As a result, this field is not required. - ## Capabilities Fabric channels can be joined by orderer and peer nodes that are running different versions of Hyperledger Fabric. Channel capabilities allow organizations that are running different Fabric binaries to participate on the same channel by only enabling certain features. For example, organizations that are running Fabric v1.4 and organizations that are running Fabric v2.x can join the same channel as long as the channel capabilities levels are set to V1_4_X or below. None of the channel members will be able to use the features introduced in Fabric v2.0. @@ -192,7 +182,7 @@ The `TwoOrgsChannel` provides the name of the consortium, `SampleConsortium`, ho ### SampleAppChannelEtcdRaft -The `SampleAppChannelEtcdRaft` profile is provided for customers that prefer to create a channel without a system channel by using the `osnadmin CLI`. The major difference is that a consortium definition is no longer required. Check out the [Create a channel without a system channel](create_channel_participation.html) tutorial to learn more about how to use this profile. +The `SampleAppChannelEtcdRaft` profile is provided for customers that prefer to create a channel without a system channel by using the `osnadmin CLI`. The major difference is that a consortium definition is no longer required. Check out the [Create a channel](create_channel_participation.html) tutorial to learn more about how to use this profile. 
``` SampleAppChannelEtcdRaft: diff --git a/docs/source/create_channel/create_channel_overview.rst b/docs/source/create_channel/create_channel_overview.rst index 049598e2fdc..cfeffc0bc5b 100644 --- a/docs/source/create_channel/create_channel_overview.rst +++ b/docs/source/create_channel/create_channel_overview.rst @@ -14,11 +14,14 @@ Because of the fundamental role that channels play in the operation and governan of Fabric, we provide a series of tutorials that cover different aspects of how channels are created. Fabric v2.3 introduces the capability to create a channel without requiring a system channel, removing an extra -layer of administration from the process. The **Create a channel without a system channel** -tutorial introduces the new flow. The legacy process for creating a channel continues to be -supported and is described in the **Create a channel with a system channel (legacy)** tutorial. -Both tutorials describe the operational steps that need to be taken -by a network administrator. The :doc:`create_channel_config` tutorial +layer of administration from the process. The **Create a channel** +tutorial introduces the new flow. If you don't yet have a network and prefer to use the +test network, see **Create a channel using the test network**. +The legacy process for creating a channel based on a system channel continues to be +supported and is described in the Fabric v2.2 +`Create a channel tutorial `_. +Each tutorial describes the operational steps that need to be taken +by a network administrator to create a channel. For a deeper dive, the :doc:`create_channel_config` tutorial introduces the conceptual aspects of creating a channel, followed by a separate discussion of :doc:`channel_policies`. @@ -27,7 +30,7 @@ separate discussion of :doc:`channel_policies`. 
:maxdepth: 1 create_channel_participation.md - create_channel.md + create_channel_test_net.md create_channel_config.md channel_policies.md diff --git a/docs/source/create_channel/create_channel_participation.md b/docs/source/create_channel/create_channel_participation.md index c1983f4954e..ec13ba11abf 100644 --- a/docs/source/create_channel/create_channel_participation.md +++ b/docs/source/create_channel/create_channel_participation.md @@ -1,8 +1,8 @@ -# Create a channel without a system channel +# Create a channel To simplify the channel creation process and enhance the privacy and scalability of channels, it is now possible to create application channels (where transactions involving assets happen) without first creating a “system channel” managed by the ordering service. Use this tutorial to learn how to create new channels without a system channel by using the `configtxgen` tool to create a genesis block and the `osnadmin CLI` (which runs against a REST API exposed by each ordering service node) to join ordering nodes to a channel. This process allows ordering nodes to join (or leave) any number of channels as needed, similar to how peers can participate in multiple channels. -**How this process differs from the legacy process:** +**How this process differs from the legacy Fabric v2.2 process:** * **System channel no longer required**: Besides the creation of the system channel representing an extra step (as compared to the new process), this system channel created an extra layer of administration that, for some use cases, provided no tangible benefit. * **Consortium no longer required**: You no longer need to define the set of organizations, known as the “consortium”, who are permitted to create channels on a particular ordering service. With this new process, all channels are application channels, so the concept of a list of organizations who can create channels no longer applies. 
Any set of organizations can get together and create a channel using a defined set of ordering nodes (which become the ordering service of that channel). @@ -29,6 +29,8 @@ While creating the channel, this tutorial will take you through the following st - [Step three: Join additional ordering nodes](#step-three-join-additional-ordering-nodes) - [Next steps](#next-steps) +**Note**: If you prefer to learn how to create a channel with the test network instead, check out the [Create a channel using the test network](create_channel_test_net.html) tutorial. + ## Folder structure This tutorial uses the following folder structure for the generated orderer organization MSP and orderer certificates, and while it is not mandatory, it is useful when referring to the certificates referenced by the commands. @@ -152,12 +154,12 @@ Before you can take advantage of this feature on a deployed ordering service, yo Now you can run `osnadmin channel remove` to remove the system channel from the node configuration: ``` - osnadmin channel remove -o [ORDERER_ADMIN_LISTENADDRESS] --channel-id syschannel --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY + osnadmin channel remove -o [ORDERER_ADMIN_LISTENADDRESS] --channelID syschannel --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` For example: ``` - osnadmin channel remove -o HOST1:7081 --channel-id syschannel --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY + osnadmin channel remove -o HOST1:7081 --channelID syschannel --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` When successful you see: ``` @@ -200,7 +202,7 @@ Because this tutorial demonstrates the process for creating a channel with **thr #### Configure the `orderer.yaml` file for each orderer -Follow the instructions in the [ordering service deployment 
guide](../deployorderer/ordererdeploy.html) to build an ordering service with three ordering nodes. However, because the system channel is no longer required when you start an orderer, you can skip the entire process of [generating the genesis block](../deployorderer/ordererdeploy.html#creating-the-genesis-block) in those instructions. In addition, when you configure the `orderer.yaml` file for each orderer, there are a few other modifications you need to make to leverage this feature. You can refer to the orderer [sampleconfig](https://github.com/hyperledger/fabric/blob/{BRANCH}/sampleconfig/orderer.yaml) for more information about these parameters. +Follow the instructions in the [ordering service deployment guide](../deployorderer/ordererdeploy.html) to build an ordering service with three ordering nodes. Note that when you configure the `orderer.yaml` file for each orderer, you will need to make modifications to the [`ChannelParticipation`](../deployorderer/ordererchecklist.html#channelparticipation) and [`General.BoostrapMethod`](../deployorderer/ordererchecklist.html#general-bootstrapmethod) parameters to leverage this feature. - `General.BootstrapMethod` - Set this value to `none`. Because the system channel is no longer required, the `orderer.yaml` file on each orderer needs to be configured with `BootstrapMethod: none` which means that no bootstrap block is required or used to start up the orderer. - `Admin.ListenAddress` - The orderer admin server address (host and port) that can be used by the `osnadmin` command to configure channels on the ordering service. This value should be a unique `host:port` combination to avoid conflicts. 
@@ -257,7 +259,7 @@ Before using `configtxgen`, confirm you have to set the `FABRIC_CFG_PATH` enviro export FABRIC_CFG_PATH=../config ``` - You can check that you can are able to use the tool by printing the `configtxgen` help text: +You can check that you are able to use the tool by printing the `configtxgen` help text: ``` configtxgen --help @@ -358,7 +360,7 @@ An unlimited number of profiles can be listed in the `Profiles` section accordin After you have completed editing the `configtx.yaml`, you can use it to create a new channel for the peer organizations. Every channel configuration starts with a genesis block. Because we previously set the environment variables for the `configtxgen` tool, you can run the following command to build the genesis block for `channel1` using the `SampleAppChannelEtcdRaft` profile: ``` -configtxgen -profile SampleAppGenesisEtcdRaft -outputBlock ./channel-artifacts/channel1.tx -channelID channel1 +configtxgen -profile SampleAppGenesisEtcdRaft -outputBlock genesis_block.pb -channelID channel1 ``` Where: @@ -388,7 +390,7 @@ export OSN_TLS_CA_ROOT_CERT=../config/organizations/ordererOrganizations/orderer export ADMIN_TLS_SIGN_CERT=../config/admin-client/client-tls-cert.pem export ADMIN_TLS_PRIVATE_KEY=../config/admin-client/client-tls-key.pem -osnadmin channel join –-channel-id [CHANNEL_NAME] --config-block [CHANNEL_CONFIG_BLOCK] -o [ORDERER_ADMIN_LISTENADDRESS] --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY +osnadmin channel join --channelID [CHANNEL_NAME] --config-block [CHANNEL_CONFIG_BLOCK] -o [ORDERER_ADMIN_LISTENADDRESS] --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` Replace: @@ -401,7 +403,7 @@ Replace: For example: ``` -osnadmin channel join –-channel-id channel1 --config-block genesis_block.pb -o OSN1.example.com:7050 --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key 
$ADMIN_TLS_PRIVATE_KEY +osnadmin channel join --channelID channel1 --config-block genesis_block.pb -o OSN1.example.com:7050 --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` **Note:** Because the connection between the `osnadmin` CLI and the orderer requires mutual TLS, you need to pass the `--client-cert` and `--client-key` parameters on each `osadmin` command. The `--client-cert` parameter points to the admin client certificate and `--client-key` refers to the admin client private key, both issued by the admin client TLS CA. @@ -429,15 +431,15 @@ INFO 089 Start accepting requests as Raft leader at block [0] channel=channel1 n After the first orderer is added to the channel, subsequent nodes can join from either the genesis block or from the latest config block. When an orderer joins from a config block, its status is always "onboarding" while its ledger catches up to the config block that was specified in the join command, after which the status is automatically updated to "active". 
-Use the `osnadmin channel list` command with the `--channel-id` flag to view the detailed `status` and `consensusRelation` of any **channel** on any ordering node: +Use the `osnadmin channel list` command with the `--channelID` flag to view the detailed `status` and `consensusRelation` of any **channel** on any ordering node: ``` -osnadmin channel list –-channel-id [CHANNEL_NAME] -o [ORDERER_ADMIN_LISTENADDRESS] --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY +osnadmin channel list --channelID [CHANNEL_NAME] -o [ORDERER_ADMIN_LISTENADDRESS] --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` For example: ``` -osnadmin channel list –-channel-id channel1 -o HOST2:7081 --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY +osnadmin channel list --channelID channel1 -o HOST2:7081 --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` Replace: @@ -466,7 +468,7 @@ _You used the configtxgen command to create the channel genesis block and provid Assuming you have successfully run the `osnadmin channel join` on all three ordering nodes, you now have an active channel and the ordering service is ready to order transactions into blocks. Peers can join the channel and clients can begin to transact. -If you want to join additional ordering nodes to the consenter set of the channel, following the instructions in the next section. +If you want to join additional ordering nodes to the consenter set of the channel, follow the instructions in the next section. ## Step three: Join additional ordering nodes @@ -479,7 +481,7 @@ To simplify the tutorial, we assume this additional orderer is part of the same For this tutorial, the new orderer is not part of the consenter set. 
Run the following command to join the new orderer to the channel: ``` -osnadmin channel join –-channel-id [CHANNEL_NAME] --config-block [CHANNEL_CONFIG_BLOCK] -o [ORDERER_ADMIN_LISTENADDRESS] --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY +osnadmin channel join --channelID [CHANNEL_NAME] --config-block [CHANNEL_CONFIG_BLOCK] -o [ORDERER_ADMIN_LISTENADDRESS] --ca-file $OSN_TLS_CA_ROOT_CERT --client-cert $ADMIN_TLS_SIGN_CERT --client-key $ADMIN_TLS_PRIVATE_KEY ``` An orderer can join the channel by providing the genesis block, or the latest config block. But the value of `consensusRelation` will always be "follower" until this orderer is added to the channel's consenter set, by submitting an update to the channel configuration. @@ -541,7 +543,7 @@ After the channel has been created, you can follow the normal process to join pe ### Add or remove orderers from existing channels -You can continue to use the `osnadmin channel join` and `osnadmin channel remove` commands to add and remove orderers on each channel according to your business needs. Be aware that before you remove a channel from an orderer, it is recommended that you first remove the orderer from the channel's consenter set by submitting an channel update request. +You can continue to use the `osnadmin channel join` and `osnadmin channel remove` commands to add and remove orderers on each channel according to your business needs. Be aware that before you remove a channel from an orderer, it is recommended that you first remove the orderer from the channel's consenter set by submitting a channel update request. 
diff --git a/docs/source/create_channel/create_channel_test_net.md b/docs/source/create_channel/create_channel_test_net.md new file mode 100644 index 00000000000..8fa962140bc --- /dev/null +++ b/docs/source/create_channel/create_channel_test_net.md @@ -0,0 +1,448 @@ +# Create a channel using the test network + +Use this tutorial along with the test network to learn how to create a channel genesis block and then create a new application channel that the test network peers can join. Rather than requiring you to set up an orderer, or remove the system channel from an existing orderer, this tutorial leverages the nodes from the Fabric sample test network. Because the test network deploys an ordering service and peers for you, this tutorial focuses solely on the process to create a channel. It is worth noting that the test network includes a `createChannel` subcommand that can be used to create a channel, but this tutorial explains how to do it manually, the process that is required when you do not use the test network. + +Fabric v2.3 introduces the capability to create a channel without requiring a system channel, removing an extra layer of administration from the process. In this tutorial, we use the [configtxgen](../commands/configtxgen.html) tool to create a channel genesis block and then use the [osnadmin channel](../commands/osnadminchannel.html) command to create the channel. + +**Note:** +- If you are _not_ using the test network, you should follow the instructions for [how to deploy an ordering service without a system channel](create_channel_participation.html#deploy-a-new-set-of-orderers). In the Fabric v2.3 test network sample, the single-node ordering service is deployed without a system channel.
+- If you prefer to learn how to create a channel on an ordering service that includes the system channel, you should refer to the [Create a channel tutorial](https://hyperledger-fabric.readthedocs.io/en/release-2.2/create_channel/create_channel.html) from Fabric v2.2. In the Fabric v2.2 test network sample, the single-node ordering service is deployed with a system channel. + +To create a channel using the test network, this tutorial takes you through the following steps and concepts: +- [Prerequisites](#prerequisites) +- [Step one: Generate the genesis block of the channel](#step-one-generate-the-genesis-block-of-the-channel) +- [Step two: Create the application channel](#step-two-create-the-application-channel) +- [Next steps](#next-steps) + +## Before you begin + +To run the test network, you need to clone the `fabric-samples` +repository and download the latest production Fabric images. Make sure that you have installed +the [Prerequisites](../prereqs.html) and [Installed the Samples, Binaries, and Docker Images](../install.html). + +**Note:** After you create a channel and join peers to it, you will need to add anchor peers to the channel, in order for service discovery and private data to work. Instructions on how to set an anchor peer on your channel are included in this tutorial, but require that the [jq tool](https://stedolan.github.io/jq/) is installed on your local machine. + +## Prerequisites + +### Start the test network + +We will use a running instance of the Fabric test network to create the new channel. Because it's important to operate from a known initial state, the following command destroys any active containers and removes any previously generated artifacts. For the purposes of this tutorial, we operate from the `test-network` directory inside `fabric-samples`.
If you are not already there, navigate to that directory using the following command: +``` +cd fabric-samples/test-network +``` +Run the following command to bring down the network: +``` +./network.sh down +``` +You can then use the following command to start the test network: +``` +./network.sh up +``` +This command creates a Fabric network with the two peer organizations and the single ordering node ordering organization. The peer organizations will operate one peer each, while the ordering service administrator will operate a single ordering node. When you run the command, the script prints out the nodes being created: +``` +Creating network "net_test" with the default driver +Creating volume "net_orderer.example.com" with default driver +Creating volume "net_peer0.org1.example.com" with default driver +Creating volume "net_peer0.org2.example.com" with default driver +Creating peer0.org2.example.com ... done +Creating orderer.example.com ... done +Creating peer0.org1.example.com ... done +Creating cli ... done +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +1667543b5634 hyperledger/fabric-tools:latest "/bin/bash" 1 second ago Up Less than a second cli +b6b117c81c7f hyperledger/fabric-peer:latest "peer node start" 2 seconds ago Up 1 second 0.0.0.0:7051->7051/tcp peer0.org1.example.com +703ead770e05 hyperledger/fabric-orderer:latest "orderer" 2 seconds ago Up Less than a second 0.0.0.0:7050->7050/tcp, 0.0.0.0:7053->7053/tcp orderer.example.com +718d43f5f312 hyperledger/fabric-peer:latest "peer node start" 2 seconds ago Up 1 second 7051/tcp, 0.0.0.0:9051->9051/tcp peer0.org2.example.com +``` + +Notice that the peers are running on ports `7051` and `9051`, while the orderer is running on port `7050`. We will use these ports in subsequent commands. + +By default, when you start the test network, it does not contain any channels. The following instructions demonstrate how to add a channel that is named `channel1` to this network. 
+ +### Set up the configtxgen tool + +Channels are created by generating a channel creation transaction in a genesis block, and then passing that genesis block to an ordering service node in a join request. The channel creation transaction specifies the initial configuration of the channel and can be created by the [configtxgen](../commands/configtxgen.html) tool. The tool reads the `configtx.yaml` file that defines the configuration of our channel, and then writes the relevant information into the channel creation transaction and outputs a genesis block including the channel creation transaction. When you [installed Fabric](../install.html), the `configtxgen` tool was installed in the `fabric-samples/bin` directory for you. + +Ensure that you are still operating from the `test-network` directory of your local clone of `fabric-samples` and run this command: + +``` +export PATH=${PWD}/../bin:$PATH +``` + +Next, before you can use `configtxgen`, you need to set the `FABRIC_CFG_PATH` environment variable to the location of the test network folder that contains the `configtx.yaml` file. Because we are using the test network, we reference the `configtx` folder: +``` +export FABRIC_CFG_PATH=${PWD}/configtx +``` + +Now verify that you can use the tool by printing the `configtxgen` help text: +``` +configtxgen --help +``` + +### The configtx.yaml file + +For the test network, the `configtxgen` tool uses the channel profiles that are defined in the `configtx/configtx.yaml` file to create the channel configuration and write it to the [protobuf format](https://developers.google.com/protocol-buffers) that can be read by Fabric. + +This `configtx.yaml` file contains the following information that we will use to create our new channel: + +- **Organizations:** The peer and ordering organizations that can become members of your channel. Each organization has a reference to the cryptographic material that is used to build the [channel MSP](../membership/membership.html).
+- **Ordering service:** Which ordering nodes will form the ordering service of the network, and the consensus method they will use to agree to a common order of transactions. This section also defines the ordering nodes that are part of the ordering service consenter set. In the test network sample, there is only a single ordering node, but in a production network we recommend **five** ordering nodes to allow for two ordering nodes to go down and still maintain consensus. + ``` + EtcdRaft: + Consenters: + - Host: orderer.example.com + Port: 7050 + ClientTLSCert: ../organizations/ordererOrganizations/example.com/orderers/orderer.example.com/tls/server.crt + ServerTLSCert: ../organizations/ordererOrganizations/example.com/orderers/orderer.example.com/tls/server.crt + ``` +- **Channel policies:** Different sections of the file work together to define the policies that will govern how organizations interact with the channel and which organizations need to approve channel updates. For the purposes of this tutorial, we will use the default policies used by Fabric. +- **Channel profiles:** Each channel profile references information from other sections of the `configtx.yaml` file to build a channel configuration. The profiles are used to create the genesis block of the application channel. Notice that the `configtx.yaml` file in the test network includes a single profile named `TwoOrgsApplicationGenesis` that we will use to generate the create channel transaction. + ```yaml + TwoOrgsApplicationGenesis: + <<: *ChannelDefaults + Orderer: + <<: *OrdererDefaults + Organizations: + - *OrdererOrg + Capabilities: + <<: *OrdererCapabilities + Application: + <<: *ApplicationDefaults + Organizations: + - *Org1 + - *Org2 + Capabilities: + <<: *ApplicationCapabilities + ``` + +The profile includes both peer organizations, `Org1` and `Org2`, as well as the ordering organization `OrdererOrg`.
Additional ordering nodes and ordering organizations can be added or removed from the consenter set at a later time using a channel update transaction. + +Want to learn more about this file and how to build your own channel application profiles? Visit [Using configtx.yaml to create a channel genesis block](create_channel_config.html) tutorial for more details. For now, we will return to the operational aspects of creating the channel, though we will reference parts of this file in future steps. + +## Step one: Generate the genesis block of the channel + +Because we have started the Fabric test network, we are ready to create a new channel. We have already set the environment variables that are required to use the `configtxgen` tool. + +Run the following command to create the channel genesis block for `channel1`: +``` +configtxgen -profile TwoOrgsApplicationGenesis -outputBlock ./channel-artifacts/channel1.block -channelID channel1 +``` + +- **`-profile`**: The command uses the `-profile` flag to reference the `TwoOrgsApplicationGenesis:` profile from `configtx.yaml` that is used by the test network to create application channels. +- **`-outputBlock`**: The output of this command is the channel genesis block that is written to `-outputBlock ./channel-artifacts/channel1.block`. +- **`-channelID`**: The `-channelID` parameter will be the name of the future channel. You can specify any name you want for your channel but for illustration purposes in this tutorial we use `channel1`. Channel names must be all lowercase, fewer than 250 characters long and match the regular expression ``[a-z][a-z0-9.-]*``. 
+ +When the command is successful, you can see the logs of `configtxgen` loading the `configtx.yaml` file and printing a channel creation transaction: +``` +[common.tools.configtxgen] main -> INFO 001 Loading configuration +[common.tools.configtxgen.localconfig] completeInitialization -> INFO 002 orderer type: etcdraft +[common.tools.configtxgen.localconfig] completeInitialization -> INFO 003 Orderer.EtcdRaft.Options unset, setting to tick_interval:"500ms" election_tick:10 heartbeat_tick:1 max_inflight_blocks:5 snapshot_interval_size:16777216 +[common.tools.configtxgen.localconfig] Load -> INFO 004 Loaded configuration: /Users/fabric-samples/test-network/configtx/configtx.yaml +[common.tools.configtxgen] doOutputBlock -> INFO 005 Generating genesis block +[common.tools.configtxgen] doOutputBlock -> INFO 006 Creating application channel genesis block +[common.tools.configtxgen] doOutputBlock -> INFO 007 Writing genesis block +``` + +## Step two: Create the application channel + +Now that we have the channel genesis block, it is easy to use the `osnadmin channel join` command to create the channel. To simplify subsequent commands, we also need to set some environment variables to establish the locations of the certificates for the nodes in the test network: + +``` +export ORDERER_CA=${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem +export ORDERER_ADMIN_TLS_SIGN_CERT=${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/tls/server.crt +export ORDERER_ADMIN_TLS_PRIVATE_KEY=${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/tls/server.key +``` + +Run the following command to create the channel named `channel1` on the ordering service. 
+``` +osnadmin channel join --channelID channel1 --config-block ./channel-artifacts/channel1.block -o localhost:7053 --ca-file "$ORDERER_CA" --client-cert "$ORDERER_ADMIN_TLS_SIGN_CERT" --client-key "$ORDERER_ADMIN_TLS_PRIVATE_KEY" +``` + +- **`--channelID`**: Specify the name of the application channel that you provided when you created the channel genesis block. +- **`--config-block`**: Specify the location of the channel genesis block that you created with the `configtxgen` command, or the latest config block. +- **`-o`**: Specify the hostname and port of the orderer admin endpoint. For the test network ordering node this is set to `localhost:7053`. + +In addition, because the `osnadmin channel` commands communicate with the ordering node using mutual TLS, you need to provide the following certificates: +- **`--ca-file`**: Specify the location and file name of the orderer organization TLS CA root certificate. +- **`--client-cert`**: Specify the location and file name of admin client signed certificate from the TLS CA. +- **`--client-key`**: Specify the location and file name of admin client private key from the TLS CA. + +When successful, the output of the command contains the following: +``` +Status: 201 +{ + "name": "channel1", + "url": "/participation/v1/channels/channel1", + "consensusRelation": "consenter", + "status": "active", + "height": 1 +} +``` + +The channel is active and ready for peers to join. + +### Consenter vs. Follower + +Notice the ordering node was joined to the channel with a `consensusRelation: "consenter"`. If you ran the command against an ordering node that is not included in the list of `Consenters:` in the `configtx.yaml` file (or the channel configuration consenter set), it is added to the channel as a `follower`. To learn more about considerations when joining additional ordering nodes see the topic on [Joining additional ordering nodes](create_channel_participation.html#step-three-join-additional-ordering-nodes). + +### Active vs. 
onboarding + +An orderer can join the channel by providing the channel **genesis block**, or the **latest config block**. If joining from the latest config block, the orderer status is set to `onboarding` until the channel ledger has caught up to the specified config block, when it becomes `active`. At this point, you could then add the orderer to the channel consenter set by submitting a channel update transaction, which will cause the `consensusRelation` to change from `follower` to `consenter`. + +## Next steps + +After you have created the channel, the next steps are to join peers to the channel and deploy smart contracts. This section walks you through those processes using the test network. + +### List channels on an orderer + +Before you join peers to the channel, you might want to try to create additional channels. As you create more channels, the `osnadmin channel list` command is useful to view the channels that this orderer is a member of. The same parameters are used here as in the `osnadmin channel join` command from the previous step: +``` +osnadmin channel list -o localhost:7053 --ca-file "$ORDERER_CA" --client-cert "$ORDERER_ADMIN_TLS_SIGN_CERT" --client-key "$ORDERER_ADMIN_TLS_PRIVATE_KEY" +``` +The output of this command looks similar to: + +``` +Status: 200 +{ + "systemChannel": null, + "channels": [ + { + "name": "channel1", + "url": "/participation/v1/channels/channel1" + } + ] +} +``` + +### Join peers to the channel + +The test network includes two peer organizations each with one peer. But before we can use the peer CLI, we need to set some environment variables to specify which user (client MSP) we are acting as and which peer we are targeting. Set the following environment variables to indicate that we are acting as the Org1 admin and targeting the Org1 peer. 
+ +``` +export CORE_PEER_TLS_ENABLED=true +export CORE_PEER_LOCALMSPID="Org1MSP" +export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt +export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp +export CORE_PEER_ADDRESS=localhost:7051 +``` + +In order to use the peer CLI, we also need to modify the `FABRIC_CFG_PATH`: +``` +export FABRIC_CFG_PATH=$PWD/../config/ +``` +To join the test network peer from `Org1` to the channel `channel1` simply pass the genesis block in a join request: +``` +peer channel join -b ./channel-artifacts/channel1.block +``` +When successful, the output of this command contains the following: +``` +[channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized +[channelCmd] executeJoin -> INFO 002 Successfully submitted proposal to join channel +``` + +We repeat these steps for the `Org2` peer. Set the following environment variables to operate the `peer` CLI as the `Org2` admin. The environment variables will also set the `Org2` peer, ``peer0.org2.example.com``, as the target peer.
+``` +export CORE_PEER_LOCALMSPID="Org2MSP" +export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt +export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp +export CORE_PEER_ADDRESS=localhost:9051 +``` +Now repeat the command to join the peer from `Org2` to `channel1`: +``` +peer channel join -b ./channel-artifacts/channel1.block +``` +When successful, the output of this command contains the following: +``` +[channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized +[channelCmd] executeJoin -> INFO 002 Successfully submitted proposal to join channel +``` + +## Set anchor peer + +Finally, after an organization has joined their peers to the channel, they should select **at least one** of their peers to become an anchor peer. [Anchor peers](../gossip.html#anchor-peers) are required in order to take advantage of features such as private data and service discovery. Each organization should set multiple anchor peers on a channel for redundancy. For more information about gossip and anchor peers, see the [Gossip data dissemination protocol](../gossip.html). + +The endpoint information of the anchor peers of each organization is included in the channel configuration. Each channel member can specify their anchor peers by updating the channel. We will use the [configtxlator](../commands/configtxlator.html) tool to update the channel configuration and select an anchor peer for `Org1` and `Org2`. + +**Note:** If [jq](https://stedolan.github.io/jq/) is not already installed on your local machine, you need to install it now to complete these steps. + +We will start by selecting the peer from `Org1` to be an anchor peer. The first step is to pull the most recent channel configuration block using the `peer channel fetch` command. 
Set the following environment variables to operate the `peer` CLI as the `Org1` admin: +``` +export CORE_PEER_LOCALMSPID="Org1MSP" +export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt +export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp +export CORE_PEER_ADDRESS=localhost:7051 +``` + +You can use the following command to fetch the channel configuration: +``` +peer channel fetch config channel-artifacts/config_block.pb -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile "$ORDERER_CA" +``` +Because the most recent channel configuration block is the channel genesis block, the command returns block `0` from the channel. +``` +[channelCmd] InitCmdFactory -> INFO 001 Endorser and orderer connections initialized +[cli.common] readBlock -> INFO 002 Received block: 0 +[channelCmd] fetch -> INFO 003 Retrieving last config block: 0 +[cli.common] readBlock -> INFO 004 Received block: 0 +``` + +The channel configuration block `config_block.pb` is stored in the `channel-artifacts` folder to keep the update process separate from other artifacts. Change into the `channel-artifacts` folder to complete the next steps: +``` +cd channel-artifacts +``` +We can now start using the `configtxlator` tool to start working with the channel configuration. The first step is to decode the block from protobuf into a JSON object that can be read and edited. We also strip away the unnecessary block data, leaving only the channel configuration. + +``` +configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json +jq '.data.data[0].payload.data.config' config_block.json > config.json +``` + +These commands convert the channel configuration block into a streamlined JSON, `config.json`, that will serve as the baseline for our update. 
Because we don't want to edit this file directly, we will make a copy that we can edit. We will use the original channel config in a future step. +``` +cp config.json config_copy.json +``` + +You can use the `jq` tool to add the `Org1` anchor peer to the channel configuration. +``` +jq '.channel_group.groups.Application.groups.Org1MSP.values += {"AnchorPeers":{"mod_policy": "Admins","value":{"anchor_peers": [{"host": "peer0.org1.example.com","port": 7051}]},"version": "0"}}' config_copy.json > modified_config.json +``` + +After this step, we have an updated version of channel configuration in JSON format in the `modified_config.json` file. We can now convert both the original and modified channel configurations back into protobuf format and calculate the difference between them. +``` +configtxlator proto_encode --input config.json --type common.Config --output config.pb +configtxlator proto_encode --input modified_config.json --type common.Config --output modified_config.pb +configtxlator compute_update --channel_id channel1 --original config.pb --updated modified_config.pb --output config_update.pb +``` + +The new protobuf named `config_update.pb` contains the anchor peer update that we need to apply to the channel configuration. We can wrap the configuration update in a transaction envelope to create the channel configuration update transaction. + +``` +configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate --output config_update.json +echo '{"payload":{"header":{"channel_header":{"channel_id":"channel1", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . > config_update_in_envelope.json +configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope --output config_update_in_envelope.pb +``` + +We can now use the final artifact, `config_update_in_envelope.pb`, that can be used to update the channel. Navigate back to the `test-network` directory: +``` +cd .. 
+``` + +We can add the anchor peer by providing the new channel configuration to the `peer channel update` command. Because we are updating a section of the channel configuration that only affects `Org1`, other channel members do not need to approve the channel update. +``` +peer channel update -f channel-artifacts/config_update_in_envelope.pb -c channel1 -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile "${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem" +``` + +When the channel update is successful, you should see the following response: +``` +[channelCmd] update -> INFO 002 Successfully submitted channel update +``` + +We can also set the peer from `Org2` to be an anchor peer. Because we are going through the process a second time, we will go through the steps more quickly. Set the environment variables to operate the `peer` CLI as the `Org2` admin: +``` +export CORE_PEER_LOCALMSPID="Org2MSP" +export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt +export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp +export CORE_PEER_ADDRESS=localhost:9051 +``` + +Pull the latest channel configuration block, which is now the second block on the channel: +``` +peer channel fetch config channel-artifacts/config_block.pb -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com -c channel1 --tls --cafile "${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem" +``` + +Navigate back to the `channel-artifacts` directory: +``` +cd channel-artifacts +``` + +You can then decode and copy the configuration block. 
+``` +configtxlator proto_decode --input config_block.pb --type common.Block --output config_block.json +jq '.data.data[0].payload.data.config' config_block.json > config.json +cp config.json config_copy.json +``` + +Add the `Org2` peer that is joined to the channel as the anchor peer in the channel configuration: +``` +jq '.channel_group.groups.Application.groups.Org2MSP.values += {"AnchorPeers":{"mod_policy": "Admins","value":{"anchor_peers": [{"host": "peer0.org2.example.com","port": 9051}]},"version": "0"}}' config_copy.json > modified_config.json +``` + +We can now convert both the original and updated channel configurations back into protobuf format and calculate the difference between them. +``` +configtxlator proto_encode --input config.json --type common.Config --output config.pb +configtxlator proto_encode --input modified_config.json --type common.Config --output modified_config.pb +configtxlator compute_update --channel_id channel1 --original config.pb --updated modified_config.pb --output config_update.pb +``` + +Wrap the configuration update in a transaction envelope to create the channel configuration update transaction: +``` +configtxlator proto_decode --input config_update.pb --type common.ConfigUpdate --output config_update.json +echo '{"payload":{"header":{"channel_header":{"channel_id":"channel1", "type":2}},"data":{"config_update":'$(cat config_update.json)'}}}' | jq . > config_update_in_envelope.json +configtxlator proto_encode --input config_update_in_envelope.json --type common.Envelope --output config_update_in_envelope.pb +``` + +Navigate back to the `test-network` directory. +``` +cd .. 
+``` + +Update the channel and set the `Org2` anchor peer by issuing the following command: +``` +peer channel update -f channel-artifacts/config_update_in_envelope.pb -c channel1 -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile "${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem" +``` + +If you want to learn more about how to submit a channel update request, see [update a channel configuration](../config_update.html). + +You can confirm that the channel has been updated successfully by running the `peer channel info` command: +``` +peer channel getinfo -c channel1 +``` +Now that the channel has been updated by adding two channel configuration blocks to the channel genesis block, the height of the channel will have grown to three and the hashes are updated: +``` +Blockchain info: {"height":3,"currentBlockHash":"GKqqk/HNi9x/6YPnaIUpMBlb0Ew6ovUnSB5MEF7Y5Pc=","previousBlockHash":"cl4TOQpZ30+d17OF5YOkX/mtMjJpUXiJmtw8+sON8a8="} +``` + +## Deploy a chaincode to the new channel + +We can confirm that the channel was created successfully by deploying a chaincode to the channel. We can use the `network.sh` script to deploy the Basic asset transfer chaincode to any test network channel. Deploy a chaincode to our new channel using the following command: +``` +./network.sh deployCC -ccn basic -ccp ../asset-transfer-basic/chaincode-go/ -ccl go -c channel1 +``` + +After you run the command, you should see the chaincode being deployed to the channel in your logs. 
+ +``` +Committed chaincode definition for chaincode 'basic' on channel 'channel1': +Version: 1.0, Sequence: 1, Endorsement Plugin: escc, Validation Plugin: vscc, Approvals: [Org1MSP: true, Org2MSP: true] +Query chaincode definition successful on peer0.org2 on channel 'channel1' +Chaincode initialization is not required +``` +Then run the following command to initialize some assets on the ledger: +``` +peer chaincode invoke -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile "$ORDERER_CA" -C channel1 -n basic --peerAddresses localhost:7051 --tlsRootCertFiles "${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt" --peerAddresses localhost:9051 --tlsRootCertFiles "${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt" -c '{"function":"InitLedger","Args":[]}' +``` +When successful you will see: +``` +[chaincodeCmd] chaincodeInvokeOrQuery -> INFO 001 Chaincode invoke successful. result: status:200 +``` + +Confirm the assets were added to the ledger by issuing the following query: + +``` +peer chaincode query -C channel1 -n basic -c '{"Args":["getAllAssets"]}' +``` + +You should see output similar to the following: +``` +[{"ID":"asset1","color":"blue","size":5,"owner":"Tomoko","appraisedValue":300}, +{"ID":"asset2","color":"red","size":5,"owner":"Brad","appraisedValue":400}, +{"ID":"asset3","color":"green","size":10,"owner":"Jin Soo","appraisedValue":500}, +{"ID":"asset4","color":"yellow","size":10,"owner":"Max","appraisedValue":600}, +{"ID":"asset5","color":"black","size":15,"owner":"Adriana","appraisedValue":700}, +{"ID":"asset6","color":"white","size":15,"owner":"Michel","appraisedValue":800}] +``` + +### Create a channel without the test network + +This tutorial has taken you through the basic steps to create a channel on the test network by using the `osnadmin channel join` command. 
When you are ready to build your own network, follow the steps in the [Create a channel](create_channel_participation.html) tutorial to learn more about using the `osnadmin channel` commands. + diff --git a/docs/source/deploy_chaincode.md b/docs/source/deploy_chaincode.md index 0c42dfd318f..3fa132171e5 100644 --- a/docs/source/deploy_chaincode.md +++ b/docs/source/deploy_chaincode.md @@ -11,7 +11,7 @@ You can use this tutorial to learn how to use the [peer lifecycle chaincode comm ## Start the network -We will start by deploying an instance of the Fabric test network. Before you begin, make sure that that you have installed the [Prerequisites](prereqs.html) and [Installed the Samples, Binaries and Docker Images](install.html). Use the following command to navigate to the test network directory within your local clone of the `fabric-samples` repository: +We will start by deploying an instance of the Fabric test network. Before you begin, make sure that you have installed the [Prerequisites](prereqs.html) and [Installed the Samples, Binaries and Docker Images](install.html). Use the following command to navigate to the test network directory within your local clone of the `fabric-samples` repository: ``` cd fabric-samples/test-network ``` @@ -349,11 +349,10 @@ If the command is successful, the peer will generate and return the package iden 2020-07-16 10:09:57.534 CDT [cli.lifecycle.chaincode] submitInstallProposal -> INFO 002 Chaincode code package identifier: basic_1.0:e2db7f693d4aa6156e652741d5606e9c5f0de9ebb88c5721cb8248c3aead8123 ``` -We can now install the chaincode on the Org2 peer. Set the following environment variables to operate as the Org2 admin and target target the Org2 peer, `peer0.org2.example.com`. +We can now install the chaincode on the Org2 peer. Set the following environment variables to operate as the Org2 admin and target the Org2 peer, `peer0.org2.example.com`. 
``` export CORE_PEER_LOCALMSPID="Org2MSP" export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt -export CORE_PEER_TLS_ROOTCERT_FILE=${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt export CORE_PEER_MSPCONFIGPATH=${PWD}/organizations/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp export CORE_PEER_ADDRESS=localhost:9051 ``` @@ -369,7 +368,7 @@ The chaincode is built by the peer when the chaincode is installed. The install After you install the chaincode package, you need to approve a chaincode definition for your organization. The definition includes the important parameters of chaincode governance such as the name, version, and the chaincode endorsement policy. -The set of channel members who need to approve a chaincode before it can be deployed is governed by the `Application/Channel/lifeycleEndorsement` policy. By default, this policy requires that a majority of channel members need to approve a chaincode before it can used on a channel. Because we have only two organizations on the channel, and a majority of 2 is 2, we need approve a chaincode definition of asset-transfer (basic) as Org1 and Org2. +The set of channel members who need to approve a chaincode before it can be deployed is governed by the `Application/Channel/lifecycleEndorsement` policy. By default, this policy requires that a majority of channel members need to approve a chaincode before it can be used on a channel. Because we have only two organizations on the channel, and a majority of 2 is 2, we need approve a chaincode definition of asset-transfer (basic) as Org1 and Org2. If an organization has installed the chaincode on their peer, they need to include the packageID in the chaincode definition approved by their organization. 
The package ID is used to associate the chaincode installed on a peer with an approved chaincode definition, and allows an organization to use the chaincode to endorse transactions. You can find the package ID of a chaincode by using the [peer lifecycle chaincode queryinstalled](commands/peerlifecycle.html#peer-lifecycle-chaincode-queryinstalled) command to query your peer. ``` diff --git a/docs/source/deployment_guide_overview.rst b/docs/source/deployment_guide_overview.rst index 2df0527ba2f..edfff8744c9 100644 --- a/docs/source/deployment_guide_overview.rst +++ b/docs/source/deployment_guide_overview.rst @@ -37,6 +37,9 @@ In addition to the above, here is a sampling of the decisions you will need to m * **Database type.** Some channels in a network might require all data to be modeled in a way :doc:`couchdb_as_state_database` can understand, while other networks, prioritizing speed, might decide that all peers will use LevelDB. Note that channels should not have peers that use both CouchDB and LevelDB on them, as CouchDB imposes some data restrictions on keys and values. Keys and values that are valid in LevelDB may not be valid in CouchDB. +* **Create a system channel or not.** + Ordering nodes can be bootstrapped with a configuration block for an administrative channel known as the “system channel” (from which application channels can be created), or simply started and joined to application channels as needed. The recommended method is to bootstrap without a configuration block, which is the approach this deployment guide assumes you will take. For more information about creating a system channel genesis block and bootstrapping an ordering node with it, check out `Deploying a production network `_ from the Fabric v2.2 documentation. + * **Channels and private data.** Some networks might decide that :doc:`channels` are the best way to ensure privacy and isolation for certain transactions. 
Others might decide that fewer channels, supplemented where necessary with :doc:`private-data/private-data` collections, better serves their privacy needs. @@ -71,7 +74,7 @@ Managing your infrastructure The exact methods and tools you use to manage your backend will depend on the backend you choose. However, here are some considerations worth noting. -* Using secret objects to securely store important configuration files in your cluster. For information about Kubernetes secrets, check out `Kubernetes secrets `_. You also have the option to use Hardened Security Modules (HSMs) or encrypted Persistent Volumes (PVs). Along similar lines, after deploying Fabric components, you will likely want to connect to a container on your own backend, for example using a private repo in a service like Docker Hub. In that case, you will need to code the login information in the form of a Kubernetes secret and include it in the YAML file when deploying components. +* Using secret objects to securely store important configuration files in your cluster. For information about Kubernetes secrets, check out `Kubernetes secrets `_. You also have the option to use Hardware Security Modules (HSMs) or encrypted Persistent Volumes (PVs). Along similar lines, after deploying Fabric components, you will likely want to connect to a container on your own backend, for example using a private repo in a service like Docker Hub. In that case, you will need to code the login information in the form of a Kubernetes secret and include it in the YAML file when deploying components. * Cluster considerations and node sizing. In step 2 above, we discussed a general outline for how to think about the sizings of nodes. Your use case, as well as a robust period of development, is the only way you will truly know how large your peers, ordering nodes, and CAs will need to be. * How you choose to mount your volumes. 
It is a best practice to mount the volumes relevant to your nodes external to the place where your nodes are deployed. This will allow you to reference these volumes later on (for example, restarting a node or a container that has crashed) without having to redeploy or regenerate your crypto material. * How you will monitor your resources. It is critical that you establish a strategy and method for monitoring the resources used by your individual nodes and the resources deployed to your cluster generally. As you join your peers to more channels, you will need likely need to increase its CPU and memory allocation. Similarly, you will need to make sure you have enough storage space for your state database and blockchain. @@ -175,7 +178,7 @@ Note: while it is possible to add additional nodes to an ordering service, only If you’ve read through the key concept topic on :doc:`orderer/ordering_service`, you should have a good idea of the role the ordering service plays in a network and the nature of its interactions with other network components. The ordering service is responsible for literally “ordering” endorsed transactions into blocks, which peers then validate and commit to their ledgers. -These roles are important to understand before you create an ordering service, as it will influence your customization and deployment decisions. Among the chief differences between a peer and ordering service is that in a production network, multiple ordering nodes work together to form the “ordering service” of a channel. This creates a series of important decisions that need to be made at both the node level and at the cluster level. Some of these cluster decisions are not made in individual ordering node ``orderer.yaml`` files but instead in the ``configtx.yaml`` file that is used to generate the genesis block for the system channel (which is used to bootstrap ordering nodes), and also used to generate the genesis block of application channels. 
For a look at the various decisions you will need to make, check out :doc:`deployorderer/ordererplan`. +These roles are important to understand before you create an ordering service, as it will influence your customization and deployment decisions. Among the chief differences between a peer and ordering service is that in a production network, multiple ordering nodes work together to form the “ordering service” of a channel (these nodes are also known as the “consenter set”). This creates a series of important decisions that need to be made at both the node level and at the cluster level. Some of these cluster decisions are not made in individual ordering node ``orderer.yaml`` files but instead in the ``configtx.yaml`` file that is used to generate the genesis block for an application channel. For a look at the various decisions you will need to make, check out :doc:`deployorderer/ordererplan`. The configuration values in an ordering node’s ``orderer.yaml`` file must be customized or overridden with environment variables. You can find the default ``orderer.yaml`` configuration file `in the sampleconfig directory of Hyperledger Fabric `_. @@ -191,7 +194,7 @@ Among the parameters in ``orderer.yaml``, there are: For more information about ``orderer.yaml`` and its specific parameters, check out :doc:`deployorderer/ordererchecklist`. -When you're comfortable with how your ordering node has been configured, how your volumes are mounted, and your backend configuration, you can run the command to launch the ordering node (this command will depend on your backend configuration). +Note: This tutorial assumes that a system channel genesis block will not be used when bootstrapping the orderer. Instead, these nodes (or a subset of them), will be joined to a channel using the process to :doc:`create_channel/create_channel_participation`. 
For information on how to create an orderer that will be bootstrapped with a system channel genesis block, check out `Deploy the ordering service `_ from the Fabric v2.2 documentation. .. toctree:: :maxdepth: 1 @@ -204,7 +207,7 @@ When you're comfortable with how your ordering node has been configured, how you Next steps ---------- -Blockchain networks are all about connection, so once you've deployed nodes, you'll obviously want to connect them to other nodes! If you have a peer organization and a peer, you'll want to join your organization to a consortium and join or :doc:`channels`. If you have an ordering node, you will want to add peer organizations to your consortium. You'll also want to learn how to develop chaincode, which you can learn about in the topics :doc:`developapps/scenario` and :doc:`chaincode4ade`. +Blockchain networks are all about connection, so once you've deployed nodes, you'll obviously want to connect them to other nodes! If you have a peer organization and a peer, you'll want to join your organization to a consortium and join or :doc:`create_channel/create_channel_participation`. If you have an ordering node, you will want to add peer organizations to your consortium. You'll also want to learn how to develop chaincode, which you can learn about in the topics :doc:`developapps/scenario` and :doc:`chaincode4ade`. Part of the process of connecting nodes and creating channels will involve modifying policies to fit the use cases of business networks. For more information about policies, check out :doc:`policies/policies`. 
diff --git a/docs/source/deployorderer/ordererchecklist.md b/docs/source/deployorderer/ordererchecklist.md index 39dc2dcc8c7..520b61f12da 100644 --- a/docs/source/deployorderer/ordererchecklist.md +++ b/docs/source/deployorderer/ordererchecklist.md @@ -18,6 +18,8 @@ This checklist covers key configuration parameters for setting up a production o * [FileLedger.Location](#fileledger-location) * [Operations.*](#operations) * [Metrics.*](#metrics) +* [Admin.*](#admin) +* [ChannelParticipation.*](#channelparticipation) * [Consensus.*](#consensus) ## General.ListenAddress @@ -140,7 +142,7 @@ In general, these four parameters would only need to be configured if you want t BootstrapMethod: file ``` -* **`BootstrapMethod`**: (default value should not be overridden) Unless you plan to use a file type other than “file”, this value should be left as is. +* **`BootstrapMethod`**: If you plan to create this node on a network that is not using a system channel, override this value to `none` and then ensure that [`ChannelParticipation.Enabled`](#channelparticipation) is set to `true`, otherwise you will get an error when you attempt to start the node. If you are creating a node to be joined to a system channel, unless you plan to use a file type other than “file”, this value should be left as is. ## General.BoostrapFile @@ -155,7 +157,7 @@ BootstrapMethod: file BootstrapFile: ``` -* **`BoostrapFile`**: (default value should be overridden) Specify the location and name of the system channel genesis block to use when this node is created. +* **`BoostrapFile`**: (if you are creating this node to be joined to a system channel, the default value should be overridden) Specify the location and name of the system channel genesis block to use when this node is created. If you are creating this node without using a system channel, this value will not be used, and can therefore be left blank. 
## General.LocalMSPDir @@ -180,7 +182,7 @@ LocalMSPDir: msp LocalMSPID: SampleOrg ``` -* **`LocalMSPID`**: (default value should be overridden) The MSP ID must match the orderer organization MSP ID that exists in the configuration of the system channel. This means the MSP ID must have been listed in the `configtx.yaml` used to create the genesis block of the system channel (or have been added later to the list of system channel administrators). +* **`LocalMSPID`**: (default value should be overridden) This identifies the organization this ordering node belongs to. The MSP ID must match the orderer organization MSP ID that exists in the configuration of any channel this joined will be joined to. ## General.BCCSP.* @@ -308,6 +310,57 @@ Because Prometheus utilizes a "pull" model there is not any configuration requir * **`Provider`**: Set this value to `statsd` if using `StatsD` or `prometheus` if using `Prometheus`. * **`Statsd.Address`**: (required to use `StatsD` metrics for the ordering node) When `StatsD` is enabled, you will need to configure the `hostname` and `port` of the `StatsD` server so that the ordering node can push metric updates. +## Admin.* + +``` +Admin: + # host and port for the admin server + ListenAddress: 127.0.0.1:9443 + + # TLS configuration for the admin endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most admin service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + # + # NOTE: When TLS is enabled, the admin endpoint requires mutual TLS. The + # orderer will panic on startup if this value is set to false. 
+ ClientAuthRequired: true + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] +``` + +* **`ListenAddress`**: The orderer admin server address (host and port) that can be used by the `osnadmin` command to configure channels on the ordering service. This value should be a unique `host:port` combination to avoid conflicts. +* **`TLS.Enabled`**: Technically this can be set to `false`, but this is not recommended. In general, you should always set this value to `true`. +* **`TLS.Certificate`**: The path to and file name of the orderer signed certificate issued by the TLS CA. +* **`TLS.PrivateKey`**: The path to and file name of the orderer private key issued by the TLS CA. +* **`TLS.ClientAuthRequired`**: This value must be set to `true`. Note that while mutual TLS is required for all operations on the orderer `Admin` endpoint, the entire network is not required to use mutual TLS. +* **`TLS.ClientRootCAs`**: The path to and file name of the admin client TLS CA root certificate. + +## ChannelParticipation.* + +``` +ChannelParticipation: + # Channel participation API is enabled. + Enabled: false + + # The maximum size of the request body when joining a channel. + MaxRequestBodySize: 1 MB +``` + +* **`Enabled`**: If you are bootstrapping the ordering node with a system channel genesis block, this value can be set to either `true` or `false` (setting the value to `true` allows you to list channels and to migrate away from the system channel in the future). If you are **not** bootstrapping the ordering node with a system channel genesis block, this value must be set to `true` and the [`General.BoostrapMethod`](#general-boostrapmethod) should be set to `none`. +* **`MaxRequestBodySize`**: (default value should not be overridden) This value controls the maximum size a configuration block can be and be accepted by this ordering node. 
Most configuration blocks are smaller than 1 MB, but if for some reason a configuration block is too large to be accepted, bring down the node, increase this value, and restart the node.
+ +For information on how to create an orderer that will be bootstrapped with a system channel genesis block, check out [Deploy the ordering service](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployorderer/ordererdeploy.html) from the Fabric v2.2 documentation. ## Download the ordering service binary and configuration files @@ -88,81 +90,6 @@ Similarly, you need to point to the [local MSP of your orderer](https://hyperled Note that the local MSP contains the signed certificate (public key) and the private key for the orderer. The private key is used by the node to sign transactions, and is therefore not shared and must be secured. For maximum security, a Hardware Security Module (HSM) can be configured to generate and store this private key. -### Create the ordering service genesis block - -The first channel that is created in a Fabric network is the "system" channel. The system channel defines the set of ordering nodes that form the ordering service and the set of organizations that serve as ordering service administrators. Peers transact on private "application" channels that are derived from the ordering service system channel, which also defines the "consortium" (the peer organizations known to the ordering service). Therefore, before you can deploy an ordering service, you need to generate the system channel configuration by creating the system channel "genesis block" using a tool called `configtxgen`. We'll then use the generated system channel genesis block to bootstrap each ordering node. - -#### Set up the `configtxgen` tool - -While it is possible to build the channel creation transaction file manually, it is easier to use the [configtxgen](../commands/configtxgen.html) tool, which works by reading a `configtx.yaml` file that defines the configuration of your channel and then writing the relevant information into a configuration block known as the "genesis block". 
- -Notice that the `configtxgen` tool is located in the `bin` folder of downloaded Fabric binaries. - -Before using `configtxgen`, confirm you have set the `FABRIC_CFG_PATH` environment variable to the path of the directory that contains your local copy of the `configtx.yaml` file. You can verify that are able to use the tool by printing the `configtxgen` help text: - -``` -configtxgen --help -``` - -#### The `configtx.yaml` file - -The `configtx.yaml` file is used to specify the **channel configuration** of the system channel and application channels. The information that is required to build the channel configuration is specified in a readable and editable form in the `configtx.yaml` file. The `configtxgen` tool uses the channel profiles that are defined in `configtx.yaml` to create the channel configuration block in the [protobuf format](https://developers.google.com/protocol-buffers). - -The `configtx.yaml` file is located in the `config` folder alongside the images that you downloaded and contains the following configuration sections that we need to create our new channel: - -- **Organizations:** The organizations that can become members of your channel. Each organization has a reference to the cryptographic material that is used to build the [channel MSP](../membership/membership.html). -- **Orderer:** Which ordering nodes will form the Raft consenter set of the channel. -- **Policies** Different sections of the file work together to define the channel policies that will govern how organizations interact with the channel and which organizations need to approve channel updates. For the purposes of this tutorial, we will use the defaults that are used by Fabric. For more information about policies, check out [Policies](../policies/policies.html). -- **Profiles** Each channel profile references information from other sections of the `configtx.yaml` file to build a channel configuration. The profiles are used to create the genesis block of the channel. 
- -The `configtxgen` tool uses `configtx.yaml` file to create the genesis block for the channel. A detailed version of the `configtx.yaml` file is available in the [Fabric sample configuration](https://github.com/hyperledger/fabric/blob/{BRANCH}/sampleconfig/configtx.yaml). Refer to the [Using configtx.yaml to build a channel configuration](../create_channel/create_channel_config.html) tutorial to learn more about the settings in this file. - -#### Generate the system channel genesis block - -The first channel that is created in a Fabric network is the system channel. The system channel defines the set of ordering nodes that form the ordering service and the set of organizations that serve as ordering service administrators. The system channel also includes the organizations that are members of blockchain [consortium](../glossary.html#consortium). The consortium is a set of peer organizations that belong to the system channel, but are not administrators of the ordering service. Consortium members have the ability to create new channels and include other consortium organizations as channel members. - -The genesis block of the system channel is required to deploy a new ordering service. A good example of a system channel profile can be found in the [test network configtx.yaml](https://github.com/hyperledger/fabric-samples/blob/master/test-network/configtx/configtx.yaml#L319) which includes the `TwoOrgsOrdererGenesis` profile as shown below: - -```yaml -TwoOrgsOrdererGenesis: - <<: *ChannelDefaults - Orderer: - <<: *OrdererDefaults - Organizations: - - *OrdererOrg - Capabilities: - <<: *OrdererCapabilities - Consortiums: - SampleConsortium: - Organizations: - - *Org1 - - *Org2 -``` - -The `Orderer:` section of the profile defines the Raft ordering service, with the `OrdererOrg` as the ordering service administrator. The `Consortiums` section of the profile creates a consortium of peer organizations named `SampleConsortium:`. 
For a production deployment, it is recommended that the peer and ordering nodes belong to separate organizations. This example uses peer organizations `Org1` and `Org2`. You will want to customize this section by providing your own consortium name and replacing `Org1` and `Org2` with the names of your peer organizations. If they are unknown at this time, you do not have to list any organizations under `Consortiums.SampleConsortium.Organizations`. Adding the peer organizations now saves the effort of a channel configuration update later. If you do add them, don't forget to define the peer organizations in the `Organizations:` section at the top of the `configtx.yaml` file as well. Notice this profile is missing an `Application:` section. You will need to create the application channels after you deploy the ordering service. - -After you have completed editing the `configtx.yaml` to reflect the orderer and peer organizations that will participate in your network, run the following command to create the genesis block of the system channel: -``` -configtxgen -profile TwoOrgsOrdererGenesis -channelID system-channel -outputBlock ./system-genesis-block/genesis.block -``` - -Where: -- `-profile` refers to the `TwoOrgsOrdererGenesis` profile in `configtx.yaml`. -- `-channelID` is the name of the system channel. In this tutorial, the system channel is named `system-channel`. -- `-outputBlock` refers to the location of the generated genesis block. - -When the command is successful, you will see logs of `configtxgen` loading the `configtx.yaml` file and printing a channel creation transaction: -``` -INFO 001 Loading configuration -INFO 002 Loaded configuration: /Usrs/fabric-samples/test-network/configtx/configtx.yaml -INFO 003 Generating new channel configtx -INFO 004 Generating genesis block -INFO 005 Creating system channel genesis block -INFO 006 Writing genesis block -``` - -Make note of the generated output block filename. 
This is your genesis block and will be referenced in the `orderer.yaml` file below. - ### Storage You must provision persistent storage for your ledgers. The default location for the ledger is located at `/var/hyperledger/production/orderer`. Ensure that your orderer has write access to the folder. If you choose to use a different location, provide that path in the `FileLedger:` parameter in the `orderer.yaml` file. If you decide to use Kubernetes or Docker, recall that in a containerized environment, local storage disappears when the container goes away, so you will need to provision or mount persistent storage for the ledger before you deploy an orderer. @@ -178,11 +105,20 @@ At a minimum, you need to configure the following parameters: - `General.TLS.PrivateKey ` - Ordering node private key from TLS CA. - `General.TLS.Certificate ` - Ordering node signed certificate (public key) from the TLS CA. - `General.TLS.RootCAS` - This value should be unset. -- `General.BoostrapMethod:file` - Start ordering service with a system channel. -- `General.BootstrapFile` - Path to and name of the genesis block file for the ordering service system channel. +- `General.BootstrapMethod:none` - This allows the orderer to start without needing a system channel configuration block. - `General.LocalMSPDir` - Path to the ordering node MSP folder. - `General.LocalMSPID` - MSP ID of the ordering organization as specified in the channel configuration. -- `FileLedger.Location` - Location of the orderer ledger on the file system. +- `FileLedger.Location` - Location on the file system of the ledgers of the channels this orderer will be servicing. +- `ChannelParticipation.Enabled` - Set to `true`. This allows the orderer to be joined to an application channel without joining a system channel first. 
+ +Because this tutorial assumes that a system channel genesis block will not be used when bootstrapping the orderer, the following additional parameters are required if you want to create an application channel with the `osnadmin` command. + +- `Admin.ListenAddress` - The orderer admin server address (host and port) that can be used by the `osnadmin` command to configure channels on the ordering service. This value should be a unique `host:port` combination to avoid conflicts. +- `Admin.TLS.Enabled:` - Technically this can be set to `false`, but this is not recommended. In general, you should always set this value to `true`. +- `Admin.TLS.PrivateKey:` - The path to and file name of the orderer private key issued by the TLS CA. +- `Admin.TLS.Certificate:` - The path to and file name of the orderer signed certificate issued by the TLS CA. +- `Admin.TLS.ClientAuthRequired:` This value must be set to `true`. Note that while mutual TLS is required for all operations on the orderer Admin endpoint, the entire network is not required to use Mutual TLS. +- `Admin.TLS.ClientRootCAs:` - The path to and file name of the admin client TLS CA Root certificate. In the folder structure above, this is `admin-client/client-tls-ca-cert.pem`. ## Start the orderer @@ -201,24 +137,17 @@ cd bin When the orderer starts successfully, you should see a message similar to: ``` -INFO 019 Starting orderer: -INFO 01a Beginning to serve requests +INFO 01d Registrar initializing without a system channel, number of application channels: 0, with 0 consensus.Chain(s) and 0 follower.Chain(s) +INFO 01f Starting orderer: ``` -You have successfully started one node, you now need to repeat these steps to configure and start the other two orderers. When a majority of orderers are started, a Raft leader is elected. 
You should see something similar to the following output: -``` -INFO 01b Applied config change to add node 1, current nodes in channel: [1] channel=syschannel node=1 -INFO 01c Applied config change to add node 2, current nodes in channel: [1 2] channel=syschannel node=1 -INFO 01d Applied config change to add node 3, current nodes in channel: [1 2 3] channel=syschannel node=1 -INFO 01e raft.node: 1 elected leader 2 at term 11 channel=syschannel node=1 -INFO 01f Raft leader changed: 0 -> 2 channel=syschannel node=1 -``` +You have successfully started one node, you now need to repeat these steps to configure and start the other two orderers. ## Next steps Your ordering service is started and ready to order transactions into blocks. Depending on your use case, you may need to add or remove orderers from the consenter set, or other organizations may want to contribute their own orderers to the cluster. See the topic on ordering service [reconfiguration](../raft_configuration.html#reconfiguration) for considerations and instructions. -Finally, your system channel includes a consortium of peer organizations as defined in the `Organization` section of the channel configuration. This list of peer organizations are allowed to create channels on your ordering service. You need to use the `configtxgen` command and the `configtx.yaml` file to create an application channel. Refer to the [Creating a channel](../create_channel/create_channel.html#creating-an-application-channel) tutorial for more details. +Once your nodes have been created, you are ready to join them to a channel. Check out [Create a channel](../create_channel/create_channel_participation.html) for more information. ## Troubleshooting @@ -244,36 +173,6 @@ PANI 003 Failed to setup local msp with config: administrators must be declared Your local MSP definition is missing the `config.yaml` file. Create the file and copy it into the local MSP `/msp` folder of orderer. 
See the [Fabric CA](https://hyperledger-fabric-ca.readthedocs.io/en/release-1.4/deployguide/use_CA.html#nodeous) documentation for more instructions. - -### When you start the orderer, it fails with the following error: -``` -PANI 005 Failed validating bootstrap block: initializing channelconfig failed: could not create channel Orderer sub-group config: setting up the MSP manager failed: administrators must be declared when no admin ou classification is set -``` - -**Solution:** - -The system channel configuration is missing `config.yaml` file. If you are creating a new ordering service, the `MSPDir` referenced in `configtx.yaml` file is missing the `config.yaml` file. Follow instructions in the [Fabric CA](https://hyperledger-fabric-ca.readthedocs.io/en/release-1.4/deployguide/use_CA.html#nodeous) documentation to generate this file and then rerun `configtxgen` to regenerate the genesis block for the system channel. -``` -# MSPDir is the filesystem path which contains the MSP configuration. - MSPDir: ../config/organizations/ordererOrganizations/ordererOrg1.example.com/msp -``` -Before you restart the orderer, delete the existing channel ledger files that are stored in the `FileLedger.Location` setting of the `orderer.yaml` file. - - -### When you start the orderer, it fails with the following error: -``` -PANI 004 Failed validating bootstrap block: the block isn't a system channel block because it lacks ConsortiumsConfig -``` -**Solution:** - -Your channel configuration is missing the consortium definition. If you are starting a new ordering service, edit the `configtx.yaml` file `Profiles:` section and add the consortium definition: -``` -Consortiums: - SampleConsortium: - Organizations: -``` -The `Consortiums:` section is required but can be empty, as shown above, if the peer organizations are not yet known. 
Rerun `configtxgen` to regenerate the genesis block for the system channel and then before you start the orderer, delete the existing channel ledger files that are stored in the `FileLedger.Location` setting of the `orderer.yaml` file. - ### When you start the orderer, it fails with the following error: ``` PANI 27c Failed creating a block puller: client certificate isn't in PEM format: channel=mychannel node=3 @@ -283,14 +182,5 @@ PANI 27c Failed creating a block puller: client certificate isn't in PEM format: Your `orderer.yaml` file is missing the `General.Cluster.ClientCertificate` and `General.Cluster.ClientPrivateKey` definitions. Provide the path to and filename of the public certificate (also known as a signed certificate) and private key generated by your TLS CA for the orderer in these two fields and restart the node. -### When you start the orderer, it fails with the following error: -``` -ServerHandshake -> ERRO 025 TLS handshake failed with error remote error: tls: bad certificate server=Orderer remoteaddress=192.168.1.134:52413 -``` - -**Solution:** - -This error can occur when the `tlscacerts` folder is missing from the orderer organization MSP folder specified in the channel configuration. Create the `tlscacerts` folder inside your MSP definition and insert the root certificate from your TLS CA (`ca-cert.pem`). Rerun `configtxgen` to regenerate the genesis block for the system channel so that the channel configuration includes this certificate. Before you start the orderer again, delete the existing channel ledger files that are stored in the `FileLedger.Location` setting of the `orderer.yaml` file. 
- diff --git a/docs/source/deployorderer/ordererplan.md b/docs/source/deployorderer/ordererplan.md index 23856543f75..aab578f12a9 100644 --- a/docs/source/deployorderer/ordererplan.md +++ b/docs/source/deployorderer/ordererplan.md @@ -8,6 +8,8 @@ In a Hyperledger Fabric network, a node or collection of nodes together form wha Whereas Fabric networks that will only be used for testing and development purposes (such as our [test network](../test_network.html)) often feature an ordering service made up of only one node (these nodes are typically referred to as "orderers" or "ordering nodes"), production networks require a more robust deployment of at least three nodes. For this reason, our deployment guide will feature instructions on how to create a three-node ordering service. For more guidance on the number of nodes you should deploy, check out [Cluster considerations](#cluster-considerations). +This tutorial assumes that a system channel genesis block will not be used when bootstrapping the orderer. Instead, these nodes (or a subset of them), will be joined to a channel using the process to [Create a channel](../create_channel/create_channel_participation.html). For information on how to create an orderer that will be bootstrapped with a system channel genesis block, check out [Deploy the ordering service](https://hyperledger-fabric.readthedocs.io/en/release-2.2/deployorderer/ordererdeploy.html) from the Fabric v2.2 documentation. + ## Generate ordering node identities and Membership Service Providers (MSPs) Before proceeding with this topic, you should have reviewed the process for a Deploying a Certificate Authority (CA) for your organization in order to generate the identities and MSPs for the admins and ordering nodes in your organization. To learn how to use a CA to create these identities, check out [Registering and enrolling identities with a CA](https://hyperledger-fabric-ca.readthedocs.io/en/release-1.4/deployguide/use_CA.html). 
Note that the best practice is to register and enroll a separate node identity for each ordering node and to use distinct TLS certificates for each node. @@ -28,31 +30,9 @@ While it is possible to use a non-Fabric CA to generate identities, this process To prevent “man in the middle” attacks and otherwise secure communications, the use of TLS is a requirement for any production network. Therefore, in addition to registering your ordering nodes identities with your organization CA, you will also need to create certificates for your ordering nodes with the TLS CA of your organization. These TLS certificates will be used by the ordering nodes when communicating with the network. -## Creating the system channel genesis block - -Note: “consenters” refers to the nodes servicing a particular channel at a particular time. For each channel, the “consenters” may be a subset of the ordering nodes available in the system channel. - -Every ordering node must be bootstrapped with a configuration block from the system channel (either the system channel "genesis block" or a later configuration block). This guide will assume you are creating a new ordering service and will therefore bootstrap ordering nodes from a system channel genesis block. - -This “system channel” is a special channel run by the ordering service and contains, among other things, the list of peer organizations that are allowed to create application channels (this list is known as the “consortium”). Although this system channel cannot be joined by peers or peer organizations (and thus, no transactions other than configuration transactions can be made on it), it does contain many of the same configuration parameters that application channels contain. Because application channels inherit these configuration values by default unless they are changed during the channel creation process, take care when creating your system channel genesis block to keep the use case of your network in mind. 
- -If you’re creating an ordering service, you must create this system channel genesis block by specifying the necessary parameters in `configtx.yaml` and using the `configtxgen` tool to create the block. - -If you are adding a node to the system channel, the best practice is to bootstrap using the latest configuration block of the system channel. Similarly, an ordering node added to the consenter of an application channel will be boostrapped using the latest configuration block of that channel. - -Note that the `configtx.yaml` that is shipped with Fabric binaries is identical to the [sample `configtx.yaml` found here](https://github.com/hyperledger/fabric/blob/master/sampleconfig/configtx.yaml), and contains the same channel "profiles" that are used to specify particular desired policies and parameters (for example, it can be used to specify which ordering nodes that are consenters in the system channel will be used in an application channel). When creating a channel (whether for an orderer system channel or an application channel), you specify a particular profile by name in your channel creation command, and that profile, along with the other parameters specified in `configtx.yaml`, are used to build the configuration block. - -You will likely have to modify one of these profiles in order to create your system channel and to create your application channels (if nothing else, you are likely to have to modify the sample organization names). Note that to create a Raft ordering service, you will have to specify an `OrdererType` of `etcdraft`. - -Check out the [tutorial on creating a channel](../create_channel/create_channel.html#the-orderer-system-channel) for more information on how to create a system channel genesis block and application channels. - -### Creating profiles for application channels - -Both the system and all application channels are built using a `configtx.yaml` file. 
Therefore, when editing your `configtx.yaml` to create the genesis block for your system channel, you can also add profiles for any application channels that will be created on this network. However, note that while you can define any set of consenters for each channel, **every consenter added to an application channel must first be a part of the system channel**. You cannot specify a consenter that is not a part of the system channel. Also, it is not possible to control the leader of the consenter set. Leaders are chosen by the `etcdraft` protocol used by the ordering nodes. - ## Sizing your ordering node resources -Because ordering nodes do not host a state database or chaincode, an ordering node will typically only have a single container associated with it. Like the “peer container” associated with the peer, this container encapsulates the ordering process that orders transactions into blocks for all channels on which the ordering node is a consenter (ordering nodes also validate actions in particular cases). The ordering node storage includes the blockchain for all of channels on which the node is a consenter. +Because ordering nodes do not host a state database or chaincode, an ordering node will typically only have a single container associated with it. Like the “peer container” associated with the peer, this container encapsulates the ordering process that orders transactions into blocks for all channels on which the ordering node is a consenter (ordering nodes also validate actions in particular cases). The ordering node storage includes the blockchain for all of the channels on which the node is a consenter. Note that, at a logical level, every “consenter set” for each channel is a separate ordering service, in which “alive” messages and other communications are duplicated. This affects the CPU and memory required for each node. Similarly, there is a direct relationship between the size of a consenter set and the amount of resources each node will need. 
This is because in a Raft ordering service, the nodes do not collaborate in ordering transactions. One node, a "leader" elected by the other nodes, performs all ordering and validation functions, and then replicates decisions to the other nodes. As a result, as consenter sets increase in size, there is more traffic and burden on the leader node and more communications across the consenter set. diff --git a/docs/source/deploypeer/peerchecklist.md b/docs/source/deploypeer/peerchecklist.md index 121d972d9b6..6c7180abde8 100644 --- a/docs/source/deploypeer/peerchecklist.md +++ b/docs/source/deploypeer/peerchecklist.md @@ -112,7 +112,7 @@ localMspId: SampleOrg # modification that might corrupt the peer operations. fileSystemPath: /var/hyperledger/production ``` -- **`fileSystemPath`**: (Default value should be overridden.) This is the path to the ledger and installed chaincodes on the local filesystem of the peer. It can be an absolute path or relative to `FABRIC_CFG_PATH`. It defaults to `/var/hyperledger/production`. The user running the peer needs to own and have write access to this directory. **The best practice is to store this data in persistent storage**. This prevents the ledger and any installed chaincodes from being lost if your peer containers are destroyed for some reason. +- **`fileSystemPath`**: (Default value should be overridden.) This is the path to the ledger and installed chaincodes on the local filesystem of the peer. It can be an absolute path or relative to `FABRIC_CFG_PATH`. It defaults to `/var/hyperledger/production`. The user running the peer needs to own and have write access to this directory. **The best practice is to store this data in persistent storage**. This prevents the ledger and any installed chaincodes from being lost if your peer containers are destroyed for some reason. Note that ledger snapshots will be written to `ledger.snapshots.rootDir`, described in the [ledger.* section](#ledger). 
## peer.gossip.* @@ -384,7 +384,7 @@ In the unlikely case where two peers are running on the same node, you need to m - **`operations.listenAddress:`** (Required when using the operations service.) Specify the address and port of the operations server. - **`operations.tls.cert.file*:`** (Required when using the operations service). Can be the same file as the `peer.tls.cert.file`. - **`operations.tls.key.file*:`** (Required when using the operations service). Can be the same file as the `peer.tls.key.file`. -- **`operations.tls.clientAuthRequired*:`** (Required when using the operations service). Must be set tp `true` to enable mutual TLS between the client and the server. +- **`operations.tls.clientAuthRequired*:`** (Required when using the operations service). Must be set to `true` to enable mutual TLS between the client and the server. - **`operations.tls.clientRootCAs.files*:`** (Required when using the operations service). Similar to the [peer.tls.clientRootCAs.files](#tls), it contains a list of client root CA certificates that can be used to verify client certificates. If the client enrolled with the peer organization CA, then this value is the peer organization root CA cert. ## metrics.* diff --git a/docs/source/deploypeer/peerdeploy.md b/docs/source/deploypeer/peerdeploy.md index 69744df7a3f..b240938e54e 100644 --- a/docs/source/deploypeer/peerdeploy.md +++ b/docs/source/deploypeer/peerdeploy.md @@ -80,7 +80,13 @@ Note that the local MSP contains the signed certificate (public key) and the pri ### Storage -You must provision persistent storage for your ledger. If you are not using an external chaincode builder and launcher, you should factor in storage for that as well. The default location for the ledger is located at `/var/hyperledger/production`. Ensure that your peer has write access to the folder. If you choose to use a different location, provide that path in the `peer.fileSystemPath` parameter in the `core.yaml` file. 
If you decide to use Kubernetes or Docker, recall that in a containerized environment local storage disappears when the container goes away, so you will need to provision or mount persistent storage for the ledger before you deploy a peer. +You must provision persistent storage for your ledger files. The following properties in `core.yaml` dictates where ledger files and snapshots are written: +* `peer.fileSystemPath` - defaults to `/var/hyperledger/production` +* `ledger.snapshots.rootDir` - defaults to `/var/hyperledger/production/snapshots` + +Ensure that your peer has write access to these directories. + +If you decide to use Kubernetes or Docker, recall that in a containerized environment local storage disappears when the container goes away, so you will need to provision or mount persistent storage for the ledger before you deploy a peer. ### Configuration of `core.yaml` diff --git a/docs/source/dev-setup/devenv.rst b/docs/source/dev-setup/devenv.rst index 57b984f173d..7f3b2b759ce 100644 --- a/docs/source/dev-setup/devenv.rst +++ b/docs/source/dev-setup/devenv.rst @@ -5,7 +5,7 @@ Prerequisites ~~~~~~~~~~~~~ - `Git client `__ -- `Go `__ version 1.14.x +- `Go `__ version 1.15.x - `Docker `__ version 18.03 or later - (macOS) `Xcode Command Line Tools `__ - `SoftHSM `__ @@ -153,7 +153,7 @@ few commands. If those commands completely successfully, you're ready to Go! -If you plan to use the Hyperledger Fabric application SDKs then be sure to check out their prerequisites in the Node.js SDK `README `__ and Java SDK `README `__. +If you plan to use the Hyperledger Fabric application SDKs then be sure to check out their prerequisites in the Node.js SDK `README `__, Java SDK `README `__, and Go SDK `README `__. .. 
Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ diff --git a/docs/source/developapps/analysis.md b/docs/source/developapps/analysis.md index 74f7a58fd97..fc0ef08cbe2 100644 --- a/docs/source/developapps/analysis.md +++ b/docs/source/developapps/analysis.md @@ -215,12 +215,12 @@ The required sign-offs on transactions are enforced through rules, which are evaluated before appending a transaction to the ledger. Only if the required signatures are present, Fabric will accept a transaction as valid. -You're now in a great place translate these ideas into a smart contract. Don't +You're now in a great place to translate these ideas into a smart contract. Don't worry if your programming is a little rusty, we'll provide tips and pointers to understand the program code. Mastering the commercial paper smart contract is the first big step towards designing your own application. Or, if you're a business analyst who's comfortable with a little programming, don't be afraid to -keep dig a little deeper! +dig a little deeper! diff --git a/docs/source/developapps/chaincodenamespace.md b/docs/source/developapps/chaincodenamespace.md index c79207d325a..c1b5a8bb2ab 100644 --- a/docs/source/developapps/chaincodenamespace.md +++ b/docs/source/developapps/chaincodenamespace.md @@ -182,18 +182,18 @@ See how the application: * Submits a `redeem` transaction to the network for commercial paper `PAP21` using the `yenPaper` contract. See interaction point **2a**. This results in - the creation of a commercial paper represented by state `PAP21` in `world + the redemption of a commercial paper represented by state `PAP21` in `world state A`; interaction point **2b**. This operation is captured as a transaction in the blockchain at interaction point **2c**. * Submits a `buy` transaction to the network for bond `BON31` using the `euroBond` contract. See interaction point **3a**. 
This results in the - creation of a bond represented by state `BON31` in `world state B`; + update of a bond represented by state `BON31` in `world state B`; interaction point **3b**. This operation is captured as a transaction in the blockchain at interaction point **3c**. * Submits a `sell` transaction to the network for bond `BON41` using the - `yenBond` contract. See interaction point **4a**. This results in the creation + `yenBond` contract. See interaction point **4a**. This results in the update of a bond represented by state `BON41` in `world state B`; interaction point **4b**. This operation is captured as a transaction in the blockchain at interaction point **4c**. diff --git a/docs/source/developapps/connectionprofile.md b/docs/source/developapps/connectionprofile.md index 8bc816b6db6..58ce3fa9e3c 100644 --- a/docs/source/developapps/connectionprofile.md +++ b/docs/source/developapps/connectionprofile.md @@ -209,7 +209,7 @@ about it: application operations relate to organizations rather than channels. For example, an application can request notification from one or all peers within its organization, or all organizations within the network -- using [connection - options](./connectoptions.html). For this, there needs to be an organization + options](./connectionoptions.html). For this, there needs to be an organization to peer mapping, and this section provides it. * Line 101: `MagnetoCorp:` diff --git a/docs/source/developapps/gateway.md b/docs/source/developapps/gateway.md index 88737d06db6..cfaf8bf3f82 100644 --- a/docs/source/developapps/gateway.md +++ b/docs/source/developapps/gateway.md @@ -39,7 +39,7 @@ A gateway can be used by an application in two different ways: these roles in the connection profile [topic](./connectionprofile.html). 
The SDK will use this static topology, in conjunction with gateway - [connection options](./connectionoptions), to manage the transaction + [connection options](connectionoptions.html), to manage the transaction submission and notification processes. The connection profile must contain enough of the network topology to allow a gateway to interact with the network on behalf of the application; this includes the network channels, diff --git a/docs/source/developapps/scenario.md b/docs/source/developapps/scenario.md index a338e64b426..176614764f8 100644 --- a/docs/source/developapps/scenario.md +++ b/docs/source/developapps/scenario.md @@ -9,6 +9,17 @@ Fabric, to issue, buy and redeem commercial paper. We're going to use the scenario to outline requirements for the development of commercial paper applications and smart contracts used by the participant organizations. +## What is commercial paper? + +Commercial paper is a commonly used type of unsecured, short-term debt instrument +issued by corporations, typically used for the financing of payroll, accounts +payable and inventories, and meeting other short-term liabilities. Maturities on +commercial paper typically last several days, and rarely range longer than 270 days. +The face value of the commercial paper is the value the issuing corporation would be +paying the redeemer of the paper upon maturity. While buying the paper, the lender +buys it for a price less than the face value. The difference between the face value and +the price the lender bought the paper for is the profit made by the lender. + ## PaperNet network PaperNet is a commercial paper network that allows suitably authorized diff --git a/docs/source/developapps/transactionhandler.md b/docs/source/developapps/transactionhandler.md index f8d3a4a9e5d..cad1cc91c9a 100644 --- a/docs/source/developapps/transactionhandler.md +++ b/docs/source/developapps/transactionhandler.md @@ -85,7 +85,7 @@ shows you the exact form of these handlers.
Once a handler has been added to the smart contract, it will be invoked during transaction processing. During processing, the handler receives `ctx`, the -[transaction context](./transationcontext.md), performs some processing, and +[transaction context](transationcontext.html), performs some processing, and returns control as it completes. Processing continues as follows: * **Before handler**: If the handler completes successfully, the transaction is diff --git a/docs/source/developapps/wallet.md b/docs/source/developapps/wallet.md index c2b692705f2..1aa60440df7 100644 --- a/docs/source/developapps/wallet.md +++ b/docs/source/developapps/wallet.md @@ -144,7 +144,7 @@ particular `identityLabel`. The `Gateway` class only requires the `mspId` and `type` metadata to be set for an identity -- `Org1MSP` and `X.509` in the above example. It *currently* uses the MSP ID value to identify particular peers from a [connection profile](./connectionprofile.html), -for example when a specific notification [strategy](./connectoptions.html) is +for example when a specific notification [strategy](./connectionoptions.html) is requested. In the DigiBank gateway file `networkConnection.yaml`, see how `Org1MSP` notifications will be associated with `peer0.org1.example.com`: diff --git a/docs/source/diagrams/diagrams.pptx b/docs/source/diagrams/diagrams.pptx index 76de835cb87..ffab70b2341 100644 Binary files a/docs/source/diagrams/diagrams.pptx and b/docs/source/diagrams/diagrams.pptx differ diff --git a/docs/source/docs_guide.md b/docs/source/docs_guide.md index 3f824fcc2b4..aa7677fa60b 100644 --- a/docs/source/docs_guide.md +++ b/docs/source/docs_guide.md @@ -205,13 +205,14 @@ your GitHub account. 
For International Languages (Malayalam as an example): ```bash git clone git@github.com:hyperledger/fabric-docs-i18n.git - cd fabric + cd fabric-docs-i18n make docs-lang-ml_IN ``` - The `make` command generates documentation html files in the `build/html/` + The `make` command generates documentation html files in the `docs/build/html/` folder which you can now view locally; simply navigate your browser to the - `build/html/index.html` file. + `docs/build/html/index.html` file. For International Languages, you need to read `docs/build/html/` as + `docs/locale/${LANG_CODE}/_build/html/` (e.g., `docs/locale/ml_IN/_build/html/`). 4. Now make a small change to a file, and rebuild the documentation to verify that your change was as expected. Every time you make a change to the @@ -222,7 +223,7 @@ your GitHub account. ```bash sudo apt-get install apache2 - cd build/html + cd docs/build/html sudo cp -r * /var/www/html/ ``` diff --git a/docs/source/enable_cc_lifecycle.md b/docs/source/enable_cc_lifecycle.md index 770d1e7c5bb..dae3329adf7 100644 --- a/docs/source/enable_cc_lifecycle.md +++ b/docs/source/enable_cc_lifecycle.md @@ -12,6 +12,8 @@ Updating a channel configuration is, at a high level, a three step process (for We will be performing these channel configuration updates by leveraging a file called `enable_lifecycle.json`, which contains all of the updates we will be making in the channel configurations. Note that in a production setting it is likely that multiple users would be making these channel update requests. However, for the sake of simplicity, we are presenting all of the updates as how they would appear in a single file. +Note: this topic describes a network that does not use a "system channel", a channel that the ordering service is bootstrapped with and the ordering service exclusively controls. 
Since the release of v2.3, using a system channel is now considered the legacy process as compared to the new process to [Create a channel](./create_channel/create_channel_participation.html) without a system channel. For a version of this topic that includes information about the system channel, check out [Enabling the new chaincode lifecycle](https://hyperledger-fabric.readthedocs.io/en/release-2.2/enable_cc_lifecycle.html) from the v2.2 documentation. + ## Create `enable_lifecycle.json` Note that in addition to using `enable_lifecycle.json`, this tutorial also uses `jq` to apply the edits to the modified config file. The modified config can also be edited manually (after it has been pulled, translated, and scoped). Check out this [sample channel configuration](./config_update.html#sample-channel-configuration) for reference. @@ -129,42 +131,13 @@ Note that the `enable_lifecycle.json` uses sample values, for example `org1Polic ## Edit the channel configurations -### System channel updates - -Because configuration changes to the system channel to enable the new lifecycle only involve parameters inside the configuration of the peer organizations within the channel configuration, each peer organization being edited will have to sign the relevant channel configuration update. - -However, by default, the system channel can only be edited by system channel admins (typically these are admins of the ordering service organizations and not peer organizations), which means that the configuration updates to the peer organizations in the consortium will have to be proposed by a system channel admin and sent to the relevant peer organization to be signed. - -You will need to export the following variables: - -* `CH_NAME`: the name of the system channel being updated. -* `CORE_PEER_LOCALMSPID`: the MSP ID of the organization proposing the channel update. This will be the MSP of one of the ordering service organizations. 
-* `CORE_PEER_MSPCONFIGPATH`: the absolute path to the MSP representing your organization. -* `TLS_ROOT_CA`: the absolute path to the root CA certificate of the organization proposing the system channel update. -* `ORDERER_CONTAINER`: the name of an ordering node container. When targeting the ordering service, you can target any particular node in the ordering service. Your requests will be forwarded to the leader automatically. -* `ORGNAME`: the name of the organization you are currently updating. -* `CONSORTIUM_NAME`: the name of the consortium being updated. - -Once you have set the environment variables, navigate to [Step 1: Pull and translate the config](./config_update.html#step-1-pull-and-translate-the-config). - -Then, add the lifecycle organization policy (as listed in `enable_lifecycle.json`) to a file called `modified_config.json` using this command: - -``` -jq -s ".[0] * {\"channel_group\":{\"groups\":{\"Consortiums\":{\"groups\": {\"$CONSORTIUM_NAME\": {\"groups\": {\"$ORGNAME\": {\"policies\": .[1].${ORGNAME}Policies}}}}}}}}" config.json ./enable_lifecycle.json > modified_config.json -``` - -Then, follow the steps at [Step 3: Re-encode and submit the config](./config_update.html#step-3-re-encode-and-submit-the-config). - -As stated above, these changes will have to be proposed by a system channel admin and sent to the relevant peer organization for signature. - -### Application channel updates +To fully enable the new chaincode lifecycle, you must first edit the configuration of your own organization as it exists in a channel configuration, and then you must update the channel itself to include a default endorsement policy for the channel. You can then optionally update your channel access control list. -#### Edit the peer organizations +Note: this topic leverages the instructions on how to update a channel configuration that are found in the [Updating a channel configuration](./config_update.html) tutorial. 
The environment variables listed here work in conjunction with those commands to update your channels. -We need to perform a similar set of edits to all of the organizations on all -application channels. +### Edit the peer organizations -Note that unlike the system channel, peer organizations are able to make configuration update requests to application channels. If you are making a configuration change to your own organization, you will be able to make these changes without needing the signature of other organizations. However, if you are attempting to make a change to a different organization, that organization will have to approve the change. +By default, peer organizations are able to make configuration update requests to their own organization on an application channel without needing the approval of any other peer organizations. However, if you are attempting to make a change to a different organization, that organization will have to approve the change. You will need to export the following variables: @@ -185,13 +158,11 @@ jq -s ".[0] * {\"channel_group\":{\"groups\":{\"Application\": {\"groups\": {\"$ Then, follow the steps at [Step 3: Re-encode and submit the config](./config_update.html#step-3-re-encode-and-submit-the-config). -#### Edit the application channels +### Edit the application channels -After all of the application channels have been [updated to include V2_0 capabilities](./upgrade_to_newest_version.html#capabilities), -endorsement policies for the new chaincode lifecycle must be added to each -channel. +After all of the application channels have been [updated to include V2_0 capabilities](./upgrade_to_newest_version.html#capabilities), endorsement policies for the new chaincode lifecycle must be added to each channel. -You can set the same environment you set when updating the peer organizations. Note that in this case you will not be updating the configuration of an org in the configuration, so the `ORGNAME` variable will not be used. 
+You can set the same environment variables you set when updating the peer organizations. Note that in this case you will not be updating the configuration of an org in the configuration, so the `ORGNAME` variable will not be used. Once you have set the environment variables, navigate to [Step 1: Pull and translate the config](./config_update.html#step-1-pull-and-translate-the-config). diff --git a/docs/source/enable_tls.rst b/docs/source/enable_tls.rst index 57c425f4d69..12b12d25d5b 100644 --- a/docs/source/enable_tls.rst +++ b/docs/source/enable_tls.rst @@ -121,11 +121,13 @@ as well: Debugging TLS issues -------------------- -Before debugging TLS issues, it is advisable to enable ``GRPC debug`` on both the TLS client -and the server side to get additional information. To enable ``GRPC debug``, set the -environment variable ``FABRIC_LOGGING_SPEC`` to include ``grpc=debug``. For example, to -set the default logging level to ``INFO`` and the GRPC logging level to ``DEBUG``, set -the logging specification to ``grpc=debug:info``. +If you see the error message ``remote error: tls: bad certificate`` on the server side +(for example on the peer node or ordering service node when making requests from a client), +it usually means that the client is not configured to trust the signer of the server's TLS certificate. +Check the client's ``CORE_PEER_TLS_ROOTCERT_FILE`` (for connections to peer nodes) +or ``--cafile`` (for connections to orderer nodes). +The corresponding error on the client side in these cases is the handshake error ``x509: certificate signed by unknown authority`` +and ultimately connection failure with ``context deadline exceeded``. If you see the error message ``remote error: tls: bad certificate`` on the client side, it usually means that the TLS server has enabled client authentication and the server either did @@ -133,9 +135,13 @@ not receive the correct client certificate or it received a client certificate t not trust. 
Make sure the client is sending its certificate and that it has been signed by one of the CA certificates trusted by the peer or orderer node. -If you see the error message ``remote error: tls: bad certificate`` in your chaincode logs, -ensure that your chaincode has been built using the chaincode shim provided with Fabric v1.1 -or newer. +To receive additional debug information, enable ``GRPC debug`` on both the TLS client +and the server side to get additional information. To enable ``GRPC debug``, set the +environment variable ``FABRIC_LOGGING_SPEC`` to include ``grpc=debug``. For example, to +set the default logging level to ``INFO`` and the GRPC logging level to ``DEBUG``, set +the logging specification to ``grpc=debug:info``. + +You can check a TLS certificate against a trusted CA certificate by using the "openssl verify" command. .. Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ diff --git a/docs/source/endorsement-policies.rst b/docs/source/endorsement-policies.rst index efc9234df6c..4d16f2616a8 100644 --- a/docs/source/endorsement-policies.rst +++ b/docs/source/endorsement-policies.rst @@ -67,9 +67,7 @@ channel. Once the definition has been committed, the chaincode is ready to use. Any invoke of the chaincode that writes data to the ledger will need to be validated by enough channel members to meet the endorsement policy. -You can specify an endorsement policy for a chaincode using the Fabric SDKs. -For an example, visit the `How to install and start your chaincode `_ -in the Node.js SDK documentation. You can also create an endorsement policy from +You can create an endorsement policy from your CLI when you approve and commit a chaincode definition with the Fabric peer binaries by using the ``--signature-policy`` flag. @@ -120,8 +118,7 @@ the membership of the channel, so it will be updated automatically when organiza are added or removed from a channel. 
One advantage of using channel policies is that they can be written to be updated automatically with channel membership. -If you specify an endorsement policy using the ``--signature-policy`` flag or -the SDK, you will need to update the policy when organizations join or leave the +If you specify an endorsement policy using the ``--signature-policy`` flag, you will need to update the policy when organizations join or leave the channel. A new organization added to the channel after the chaincode has been defined will be able to query a chaincode (provided the query has appropriate authorization as defined by channel policies and any application level checks enforced by the diff --git a/docs/source/fabric-sdks.rst b/docs/source/fabric-sdks.rst index b52dd32a89e..e6585de92f1 100644 --- a/docs/source/fabric-sdks.rst +++ b/docs/source/fabric-sdks.rst @@ -1,12 +1,13 @@ Hyperledger Fabric SDKs ======================= -Hyperledger Fabric intends to offer a number of SDKs for a wide variety of -programming languages. The first two delivered are the Node.js and Java -SDKs. We hope to provide Python, REST and Go SDKs in a subsequent release. +Hyperledger Fabric offers a number of SDKs for a wide variety of +programming languages. The first three delivered are the Node.js, Java, and Go +SDKs. We hope to provide Python, and REST SDKs in a subsequent release. * `Hyperledger Fabric Node SDK documentation `__. * `Hyperledger Fabric Java SDK documentation `__. + * `Hyperledger Fabric Go SDK documentation `__. .. Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ diff --git a/docs/source/fabric_model.rst b/docs/source/fabric_model.rst index 61f651b3d18..11ea3fb825a 100644 --- a/docs/source/fabric_model.rst +++ b/docs/source/fabric_model.rst @@ -122,10 +122,7 @@ governed on the broader network and on channel levels. 
This "permissioned" noti of Hyperledger Fabric, coupled with the existence and capabilities of channels, helps address scenarios where privacy and confidentiality are paramount concerns. -See the :doc:`msp` topic to better understand cryptographic -implementations, and the sign, verify, authenticate approach used in -Hyperledger Fabric. - +For more information see the :doc:`security_model` topic. Consensus --------- diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst index 087396c3db8..cd7884a5724 100644 --- a/docs/source/getting_started.rst +++ b/docs/source/getting_started.rst @@ -40,14 +40,16 @@ Hyperledger Fabric offers a number of SDKs to support developing applications in * `Node.js SDK `__ and `Node.js SDK documentation `__. * `Java SDK `__ and `Java SDK documentation `__. + * `Go SDK `__ and `Go SDK documentation `__. - Prerequisites for developing with the SDKs can be found in the Node.js SDK `README `__ and Java SDK `README `__. + Prerequisites for developing with the SDKs can be found in the Node.js SDK `README `__ , + Java SDK `README `__, and + Go SDK `README `__. -In addition, there are two more application SDKs that have not yet been officially released -(for Python and Go), but they are still available for downloading and testing: +In addition, there is one other application SDK that has not yet been officially released +for Python, but is still available for downloading and testing: * `Python SDK `__. - * `Go SDK `__. Currently, Node.js, Java and Go support the new application programming model delivered in Hyperledger Fabric v1.4. 
diff --git a/docs/source/github/github.rst b/docs/source/github/github.rst index 72534199e91..409e1b06677 100644 --- a/docs/source/github/github.rst +++ b/docs/source/github/github.rst @@ -145,7 +145,8 @@ Perform the following steps to commit and push your code to your forked reposito - one line summary of the work in this commit as title, followed by an empty line - in the commit message body, explain why this change is needed, and how you approached it. This helps reviewers better understand your code and often speeds up the review process. - - link to JIRA item or JIRA number, i.e. FAB-XXXXX + - link to GitHub issue (if exists), using syntax like "Resolves #" so that the + GitHub issue automatically gets linked and closed when the PR gets merged. - (optional) if no new tests are added, how the code is tested .. code:: @@ -154,7 +155,7 @@ Perform the following steps to commit and push your code to your forked reposito .. note:: - Hyperledger requires that commits be signed by the commiter. + Hyperledger requires that commits be signed by the committer. When issuing the `commit` command, specify the `-s` flag to automatically add your signature to your commit. @@ -196,7 +197,7 @@ repository from which you created your fork and begin the code review process. - You can now choose one of two options for creating your pull request. In the green `Create Pull Request` box select the down-arrow to the right of it. - You can choose the first option to open your pull request as-is. - This will automatically assign the repostiories maintainers as reviewers for + This will automatically assign the repositories maintainers as reviewers for your pull request. - You can choose the second option to open your pull request as a draft. Opening your pull request as a draft will not assign any reviewers, but will @@ -208,7 +209,7 @@ by navigating to the `Checks` tab of the pull request. .. 
warning:: - If you bypass the perscribed pull request process and generate a pull request + If you bypass the prescribed pull request process and generate a pull request from an edit you made using GitHub's editor UI, you must manually add your signature to the commit message when the commit is generated in the UI. diff --git a/docs/source/glossary.rst b/docs/source/glossary.rst index 2a644d3b39c..1d958f417a4 100644 --- a/docs/source/glossary.rst +++ b/docs/source/glossary.rst @@ -148,8 +148,10 @@ Concurrency Control Version Check Concurrency Control Version Check is a method of keeping ledger state in sync across peers on a channel. Peers execute transactions in parallel, and before committing -to the ledger, peers check whether the state read at the time the transaction was executed -has been modified. If the data read for the transaction has changed between execution time and +to the ledger, peers check whether the state read at the time the transaction +was executed has been modified in a new block that was in-flight at time of execution +or in a prior transaction in the same block. +If the data read for the transaction has changed between execution time and commit time, then a Concurrency Control Version Check violation has occurred, and the transaction is marked as invalid on the ledger and values are not updated in the state database. @@ -590,9 +592,8 @@ cryptographic algorithms for signatures, logging frameworks and state stores, are easily swapped in and out of the SDK. The SDK provides APIs for transaction processing, membership services, node traversal and event handling. -Currently, the two officially supported SDKs are for Node.js and Java, while two -more -- Python and Go -- are not yet official but can still be downloaded -and tested. +Currently, there are three officially supported SDKs -- for Node.js, Java, and Go. The Python SDK +is not yet official, but can still be downloaded and tested. ..
_Smart-Contract: diff --git a/docs/source/hsm.md b/docs/source/hsm.md index 5a11d3dcff5..78a056a443f 100644 --- a/docs/source/hsm.md +++ b/docs/source/hsm.md @@ -62,6 +62,27 @@ bccsp: By default, when private keys are generated using the HSM, the private key is mutable, meaning PKCS11 private key attributes can be changed after the key is generated. Setting `Immutable` to `true` means that the private key attributes cannot be altered after key generation. Before you configure immutability by setting `Immutable: true`, ensure that PKCS11 object copy is supported by the HSM. +If you are using AWS HSM there is an additional step required: + +- Add the parameter, `AltID` to the `pkcs11` section of the `bccsp` block. When AWS HSM is being used, this parameter is used to assign a unique value for the Subject Key Identifier (SKI). Create a long secure string outside of Fabric and assign it to the `AltID` parameter. For example: + + ``` + ############################################################################# + # BCCSP (BlockChain Crypto Service Provider) section is used to select which + # crypto library implementation to use + ############################################################################# + bccsp: + default: PKCS11 + pkcs11: + Library: /etc/hyperledger/fabric/libsofthsm2.so + Pin: 71811222 + Label: fabric + hash: SHA2 + security: 256 + Immutable: false + AltID: 4AMfmFMtLY6B6vN3q4SQtCkCQ6UY5f6gUF3rDRE4wqD4YDUrunuZbmZpVk8zszkt86yenPBUGE2aCQCZmQFcmnj3UaxyLzfTMjCnapAe3 + ``` + You can also use environment variables to override the relevant fields of the configuration file. 
If you are connecting to softhsm2 using the Fabric CA server, you could set the following environment variables or directly set the corresponding values in the CA server config file: ``` diff --git a/docs/source/install.rst b/docs/source/install.rst index 1ddab370c10..8c49aa00688 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -47,12 +47,12 @@ the binaries and images. .. note:: If you want a specific release, pass a version identifier for Fabric and Fabric-CA docker images. The command below demonstrates how to download the latest production releases - - **Fabric v2.2.1** and **Fabric CA v1.4.9** + **Fabric v2.3.3** and **Fabric CA v1.5.2** .. code:: bash curl -sSL https://bit.ly/2ysbOFE | bash -s -- - curl -sSL https://bit.ly/2ysbOFE | bash -s -- 2.2.1 1.4.9 + curl -sSL https://bit.ly/2ysbOFE | bash -s -- 2.3.3 1.5.2 .. note:: If you get an error running the above curl command, you may have too old a version of curl that does not handle @@ -79,6 +79,7 @@ created above. It retrieves the following platform-specific binaries: * ``discover``, * ``idemixgen`` * ``orderer``, + * ``osnadmin``, * ``peer``, * ``fabric-ca-client``, * ``fabric-ca-server`` diff --git a/docs/source/international_languages.md b/docs/source/international_languages.md index 14bc5c3057a..4a0e551ec88 100644 --- a/docs/source/international_languages.md +++ b/docs/source/international_languages.md @@ -30,7 +30,7 @@ create a new language workgroup. It's much easier to translate, maintain, and manage a language repository if you collaborate with other translators. Start this process by adding a new workgroup to the [list of international -workgroups](https://wiki.hyperledger.org/display/fabric/International+groups), +workgroups](https://wiki.hyperledger.org/display/I18N/International+groups), using one of the existing workgroup pages as an exemplar. 
Document how your workgroup will collaborate; meetings, chat and mailing lists @@ -148,7 +148,7 @@ Before your new language can be published to the documentation website, you must translate the following topics. These topics help users and translators of your new language get started. -* [Fabric front page](https://hyperledger-fabric.readthedocs.io/zh_CN/{BRANCH_DOC}/) +* [Fabric front page](https://hyperledger-fabric.readthedocs.io/en/{BRANCH_DOC}/) This is your advert! Thanks to you, users can now see that the documentation is available in their language. It might not be complete yet, but its clear diff --git a/docs/source/kafka_raft_migration.md b/docs/source/kafka_raft_migration.md index 99cb2e00dc8..4e63eb4840d 100644 --- a/docs/source/kafka_raft_migration.md +++ b/docs/source/kafka_raft_migration.md @@ -185,7 +185,7 @@ channel, and elect a leader on each channel. **Note**: Since the Raft-based ordering service uses client and server TLS certificates for authentication between orderer nodes, **additional configurations** are required before you start them again, see -[Section: Local Configuration](./raft_configuration.md#local-configuration) for more details. +[Section: Local Configuration](raft_configuration.html#local-configuration) for more details. After restart process finished, make sure to **validate** that a leader has been elected on each channel by inspecting the node logs (you can see diff --git a/docs/source/key_concepts.rst b/docs/source/key_concepts.rst index 8af2c5384d3..370cba3e46e 100644 --- a/docs/source/key_concepts.rst +++ b/docs/source/key_concepts.rst @@ -17,4 +17,5 @@ Key Concepts chaincode_lifecycle.md private-data/private-data.md capabilities_concept.md + security_model.md usecases diff --git a/docs/source/logging-control.rst b/docs/source/logging-control.rst index 7c3f8e30d4d..03af6f4c587 100644 --- a/docs/source/logging-control.rst +++ b/docs/source/logging-control.rst @@ -74,6 +74,12 @@ syntax. 
Examples of specifications: warning:msp,gossip=warning:chaincode=info - Default WARNING; Override for msp, gossip, and chaincode chaincode=info:msp,gossip=warning:warning - Same as above +.. note:: Logging specification terms are separated by a colon. If a term does not include a specific logger, for example `info:` then it is applied as the default log level + across all loggers on the component. The string `info:dockercontroller,endorser,chaincode,chaincode.platform=debug` sets + the default log level to `INFO` for all loggers and then the `dockercontroller`, `endorser`, `chaincode`, and + `chaincode.platform` loggers are set to `DEBUG`. The order of the terms does not matter. In the examples above, + the second and third options produce the same result although the order of the terms is reversed. + Logging format -------------- @@ -119,4 +125,3 @@ chaincode container using standard commands for your container platform. .. Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ - diff --git a/docs/source/membership/membership.md b/docs/source/membership/membership.md index e43fb77832c..0c0cafb582e 100644 --- a/docs/source/membership/membership.md +++ b/docs/source/membership/membership.md @@ -1,5 +1,7 @@ # Membership Service Provider (MSP) +Note: this topic describes a network that does not use a "system channel", a channel that the ordering service is bootstrapped with and the ordering service exclusively controls. Since the release of v2.3, using a system channel is now considered the legacy process as compared to the process to [Create a channel](../create_channel/create_channel_participation.html) without a system channel. For a version of this topic that includes information about the system channel, check out [Membership Service Provider (MSP)](https://hyperledger-fabric.readthedocs.io/en/release-2.2/membership/membership.html) from the v2.2 documentation. + ## Why do I need an MSP?
Because Fabric is a permissioned network, blockchain participants need a way to prove their identity to the rest of the network in order to transact on the network. If you've read through the documentation on [Identity](../identity/identity.html) @@ -15,13 +17,13 @@ This ability to turn verifiable identities into roles is fundamental to the way *Identities are similar to your credit cards that are used to prove you can pay. The MSP is similar to the list of accepted credit cards.* -Consider a consortium of banks that operate a blockchain network. Each bank operates peer and ordering nodes, and the peers endorse transactions submitted to the network. However, each bank would also have departments and account holders. The account holders would belong to each organization, but would not run nodes on the network. They would only interact with the system from their mobile or web application. So how does the network recognize and differentiate these identities? A CA was used to create the identities, but like the card example, those identities can't just be issued, they need to be recognized by the network. MSPs are used to define the organizations that are trusted by the network members. MSPs are also the mechanism that provide members with a set of roles and permissions within the network. Because the MSPs defining these organizations are known to the members of a network, they can then be used to validate that network entities that attempt to perform actions are allowed to. +Consider a group of banks that operate a blockchain network. Each bank operates peer and ordering nodes, and the peers endorse transactions submitted to the network. However, each bank would also have departments and account holders. The account holders would belong to each organization, but would not run nodes on the network. They would only interact with the system from their mobile or web application. So how does the network recognize and differentiate these identities? 
A CA was used to create the identities, but like the card example, those identities can't just be issued, they need to be recognized by the network. MSPs are used to define the organizations that are trusted by the network members. MSPs are also the mechanism that provide members with a set of roles and permissions within the network. Because the MSPs defining these organizations are known to the members of a network, they can then be used to validate that network entities that attempt to perform actions are allowed to. Finally, consider if you want to join an _existing_ network, you need a way to turn your identity into something that is recognized by the network. The MSP is the mechanism that enables you to participate on a permissioned blockchain network. To transact on a Fabric network a member needs to: 1. Have an identity issued by a CA that is trusted by the network. 2. Become a member of an _organization_ that is recognized and approved by the network members. The MSP is how the identity is linked to the membership of an organization. Membership is achieved by adding the member's public key (also known as certificate, signing cert, or signcert) to the organization’s MSP. -3. Add the MSP to either a [consortium](../glossary.html#consortium) on the network or a channel. +3. Add the MSP to a channel. 4. Ensure the MSP is included in the [policy](../policies/policies.html) definitions on the network. ## What is an MSP? @@ -65,16 +67,15 @@ The channel MSP defines the _relationship_ between the identities of channel mem **Every organization participating in a channel must have an MSP defined for it**. In fact, it is recommended that there is a one-to-one mapping between organizations and MSPs. The MSP defines which members are empowered to act on behalf of the organization. This includes configuration of the MSP itself as well as approving administrative tasks that the organization has a role in, such as adding new members to a channel.
If all network members were part of a single organization or MSP, data privacy is sacrificed. Multiple organizations facilitate privacy by segregating ledger data to only channel members. If more granularity is required within an organization, the organization can be further divided into organizational units (OUs) which we describe in more detail later in this topic. -**The system channel MSP includes the MSPs of all the organizations that participate in an ordering service.** An ordering service will likely include ordering nodes from multiple organizations and collectively these organizations run the ordering service, most importantly managing the consortium of organizations and the default policies that are inherited by the application channels. +**The channel MSP includes the MSPs of all the organizations on a channel.** This includes not just "peer organizations", which own peers and invoke chaincodes, but the organizations that own and run the ordering service. -**Local MSPs are only defined on the file system of the node or user** to which they apply. Therefore, physically and logically there is only one local MSP per -node. However, as channel MSPs are available to all nodes in the channel, they are logically defined once in the channel configuration. However, **a channel MSP is also instantiated on the file system of every node in the channel and kept synchronized via consensus**. So while there is a copy of each channel MSP on the local file system of every node, logically a channel MSP resides on and is maintained by the channel or the network. +**Local MSPs are only defined on the file system of the node or user** to which they apply. Therefore, physically and logically there is only one local MSP per node. However, as channel MSPs are available to all nodes in the channel, they are logically defined once in the channel configuration. 
However, **a channel MSP is also instantiated on the file system of every node in the channel and kept synchronized via consensus**. So while there is a copy of each channel MSP on the local file system of every node, logically a channel MSP resides on and is maintained by the channel or the network. The following diagram illustrates how local and channel MSPs coexist on the network: ![MSP3](./membership.diagram.2.png) -*The MSPs for the peer and orderer are local, whereas the MSPs for a channel (including the network configuration channel, also known as the system channel) are global, shared across all participants of that channel. In this figure, the network system channel is administered by ORG1, but another application channel can be managed by ORG1 and ORG2. The peer is a member of and managed by ORG2, whereas ORG1 manages the orderer of the figure. ORG1 trusts identities from RCA1, whereas ORG2 trusts identities from RCA2. It is important to note that these are administration identities, reflecting who can administer these components. So while ORG1 administers the network, ORG2.MSP does exist in the network definition.* +*In this figure, ORG1 owns the ordering node joined to the channel. The MSPs related to ORG1, the local MSP of the node and the global MSP that formally represents ORG1 on the channel, have been created by RCA1, the CA for ORG1. The peer organization, ORG2, also has a local MSP for its peer and another global MSP that represents ORG2 on the channel. Both ORG1 and ORG2 are channel members, and manage the channel in their areas of administration, and trust identities created by each other's CA. Note that in a production scenario, it is likely that there will be several peer organizations who collaborate in the administration of the channel, and potentially more than one orderer organization as well.* ## What role does an organization play in an MSP? 
@@ -130,7 +131,7 @@ The resulting ROLE and OU attributes are visible inside the X.509 signing certif **Note:** For Channel MSPs, just because an actor has the role of an administrator it doesn't mean that they can administer particular resources. The actual power a given identity has with respect to administering the system is determined by the _policies_ that manage system resources. For example, a channel policy might specify that `ORG1-MANUFACTURING` administrators, meaning identities with a role of `admin` and a Node OU of `ORG1-MANUFACTURING`, have the rights to add new organizations to the channel, whereas the `ORG1-DISTRIBUTION` administrators have no such rights. -Finally, OUs could be used by different organizations in a consortium to distinguish each other. But in such cases, the different organizations have to use the same Root CAs and Intermediate CAs for their chain of trust, and assign the OU field to identify members of each organization. When every organization has the same CA or chain of trust, this makes the system more centralized than what might be desirable and therefore deserves careful consideration on a blockchain network. +Finally, OUs could be used by different organizations to distinguish each other. But in such cases, the different organizations have to use the same Root CAs and Intermediate CAs for their chain of trust, and assign the OU field to identify members of each organization. When every organization has the same CA or chain of trust, this makes the system more centralized than what might be desirable and therefore deserves careful consideration on a blockchain network. ## MSP Structure diff --git a/docs/source/msp.rst b/docs/source/msp.rst index c9749be807e..bfc8f8c87e5 100644 --- a/docs/source/msp.rst +++ b/docs/source/msp.rst @@ -1,26 +1,13 @@ Membership Service Providers (MSP) ================================== -The document serves to provide details on the setup and best practices for MSPs. 
+For a conceptual overview of the Membership Service Provider (MSP), check out +:doc:`membership/membership`. -Membership Service Provider (MSP) is a Hyperledger Fabric component that offers -an abstraction of membership operations. +This topic elaborates on the setup of the MSP implementation supported by +Hyperledger Fabric and discusses best practices concerning its use. -In particular, an MSP abstracts away all cryptographic mechanisms and protocols -behind issuing certificates, validating certificates, and user authentication. -An MSP may define its own notion of identity, and the rules by which those -identities are governed (identity validation) and authenticated (signature -generation and verification). - -A Hyperledger Fabric blockchain network can be governed by one or more MSPs. -This provides modularity of membership operations, and interoperability -across different membership standards and architectures. - -In the rest of this document we elaborate on the setup of the MSP -implementation supported by Hyperledger Fabric, and discuss best practices -concerning its use. - -MSP Configuration +MSP configuration ----------------- To setup an instance of the MSP, its configuration needs to be specified @@ -34,8 +21,7 @@ in the network (e.g. ``msp1``, ``org2``, and ``org3.divA``). This is the name un which membership rules of an MSP representing a consortium, organization or organization division is to be referenced in a channel. This is also referred to as the *MSP Identifier* or *MSP ID*. MSP Identifiers are required to be unique per MSP -instance. For example, shall two MSP instances with the same identifier be -detected at the system channel genesis, orderer setup will fail. +instance. In the case of the default MSP implementation, a set of parameters need to be specified to allow for identity (certificate) validation and signature @@ -98,54 +84,10 @@ How to generate MSP certificates and their signing keys? certificates and keys. 
Please note that Hyperledger Fabric does not support RSA key and certificates. -Alternatively, the ``cryptogen`` tool can be used as described in -:doc:`getting_started`. - -`Hyperledger Fabric CA `_ -can also be used to generate the keys and certificates needed to configure an MSP. - -MSP setup on the peer & orderer side ------------------------------------- - -To set up a local MSP (for either a peer or an orderer), the administrator -should create a folder (e.g. ``$MY_PATH/mspconfig``) that contains six subfolders -and a file: - -1. a folder ``admincerts`` to include PEM files each corresponding to an - administrator certificate -2. a folder ``cacerts`` to include PEM files each corresponding to a root - CA's certificate -3. (optional) a folder ``intermediatecerts`` to include PEM files each - corresponding to an intermediate CA's certificate -4. (optional) a file ``config.yaml`` to configure the supported Organizational Units - and identity classifications (see respective sections below). -5. (optional) a folder ``crls`` to include the considered CRLs -6. a folder ``keystore`` to include a PEM file with the node's signing key; - we emphasise that currently RSA keys are not supported -7. a folder ``signcerts`` to include a PEM file with the node's X.509 - certificate -8. (optional) a folder ``tlscacerts`` to include PEM files each corresponding to a TLS root - CA's certificate -9. (optional) a folder ``tlsintermediatecerts`` to include PEM files each - corresponding to an intermediate TLS CA's certificate - -In the configuration file of the node (core.yaml file for the peer, and -orderer.yaml for the orderer), one needs to specify the path to the -mspconfig folder, and the MSP Identifier of the node's MSP. The path to the -mspconfig folder is expected to be relative to FABRIC_CFG_PATH and is provided -as the value of parameter ``mspConfigPath`` for the peer, and ``LocalMSPDir`` -for the orderer. 
The identifier of the node's MSP is provided as a value of -parameter ``localMspId`` for the peer and ``LocalMSPID`` for the orderer. -These variables can be overridden via the environment using the CORE prefix for -peer (e.g. CORE_PEER_LOCALMSPID) and the ORDERER prefix for the orderer (e.g. -ORDERER_GENERAL_LOCALMSPID). Notice that for the orderer setup, one needs to -generate, and provide to the orderer the genesis block of the system channel. -The MSP configuration needs of this block are detailed in the next section. - -*Reconfiguration* of a "local" MSP is only possible manually, and requires that -the peer or orderer process is restarted. In subsequent releases we aim to -offer online/dynamic reconfiguration (i.e. without requiring to stop the node -by using a node managed system chaincode). +The Hyperledger Fabric CA can also be used to generate the keys and certificates +needed to configure an MSP. Check out +`Registering and enrolling identities with a CA `_ +for more information about how to generate MSPs for nodes and organizations. Organizational Units -------------------- @@ -169,7 +111,7 @@ The ``Certificate`` field refers to the CA or intermediate CA certificate path under which identities, having that specific OU, should be validated. The path is relative to the MSP root folder and cannot be empty. -Identity Classification +Identity classification ----------------------- The default MSP implementation allows organizations to further classify identities into clients, @@ -241,38 +183,14 @@ without the certificate being stored in the ``admincerts`` folder of the MSP. In with the admin OU. Certificates in the ``admincerts`` folder will still grant the role of administrator to their bearer, provided that they possess the client or admin OU. 
-Channel MSP setup ------------------ +Adding MSPs to channels +----------------------- + +For information about how to add MSPs to a channel (including the decision of +whether to bootstrap ordering nodes with a system channel genesis block), check +out :doc:`create_channel/create_channel_overview`. -At the genesis of the system, verification parameters of all the MSPs that -appear in the network need to be specified, and included in the system -channel's genesis block. Recall that MSP verification parameters consist of -the MSP identifier, the root of trust certificates, intermediate CA and admin -certificates, as well as OU specifications and CRLs. -The system genesis block is provided to the orderers at their setup phase, -and allows them to authenticate channel creation requests. Orderers would -reject the system genesis block, if the latter includes two MSPs with the same -identifier, and consequently the bootstrapping of the network would fail. - -For application channels, the verification components of only the MSPs that -govern a channel need to reside in the channel's genesis block. We emphasize -that it is **the responsibility of the application** to ensure that correct -MSP configuration information is included in the genesis blocks (or the -most recent configuration block) of a channel prior to instructing one or -more of their peers to join the channel. - -When bootstrapping a channel with the help of the configtxgen tool, one can -configure the channel MSPs by including the verification parameters of MSP -in the mspconfig folder, and setting that path in the relevant section in -``configtx.yaml``. - -*Reconfiguration* of an MSP on the channel, including announcements of the -certificate revocation lists associated to the CAs of that MSP is achieved -through the creation of a ``config_update`` object by the owner of one of the -administrator certificates of the MSP. 
The client application managed by the -admin would then announce this update to the channels in which this MSP appears. - -Best Practices +Best practices -------------- In this section we elaborate on best practices for MSP diff --git a/docs/source/network/network.diagram.1.png b/docs/source/network/network.diagram.1.png index 3a9b4e0f1d8..36243405847 100644 Binary files a/docs/source/network/network.diagram.1.png and b/docs/source/network/network.diagram.1.png differ diff --git a/docs/source/network/network.diagram.10.png b/docs/source/network/network.diagram.10.png deleted file mode 100644 index cc08da9c317..00000000000 Binary files a/docs/source/network/network.diagram.10.png and /dev/null differ diff --git a/docs/source/network/network.diagram.11.png b/docs/source/network/network.diagram.11.png deleted file mode 100644 index b971e529860..00000000000 Binary files a/docs/source/network/network.diagram.11.png and /dev/null differ diff --git a/docs/source/network/network.diagram.12.png b/docs/source/network/network.diagram.12.png deleted file mode 100644 index d742d504c92..00000000000 Binary files a/docs/source/network/network.diagram.12.png and /dev/null differ diff --git a/docs/source/network/network.diagram.14.png b/docs/source/network/network.diagram.14.png deleted file mode 100644 index d57249065f3..00000000000 Binary files a/docs/source/network/network.diagram.14.png and /dev/null differ diff --git a/docs/source/network/network.diagram.15.png b/docs/source/network/network.diagram.15.png deleted file mode 100644 index f18b56b7bd2..00000000000 Binary files a/docs/source/network/network.diagram.15.png and /dev/null differ diff --git a/docs/source/network/network.diagram.2.1.png b/docs/source/network/network.diagram.2.1.png deleted file mode 100644 index 092ae2253d2..00000000000 Binary files a/docs/source/network/network.diagram.2.1.png and /dev/null differ diff --git a/docs/source/network/network.diagram.2.png b/docs/source/network/network.diagram.2.png index 
bdfcfebc4d7..a4d8b1321f6 100644 Binary files a/docs/source/network/network.diagram.2.png and b/docs/source/network/network.diagram.2.png differ diff --git a/docs/source/network/network.diagram.3.png b/docs/source/network/network.diagram.3.png index 187841e4ca7..c0cbdc144de 100644 Binary files a/docs/source/network/network.diagram.3.png and b/docs/source/network/network.diagram.3.png differ diff --git a/docs/source/network/network.diagram.4.png b/docs/source/network/network.diagram.4.png index c1521ecb435..a55ffc6cff1 100644 Binary files a/docs/source/network/network.diagram.4.png and b/docs/source/network/network.diagram.4.png differ diff --git a/docs/source/network/network.diagram.5.png b/docs/source/network/network.diagram.5.png index d6a824f8394..9f305706801 100644 Binary files a/docs/source/network/network.diagram.5.png and b/docs/source/network/network.diagram.5.png differ diff --git a/docs/source/network/network.diagram.6.png b/docs/source/network/network.diagram.6.png index 16e7bf1787e..ba30cb7b602 100644 Binary files a/docs/source/network/network.diagram.6.png and b/docs/source/network/network.diagram.6.png differ diff --git a/docs/source/network/network.diagram.7.png b/docs/source/network/network.diagram.7.png index df2cfe40cf1..6fa5eedaa86 100644 Binary files a/docs/source/network/network.diagram.7.png and b/docs/source/network/network.diagram.7.png differ diff --git a/docs/source/network/network.diagram.8.png b/docs/source/network/network.diagram.8.png index f4079be0561..2636313c2c8 100644 Binary files a/docs/source/network/network.diagram.8.png and b/docs/source/network/network.diagram.8.png differ diff --git a/docs/source/network/network.diagram.9.png b/docs/source/network/network.diagram.9.png deleted file mode 100644 index de26b7cc231..00000000000 Binary files a/docs/source/network/network.diagram.9.png and /dev/null differ diff --git a/docs/source/network/network.md b/docs/source/network/network.md index 5938b523934..631ae1c235c 100644 --- 
a/docs/source/network/network.md +++ b/docs/source/network/network.md @@ -1,1030 +1,165 @@ -# Blockchain network - -This topic will describe, **at a conceptual level**, how Hyperledger Fabric -allows organizations to collaborate in the formation of blockchain networks. If -you're an architect, administrator or developer, you can use this topic to get a -solid understanding of the major structure and process components in a -Hyperledger Fabric blockchain network. This topic will use a manageable worked -example that introduces all of the major components in a blockchain network. - -After reading this topic and understanding the concept of policies, you will -have a solid understanding of the decisions that organizations need to make to -establish the policies that control a deployed Hyperledger Fabric network. -You'll also understand how organizations manage network evolution using -declarative policies -- a key feature of Hyperledger Fabric. In a nutshell, -you'll understand the major technical components of Hyperledger Fabric and the -decisions organizations need to make about them. +# How Fabric networks are structured + +This topic will describe, **at a conceptual level**, how Hyperledger Fabric allows organizations to collaborate in the formation of blockchain networks. If you're an architect, administrator or developer, you can use this topic to get a solid understanding of the major structure and process components in a Hyperledger Fabric blockchain network. This topic will use a manageable example that introduces all of the major components in a blockchain network. + +After reading this topic and understanding the concept of policies, you will have a solid understanding of the decisions that organizations need to make to establish the policies that control a deployed Hyperledger Fabric network. You'll also understand how organizations manage network evolution using declarative policies -- a key feature of Hyperledger Fabric. 
In a nutshell, you'll understand the major technical components of Hyperledger Fabric and the decisions organizations need to make about them. + +Note: in this topic, we'll refer to the structure of a network that does not have a "system channel", a channel run by the ordering service that ordering nodes are bootstrapped with. For a version of this topic that does use the system channel, check out [Blockchain network](https://hyperledger-fabric.readthedocs.io/en/release-2.2/network/network.html). ## What is a blockchain network? -A blockchain network is a technical infrastructure that provides ledger and -smart contract (chaincode) services to applications. Primarily, smart contracts -are used to generate transactions which are subsequently distributed to every -peer node in the network where they are immutably recorded on their copy of the -ledger. The users of applications might be end users using client applications -or blockchain network administrators. - -In most cases, multiple [organizations](../glossary.html#organization) come -together as a [consortium](../glossary.html#consortium) to form the network and -their permissions are determined by a set of [policies](../glossary.html#policy) -that are agreed by the consortium when the network is originally configured. -Moreover, network policies can change over time subject to the agreement of the -organizations in the consortium, as we'll discover when we discuss the concept -of *modification policy*. +A blockchain network is a technical infrastructure that provides ledger and smart contract (which is packaged as part of a "chaincode") services to applications. Primarily, smart contracts are used to generate transactions which are subsequently distributed to every peer node in the network where they are immutably recorded on their copy of the ledger. The users of applications might be end users using client applications or blockchain network administrators. 
+ +In most cases, multiple [organizations](../glossary.html#organization) come together to form a **channel** on which transactions are invoked on chaincodes and where permissions are determined by a set of [policies](../policies/policies.html) that are agreed to when the channel is originally configured. Moreover, policies can change over time subject to the agreement of the organizations. + +**In this topic, we'll refer to both the "network" and the "channel". In Hyperledger Fabric, these terms are effectively synonymous, as they both refer collectively to the organizations, components, policies, and processes that govern the interactions between organizations within a defined structure.** ## The sample network -Before we start, let's show you what we're aiming at! Here's a diagram -representing the **final state** of our sample network. - -Don't worry that this might look complicated! As we go through this topic, we -will build up the network piece by piece, so that you see how the organizations -R1, R2, R3 and R4 contribute infrastructure to the network to help form it. This -infrastructure implements the blockchain network, and it is governed by policies -agreed by the organizations who form the network -- for example, who can add new -organizations. You'll discover how applications consume the ledger and smart -contract services provided by the blockchain network. - -![network.structure](./network.diagram.1.png) - -*Four organizations, R1, R2, R3 and R4 have jointly decided, and written into an -agreement, that they will set up and exploit a Hyperledger Fabric -network. R4 has been assigned to be the network initiator -- it has been given -the power to set up the initial version of the network. R4 has no intention to -perform business transactions on the network. R1 and R2 have a need for a -private communications within the overall network, as do R2 and R3. -Organization R1 has a client application that can perform business transactions -within channel C1. 
Organization R2 has a client application that can do similar -work both in channel C1 and C2. Organization R3 has a client application that -can do this on channel C2. Peer node P1 maintains a copy of the ledger L1 -associated with C1. Peer node P2 maintains a copy of the ledger L1 associated -with C1 and a copy of ledger L2 associated with C2. Peer node P3 maintains a -copy of the ledger L2 associated with C2. The network is governed according to -policy rules specified in network configuration NC4, the network is under the -control of organizations R1 and R4. Channel C1 is governed according to the -policy rules specified in channel configuration CC1; the channel is under the -control of organizations R1 and R2. Channel C2 is governed according to the -policy rules specified in channel configuration CC2; the channel is under the -control of organizations R2 and R3. There is an ordering service O4 that -services as a network administration point for N, and uses the system channel. -The ordering service also supports application channels C1 and C2, for the -purposes of transaction ordering into blocks for distribution. Each of the four -organizations has a preferred Certificate Authority.* - -## Creating the Network - -Let's start at the beginning by creating the basis for the network: - -![network.creation](./network.diagram.2.png) - -*The network is formed when an orderer is started. In our example network, N, -the ordering service comprising a single node, O4, is configured according to a -network configuration NC4, which gives administrative rights to organization -R4. At the network level, Certificate Authority CA4 is used to dispense -identities to the administrators and network nodes of the R4 organization.* - -We can see that the first thing that defines a **network, N,** is an **ordering -service, O4**. It's helpful to think of the ordering service as the initial -administration point for the network. 
As agreed beforehand, O4 is initially -configured and started by an administrator in organization R4, and hosted in R4. -The configuration NC4 contains the policies that describe the starting set of -administrative capabilities for the network. Initially this is set to only give -R4 rights over the network. This will change, as we'll see later, but for now R4 -is the only member of the network. +Before we start, let's show you what we're aiming at! Here's a diagram representing the **final state** of our sample network. + +It might look complicated right now, but as we go through this topic, we will build up the network piece by piece, so that you see how the organizations R1, R2 and R0 contribute infrastructure to the network to help form it. This infrastructure implements the blockchain network, and it is governed by policies agreed by the organizations who form the network -- for example, who can add new organizations. You'll discover how applications consume the ledger and smart contract services provided by the blockchain network. + +![network.1](./network.diagram.1.png) + +Three organizations, R1, R2, and R0 have jointly decided that they will establish a network. This network has a configuration, CC1, which all of the organizations have agreed to and which lists the definition of the organizations as well as the policies which define the roles each organization will play on the channel. + +On this channel, R1 and R2 will join peers, named P1 and P2, to the channel, C1, while R0 owns O, the ordering service of the channel. All of these nodes will contain a copy of the ledger (L1) of the channel, which is where transactions are recorded. Note that the copy of the ledger kept by the ordering service does not contain a [state database](../glossary.html#state-database). R1 and R2 will also interact with the channel through the applications A1 and A2, which they own. 
All three organizations have a Certificate Authority that has generated the necessary certificates for the nodes, admins, organization definitions, and applications of its organization. + +## Creating the network + +The first step in creating a network or a channel is to agree to and then define its configuration: + +![network.2](./network.diagram.2.png) + +The channel configuration, CC1, has been agreed to by organizations R1, R2, and R0 and is contained in a block known as a "configuration block" that is, typically, created by the `configtxgen` tool from a `configtx.yaml` file. While it is possible for one organization to create a channel unilaterally and then invite other organizations to it (we'll explore this in [Adding an organization to an existing channel](#adding-an-organization-to-an-existing-channel)), for now we'll assume that the organizations wish to collaborate on the channel from the beginning. + +Once a configuration block exists, a channel can be said to **logically exist**, even though no components are physically joined to it. This configuration block contains a record of the organizations that can join components and interact on the channel, as well as the **policies** that define the structure for how decisions are made and specific outcomes are reached. While the peers and applications are critical actors in the network, their behavior in a channel is dictated more by the channel configuration policy than any other factor. For more information about the policies and how they are defined in a channel configuration, check out [Policies](../policies/policies.html). + +The definitions of these organizations, and the identities of their admins, must be created by a Certificate Authority (CA) associated with each organization. In our example, the organizations R1, R2, and R0 have had their certificates and organization definitions created by CA1, CA2, and CA0, respectively. 
For information about how to create a CA, check out [Planning for a CA](https://hyperledger-fabric-ca.readthedocs.io/en/latest/deployguide/ca-deploy-topology.html). After the CA has been created, check out [Registering and enrolling identities with a CA](https://hyperledger-fabric-ca.readthedocs.io/en/latest/deployguide/use_CA.html) for information about how to define an organization and create identities for admins and nodes. + +For more information about using `configtxgen` to create a configuration block, check out [Using configtx.yaml to build a channel configuration](../create_channel/create_channel_config.html). ### Certificate Authorities -You can also see a Certificate Authority, CA4, which is used to issue -certificates to administrators and network nodes. CA4 plays a key role in our -network because it dispenses X.509 certificates that can be used to identify -components as belonging to organization R4. Certificates issued by CAs -can also be used to sign transactions to indicate that an organization endorses -the transaction result -- a precondition of it being accepted onto the -ledger. Let's examine these two aspects of a CA in a little more detail. - -Firstly, different components of the blockchain network use certificates to -identify themselves to each other as being from a particular organization. -That's why there is usually more than one CA supporting a blockchain network -- -different organizations often use different CAs. We're going to use four CAs in -our network; one for each organization. Indeed, CAs are so important that -Hyperledger Fabric provides you with a built-in one (called *Fabric-CA*) to help -you get going, though in practice, organizations will choose to use their own -CA. - -The mapping of certificates to member organizations is achieved by via -a structure called a -[Membership Services Provider (MSP)](../glossary.html#membership-services). 
-Network configuration NC4 uses a named -MSP to identify the properties of certificates dispensed by CA4 which associate -certificate holders with organization R4. NC4 can then use this MSP name in -policies to grant actors from R4 particular -rights over network resources. An example of such a policy is to identify the -administrators in R4 who can add new member organizations to the network. We -don't show MSPs on these diagrams, as they would just clutter them up, but they -are very important. - -Secondly, we'll see later how certificates issued by CAs are at the heart of the -[transaction](../glossary.html#transaction) generation and validation process. -Specifically, X.509 certificates are used in client application -[transaction proposals](../glossary.html#proposal) and smart contract -[transaction responses](../glossary.html#response) to digitally sign -[transactions](../glossary.html#transaction). Subsequently the network nodes -who host copies of the ledger verify that transaction signatures are valid -before accepting transactions onto the ledger. - -Let's recap the basic structure of our example blockchain network. There's a -resource, the network N, accessed by a set of users defined by a Certificate -Authority CA4, who have a set of rights over the resources in the network N as -described by policies contained inside a network configuration NC4. All of this -is made real when we configure and start the ordering service node O4. - -## Adding Network Administrators - -NC4 was initially configured to only allow R4 users administrative rights over -the network. In this next phase, we are going to allow organization R1 users to -administer the network. Let's see how the network evolves: - -![network.admins](./network.diagram.2.1.png) - -*Organization R4 updates the network configuration to make organization R1 an -administrator too. 
After this point R1 and R4 have equal rights over the -network configuration.* - -We see the addition of a new organization R1 as an administrator -- R1 and R4 -now have equal rights over the network. We can also see that certificate -authority CA1 has been added -- it can be used to identify users from the R1 -organization. After this point, users from both R1 and R4 can administer the -network. - -Although the orderer node, O4, is running on R4's infrastructure, R1 has shared -administrative rights over it, as long as it can gain network access. It means -that R1 or R4 could update the network configuration NC4 to allow the R2 -organization a subset of network operations. In this way, even though R4 is -running the ordering service, and R1 has full administrative rights over it, R2 -has limited rights to create new consortia. - -In its simplest form, the ordering service is a single node in the network, and -that's what you can see in the example. Ordering services are usually -multi-node, and can be configured to have different nodes in different -organizations. For example, we might run O4 in R4 and connect it to O2, a -separate orderer node in organization R1. In this way, we would have a -multi-site, multi-organization administration structure. - -We'll discuss the ordering service a little [later in this topic](#the-ordering-service), -but for now just think of the ordering service as an administration point which -provides different organizations controlled access to the network. - -## Defining a Consortium - -Although the network can now be administered by R1 and R4, there is very little -that can be done. The first thing we need to do is define a consortium. This -word literally means "a group with a shared destiny", so it's an appropriate -choice for a set of organizations in a blockchain network. 
- -Let's see how a consortium is defined: - -![network.consortium](./network.diagram.3.png) - -*A network administrator defines a consortium X1 that contains two members, -the organizations R1 and R2. This consortium definition is stored in the -network configuration NC4, and will be used at the next stage of network -development. CA1 and CA2 are the respective Certificate Authorities for these -organizations.* - -Because of the way NC4 is configured, only R1 or R4 can create new consortia. -This diagram shows the addition of a new consortium, X1, which defines R1 and R2 -as its constituting organizations. We can also see that CA2 has been added to -identify users from R2. Note that a consortium can have any number of -organizational members -- we have just shown two as it is the simplest -configuration. - -Why are consortia important? We can see that a consortium defines the set of -organizations in the network who share a need to **transact** with one another -- -in this case R1 and R2. It really makes sense to group organizations together if -they have a common goal, and that's exactly what's happening. - -The network, although started by a single organization, is now controlled by a -larger set of organizations. We could have started it this way, with R1, R2 and -R4 having shared control, but this build up makes it easier to understand. - -We're now going to use consortium X1 to create a really important part of a -Hyperledger Fabric blockchain -- **a channel**. - -## Creating a channel for a consortium - -So let's create this key part of the Fabric blockchain network -- **a channel**. -A channel is a primary communications mechanism by which the members of a -consortium can communicate with each other. There can be multiple channels in a -network, but for now, we'll start with one. 
- -Let's see how the first channel has been added to the network: - -![network.channel](./network.diagram.4.png) - -*A channel C1 has been created for R1 and R2 using the consortium definition X1. -The channel is governed by a channel configuration CC1, completely separate to -the network configuration. CC1 is managed by R1 and R2 who have equal rights -over C1. R4 has no rights in CC1 whatsoever.* - -The channel C1 provides a private communications mechanism for the consortium -X1. We can see channel C1 has been connected to the ordering service O4 but that -nothing else is attached to it. In the next stage of network development, we're -going to connect components such as client applications and peer nodes. But at -this point, a channel represents the **potential** for future connectivity. - -Even though channel C1 is a part of the network N, it is quite distinguishable -from it. Also notice that organizations R3 and R4 are not in this channel -- it -is for transaction processing between R1 and R2. In the previous step, we saw -how R4 could grant R1 permission to create new consortia. It's helpful to -mention that R4 **also** allowed R1 to create channels! In this diagram, it -could have been organization R1 or R4 who created a channel C1. Again, note -that a channel can have any number of organizations connected to it -- we've -shown two as it's the simplest configuration. - -Again, notice how channel C1 has a completely separate configuration, CC1, to -the network configuration NC4. CC1 contains the policies that govern the -rights that R1 and R2 have over the channel C1 -- and as we've seen, R3 and -R4 have no permissions in this channel. R3 and R4 can only interact with C1 if -they are added by R1 or R2 to the appropriate policy in the channel -configuration CC1. An example is defining who can add a new organization to the -channel. Specifically, note that R4 cannot add itself to the channel C1 -- it -must, and can only, be authorized by R1 or R2. 
- -Why are channels so important? Channels are useful because they provide a -mechanism for private communications and private data between the members of a -consortium. Channels provide privacy from other channels, and from the network. -Hyperledger Fabric is powerful in this regard, as it allows organizations to -share infrastructure and keep it private at the same time. There's no -contradiction here -- different consortia within the network will have a need -for different information and processes to be appropriately shared, and channels -provide an efficient mechanism to do this. Channels provide an efficient -sharing of infrastructure while maintaining data and communications privacy. - -We can also see that once a channel has been created, it is in a very real sense -"free from the network". It is only organizations that are explicitly specified -in a channel configuration that have any control over it, from this time forward -into the future. Likewise, any updates to network configuration NC4 from this -time onwards will have no direct effect on channel configuration CC1; for -example if consortia definition X1 is changed, it will not affect the members of -channel C1. Channels are therefore useful because they allow private -communications between the organizations constituting the channel. Moreover, the -data in a channel is completely isolated from the rest of the network, including -other channels. - -As an aside, there is also a special **system channel** defined for use by the -ordering service. It behaves in exactly the same way as a regular channel, -which are sometimes called **application channels** for this reason. We don't -normally need to worry about this channel, but we'll discuss a little bit more -about it [later in this topic](#the-ordering-service). - -## Peers and Ledgers - -Let's now start to use the channel to connect the blockchain network and the -organizational components together. 
In the next stage of network development, we -can see that our network N has just acquired two new components, namely a peer -node P1 and a ledger instance, L1. - -![network.peersledger](./network.diagram.5.png) - -*A peer node P1 has joined the channel C1. P1 physically hosts a copy of the -ledger L1. P1 and O4 can communicate with each other using channel C1.* - -Peer nodes are the network components where copies of the blockchain ledger are -hosted! At last, we're starting to see some recognizable blockchain components! -P1's purpose in the network is purely to host a copy of the ledger L1 for others -to access. We can think of L1 as being **physically hosted** on P1, but -**logically hosted** on the channel C1. We'll see this idea more clearly when we -add more peers to the channel. - -A key part of a P1's configuration is an X.509 identity issued by CA1 which -associates P1 with organization R1. When R1 administrator takes the -action of joining peer P1 to channel C1, and the peer starts pulling blocks from -the orderer O4, the orderer uses the channel configuration -CC1 to determine P1's permissions on this channel. For example, policy in CC1 -determines whether P1 (or the organization R1) can read and/or write on the -channel C1. - -Notice how peers are joined to channels by the organizations that own them, and -though we've only added one peer, we'll see how there can be multiple peer -nodes on multiple channels within the network. We'll see the different roles -that peers can take on a little later. - -## Applications and Smart Contract chaincode - -Now that the channel C1 has a ledger on it, we can start connecting client -applications to consume some of the services provided by workhorse of the -ledger, the peer! - -Notice how the network has grown: - -![network.appsmartcontract](./network.diagram.6.png) - -*A smart contract S5 has been installed onto P1. Client application A1 in -organization R1 can use S5 to access the ledger via peer node P1. 
A1, P1 and -O4 are all joined to channel C1, i.e. they can all make use of the -communication facilities provided by that channel.* - -In the next stage of network development, we can see that client application A1 -can use channel C1 to connect to specific network resources -- in this case A1 -can connect to both peer node P1 and orderer node O4. Again, see how channels -are central to the communication between network and organization components. -Just like peers and orderers, a client application will have an identity that -associates it with an organization. In our example, client application A1 is -associated with organization R1; and although it is outside the Fabric -blockchain network, it is connected to it via the channel C1. - -It might now appear that A1 can access the ledger L1 directly via P1, but in -fact, all access is managed via a special program called a smart contract -chaincode, S5. Think of S5 as defining all the common access patterns to the -ledger; S5 provides a well-defined set of ways by which the ledger L1 can -be queried or updated. In short, client application A1 has to go through smart -contract S5 to get to ledger L1! - -Smart contracts can be created by application developers in each organization to -implement a business process shared by the consortium members. Smart contracts -are used to help generate transactions which can be subsequently distributed to -every node in the network. We'll discuss this idea a little later; it'll be -easier to understand when the network is bigger. For now, the important thing to -understand is that to get to this point two operations must have been performed -on the smart contract; it must have been **installed** on peers, and then -**defined** on a channel. - -Hyperledger Fabric users often use the terms **smart contract** and -**chaincode** interchangeably. 
In general, a smart contract defines the -**transaction logic** that controls the lifecycle of a business object contained -in the world state. It is then packaged into a chaincode which is then deployed -to a blockchain network. Think of smart contracts as governing transactions, -whereas chaincode governs how smart contracts are packaged for deployment. - -### Installing a chaincode package - -After a smart contract S5 has been developed, an administrator in organization -R1 must create a chaincode package and [install](../glossary.html#install) it -onto peer node P1. This is a straightforward operation; once completed, P1 has -full knowledge of S5. Specifically, P1 can see the **implementation** logic of -S5 -- the program code that it uses to access the ledger L1. We contrast this to -the S5 **interface** which merely describes the inputs and outputs of S5, -without regard to its implementation. - -When an organization has multiple peers in a channel, it can choose the peers -upon which it installs smart contracts; it does not need to install a smart -contract on every peer. - -### Defining a chaincode - -Although a chaincode is installed on the peers of individual organizations, it -is governed and operated in the scope of a channel. Each organization needs to -approve a **chaincode definition**, a set of parameters that establish how a -chaincode will be used on a channel. An organization must approve a chaincode -definition in order to use the installed smart contract to query the ledger -and endorse transactions. In our example, which only has a single peer node P1, -an administrator in organization R1 must approve a chaincode definition for S5. - -A sufficient number of organizations need to approve a chaincode definition (A -majority, by default) before the chaincode definition can be committed to the -channel and used to interact with the channel ledger. 
Because the channel only -has one member, the administrator of R1 can commit the chaincode definition of -S5 to the channel C1. Once the definition has been committed, S5 can now be -[invoked](../glossary.html#invoke) by client application A1! - -Note that although every component on the channel can now access S5, they are -not able to see its program logic. This remains private to those nodes who have -installed it; in our example that means P1. Conceptually this means that it's -the smart contract **interface** that is defined and committed to a channel, in -contrast to the smart contract **implementation** that is installed. To reinforce -this idea; installing a smart contract shows how we think of it being -**physically hosted** on a peer, whereas a smart contract that has been defined -on a channel shows how we consider it **logically hosted** by the channel. - -### Endorsement policy - -The most important piece of information supplied within the chaincode definition -is the [endorsement policy](../glossary.html#endorsement-policy). It describes -which organizations must approve transactions before they will be accepted by other -organizations onto their copy of the ledger. In our sample network, transactions -can only be accepted onto ledger L1 if R1 or R2 endorse them. - -Committing the chaincode definition to the channel places the endorsement policy -on the channel ledger; it enables it to be accessed by any member of the channel. -You can read more about endorsement policies in the [transaction flow topic](../txflow.html). - -### Invoking a smart contract - -Once a smart contract has been installed on a peer node and defined on a -channel it can be [invoked](../glossary.html#invoke) by a client application. -Client applications do this by sending transaction proposals to peers owned by -the organizations specified by the smart contract endorsement policy. 
The -transaction proposal serves as input to the smart contract, which uses it to -generate an endorsed transaction response, which is returned by the peer node to -the client application. - -It's these transactions responses that are packaged together with the -transaction proposal to form a fully endorsed transaction, which can be -distributed to the entire network. We'll look at this in more detail later For -now, it's enough to understand how applications invoke smart contracts to -generate endorsed transactions. - -By this stage in network development we can see that organization R1 is fully -participating in the network. Its applications -- starting with A1 -- can access -the ledger L1 via smart contract S5, to generate transactions that will be -endorsed by R1, and therefore accepted onto the ledger because they conform to -the endorsement policy. - -## Network completed - -Recall that our objective was to create a channel for consortium X1 -- -organizations R1 and R2. This next phase of network development sees -organization R2 add its infrastructure to the network. - -Let's see how the network has evolved: - -![network.grow](./network.diagram.7.png) - -*The network has grown through the addition of infrastructure from -organization R2. Specifically, R2 has added peer node P2, which hosts a copy of -ledger L1, and chaincode S5. R2 approves the same chaincode definition as R1. -P2 has also joined channel C1, as has application A2. A2 and P2 are identified -using certificates from CA2. All of this means that both applications A1 and A2 -can invoke S5 on C1 either using peer node P1 or P2.* - -We can see that organization R2 has added a peer node, P2, on channel C1. P2 -also hosts a copy of the ledger L1 and smart contract S5. We can see that R2 has -also added client application A2 which can connect to the network via channel -C1. 
To achieve this, an administrator in organization R2 has created peer node -P2 and joined it to channel C1, in the same way as an administrator in R1. The -administrator also has to approve the same chaincode definition as R1. - -We have created our first operational network! At this stage in network -development, we have a channel in which organizations R1 and R2 can fully -transact with each other. Specifically, this means that applications A1 and A2 -can generate transactions using smart contract S5 and ledger L1 on channel C1. - -### Generating and accepting transactions - -In contrast to peer nodes, which always host a copy of the ledger, we see that -there are two different kinds of peer nodes; those which host smart contracts -and those which do not. In our network, every peer hosts a copy of the smart -contract, but in larger networks, there will be many more peer nodes that do not -host a copy of the smart contract. A peer can only *run* a smart contract if it -is installed on it, but it can *know* about the interface of a smart contract by -being connected to a channel. - -You should not think of peer nodes which do not have smart contracts installed -as being somehow inferior. It's more the case that peer nodes with smart -contracts have a special power -- to help **generate** transactions. Note that -all peer nodes can **validate** and subsequently **accept** or **reject** -transactions onto their copy of the ledger L1. However, only peer nodes with a -smart contract installed can take part in the process of transaction -**endorsement** which is central to the generation of valid transactions. - -We don't need to worry about the exact details of how transactions are -generated, distributed and accepted in this topic -- it is sufficient to -understand that we have a blockchain network where organizations R1 and R2 can -share information and processes as ledger-captured transactions. 
We'll learn a -lot more about transactions, ledgers, smart contracts in other topics. - -### Types of peers - -In Hyperledger Fabric, while all peers are the same, they can assume multiple -roles depending on how the network is configured. We now have enough -understanding of a typical network topology to describe these roles. - - * [*Committing peer*](../glossary.html#commitment). Every peer node in a - channel is a committing peer. It receives blocks of generated transactions, - which are subsequently validated before they are committed to the peer - node's copy of the ledger as an append operation. - - * [*Endorsing peer*](../glossary.html#endorsement). Every peer with a smart - contract *can* be an endorsing peer if it has a smart contract installed. - However, to actually *be* an endorsing peer, the smart contract on the peer - must be used by a client application to generate a digitally signed - transaction response. The term *endorsing peer* is an explicit reference to - this fact. - - An endorsement policy for a smart contract identifies the - organizations whose peer should digitally sign a generated transaction - before it can be accepted onto a committing peer's copy of the ledger. - -These are the two major types of peer; there are two other roles a peer can -adopt: - - * [*Leader peer*](../glossary.html#leading-peer). When an organization has - multiple peers in a channel, a leader peer is a node which takes - responsibility for distributing transactions from the orderer to the other - committing peers in the organization. A peer can choose to participate in - static or dynamic leadership selection. - - It is helpful, therefore to think of two sets of peers from leadership - perspective -- those that have static leader selection, and those with - dynamic leader selection. For the static set, zero or more peers can be - configured as leaders. For the dynamic set, one peer will be elected leader - by the set. 
Moreover, in the dynamic set, if a leader peer fails, then the - remaining peers will re-elect a leader. - - It means that an organization's peers can have one or more leaders connected - to the ordering service. This can help to improve resilience and scalability - in large networks which process high volumes of transactions. - - * [*Anchor peer*](../glossary.html#anchor-peer). If a peer needs to - communicate with a peer in another organization, then it can use one of the - **anchor peers** defined in the channel configuration for that organization. - An organization can have zero or more anchor peers defined for it, and an - anchor peer can help with many different cross-organization communication - scenarios. - -Note that a peer can be a committing peer, endorsing peer, leader peer and -anchor peer all at the same time! Only the anchor peer is optional -- for all -practical purposes there will always be a leader peer and at least one -endorsing peer and at least one committing peer. - -### Adding organizations and peers to the channel - -When R2 joins the channel, the organization must install smart contract S5 -onto its peer node, P2. That's obvious -- if applications A1 or A2 wish to use -S5 on peer node P2 to generate transactions, it must first be present; -installation is the mechanism by which this happens. At this point, peer node P2 -has a physical copy of the smart contract and the ledger; like P1, it can both -generate and accept transactions onto its copy of ledger L1. - -R2 must approve the same chaincode definition as was approved by R1 in order to -use smart contract S5. Because the chaincode definition has already been -committed to the channel by organization R1, R2 can use the chaincode as soon as -the organization approves the chaincode definition and installs the chaincode -package. The commit transaction only needs to happen once. 
A new organization -can use the chaincode as soon as they approve the chaincode parameters agreed to -by other members of the channel. Because the approval of a chaincode definition -occurs at the organization level, R2 can approve the chaincode definition once -and join multiple peers to the channel with the chaincode package installed. -However, if R2 wanted to change the chaincode definition, both R1 and R2 would -need to approve a new definition for their organization, and then one of the -organizations would need to commit the definition to the channel. - -In our network, we can see that channel C1 connects two client applications, two -peer nodes and an ordering service. Since there is only one channel, there is -only one **logical** ledger with which these components interact. Peer nodes P1 -and P2 have identical copies of ledger L1. Copies of smart contract S5 will -usually be identically implemented using the same programming language, but -if not, they must be semantically equivalent. - -We can see that the careful addition of peers to the network can help support -increased throughput, stability, and resilience. For example, more peers in a -network will allow more applications to connect to it; and multiple peers in an -organization will provide extra resilience in the case of planned or unplanned -outages. - -It all means that it is possible to configure sophisticated topologies which -support a variety of operational goals -- there is no theoretical limit to how -big a network can get. Moreover, the technical mechanism by which peers within -an individual organization efficiently discover and communicate with each other -- -the [gossip protocol](../gossip.html#gossip-protocol) -- will accommodate a -large number of peer nodes in support of such topologies. - -The careful use of network and channel policies allow even large networks to be -well-governed. 
Organizations are free to add peer nodes to the network so long -as they conform to the policies agreed by the network. Network and channel -policies create the balance between autonomy and control which characterizes a -de-centralized network. - -## Simplifying the visual vocabulary - -We’re now going to simplify the visual vocabulary used to represent our sample -blockchain network. As the size of the network grows, the lines initially used -to help us understand channels will become cumbersome. Imagine how complicated -our diagram would be if we added another peer or client application, or another -channel? - -That's what we're going to do in a minute, so before we do, let's simplify the -visual vocabulary. Here's a simplified representation of the network we've -developed so far: - -![network.vocabulary](./network.diagram.8.png) - -*The diagram shows the facts relating to channel C1 in the network N as follows: -Client applications A1 and A2 can use channel C1 for communication with peers -P1 and P2, and orderer O4. Peer nodes P1 and P2 can use the communication -services of channel C1. Ordering service O4 can make use of the communication -services of channel C1. Channel configuration CC1 applies to channel C1.* - -Note that the network diagram has been simplified by replacing channel lines -with connection points, shown as blue circles which include the channel number. -No information has been lost. This representation is more scalable because it -eliminates crossing lines. This allows us to more clearly represent larger -networks. We've achieved this simplification by focusing on the connection -points between components and a channel, rather than the channel itself. - -## Adding another consortium definition - -In this next phase of network development, we introduce organization R3. We're -going to give organizations R2 and R3 a separate application channel which -allows them to transact with each other. 
This application channel will be -completely separate to that previously defined, so that R2 and R3 transactions -can be kept private to them. - -Let's return to the network level and define a new consortium, X2, for R2 and -R3: - -![network.consortium2](./network.diagram.9.png) - -*A network administrator from organization R1 or R4 has added a new consortium -definition, X2, which includes organizations R2 and R3. This will be used to -define a new channel for X2.* - -Notice that the network now has two consortia defined: X1 for organizations R1 -and R2 and X2 for organizations R2 and R3. Consortium X2 has been introduced in -order to be able to create a new channel for R2 and R3. - -A new channel can only be created by those organizations specifically identified -in the network configuration policy, NC4, as having the appropriate rights to do -so, i.e. R1 or R4. This is an example of a policy which separates organizations -that can manage resources at the network level versus those who can manage -resources at the channel level. Seeing these policies at work helps us -understand why Hyperledger Fabric has a sophisticated **tiered** policy -structure. - -In practice, consortium definition X2 has been added to the network -configuration NC4. We discuss the exact mechanics of this operation elsewhere in -the documentation. - -## Adding a new channel - -Let's now use this new consortium definition, X2, to create a new channel, C2. -To help reinforce your understanding of the simpler channel notation, we've used -both visual styles -- channel C1 is represented with blue circular end points, -whereas channel C2 is represented with red connecting lines: - -![network.channel2](./network.diagram.10.png) - -*A new channel C2 has been created for R2 and R3 using consortium definition X2. -The channel has a channel configuration CC2, completely separate to the network -configuration NC4, and the channel configuration CC1. 
Channel C2 is managed by -R2 and R3 who have equal rights over C2 as defined by a policy in CC2. R1 and -R4 have no rights defined in CC2 whatsoever.* - -The channel C2 provides a private communications mechanism for the consortium -X2. Again, notice how organizations united in a consortium are what form -channels. The channel configuration CC2 now contains the policies that govern -channel resources, assigning management rights to organizations R2 and R3 over -channel C2. It is managed exclusively by R2 and R3; R1 and R4 have no power in -channel C2. For example, channel configuration CC2 can subsequently be updated -to add organizations to support network growth, but this can only be done by R2 -or R3. - -Note how the channel configurations CC1 and CC2 remain completely separate from -each other, and completely separate from the network configuration, NC4. Again -we're seeing the de-centralized nature of a Hyperledger Fabric network; once -channel C2 has been created, it is managed by organizations R2 and R3 -independently to other network elements. Channel policies always remain separate -from each other and can only be changed by the organizations authorized to do so -in the channel. - -As the network and channels evolve, so will the network and channel -configurations. There is a process by which this is accomplished in a controlled -manner -- involving configuration transactions which capture the change to these -configurations. Every configuration change results in a new configuration block -transaction being generated, and [later in this topic](#the-ordering-serivce), -we'll see how these blocks are validated and accepted to create updated network -and channel configurations respectively. - -### Network and channel configurations - -Throughout our sample network, we see the importance of network and channel -configurations. 
These configurations are important because they encapsulate the -**policies** agreed by the network members, which provide a shared reference for -controlling access to network resources. Network and channel configurations also -contain **facts** about the network and channel composition, such as the name of -consortia and its organizations. - -For example, when the network is first formed using the ordering service node -O4, its behaviour is governed by the network configuration NC4. The initial -configuration of NC4 only contains policies that permit organization R4 to -manage network resources. NC4 is subsequently updated to also allow R1 to manage -network resources. Once this change is made, any administrator from organization -R1 or R4 that connects to O4 will have network management rights because that is -what the policy in the network configuration NC4 permits. Internally, each node -in the ordering service records each channel in the network configuration, so -that there is a record of each channel created, at the network level. - -It means that although ordering service node O4 is the actor that created -consortia X1 and X2 and channels C1 and C2, the **intelligence** of the network -is contained in the network configuration NC4 that O4 is obeying. As long as O4 -behaves as a good actor, and correctly implements the policies defined in NC4 -whenever it is dealing with network resources, our network will behave as all -organizations have agreed. In many ways NC4 can be considered more important -than O4 because, ultimately, it controls network access. - -The same principles apply for channel configurations with respect to peers. In -our network, P1 and P2 are likewise good actors. When peer nodes P1 and P2 are -interacting with client applications A1 or A2 they are each using the policies -defined within channel configuration CC1 to control access to the channel C1 -resources. 
- -For example, if A1 wants to access the smart contract chaincode S5 on peer nodes -P1 or P2, each peer node uses its copy of CC1 to determine the operations that -A1 can perform. For example, A1 may be permitted to read or write data from the -ledger L1 according to policies defined in CC1. We'll see later the same pattern -for actors in channel and its channel configuration CC2. Again, we can see that -while the peers and applications are critical actors in the network, their -behaviour in a channel is dictated more by the channel configuration policy than -any other factor. - -Finally, it is helpful to understand how network and channel configurations are -physically realized. We can see that network and channel configurations are -logically singular -- there is one for the network, and one for each channel. -This is important; every component that accesses the network or the channel must -have a shared understanding of the permissions granted to different -organizations. - -Even though there is logically a single configuration, it is actually replicated -and kept consistent by every node that forms the network or channel. For -example, in our network peer nodes P1 and P2 both have a copy of channel -configuration CC1, and by the time the network is fully complete, peer nodes P2 -and P3 will both have a copy of channel configuration CC2. Similarly ordering -service node O4 has a copy of the network configuration, but in a [multi-node -configuration](#the-ordering-service), every ordering service node will have its -own copy of the network configuration. - -Both network and channel configurations are kept consistent using the same -blockchain technology that is used for user transactions -- but for -**configuration** transactions. To change a network or channel configuration, an -administrator must submit a configuration transaction to change the network or -channel configuration. 
It must be signed by the organizations identified in the -appropriate policy as being responsible for configuration change. This policy is -called the **mod_policy** and we'll [discuss it later](#changing-policy). - -Indeed, the ordering service nodes operate a mini-blockchain, connected via the -**system channel** we mentioned earlier. Using the system channel ordering -service nodes distribute network configuration transactions. These transactions -are used to co-operatively maintain a consistent copy of the network -configuration at each ordering service node. In a similar way, peer nodes in an -**application channel** can distribute channel configuration transactions. -Likewise, these transactions are used to maintain a consistent copy of the -channel configuration at each peer node. - -This balance between objects that are logically singular, by being physically -distributed is a common pattern in Hyperledger Fabric. Objects like network -configurations, that are logically single, turn out to be physically replicated -among a set of ordering services nodes for example. We also see it with channel -configurations, ledgers, and to some extent smart contracts which are installed -in multiple places but whose interfaces exist logically at the channel level. -It's a pattern you see repeated time and again in Hyperledger Fabric, and -enables Hyperledger Fabric to be both de-centralized and yet manageable at the -same time. - -## Adding another peer - -Now that organization R3 is able to fully participate in channel C2, let's add -its infrastructure components to the channel. Rather than do this one component -at a time, we're going to add a peer, its local copy of a ledger, a smart -contract and a client application all at once! 
- -Let's see the network with organization R3's components added: - -![network.peer2](./network.diagram.11.png) - -*The diagram shows the facts relating to channels C1 and C2 in the network N as -follows: Client applications A1 and A2 can use channel C1 for communication -with peers P1 and P2, and ordering service O4; client applications A3 can use -channel C2 for communication with peer P3 and ordering service O4. Ordering -service O4 can make use of the communication services of channels C1 and C2. -Channel configuration CC1 applies to channel C1, CC2 applies to channel C2.* - -First of all, notice that because peer node P3 is connected to channel C2, it -has a **different** ledger -- L2 -- to those peer nodes using channel C1. The -ledger L2 is effectively scoped to channel C2. The ledger L1 is completely -separate; it is scoped to channel C1. This makes sense -- the purpose of the -channel C2 is to provide private communications between the members of the -consortium X2, and the ledger L2 is the private store for their transactions. - -In a similar way, the smart contract S6, installed on peer node P3, and defined -on channel C2, is used to provide controlled access to ledger L2. Application A3 -can now use channel C2 to invoke the services provided by smart contract S6 to -generate transactions that can be accepted onto every copy of the ledger L2 in -the network. - -At this point in time, we have a single network that has two completely separate -channels defined within it. These channels provide independently managed -facilities for organizations to transact with each other. Again, this is -de-centralization at work; we have a balance between control and autonomy. This -is achieved through policies which are applied to channels which are controlled -by, and affect, different organizations. - -## Joining a peer to multiple channels - -In this final stage of network development, let's return our focus to -organization R2. 
We can exploit the fact that R2 is a member of both consortia -X1 and X2 by joining it to multiple channels: - -![network.multichannel](./network.diagram.12.png) - -*The diagram shows the facts relating to channels C1 and C2 in the network N as -follows: Client applications A1 can use channel C1 for communication with peers -P1 and P2, and ordering service O4; client application A2 can use channel C1 -for communication with peers P1 and P2 and channel C2 for communication with -peers P2 and P3 and ordering service O4; client application A3 can use channel -C2 for communication with peer P3 and P2 and ordering service O4. Ordering service O4 -can make use of the communication services of channels C1 and C2. Channel -configuration CC1 applies to channel C1, CC2 applies to channel C2.* - -We can see that R2 is a special organization in the network, because it is the -only organization that is a member of two application channels! It is able to -transact with organization R1 on channel C1, while at the same time it can also -transact with organization R3 on a different channel, C2. - -Notice how peer node P2 has smart contract S5 installed for channel C1 and smart -contract S6 installed for channel C2. Peer node P2 is a full member of both -channels at the same time via different smart contracts for different ledgers. - -This is a very powerful concept -- channels provide both a mechanism for the -separation of organizations, and a mechanism for collaboration between -organizations. All the while, this infrastructure is provided by, and shared -between, a set of independent organizations. - -It is also important to note that peer node P2's behaviour is controlled very -differently depending upon the channel in which it is transacting. 
Specifically, -the policies contained in channel configuration CC1 dictate the operations -available to P2 when it is transacting in channel C1, whereas it is the policies -in channel configuration CC2 that control P2's behaviour in channel C2. - -Again, this is desirable -- R2 and R1 agreed the rules for channel C1, whereas -R2 and R3 agreed the rules for channel C2. These rules were captured in the -respective channel policies -- they can and must be used by every -component in a channel to enforce correct behaviour, as agreed. - -Similarly, we can see that client application A2 is now able to transact on -channels C1 and C2. And likewise, it too will be governed by the policies in -the appropriate channel configurations. As an aside, note that client -application A2 and peer node P2 are using a mixed visual vocabulary -- both -lines and connections. You can see that they are equivalent; they are visual -synonyms. - -### The ordering service - -The observant reader may notice that the ordering service node appears to be a -centralized component; it was used to create the network initially, and connects -to every channel in the network. Even though we added R1 and R4 to the network -configuration policy NC4 which controls the orderer, the node was running on -R4's infrastructure. In a world of de-centralization, this looks wrong! - -Don't worry! Our example network showed the simplest ordering service -configuration to help you understand the idea of a network administration point. -In fact, the ordering service can itself too be completely de-centralized! We -mentioned earlier that an ordering service could be comprised of many individual -nodes owned by different organizations, so let's see how that would be done in -our sample network. - -Let's have a look at a more realistic ordering service node configuration: - -![network.finalnetwork2](./network.diagram.15.png) - -*A multi-organization ordering service. 
The ordering service comprises ordering -service nodes O1 and O4. O1 is provided by organization R1 and node O4 is -provided by organization R4. The network configuration NC4 defines network -resource permissions for actors from both organizations R1 and R4.* - -We can see that this ordering service completely de-centralized -- it runs in -organization R1 and it runs in organization R4. The network configuration -policy, NC4, permits R1 and R4 equal rights over network resources. Client -applications and peer nodes from organizations R1 and R4 can manage network -resources by connecting to either node O1 or node O4, because both nodes behave -the same way, as defined by the policies in network configuration NC4. In -practice, actors from a particular organization *tend* to use infrastructure -provided by their home organization, but that's certainly not always the case. - -### De-centralized transaction distribution - -As well as being the management point for the network, the ordering service also -provides another key facility -- it is the distribution point for transactions. -The ordering service is the component which gathers endorsed transactions -from applications and orders them into transaction blocks, which are -subsequently distributed to every peer node in the channel. At each of these -committing peers, transactions are recorded, whether valid or invalid, and their -local copy of the ledger updated appropriately. - -Notice how the ordering service node O4 performs a very different role for the -channel C1 than it does for the network N. When acting at the channel level, -O4's role is to gather transactions and distribute blocks inside channel C1. It -does this according to the policies defined in channel configuration CC1. In -contrast, when acting at the network level, O4's role is to provide a management -point for network resources according to the policies defined in network -configuration NC4. 
Notice again how these roles are defined by different -policies within the channel and network configurations respectively. This should -reinforce to you the importance of declarative policy based configuration in -Hyperledger Fabric. Policies both define, and are used to control, the agreed -behaviours by each and every member of a consortium. - -We can see that the ordering service, like the other components in Hyperledger -Fabric, is a fully de-centralized component. Whether acting as a network -management point, or as a distributor of blocks in a channel, its nodes can be -distributed as required throughout the multiple organizations in a network. - -### Changing policy - -Throughout our exploration of the sample network, we've seen the importance of -the policies to control the behaviour of the actors in the system. We've only -discussed a few of the available policies, but there are many that can be -declaratively defined to control every aspect of behaviour. These individual -policies are discussed elsewhere in the documentation. - -Most importantly of all, Hyperledger Fabric provides a uniquely powerful policy -that allows network and channel administrators to manage policy change itself! -The underlying philosophy is that policy change is a constant, whether it occurs -within or between organizations, or whether it is imposed by external -regulators. For example, new organizations may join a channel, or existing -organizations may have their permissions increased or decreased. Let's -investigate a little more how change policy is implemented in Hyperledger -Fabric. - -The key point of understanding is that policy change is managed by a -policy within the policy itself. The **modification policy**, or -**mod_policy** for short, is a first class policy within a network or channel -configuration that manages change. Let's give two brief examples of how we've -**already** used mod_policy to manage change in our network! 
- -The first example was when the network was initially set up. At this time, only -organization R4 was allowed to manage the network. In practice, this was -achieved by making R4 the only organization defined in the network configuration -NC4 with permissions to network resources. Moreover, the mod_policy for NC4 -only mentioned organization R4 -- only R4 was allowed to change this -configuration. - -We then evolved the network N to also allow organization R1 to administer the -network. R4 did this by adding R1 to the policies for channel creation and -consortium creation. Because of this change, R1 was able to define the -consortia X1 and X2, and create the channels C1 and C2. R1 had equal -administrative rights over the channel and consortium policies in the network -configuration. - -R4 however, could grant even more power over the network configuration to R1! R4 -could add R1 to the mod_policy such that R1 would be able to manage change of -the network policy too. - -This second power is much more powerful than the first, because R1 now has -**full control** over the network configuration NC4! This means that R1 can, in -principle remove R4's management rights from the network. In practice, R4 would -configure the mod_policy such that R4 would need to also approve the change, or -that all organizations in the mod_policy would have to approve the change. -There's lots of flexibility to make the mod_policy as sophisticated as it needs -to be to support whatever change process is required. - -This is mod_policy at work -- it has allowed the graceful evolution of a basic -configuration into a sophisticated one. All the time this has occurred with the -agreement of all organization involved. The mod_policy behaves like every other -policy inside a network or channel configuration; it defines a set of -organizations that are allowed to change the mod_policy itself. 
- -We've only scratched the surface of the power of policies and mod_policy in -particular in this subsection. It is discussed at much more length in the policy -topic, but for now let's return to our finished network! - -## Network fully formed - -Let's recap what our network looks like using a consistent visual vocabulary. -We've re-organized it slightly using our more compact visual syntax, because it -better accommodates larger topologies: - -![network.finalnetwork2](./network.diagram.14.png) - -*In this diagram we see that the Fabric blockchain network consists of two -application channels and one ordering channel. The organizations R1 and R4 are -responsible for the ordering channel, R1 and R2 are responsible for the blue -application channel while R2 and R3 are responsible for the red application -channel. Client applications A1 is an element of organization R1, and CA1 is -it's certificate authority. Note that peer P2 of organization R2 can use the -communication facilities of the blue and the red application channel. Each -application channel has its own channel configuration, in this case CC1 and -CC2. The channel configuration of the system channel is part of the network -configuration, NC4.* - -We're at the end of our conceptual journey to build a sample Hyperledger Fabric -blockchain network. We've created a four organization network with two channels -and three peer nodes, with two smart contracts and an ordering service. It is -supported by four certificate authorities. It provides ledger and smart contract -services to three client applications, who can interact with it via the two -channels. Take a moment to look through the details of the network in the -diagram, and feel free to read back through the topic to reinforce your -knowledge, or go to a more detailed topic. - -### Summary of network components - -Here's a quick summary of the network components we've discussed: - -* [Ledger](../glossary.html#ledger). One per channel. 
Comprised of the - [Blockchain](../glossary.html#block) and - the [World state](../glossary.html#world-state) -* [Smart contract](../glossary.html#smart-contract) (aka chaincode) -* [Peer nodes](../glossary.html#peer) -* [Ordering service](../glossary.html#ordering-service) -* [Channel](../glossary.html#channel) -* [Certificate Authority](../glossary.html#hyperledger-fabric-ca) - -## Network summary - -In this topic, we've seen how different organizations share their infrastructure -to provide an integrated Hyperledger Fabric blockchain network. We've seen how -the collective infrastructure can be organized into channels that provide -private communications mechanisms that are independently managed. We've seen -how actors such as client applications, administrators, peers and orderers are -identified as being from different organizations by their use of certificates -from their respective certificate authorities. And in turn, we've seen the -importance of policy to define the agreed permissions that these organizational -actors have over network and channel resources. +Certificate Authorities play a key role in the network because they dispense X.509 certificates that can be used to identify components as belonging to an organization. Certificates issued by CAs can also be used to sign transactions to indicate that an organization endorses the transaction result -- a precondition of it being accepted onto the ledger. Let's examine these two aspects of a CA in a little more detail. + +Firstly, different components of the blockchain network use certificates to identify themselves to each other as being from a particular organization. That's why there is usually more than one CA supporting a blockchain network -- different organizations often use different CAs. We're going to use three CAs in our channel; one for each organization. 
Indeed, CAs are so important that Hyperledger Fabric provides you with a built-in one (called the *Fabric-CA*) to help you get going, though in practice, organizations will choose to use their own CA. + +The mapping of certificates to member organizations is achieved via a structure called a [Membership Services Provider (MSP)](../membership/membership.html), which defines an organization by creating an MSP which is tied to a root CA certificate to identify that components and identities were created by the root CA. The channel configuration can then assign certain rights and permissions to the organization through a policy (which will give a particular organization, such as R1, the right to add new organizations to the channel). We don't show MSPs on these diagrams, as they would clutter them up, but because they define organizations, they are very important. + +Secondly, we'll see later how certificates issued by CAs are at the heart of the [transaction](../glossary.html#transaction) generation and validation process. Specifically, X.509 certificates are used in client application [transaction proposals](../glossary.html#proposal) and smart contract [transaction responses](../glossary.html#response) to digitally sign [transactions](../glossary.html#transaction). Subsequently the network nodes who host copies of the ledger verify that transaction signatures are valid before accepting transactions onto the ledger. + +## Join nodes to the channel + +Peers are a fundamental element of the network because they host ledgers and chaincode (which contain smart contracts) and are therefore one of the physical points at which organizations that transact on a channel connect to the channel (the other being an application). A peer can belong to as many channels as an organizations deems appropriate (depending on factors like the processing limitations of the peer pod and data residency rules that exist in a particular country). 
For more information about peers, check out [Peers](../peers/peers.html). + +The ordering service, on the other hand, gathers endorsed transactions from applications and orders them into transaction blocks, which are subsequently distributed to every peer node in the channel. At each of these committing peers, transactions are recorded and the local copy of the ledger updated appropriately. An ordering service is unique to a particular channel, with the nodes servicing that channel also known as a "consenter set". Even if a node (or group of nodes) services multiple channels, each channel's ordering service is considered to be a distinct instance of the ordering service. For more information about the ordering service, check out [The Ordering Service](../orderer/ordering_service.html). + +**For information about how to create peer and ordering nodes, check out [Deploying a production network](../deployment_guide_overview.html).** + +Because R1, R2, and R0 are listed in the channel configuration, they are allowed to join peers (in the case of R1 and R2) or ordering nodes (in the case of R0) to the channel. + +![network.3](./network.diagram.3.png) + +R1's peer, P1, and R2's peer, P2, along with R0's ordering service, O, join the channel through the process described in the [Create a channel](../create_channel/create_channel_participation.html) tutorial. Note that while only one ordering node, 1, is joined to this channel, in a production scenario, an ordering service should contain at least three nodes. For the purposes of this topic, however, it is more important to conceptualize the interactions of the ordering service and the other components of the network than it is to understand how the needs of high availability impact configuration decisions. The nodes belonging to each organization have x.509 certificates created for them by the Certificate Authority associated with that organization. 
P1's certificates are created by CA1, P2's certificates are created by CA2, and so on. + +Every node in the channel stores a copy of the ledger of the channel, L1, which will be updated with each new block (note that the ordering service only contains the blockchain portion of a ledger and not the [state database](../glossary.html#state-database)). Because of this, we can think of L1 as being **physically hosted** on P1, but **logically hosted** on the channel C1. The best practice is for R1 and R2 to make their peers, P1 and P2, [anchor peers](../glossary.html#anchor-peer), as this will bootstrap communication on the network between R1 and R2. + +After the ordering service has been joined to the channel, it is possible to propose and commit updates to the channel configuration, but little else. Next, you must install, approve, and commit a chaincode on a channel. + +## Install, approve, and commit a chaincode + +Chaincodes are installed on peers, and then defined and committed on a channel: + +![network.4](./network.diagram.4.png) + +In Fabric, the business logic that defines how peer organizations interact with the ledger (for example, a transaction that changes the ownership of an asset), is contained in a smart contract. The structure that contains the smart contract, called chaincode, is installed on the relevant peers, approved by the relevant peer organizations, and committed on the channel. In this way, you can consider a chaincode to be **physically hosted** on a peer but **logically hosted** on a channel. In our example, the chaincode, S5, is installed on every peer, even though organizations are not required to install every chaincode. Note that the ordering service does not have the chaincode installed on it, as ordering nodes do not typically propose transactions. The process of installing, approving, and committing a chaincode is known as the "lifecycle" of the chaincode. 
For more information, check out [Fabric chaincode lifecycle](../chaincode_lifecycle.html). + +The most important piece of information supplied within the chaincode definition is the [endorsement policy](../glossary.html#endorsement-policy). It describes which organizations must endorse transactions before they will be accepted by other organizations onto their copy of the ledger. An endorsement policy can be set to any combination of members in a channel, depending on the use case. If an endorsement policy is not set, it is inherited from the default endorsement policy specified in the channel configuration. + +Note that while some chaincodes include the ability to create [private data transactions](../private_data_tutorial.html) between members on a channel, private data is outside the scope of this topic. + +While it's now technically possible to drive transactions using the peer CLI, the best practice is to create an application and use it to invoke transactions on your chaincode. + +## Using an application on the channel + +After a smart contract has been committed, client applications can be used to invoke transactions on a chaincode. This completes the structure we showed in the first image: + +![network.1](./network.diagram.1.png) + +Just like peers and orderers, a client application has an identity that associates it with an organization. In our example, client application A1 is associated with organization R1 and is connected to C1. + +Once a chaincode has been installed on a peer node and defined on a channel it can be [invoked](../glossary.html#invoke) by a client application. Client applications do this by sending transaction proposals to peers owned by the organizations specified by the endorsement policy. The transaction proposal serves as input to the chaincode, which uses it to generate an endorsed transaction response, which is returned by the peer node to the client application. 
+ +We can see that our peer organizations, R1 and R2, are fully participating in the channel. Their applications can access the ledger L1 via smart contract S5 to generate transactions that will be endorsed by the organizations specified in the endorsement policy and written to the ledger. + +For more information about how to develop an application, check out [Developing applications](../developapps/developing_applications.html). + +## Joining components to multiple channels + +Now that we have showed the process for how a channel is created, as well as the nature of the high level interactions between organizations, nodes, policies, chaincodes, and applications, let's expand our view by adding a new organization and a new channel to our scenario. To show how Fabric components can be joined to multiple channels, we'll join R2 and its peer, P2, to the new channel, while R1 and P1 will not be joined. + +### Creating the new channel configuration + +As we've seen, the first step in creating a channel is to create its configuration. This channel will include not just R2 and R0, but a new organization, R3, which has had its identities and certificates created for it by CA3. R1 will have no rights over this channel and will not be able to join components to it. In fact, it has no way to know it even exists! + +![network.5](./network.diagram.5.png) + +As before, now that the channel configuration, CC2, has been created, the channel can be said to **logically** exist, even though no components are joined to it. + +So let's join some components to it! + +### Join components to the new channel + +Just as we did with C1, let's join our components to C2. Because we already showed how all channels have a ledger and how chaincodes are installed on peers and committed to a channel (in this case, the chaincode is called S6), we'll skip those steps for now to show the end state of C2. Note that this channel has its own ledger, L2, which is completely separate from the ledger of C1. 
That's because even though R2 (and its peer, P2) are joined to both channels, the two channels are entirely separate administrative domains. + +![network.6](./network.diagram.6.png) + +Note that while both C1 and C2 both have the same orderer organization joined to it, R0, different ordering nodes are servicing each channel. This is not a mandatory configuration because even if the same ordering nodes are joined to multiple channels, each channel has a separate instance of the ordering service, and is more common in channels in which multiple orderer organizations come together to contribute nodes to an ordering service. Note that only the ordering node joined to a particular channel has the ledger of that channel. + +While it would also be possible for R2 to deploy a new peer to join to channel C2, in this case they have chosen to deploy the P2 to C2. Note that P2 has both the ledger of C1 (called L1) and the ledger of C2 (called L2) on its file system. Similarly, R2 has chosen to modify its application, A2, to be able to be used with C2, while R3's application, A3, is being used with C2. + +Logically, this is all very similar to the creation of C1. Two peer organizations come together with an ordering organization to create a channel and join components and a chaincode to it. + +Think about this configuration from the standpoint of R2, which is joined to both channels. From their perspective, they might think about both C1 and C2, as well as the components they have joined to both, as the "network", even though both channels are distinct from each other. In this sense, a "network" can also be seen as existing within the perspective of a particular organization as "all of the channels I am a member of and all of the components I own". + +Now that we have shown how organizations and their components can be joined to multiple channels, let's talk about how an organization and its components are added to an existing channel. 
+ +## Adding an organization to an existing channel + +As a channel matures, it is natural that its configuration will also mature, reflecting changes in the world that must be reflected in the channel. One of the more common ways a channel will be modified is to add new organizations to it. While it is also possible to add more orderer organizations (who may or may not contribute their own nodes), in this example we'll describe the process of how a peer organization, R3, is added to the channel configuration CC1 of channel C1. + +**Note that rights and permissions are defined at a channel level. Just because an organization is an administrator of one channel does not mean it will be an administrator of a different channel. Each channel is a distinct administrative zone and fully customizable to the use case it's serving.** + +![network.7](./network.diagram.7.png) + +Although the update to the diagram looks like one simple step, adding a new organization to a channel is, at a high level, a three step process: + +1. Decide on the new organization's permissions and role. The full scope of these rights must be agreed to before R3 is added to C1 and is beyond the scope of this topic, but comprises the same kinds of questions that must be answered when creating a channel in the first place. What kind of permissions and rights will R3 have on C1? Will it be an admin on the channel? Will its access to any channel resources be restricted (for example, R3 might only be able to write to C1, which means it can propose changes but not sign them)? What chaincodes will R3 install on its peers? +2. Update the channel, including the relevant chaincodes, to reflect these decisions. +3. The organization joins its peer nodes (and potentially ordering nodes) to the channel and begins participating. + +In this topic, we'll assume that R3 will join C1 with the same rights and status enjoyed by R1 and R2. 
Similarly, R3 will also be joined as an endorser of the S5 chaincode, which means that R1 or R2 must redefine S5 (specifically, the endorsement policy section of the chaincode definition) and approve it on the channel. + +Updating the channel configuration creates a new configuration block, CC1.1, which will serve as the channel configuration until it is updated again. Note that even though the configuration has changed, the channel still exists and P1 and P2 are still joined to it. There is no need to re-add organizations or peers to the channel. + +For more information about the process of adding an organization to a channel, check out [Adding an org to a channel](../channel_update_tutorial.html). + +For more information about policies (which define the roles organizations have on a channel), check out [Policies](../policies/policies.html). + +For more information about upgrading a chaincode, check out [Upgrade a chaincode](../chaincode_lifecycle.html#upgrade-a-chaincode). + +### Adding existing components to the newly joined channel + +Now that R3 is able to fully participate in channel C1, it can add its components to the channel. Rather than do this one component at a time, let's show how its peer, its local copy of a ledger, a smart contract and a client application can be joined all at once! + +![network.8](./network.diagram.8.png) + +In this example, R3 adds P3, which was previously joined to C2, to C1. When it does this, P3 pulls C1's ledger, L1. As we mentioned in the previous section, R3 has been added to C1 with rights equivalent to those of R1 and R2. Similarly, because the chaincode S5 was redefined and reapproved on the channel to include R3, R3 can now install S5 and begin transacting. Just as R2 modified its application A2 to be able to be used with channel C2, A3 is also now able to invoke transactions on C1. + +## Network recap + +We've covered a lot of ground in this topic. 
We've gone from a simple configuration with two organizations transacting on a single channel to multiple organizations transacting on multiple channels as well as the process for joining an organization to a channel that already exists. + +While this topic represents a relatively simple case, there are endless combinations of sophisticated topologies which are possible to achieve in Fabric, supporting an endless number of operational goals, and no theoretical limit to how big a network can get. The careful use of network and channel policies allow even large networks to be well-governed. diff --git a/docs/source/ops_guide.rst b/docs/source/ops_guide.rst index 4f1549d78dd..9dee860509a 100644 --- a/docs/source/ops_guide.rst +++ b/docs/source/ops_guide.rst @@ -4,8 +4,8 @@ Operations Guides .. toctree:: :maxdepth: 1 - orderer_deploy msp + certs_management.md peer_ledger_snapshot hsm.md configtx diff --git a/docs/source/orderer/ordering_service.md b/docs/source/orderer/ordering_service.md index e1cdfaa76dc..f0da4f3cc13 100644 --- a/docs/source/orderer/ordering_service.md +++ b/docs/source/orderer/ordering_service.md @@ -32,14 +32,6 @@ execution and ordering are performed by the same nodes. ## Orderer nodes and channel configuration -In addition to their **ordering** role, orderers also maintain the list of -organizations that are allowed to create channels. This list of organizations is -known as the "consortium", and the list itself is kept in the configuration of -the "orderer system channel" (also known as the "ordering system channel"). By -default, this list, and the channel it lives on, can only be edited by the -orderer admin. Note that it is possible for an ordering service to hold several -of these lists, which makes the consortium a vehicle for Fabric multi-tenancy. - Orderers also enforce basic access control for channels, restricting who can read and write data to them, and who can configure them. 
Remember that who is authorized to modify a configuration element in a channel is subject to the @@ -201,7 +193,7 @@ implementations for achieving consensus on the strict ordering of transactions between ordering service nodes. For information about how to stand up an ordering node (regardless of the -implementation the node will be used in), check out [our documentation on standing up an ordering node](../orderer_deploy.html). +implementation the node will be used in), check out [our documentation on deploying a production ordering service](../deployorderer/ordererplan.html). * **Raft** (recommended) @@ -224,14 +216,13 @@ implementation the node will be used in), check out [our documentation on standi * **Solo** (deprecated in v2.x) The Solo implementation of the ordering service is intended for test only and - consists only of a single ordering node. It has been deprecated and may be - removed entirely in a future release. Existing users of Solo should move to + consists only of a single ordering node. It has been deprecated and may be + removed entirely in a future release. Existing users of Solo should move to a single node Raft network for equivalent function. ## Raft -For information on how to configure a Raft ordering service, check out our -[documentation on configuring a Raft ordering service](../raft_configuration.html). +For information on how to customize the `orderer.yaml` file that determines the configuration of an ordering node, check out the [Checklist for a production ordering node](../deployorderer/ordererchecklist.html). The go-to ordering service choice for production networks, the Fabric implementation of the established Raft protocol uses a "leader and follower" @@ -256,22 +247,22 @@ similar. They're both CFT ordering services using the leader and follower design. 
If you are an application developer, smart contract developer, or peer administrator, you will not notice a functional difference between an ordering service based on Raft versus Kafka. However, there are a few major differences worth -considering, especially if you intend to manage an ordering service: +considering, especially if you intend to manage an ordering service. * Raft is easier to set up. Although Kafka has many admirers, even those admirers will (usually) admit that deploying a Kafka cluster and its ZooKeeper ensemble can be tricky, requiring a high level of expertise in Kafka infrastructure and settings. Additionally, there are many more components to manage with Kafka than with Raft, which means that there are more places where -things can go wrong. And Kafka has its own versions, which must be coordinated +things can go wrong. Kafka also has its own versions, which must be coordinated with your orderers. **With Raft, everything is embedded into your ordering node**. * Kafka and Zookeeper are not designed to be run across large networks. While Kafka is CFT, it should be run in a tight group of hosts. This means that practically speaking you need to have one organization run the Kafka cluster. Given that, having ordering nodes run by different organizations when using Kafka -(which Fabric supports) doesn't give you much in terms of decentralization because -the nodes will all go to the same Kafka cluster which is under the control of a +(which Fabric supports) doesn't decentralize the nodes because ultimately +the nodes all go to a Kafka cluster which is under the control of a single organization. With Raft, each organization can have its own ordering nodes, participating in the ordering service, which leads to a more decentralized system. @@ -323,9 +314,6 @@ the entries and their order, making the logs on the various orderers replicated. **Consenter set**. 
The ordering nodes actively participating in the consensus mechanism for a given channel and receiving replicated logs for the channel. -This can be all of the nodes available (either in a single cluster or in -multiple clusters contributing to the system channel), or a subset of those -nodes. **Finite-State Machine (FSM)**. Every ordering node in Raft has an FSM and collectively they're used to ensure that the sequence of logs in the various @@ -338,7 +326,7 @@ there to be a quorum. If a quorum of nodes is unavailable for any reason, the ordering service cluster becomes unavailable for both read and write operations on the channel, and no new logs can be committed. -**Leader**. This is not a new concept --- Kafka also uses leaders, as we've said --- +**Leader**. This is not a new concept --- Kafka also uses leaders --- but it's critical to understand that at any given time, a channel's consenter set elects a single node to be the leader (we'll describe how this happens in Raft later). The leader is responsible for ingesting new log entries, replicating @@ -357,17 +345,7 @@ initiate a leader election and one of them will be elected the new leader. ### Raft in a transaction flow -Every channel runs on a **separate** instance of the Raft protocol, which allows -each instance to elect a different leader. This configuration also allows -further decentralization of the service in use cases where clusters are made up -of ordering nodes controlled by different organizations. While all Raft nodes -must be part of the system channel, they do not necessarily have to be part of -all application channels. Channel creators (and channel admins) have the ability -to pick a subset of the available orderers and to add or remove ordering nodes -as needed (as long as only a single node is added or removed at a time). - -While this configuration creates more overhead in the form of redundant heartbeat -messages and goroutines, it lays necessary groundwork for BFT. 
+Every channel runs on a **separate** instance of the Raft protocol, which allows each instance to elect a different leader. This configuration also allows further decentralization of the service in use cases where clusters are made up of ordering nodes controlled by different organizations. Ordering nodes can be added or removed from a channel as needed as long as only a single node is added or removed at a time. While this configuration creates more overhead in the form of redundant heartbeat messages and goroutines, it lays necessary groundwork for BFT. In Raft, transactions (in the form of proposals or configuration updates) are automatically routed by the ordering node that receives the transaction to the diff --git a/docs/source/peer-chaincode-devmode.md b/docs/source/peer-chaincode-devmode.md index aaec996c75e..831e77bd429 100644 --- a/docs/source/peer-chaincode-devmode.md +++ b/docs/source/peer-chaincode-devmode.md @@ -33,7 +33,13 @@ Throughout this tutorial, all commands are performed from the `fabric/` folder. ``` export FABRIC_CFG_PATH=$(pwd)/sampleconfig ``` -5. Generate the genesis block for the ordering service. Run the following command to generate the genesis block and store it in `$(pwd)/sampleconfig/genesisblock` so that it can be used by the orderer in the next step when the orderer is started. +5. Create the `hyperledger` subdirectory in the `/var` directory. This is the default location Fabric uses to store blocks as defined in the orderer `orderer.yaml` and peer `core.yaml` files. To create the `hyperledger` subdirectory, execute these commands, replacing the question marks with your username: + + ``` + sudo mkdir /var/hyperledger + sudo chown ????? /var/hyperledger + ``` +6. Generate the genesis block for the ordering service. Run the following command to generate the genesis block and store it in `$(pwd)/sampleconfig/genesisblock` so that it can be used by the orderer in the next step when the orderer is started. 
``` configtxgen -profile SampleDevModeSolo -channelID syschannel -outputBlock genesisblock -configPath $FABRIC_CFG_PATH -outputBlock $(pwd)/sampleconfig/genesisblock ``` diff --git a/docs/source/policies/FabricPolicyHierarchy-1.png b/docs/source/policies/FabricPolicyHierarchy-1.png deleted file mode 100644 index c9e72d52315..00000000000 Binary files a/docs/source/policies/FabricPolicyHierarchy-1.png and /dev/null differ diff --git a/docs/source/policies/FabricPolicyHierarchy-2.png b/docs/source/policies/FabricPolicyHierarchy-2.png deleted file mode 100644 index 49f87d4b013..00000000000 Binary files a/docs/source/policies/FabricPolicyHierarchy-2.png and /dev/null differ diff --git a/docs/source/policies/FabricPolicyHierarchy-3.png b/docs/source/policies/FabricPolicyHierarchy-3.png deleted file mode 100644 index 82908aa79a5..00000000000 Binary files a/docs/source/policies/FabricPolicyHierarchy-3.png and /dev/null differ diff --git a/docs/source/policies/FabricPolicyHierarchy-4.png b/docs/source/policies/FabricPolicyHierarchy-4.png deleted file mode 100644 index 06ed383036f..00000000000 Binary files a/docs/source/policies/FabricPolicyHierarchy-4.png and /dev/null differ diff --git a/docs/source/policies/policies.md b/docs/source/policies/policies.md index 4e171372505..fdd5c274fa1 100644 --- a/docs/source/policies/policies.md +++ b/docs/source/policies/policies.md @@ -7,12 +7,13 @@ In this topic, we'll cover: * [What is a policy](#what-is-a-policy) * [Why are policies needed](#why-are-policies-needed) -* [How are policies implemented throughout Fabric](#how-are-policies-implemented-throughout-fabric) -* [Fabric policy domains](#the-fabric-policy-domains) +* [How are policies implemented](#how-are-policies-implemented) * [How do you write a policy in Fabric](#how-do-you-write-a-policy-in-fabric) * [Fabric chaincode lifecycle](#fabric-chaincode-lifecycle) * [Overriding policy definitions](#overriding-policy-definitions) +Note: this topic describes a network that does not 
use a "system channel", a channel that the ordering service is bootstrapped with and the ordering service exclusively controls. Since the release of v2.3, using system channel is now considered the legacy process as compared to the process to [Create a channel](../create_channel/create_channel_participation.html) without a system channel. For a version of this topic that includes information about the system channel, check out [Policies](https://hyperledger-fabric.readthedocs.io/en/release-2.2/policies/policies.html) from the v2.2 documentation. + ## What is a policy At its most basic level, a policy is a set of rules that define the structure @@ -30,9 +31,9 @@ and responsibilities of each party. Whereas an insurance policy is put in place for risk management, in Hyperledger Fabric, policies are the mechanism for infrastructure management. Fabric policies represent how members come to agreement on accepting or rejecting changes to the -network, a channel, or a smart contract. Policies are agreed to by the consortium -members when a network is originally configured, but they can also be modified -as the network evolves. For example, they describe the criteria for adding or +network, a channel, or a smart contract. Policies are agreed to by the channel +members when the channel is originally configured, but they can also be modified +as the channel evolves. For example, they describe the criteria for adding or removing members from a channel, change how blocks are formed, or specify the number of organizations required to endorse a smart contract. All of these actions are described by a policy which defines who can perform the action. @@ -59,39 +60,11 @@ they are written, policies evaluate the collection of signatures attached to transactions and proposals and validate if the signatures fulfill the governance agreed to by the network. -## How are policies implemented throughout Fabric - -Policies are implemented at different levels of a Fabric network. 
Each policy -domain governs different aspects of how a network operates. - -![policies.policies](./FabricPolicyHierarchy-2.png) *A visual representation -of the Fabric policy hierarchy.* - -### System channel configuration +## How are policies implemented -Every network begins with an ordering **system channel**. There must be exactly -one ordering system channel for an ordering service, and it is the first channel -to be created. The system channel also contains the organizations who are the -members of the ordering service (ordering organizations) and those that are -on the networks to transact (consortium organizations). +Policies are defined within the relevant administrative domain of a particular action defined by the policy. For example, the policy for adding a peer organization to a channel is defined within the administrative domain of the peer organizations (known as the `Application` group). Similarly, adding ordering nodes in the consenter set of the channel is controlled by a policy inside the `Orderer` group. Actions that cross both the peer and orderer organizational domains are contained in the `Channel` group. -The policies in the ordering system channel configuration blocks govern the -consensus used by the ordering service and define how new blocks are created. -The system channel also governs which members of the consortium are allowed to -create new channels. - -### Application channel configuration - -Application _channels_ are used to provide a private communication mechanism -between organizations in the consortium. - -The policies in an application channel govern the ability to add or remove -members from the channel. Application channels also govern which organizations -are required to approve a chaincode before the chaincode is defined and -committed to a channel using the Fabric chaincode lifecycle. 
When an application -channel is initially created, it inherits all the ordering service parameters -from the orderer system channel by default. However, those parameters (and the -policies governing them) can be customized in each channel. +Typically, these policies default to the "majority of admins" of the group they fall under (a majority of peer organization admins for example, or in the case of `Channel` policies, a majority of both peer organizations and orderer organizations), though they can be specified to any rule a user wishes to define. Check out [Signature policies](#signature-policies) for more information. ### Access control lists (ACLs) @@ -137,35 +110,6 @@ required to sign (approve) any configuration _update_. It is the policy that defines how the policy is updated. Thus, each channel configuration element includes a reference to a policy which governs its modification. -## The Fabric policy domains - -While Fabric policies are flexible and can be configured to meet the needs of a -network, the policy structure naturally leads to a division between the domains -governed by either the Ordering Service organizations or the members of the -consortium. In the following diagram you can see how the default policies -implement control over the Fabric policy domains below. - -![policies.policies](./FabricPolicyHierarchy-4.png) *A more detailed look at the -policy domains governed by the Orderer organizations and consortium organizations.* - -A fully functional Fabric network can feature many organizations with different -responsibilities. The domains provide the ability to extend different privileges -and roles to different organizations by allowing the founders of the ordering -service the ability to establish the initial rules and membership of the -consortium. They also allow the organizations that join the consortium to create -private application channels, govern their own business logic, and restrict -access to the data that is put on the network. 
- -The system channel configuration and a portion of each application channel -configuration provides the ordering organizations control over which organizations -are members of the consortium, how blocks are delivered to channels, and the -consensus mechanism used by the nodes of the ordering service. - -The system channel configuration provides members of the consortium the ability -to create channels. Application channels and ACLs are the mechanism that -consortium organizations use to add or remove members from a channel and restrict -access to data and smart contracts on a channel. - ## How do you write a policy in Fabric If you want to change anything in Fabric, the policy associated with the resource @@ -222,10 +166,7 @@ organizations. As mentioned above, a key benefit of an `ImplicitMeta` policy such as `MAJORITY Admins` is that when you add a new admin organization to the channel, you do not have to update the channel policy. Therefore `ImplicitMeta` policies are -considered to be more flexible as the consortium members change. The consortium -on the orderer can change as new members are added or an existing member leaves -with the consortium members agreeing to the changes, but no policy updates are -required. Recall that `ImplicitMeta` policies ultimately resolve the +considered to be more flexible as organizations are added. Recall that `ImplicitMeta` policies ultimately resolve the `Signature` sub-policies underneath them in the configuration tree as the diagram shows. @@ -249,19 +190,19 @@ sign. Understanding policies begins with examining the `configtx.yaml` where the channel policies are defined. We can use the `configtx.yaml` file in the Fabric test network to see examples of both policy syntax types. We are going to examine -the configtx.yaml file used by the [fabric-samples/test-network](https://github.com/hyperledger/fabric-samples/blob/{BRANCH}/test-network/configtx/configtx.yaml) sample. 
+the `configtx.yaml` file used by the [fabric-samples/test-network](https://github.com/hyperledger/fabric-samples/blob/master/test-network/configtx/configtx.yaml) sample. -The first section of the file defines the organizations of the network. Inside each +The first section of the file defines the organizations that will be members of the channel. Inside each organization definition are the default policies for that organization, `Readers`, `Writers`, `Admins`, and `Endorsement`, although you can name your policies anything you want. Each policy has a `Type` which describes how the policy is expressed (`Signature` or `ImplicitMeta`) and a `Rule`. -The test network example below shows the Org1 organization definition in the system -channel, where the policy `Type` is `Signature` and the endorsement policy rule +The test network example below shows the Org1 organization definition in the +channel, where the policy `Type` is `Signature` and the `Endorsement:` policy rule is defined as `"OR('Org1MSP.peer')"`. This policy specifies that a peer that is a member of `Org1MSP` is required to sign. It is these signature policies that -become the sub-policies that the ImplicitMeta policies point to. +become the sub-policies that the `ImplicitMeta` policies point to.
@@ -269,32 +210,32 @@ become the sub-policies that the ImplicitMeta policies point to. ``` - - &Org1 - # DefaultOrg defines the organization which is used in the sampleconfig - # of the fabric.git development environment - Name: Org1MSP - - # ID to load the MSP definition as - ID: Org1MSP - - MSPDir: crypto-config/peerOrganizations/org1.example.com/msp - - # Policies defines the set of policies at this level of the config tree - # For organization policies, their canonical path is usually - # /Channel/// - Policies: - Readers: - Type: Signature - Rule: "OR('Org1MSP.admin', 'Org1MSP.peer', 'Org1MSP.client')" - Writers: - Type: Signature - Rule: "OR('Org1MSP.admin', 'Org1MSP.client')" - Admins: - Type: Signature - Rule: "OR('Org1MSP.admin')" - Endorsement: - Type: Signature - Rule: "OR('Org1MSP.peer')" +- &Org1 + # DefaultOrg defines the organization which is used in the sampleconfig + # of the fabric.git development environment + Name: Org1MSP + + # ID to load the MSP definition as + ID: Org1MSP + + MSPDir: ../organizations/peerOrganizations/org1.example.com/msp + + # Policies defines the set of policies at this level of the config tree + # For organization policies, their canonical path is usually + # /Channel/// + Policies: + Readers: + Type: Signature + Rule: "OR('Org1MSP.admin', 'Org1MSP.peer', 'Org1MSP.client')" + Writers: + Type: Signature + Rule: "OR('Org1MSP.admin', 'Org1MSP.client')" + Admins: + Type: Signature + Rule: "OR('Org1MSP.admin')" + Endorsement: + Type: Signature + Rule: "OR('Org1MSP.peer')" ```
@@ -423,7 +364,7 @@ If an endorsement policy is not explicitly specified during the approval step, the default `Endorsement` policy `"MAJORITY Endorsement"` is used which means that a majority of the peers belonging to the different channel members (organizations) need to execute and validate a transaction against the chaincode -in order for the transaction to be considered valid. This default policy allows +in order for the transaction to be considered valid. This default policy allows organizations that join the channel to become automatically added to the chaincode endorsement policy. If you don't want to use the default endorsement policy, use the Signature policy format to specify a more complex endorsement @@ -436,8 +377,8 @@ IDs, but they are more versatile because they can include a wide range of properties of an actor’s identity, such as the actor’s organization, organizational unit, role or even the actor’s specific identity. When we talk about principals, they are the properties which determine their permissions. -Principals are described as 'MSP.ROLE', where `MSP` represents the required MSP -ID (the organization), and `ROLE` represents one of the four accepted roles: +Principals are described as `'MSP.ROLE'`, where `MSP` represents the required MSP +ID (the organization), and `ROLE` represents one of the four accepted roles: Member, Admin, Client, and Peer. A role is associated to an identity when a user enrolls with a CA. You can customize the list of roles available on your Fabric CA. @@ -479,14 +420,11 @@ developing, and testing your blockchain, but they are meant to be customized in a production environment. You should be aware of the default policies in the `configtx.yaml` file. Channel configuration policies can be extended with arbitrary verbs, beyond the default `Readers, Writers, Admins` in -`configtx.yaml`. 
The orderer system and application channels are overridden by -issuing a config update when you override the default policies by editing the -`configtx.yaml` for the orderer system channel or the `configtx.yaml` for a -specific channel. - -See the topic on -[Updating a channel configuration](../config_update.html#updating-a-channel-configuration) -for more information. +`configtx.yaml`. + +For more information on overriding policy definitions when creating a channel, check out [Channel policies](../create_channel/channel_policies.html) and [Creating a channel without a system channel](../create_channel/create_channel_participation.html). + +For information about how to update a channel, check out [Updating a channel configuration](../config_update.html#updating-a-channel-configuration) for more information. diff --git a/docs/source/prereqs.rst b/docs/source/prereqs.rst index 60209ebfa91..f0ee2bf1ba4 100644 --- a/docs/source/prereqs.rst +++ b/docs/source/prereqs.rst @@ -34,12 +34,8 @@ operating, or developing on (or for), Hyperledger Fabric: Toolbox `__ - again, Docker version Docker 17.06.2-ce or greater is required. -You can check the version of Docker you have installed with the following -command from a terminal prompt: - -.. code:: bash - - docker --version +The Fabric sample test network has been successfully +verified with Docker Desktop version 2.5.0.1. Higher versions may not work at this time. .. note:: The following applies to linux systems running systemd. diff --git a/docs/source/private-data-arch.rst b/docs/source/private-data-arch.rst index 8d1dfafd76b..a207003d479 100644 --- a/docs/source/private-data-arch.rst +++ b/docs/source/private-data-arch.rst @@ -13,15 +13,13 @@ used to control dissemination of private data at endorsement time and, optionally, whether the data will be purged. Beginning with the Fabric chaincode lifecycle introduced with Fabric v2.0, the -collection definition is part of the chaincode definition. 
The collection is -approved by channel members, and then deployed when the chaincode definition -is committed to the channel. The collection file needs to be the same for all -channel members. If you are using the peer CLI to approve and commit the +collection definition is part of the chaincode definition. The chaincode including +collection definition must be approved by the required channel members, and +then becomes effective when the chaincode definition is committed to the channel. +The collection definition that is approved must be identical for each of the required +channel members. When using the peer CLI to approve and commit the chaincode definition, use the ``--collections-config`` flag to specify the path -to the collection definition file. If you are using the Fabric SDK for Node.js, -visit `How to install and start your chaincode `_. -To use the `previous lifecycle process `_ to deploy a private data collection, -use the ``--collections-config`` flag when `instantiating your chaincode `_. +to the collection definition file. Collection definitions are composed of the following properties: @@ -143,6 +141,44 @@ though the chaincode level endorsement policy may require endorsement from In this way you can control which organizations are entrusted to write to certain private data collections. +Implicit private data collections +--------------------------------- + +In addition to explicitly defined private data collections, +every chaincode has an implicit private data namespace reserved for organization-specific +private data. These implicit organization-specific private data collections can +be used to store an individual organization's private data, and do not need to +be defined explicitly. + +The private data dissemination policy and endorsement policy for implicit +organization-specific collections is the respective organization itself. 
+The implication is that if data exists in an implicit private data collection, +it was endorsed by the respective organization. Implicit private data collections +can therefore be used by an organization to record their agreement or vote +for some fact, which is a useful pattern to leverage in multi-party business +processes implemented in chaincode since other organizations can check +the on-chain hash to verify the organization's record. Private data +can also be shared or transferred to an implicit collection of another organization, +making implicit collections a useful pattern to leverage in chaincode +applications, without the need to explicitly manage collection definitions. + +Since implicit private data collections are not explicitly defined, +it is not possible to set the additional collection properties. Specifically, +``memberOnlyRead`` and ``memberOnlyWrite`` are not available, +meaning that access control for clients reading data from or writing data to +an implicit private data collection must be encoded in the chaincode on the organization's peer. +Furthermore, ``blockToLive`` is not available, meaning that private data is never automatically purged. + +The properties ``requiredPeerCount`` and ``maxPeerCount`` can however be set in the peer's core.yaml +(``peer.gossip.pvtData.implicitCollectionDisseminationPolicy.requiredPeerCount`` and +``peer.gossip.pvtData.implicitCollectionDisseminationPolicy.maxPeerCount``). An organization +can set these properties based on the number of peers that they deploy, as described +in the next section. + +.. note:: Since implicit private data collections are not explicitly defined, + it is not possible to associate CouchDB indexes with them. Utilize + key-based queries and key-range queries rather than JSON queries. 
+ Private data dissemination -------------------------- @@ -152,7 +188,7 @@ to all peers in a channel, the endorsing peer plays an important role in disseminating private data to other peers of authorized organizations. This ensures the availability of private data in the channel's collection, even if endorsing peers become unavailable after their endorsement. To assist with this dissemination, -the ``maxPeerCount`` and ``requiredPeerCount`` properties in the collection definition +the ``maxPeerCount`` and ``requiredPeerCount`` properties control the degree of dissemination at endorsement time. If the endorsing peer cannot successfully disseminate the private data to at least @@ -276,7 +312,8 @@ configuration definitions and how to set them, refer back to the `Private data collection definition`_ section of this topic. .. note:: If you would like more granular access control, you can set - ``memberOnlyRead`` and ``memberOnlyWrite`` to false. You can then apply your + ``memberOnlyRead`` and ``memberOnlyWrite`` to false (implicit collections always + behave as if ``memberOnlyRead`` and ``memberOnlyWrite`` are false). You can then apply your own access control logic in chaincode, for example by calling the GetCreator() chaincode API or using the client identity `chaincode library `__ . 
@@ -290,19 +327,19 @@ shim APIs: * ``GetPrivateDataByRange(collection, startKey, endKey string)`` * ``GetPrivateDataByPartialCompositeKey(collection, objectType string, keys []string)`` -And for the CouchDB state database, JSON content queries can be passed using the -shim API: +And if using explicit private data collections and CouchDB state database, +JSON content queries can be passed using the shim API: * ``GetPrivateDataQueryResult(collection, query string)`` Limitations: -* Clients that call chaincode that executes range or rich JSON queries should be aware +* Clients that call chaincode that executes key range queries or JSON queries should be aware that they may receive a subset of the result set, if the peer they query has missing private data, based on the explanation in Private Data Dissemination section above. Clients can query multiple peers and compare the results to determine if a peer may be missing some of the result set. -* Chaincode that executes range or rich JSON queries and updates data in a single +* Chaincode that executes key range queries or JSON queries and updates data in a single transaction is not supported, as the query results cannot be validated on the peers that don’t have access to the private data, or on peers that are missing the private data that they have access to. If a chaincode invocation both queries @@ -312,6 +349,9 @@ Limitations: chaincode function to make the updates. Note that calls to GetPrivateData() to retrieve individual keys can be made in the same transaction as PutPrivateData() calls, since all peers can validate key reads based on the hashed key version. +* Since implicit private data collections are not explicitly defined, + it is not possible to associate CouchDB indexes with them. + It is therefore not recommended to utilize JSON queries with implicit private data collections. 
Using Indexes with collections ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -320,7 +360,7 @@ The topic :doc:`couchdb_as_state_database` describes indexes that can be applied to the channel’s state database to enable JSON content queries, by packaging indexes in a ``META-INF/statedb/couchdb/indexes`` directory at chaincode installation time. Similarly, indexes can also be applied to private data -collections, by packaging indexes in a ``META-INF/statedb/couchdb/collections//indexes`` +collections that are explicitly defined, by packaging indexes in a ``META-INF/statedb/couchdb/collections//indexes`` directory. An example index is available `here `_. Considerations when using private data @@ -329,8 +369,8 @@ Considerations when using private data Private data purging ~~~~~~~~~~~~~~~~~~~~ -Private data can be periodically purged from peers. For more details, -see the ``blockToLive`` collection definition property above. +Private data in explicitly defined private data collections can be periodically purged from peers. +For more details, see the ``blockToLive`` collection definition property above. Additionally, recall that prior to commit, peers store private data in a local transient data store. This data automatically gets purged when the transaction diff --git a/docs/source/private-data/private-data.md b/docs/source/private-data/private-data.md index 737768fd4d9..1a8efcdd9e0 100644 --- a/docs/source/private-data/private-data.md +++ b/docs/source/private-data/private-data.md @@ -15,6 +15,14 @@ That's why Fabric offers the ability to create channel the ability to endorse, commit, or query private data without having to create a separate channel. +Private data collections can be defined explicitly within a chaincode definition. +Additionally, every chaincode has an implicit private data namespace reserved for organization-specific +private data. 
These implicit organization-specific private data collections can +be used to store an individual organization's private data, which is useful +if you would like to store private data related to a single organization, +such as details about an asset owned by an organization or an organization's +approval for a step in a multi-party business process implemented in chaincode. + ## What is a private data collection? A collection is the combination of two elements: diff --git a/docs/source/private_data_tutorial.rst b/docs/source/private_data_tutorial.rst index 9d6c002dc37..a2f3ea0289e 100644 --- a/docs/source/private_data_tutorial.rst +++ b/docs/source/private_data_tutorial.rst @@ -1,9 +1,9 @@ Using Private Data in Fabric ============================ -This tutorial will demonstrate the use of collections to provide storage +This tutorial will demonstrate the use of Private Data Collections (PDC) to provide storage and retrieval of private data on the blockchain network for authorized peers -of organizations. +of organizations. The collection is specified using a collection definition file containing the policies governing that collection. The information in this tutorial assumes knowledge of private data stores and their use cases. For more information, check out :doc:`private-data/private-data`. @@ -62,7 +62,7 @@ Build a collection definition JSON file --------------------------------------- Before a set of organizations can transact using private data, all organizations -on channel need to build a collection file that defines the private +on channel need to build a collection definition file that defines the private data collections associated with each chaincode. Data that is stored in a private data collection is only distributed to the peers of certain organizations instead of all members of the channel. 
The collection definition file describes all of the @@ -215,9 +215,11 @@ Specifically, access to the private data will be restricted as follows: All of the data that is created by the asset transfer private data sample smart -contract is stored in private data. The smart contract uses the Fabric chaincode API +contract is stored in PDC. The smart contract uses the Fabric chaincode API to read and write private data to private data collections using the ``GetPrivateData()`` and ``PutPrivateData()`` functions. You can find more information about those functions `here `_. +This private data is stored in private state db on the peer (separate from public state db), and +is disseminated between authorized peers via gossip protocol. The following diagram illustrates the private data model used by the private data sample. Note that Org3 is only shown in the diagram to illustrate that if @@ -265,9 +267,9 @@ For example, in the following snippet of the ``CreateAsset`` function, .. code-block:: GO - // CreateAsset creates a new asset by placing the main asset details in the assetCollection - // that can be read by both organizations. The appraisal value is stored in the owner's org specific collection. - func (s *SmartContract) CreateAsset(ctx contractapi.TransactionContextInterface) error { + // CreateAsset creates a new asset by placing the main asset details in the assetCollection + // that can be read by both organizations. The appraisal value is stored in the owners org specific collection. 
+ func (s *SmartContract) CreateAsset(ctx contractapi.TransactionContextInterface) error { // Get new asset from transient map transientMap, err := ctx.GetStub().GetTransient() @@ -322,9 +324,9 @@ For example, in the following snippet of the ``CreateAsset`` function, } // Get ID of submitting client identity - clientID, err := ctx.GetClientIdentity().GetID() + clientID, err := submittingClientIdentity(ctx) if err != nil { - return fmt.Errorf("failed to get verified OrgID: %v", err) + return err } // Verify that the client is submitting request to peer in their organization @@ -351,7 +353,8 @@ For example, in the following snippet of the ``CreateAsset`` function, // Save asset to private data collection // Typical logger, logs to stdout/file in the fabric managed docker container, running this chaincode // Look for container name like dev-peer0.org1.example.com-{chaincodename_version}-xyz - log.Printf("CreateAsset Put: collection %v, ID %v", assetCollection, assetInput.ID) + log.Printf("CreateAsset Put: collection %v, ID %v, owner %v", assetCollection, assetInput.ID, clientID) + err = ctx.GetStub().PutPrivateData(assetCollection, assetInput.ID, assetJSONasBytes) if err != nil { return fmt.Errorf("failed to put asset into private data collecton: %v", err) @@ -374,7 +377,7 @@ For example, in the following snippet of the ``CreateAsset`` function, return fmt.Errorf("failed to infer private collection name for the org: %v", err) } - // Put asset appraised value into owner's org specific private data collection + // Put asset appraised value into owners org specific private data collection log.Printf("Put: collection %v, ID %v", orgCollection, assetInput.ID) err = ctx.GetStub().PutPrivateData(orgCollection, assetInput.ID, assetPrivateDetailsAsBytes) if err != nil { @@ -444,7 +447,7 @@ Run the following command from the test network directory. .. 
code:: bash - ./network.sh deployCC -ccn private -ccep "OR('Org1MSP.peer','Org2MSP.peer')" -cccg ../asset-transfer-private-data/chaincode-go/collections_config.json + ./network.sh deployCC -ccn private -ccp ../asset-transfer-private-data/chaincode-go/ -ccl go -ccep "OR('Org1MSP.peer','Org2MSP.peer')" -cccg ../asset-transfer-private-data/chaincode-go/collections_config.json Note that we need to pass the path to the private data collection definition file to the command. As part of deploying the chaincode to the channel, both organizations @@ -455,7 +458,7 @@ This allows Org1 and Org2 to create an asset without receiving an endorsement fr the other organization. You can see the steps required to deploy the chaincode printed in your logs after you issue the command above. -When both organizations approve the chaincode defition using the +When both organizations approve the chaincode definition using the `peer lifecycle chaincode approveformyorg `__ command, the chaincode definition includes the path to the private data collection definition using the ``--collections-config`` flag. You can see the following `approveformyorg` @@ -672,20 +675,10 @@ When successful, the command will return the following result: .. code:: bash - {"objectType":"asset","assetID":"asset1","color":"green","size":20,"owner":"eDUwOTo6Q049b3JnMWFkbWluLE9VPWFkbWluLE89SHlwZXJsZWRnZXIsU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUzo6Q049Y2Eub3JnMS5leGFtcGxlLmNvbSxPPW9yZzEuZXhhbXBsZS5jb20sTD1EdXJoYW0sU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUw=="} - -The `"owner"` of the asset is the identity that created the asset by invoking the smart contract. The private data smart contract uses the ``GetClientIdentity().GetID()`` API to read the name and issuer of the identity certificate. -You can see that information by decoding the owner string out of base64 format: - -.. 
code:: bash + {"objectType":"asset","assetID":"asset1","color":"green","size":20,"owner":"x509::CN=appUser1,OU=admin,O=Hyperledger,ST=North Carolina,C=US::CN=ca.org1.example.com,O=org1.example.com,L=Durham,ST=North Carolina,C=US"} - echo eDUwOTo6Q049b3JnMWFkbWluLE9VPWFkbWluLE89SHlwZXJsZWRnZXIsU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUzo6Q049Y2Eub3JnMS5leGFtcGxlLmNvbSxPPW9yZzEuZXhhbXBsZS5jb20sTD1EdXJoYW0sU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUw== | base64 --decode - -The result will show the name and issuer of the owner certificate: - -.. code:: bash +The `"owner"` of the asset is the identity that created the asset by invoking the smart contract. The private data smart contract uses the ``GetClientIdentity().GetID()`` API to read the name and issuer of the identity certificate. You can see the name and issuer of the identity certificate, in the owner attribute. - x509::CN=org1admin,OU=admin,O=Hyperledger,ST=North Carolina,C=US::CN=ca.org1.example.com,O=org1.example.com,L=Durham,ST=North Carolina,C=US Query for the ``appraisedValue`` private data of ``asset1`` as a member of Org1. @@ -740,7 +733,8 @@ When successful, should see something similar to the following result: .. code:: json - {"objectType":"asset","assetID":"asset1","color":"green","size":20,"owner":"eDUwOTo6Q049b3JnMWFkbWluLE9VPWFkbWluLE89SHlwZXJsZWRnZXIsU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUzo6Q049Y2Eub3JnMS5leGFtcGxlLmNvbSxPPW9yZzEuZXhhbXBsZS5jb20sTD1EdXJoYW0sU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUw=="} + {"objectType":"asset","assetID":"asset1","color":"green","size":20, + "owner":"x509::CN=appUser1,OU=admin,O=Hyperledger,ST=North Carolina,C=US::CN=ca.org1.example.com,O=org1.example.com,L=Durham,ST=North Carolina,C=US" } Query private data Org2 is not authorized to ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -861,17 +855,9 @@ The results will show that the buyer identity now owns the asset: .. 
code:: bash - {"objectType":"asset","assetID":"asset1","color":"green","size":20,"owner":"eDUwOTo6Q049YnV5ZXIsT1U9Y2xpZW50LE89SHlwZXJsZWRnZXIsU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUzo6Q049Y2Eub3JnMi5leGFtcGxlLmNvbSxPPW9yZzIuZXhhbXBsZS5jb20sTD1IdXJzbGV5LFNUPUhhbXBzaGlyZSxDPVVL"} + {"objectType":"asset","assetID":"asset1","color":"green","size":20,"owner":"x509::CN=appUser2, OU=client + OU=org2 + OU=department1::CN=ca.org2.example.com, O=org2.example.com, L=Hursley, ST=Hampshire, C=UK"} -You can base64 decode the `"owner"` to see that it is the buyer identity: - -.. code:: bash - - echo eDUwOTo6Q049YnV5ZXIsT1U9Y2xpZW50LE89SHlwZXJsZWRnZXIsU1Q9Tm9ydGggQ2Fyb2xpbmEsQz1VUzo6Q049Y2Eub3JnMi5leGFtcGxlLmNvbSxPPW9yZzIuZXhhbXBsZS5jb20sTD1IdXJzbGV5LFNUPUhhbXBzaGlyZSxDPVVL | base64 --decode - -.. code:: bash - - x509::CN=buyer,OU=client,O=Hyperledger,ST=North Carolina,C=US::CN=ca.org2.example.com,O=org2.example.com,L=Hursley,ST=Hampshire,C=UK +The `"owner"` of the asset now has the buyer identity. You can also confirm that transfer removed the private details from the Org1 collection: @@ -990,6 +976,11 @@ automatically deployed upon chaincode instantiation on the channel when the ``--collections-config`` flag is specified pointing to the location of the collection JSON file. +.. note:: It is not possible to create an index for use with an implicit private data collection. + An implicit collection is based on the organization's name and is created automatically. The format of the name + is ``_implicit_org_``. + Please see `FAB-17916 `__ for more information. + Clean up -------- diff --git a/docs/source/raft_configuration.md b/docs/source/raft_configuration.md index 545a6bdc548..9a6e3585853 100644 --- a/docs/source/raft_configuration.md +++ b/docs/source/raft_configuration.md @@ -2,6 +2,8 @@ **Audience**: *Raft ordering node admins* +Note: this topic describes the process for configuring a Raft ordering service that has not been bootstrapped with a system channel genesis block. 
For a version of this topic that includes information about the system channel, check out [Configuring and operating a Raft ordering service](https://hyperledger-fabric.readthedocs.io/en/release-2.2/raft_configuration.html). + ## Conceptual overview For a high level overview of the concept of ordering and how the supported @@ -14,17 +16,7 @@ documentation on [Setting up an ordering node](orderer_deploy.html). ## Configuration -While every Raft node must be added to the system channel, a node does not need -to be added to every application channel. Additionally, you can remove and add a -node from a channel dynamically without affecting the other nodes, a process -described in the Reconfiguration section below. - -Raft nodes identify each other using TLS pinning, so in order to impersonate a -Raft node, an attacker needs to obtain the **private key** of its TLS -certificate. As a result, it is not possible to run a Raft node without a valid -TLS configuration. - -A Raft cluster is configured in two planes: +A Raft cluster is configured in two places: * **Local configuration**: Governs node specific aspects, such as TLS communication, replication behavior, and file storage. @@ -33,6 +25,11 @@ A Raft cluster is configured in two planes: corresponding channel, as well as protocol specific parameters such as heartbeat frequency, leader timeouts, and more. +Raft nodes identify each other using TLS pinning, so in order to impersonate a +Raft node, an attacker needs to obtain the **private key** of its TLS +certificate. As a result, it is not possible to run a Raft node without a valid +TLS configuration. + Recall, each channel has its own instance of a Raft protocol running. 
Thus, a Raft node must be referenced in the configuration of each channel it belongs to by adding its server and client TLS certificates (in `PEM` format) to the channel @@ -58,13 +55,12 @@ The following section from `configtx.yaml` shows three Raft nodes (also called ServerTLSCert: path/to/ServerTLSCert2 ``` -Note: an orderer will be listed as a consenter in the system channel as well as -any application channels they're joined to. - When the channel config block is created, the `configtxgen` tool reads the paths to the TLS certificates, and replaces the paths with the corresponding bytes of the certificates. +Note: it is possible to remove and add an ordering node from a channel dynamically without affecting the other nodes, a process described in the Reconfiguration section below. + ### Local configuration The `orderer.yaml` has two configuration sections that are relevant for Raft @@ -86,22 +82,26 @@ other, and TLS certificates issued by a public TLS CA for the client facing API. * `ClientCertificate`, `ClientPrivateKey`: The file path of the client TLS certificate and corresponding private key. - * `ListenPort`: The port the cluster listens on. If blank, the port is the same - port as the orderer general port (`general.listenPort`) + * `ListenPort`: The port the cluster listens on. + It must be same as `consenters[i].Port` in Channel configuration. + If blank, the port is the same port as the orderer general port (`general.listenPort`) * `ListenAddress`: The address the cluster service is listening on. * `ServerCertificate`, `ServerPrivateKey`: The TLS server certificate key pair which is used when the cluster service is running on a separate gRPC server (different port). - * `SendBufferSize`: Regulates the number of messages in the egress buffer. Note: `ListenPort`, `ListenAddress`, `ServerCertificate`, `ServerPrivateKey` must be either set together or unset together. 
If they are unset, they are inherited from the general TLS section, in example `general.tls.{privateKey, certificate}`. +When general TLS is disabled: + - Use a different `ListenPort` than the orderer general port + - Properly configure TLS root CAs in the channel configuration. There are also hidden configuration parameters for `general.cluster` which can be used to further fine tune the cluster communication or replication mechanisms: + * `SendBufferSize`: Regulates the number of messages in the egress buffer. * `DialTimeout`, `RPCTimeout`: Specify the timeouts of creating connections and establishing streams. * `ReplicationBufferSize`: the maximum number of bytes that can be allocated @@ -203,23 +203,13 @@ one or two). So by extending a cluster of three nodes to four nodes (while only two are alive) you are effectively stuck until the original offline node is resurrected. -Adding a new node to a Raft cluster is done by: - - 1. **Adding the TLS certificates** of the new node to the channel through a - channel configuration update transaction. Note: the new node must be added to - the system channel before being added to one or more application channels. - 2. **Fetching the latest config block** of the system channel from an orderer node - that's part of the system channel. - 3. **Ensuring that the node that will be added is part of the system channel** - by checking that the config block that was fetched includes the certificate of - (soon to be) added node. - 4. **Starting the new Raft node** with the path to the config block in the - `General.BootstrapFile` configuration parameter. - 5. **Waiting for the Raft node to replicate the blocks** from existing nodes for - all channels its certificates have been added to. After this step has been - completed, the node begins servicing the channel. - 6. **Adding the endpoint** of the newly added Raft node to the channel - configuration of all channels. +To add a new node to the ordering service: + + 1. 
**Ensure the orderer organization that owns the new node is one of the orderer organizations on the channel**. If the orderer organization is not an administrator, the node will be unable to pull blocks as a follower or be joined to the consenter set. + 2. **Start the new ordering node**. For information about how to deploy an ordering node, check out [Planning for an ordering service](./deployorderer/ordererdeploy.html). Note that when you use the `osnadmin` CLI to create and join a channel, you do not need to point to a configuration block when starting the node. + 3. **Use the `osnadmin` CLI to add the first orderer to the channel**. For more information, check out the [Create a channel](./create_channel/create_channel_participation.html#step-two-use-the-osnadmin-cli-to-add-the-first-orderer-to-the-channel) tutorial. + 4. **Wait for the Raft node to replicate the blocks** from existing nodes for all channels its certificates have been added to. When an ordering node is added to a channel, it is added as a "follower", a state in which it can replicate blocks but is not part of the "consenter set" actively servicing the channel. When the node finishes replicating the blocks, its status should change from "onboarding" to "active". Note that an "active" ordering node is still not part of the consenter set. + 5. **Add the new ordering node to the consenter set**. For more information, check out the [Create a channel](./create_channel/create_channel_participation.html#step-three-join-additional-ordering-nodes) tutorial. It is possible to add a node that is already running (and participates in some channels already) to a channel while the node itself is running. To do this, simply @@ -232,27 +222,13 @@ channel, and then start the Raft instance for that chain. After it has successfully done so, the channel configuration can be updated to include the endpoint of the new Raft orderer. 
-Removing a node from a Raft cluster is done by: +To remove an ordering node from the consenter set of a channel, use the `osnadmin channel remove` command to remove its endpoint and certificates from the channel. For more information, check out [Add or remove orderers from existing channels](./create_channel/create_channel_participation.html#add-or-remove-orderers-from-existing-channels). - 1. Removing its endpoint from the channel config for all channels, including - the system channel controlled by the orderer admins. - 2. Removing its entry (identified by its certificates) from the channel - configuration for all channels. Again, this includes the system channel. - 3. Shut down the node. +Once an ordering node is removed from the channel, the other ordering nodes stop communicating with the removed orderer in the context of the removed channel. They might still be communicating on other channels. -Removing a node from a specific channel, but keeping it servicing other channels -is done by: +The node that is removed from the channel automatically detects its removal either immediately or after `EvictionSuspicion` time has passed (10 minutes by default) and shuts down its Raft instance on that channel. - 1. Removing its endpoint from the channel config for the channel. - 2. Removing its entry (identified by its certificates) from the channel - configuration. - 3. The second phase causes: - * The remaining orderer nodes in the channel to cease communicating with - the removed orderer node in the context of the removed channel. They might - still be communicating on other channels. - * The node that is removed from the channel would autonomously detect its - removal either immediately or after `EvictionSuspicion` time has passed - (10 minutes by default) and will shut down its Raft instance. +If the intent is to delete the node entirely, remove it from all channels before shutting down the node. 
### TLS certificate rotation for an orderer node @@ -260,7 +236,7 @@ All TLS certificates have an expiration date that is determined by the issuer. These expiration dates can range from 10 years from the date of issuance to as little as a few months, so check with your issuer. Before the expiration date, you will need to rotate these certificates on the node itself and every channel -the node is joined to, including the system channel. +the node is joined to. **Note:** In case the public key of the TLS certificate remains the same, there is no need to issue channel configuration updates. diff --git a/docs/source/readwrite.rst b/docs/source/readwrite.rst index 56bb02a9e68..829a213a0b1 100644 --- a/docs/source/readwrite.rst +++ b/docs/source/readwrite.rst @@ -73,12 +73,12 @@ read-write set for updating the versions and the values of the affected keys. In the validation phase, a transaction is considered ``valid`` if the -version of each key present in the read set of the transaction matches -the version for the same key in the world state - assuming all the -preceding ``valid`` transactions (including the preceding transactions -in the same block) are committed (*committed-state*). An additional -validation is performed if the read-write set also contains one or more -query-info. +version of each key present in the read set of the transaction (from time of simulation) +matches the current version for the same key, taking into consideration +``valid`` transactions that have been committed to state from new +blocks since the transaction was simulated, as well as valid preceding transactions +in the same block. An additional validation is performed if the read-write set +also contains one or more query-info. 
This additional validation should ensure that no key has been inserted/deleted/updated in the super range (i.e., union of the ranges) diff --git a/docs/source/secured_asset_transfer/secured_private_asset_transfer_tutorial.md b/docs/source/secured_asset_transfer/secured_private_asset_transfer_tutorial.md index ba816b7224d..41ac2290ad2 100644 --- a/docs/source/secured_asset_transfer/secured_private_asset_transfer_tutorial.md +++ b/docs/source/secured_asset_transfer/secured_private_asset_transfer_tutorial.md @@ -3,9 +3,8 @@ This tutorial will demonstrate how an asset can be represented and traded between organizations in a Hyperledger Fabric blockchain channel, while keeping details of the asset and transaction private using private data. Each on-chain asset is a non-fungible token (NFT) that represents a specific asset having certain immutable metadata properties (such as size and color) with a unique owner. When the owner wants to sell the asset, both parties need to agree to the same price before the asset is transferred. The private asset transfer smart contract enforces that only the owner of the asset can transfer the asset. In the course of this tutorial, you will learn how Fabric features such as state based endorsement, private data, and access control come together to provide secured transactions that are both private and verifiable. -This tutorial will deploy the [secured asset transfer sample](https://github.com/hyperledger/fabric-samples/tree/master/asset-transfer-secured-agreement/chaincode-go) to demonstrate how to transfer a private asset between two organizations without publicly sharing data. You should have completed the task -[Install Samples, Binaries, and Docker Images](./install.html#install-samples-binaries-and-docker-images). 
- +This tutorial will deploy the [secured asset transfer sample](https://github.com/hyperledger/fabric-samples/tree/main/asset-transfer-secured-agreement/chaincode-go) to demonstrate how to transfer a private asset between two organizations without publicly sharing data. You should have completed the task +[Install Samples, Binaries, and Docker Images](../install.html#install-samples-binaries-and-docker-images). ## Scenario requirements @@ -67,7 +66,7 @@ After the two organizations have agreed to the same price, the asset owner can u ## Running the private asset transfer smart contract -You can use the Fabric test network to run the private asset transfer smart contract. The test network contains two peer organizations, Org1 and Org1, that operate one peer each. In this tutorial, we will deploy the smart contract to a channel of the test network joined by both organizations. We will first create an asset that is owned by Org1. After the two organizations agree on the price, we will transfer the asset from Org1 to Org2. +You can use the Fabric test network to run the private asset transfer smart contract. The test network contains two peer organizations, Org1 and Org2, that operate one peer each. In this tutorial, we will deploy the smart contract to a channel of the test network joined by both organizations. We will first create an asset that is owned by Org1. After the two organizations agree on the price, we will transfer the asset from Org1 to Org2. ## Deploy the test network @@ -91,7 +90,7 @@ The script will deploy the nodes of the network and create a single channel name You can use the test network script to deploy the secured asset transfer smart contract to the channel. 
Run the following command to deploy the smart contract to `mychannel`: ``` -./network.sh deployCC -ccn secured -ccep "OR('Org1MSP.peer','Org2MSP.peer')" +./network.sh deployCC -ccn secured -ccp ../asset-transfer-secured-agreement/chaincode-go/ -ccl go -ccep "OR('Org1MSP.peer','Org2MSP.peer')" ``` Note that we are using the `-ccep` flag to deploy the smart contract with an endorsement policy of `"OR('Org1MSP.peer','Org2MSP.peer')"`. This allows either organization to create an asset without receiving an endorsement from the other organization. diff --git a/docs/source/security_model.md b/docs/source/security_model.md new file mode 100644 index 00000000000..d7feb3ff068 --- /dev/null +++ b/docs/source/security_model.md @@ -0,0 +1,140 @@ +# Security Model + +Hyperledger Fabric is a permissioned blockchain where each component and actor has an identity, and policies define access control and governance. +This topic provides an overview of the Fabric security model and includes links to additional information. + +## Identities + +The different actors in a blockchain network include peers, orderers, client applications, administrators and more. +Each of these actors — active elements inside or outside a network able to consume services — has a digital identity encapsulated in an X.509 digital certificate issued by a Certificate Authority (CA). +These identities matter because they determine the exact permissions over resources and access to information that actors have in a blockchain network. + +For more information see the [Identity topic](./identity/identity.html). + +## Membership Service Providers + +For an identity to be verifiable, it must come from a trusted authority. +A membership service provider (MSP) is that trusted authority in Fabric. +More specifically, an MSP is a component that defines the rules that govern the valid identities for an organization. +A Hyperledger Fabric channel defines a set of organization MSPs as members. 
+The default MSP implementation in Fabric uses X.509 certificates issued by a Certificate Authority (CA) as identities, adopting a traditional Public Key Infrastructure (PKI) hierarchical model. +Identities can be associated with roles within a MSP such as ‘client’ and ‘admin’ by utilizing Node OU roles. +Node OU roles can be used in policy definitions in order to restrict access to Fabric resources to certain MSPs and roles. + +For more information see the [Membership Service Providers (MSPs) topic](./membership/membership.html). + +## Policies + +In Hyperledger Fabric, policies are the mechanism for infrastructure management. +Fabric policies represent how members come to agreement on accepting or rejecting changes to the network, a channel, or a smart contract. +Policies are agreed to by the channel members when the channel is originally configured, but they can also be modified as the channel evolves. +For example, they describe the criteria for adding or removing members from a channel, change how blocks are formed, or specify the number of organizations required to endorse a smart contract. +All of these actions are described by a policy which defines who can perform the action. +Simply put, everything you want to do on a Fabric network is controlled by a policy. +Once they are written, policies evaluate the collection of signatures attached to transactions and proposals and validate if the signatures fulfill the governance agreed to by the network. + +Policies can be used in Channel Policies, Channel Modification Policies, Access Control Lists, Chaincode Lifecycle Policies, and Chaincode Endorsement Policies. + +For more information see the [Policies topic](./policies/policies.html). + +### Channel Policies + +Policies in the channel configuration define various usage and administrative policies on a channel. 
+For example, the policy for adding a peer organization to a channel is defined within the administrative domain of the peer organizations (known as the Application group). +Similarly, adding ordering nodes in the consenter set of the channel is controlled by a policy inside the Orderer group. +Actions that cross both the peer and orderer organizational domains are contained in the Channel group. + +For more information see the [Channel Policies topic](./policies/policies.html#how-are-policies-implemented). + +### Channel Modification Policies + +Modification policies specify the group of identities required to sign (approve) any channel configuration update. +It is the policy that defines how a channel policy is updated. +Thus, each channel configuration element includes a reference to a policy which governs its modification. + +For more information see the [Modification Policies topic](./policies/policies.html#modification-policies). + +### Access Control Lists + +Access Control Lists (ACLs) provide the ability to configure access to channel resources by associating those resources with existing policies. + +For more information see the [Access Control Lists (ACLs) topic](./access_control.html). + +### Chaincode Lifecycle Policy + +The number of organizations that need to approve a chaincode definition before it can be successfully committed to a channel is governed by the channel’s LifecycleEndorsement policy. + +For more information see the [Chaincode Lifecycle topic](./chaincode_lifecycle.html). + +### Chaincode Endorsement Policies + +Every smart contract inside a chaincode package has an endorsement policy that specifies how many peers belonging to different channel members need to execute and validate a transaction against a given smart contract in order for the transaction to be considered valid. +Hence, the endorsement policies define the organizations (through their peers) who must “endorse” (i.e., sign) the execution of a proposal. 
+ +For more information see the [Endorsement policies topic](./policies/policies.html#chaincode-endorsement-policies). + +## Peers + +Peers are a fundamental element of the network because they host ledgers and smart contracts. +Peers have an identity of their own, and are managed by an administrator of an organization. + +For more information see the [Peers and Identity topic](./peers/peers.html#peers-and-identity) and [Peer Deployment and Administration topic](./deploypeer/peerdeploy.html). + +## Ordering service nodes + +Ordering service nodes order transactions into blocks and then distribute blocks to connected peers for validation and commit. +Ordering service nodes have an identity of their own, and are managed by an administrator of an organization. + +For more information see the [Ordering Nodes and Identity topic](./orderer/ordering_service.html#orderer-nodes-and-identity) and [Ordering Node Deployment and Administration topic](./deployorderer/ordererdeploy.html). + +## Transport Layer Security (TLS) + +Fabric supports secure communication between nodes using Transport Layer Security (TLS). +TLS communication can use both one-way (server only) and two-way (server and client) authentication. + +For more information see the [Transport Layer Security (TLS) topic](./enable_tls.html). + +## Peer and Ordering service node operations service + +The peer and the orderer host an HTTP server that offers a RESTful “operations” API. +This API is unrelated to the Fabric network services and is intended to be used by operators, not administrators or “users” of the network. + +As the operations service is focused on operations and intentionally unrelated to the Fabric network, it does not use the Membership Services Provider for access control. +Instead, the operations service relies entirely on mutual TLS with client certificate authentication. + +For more information see the [Operations Service topic](./operations_service.html). 
+ +## Hardware Security Modules + +The cryptographic operations performed by Fabric nodes can be delegated to a Hardware Security Module (HSM). +An HSM protects your private keys and handles cryptographic operations, allowing your peers to endorse transactions and orderer nodes to sign blocks without exposing their private keys. + +Fabric currently leverages the PKCS11 standard to communicate with an HSM. + +For more information see the [Hardware Security Module (HSM) topic](./hsm.html). + +## Fabric Applications + +A Fabric application can interact with a blockchain network by submitting transactions to a ledger or querying ledger content. +An application interacts with a blockchain network using one of the Fabric SDKs. + +The Fabric v2.x SDKs only support transaction and query functions and event listening. +Support for administrative functions for channels and nodes has been removed from the SDKs in favor of the CLI tools. + +Applications typically reside in a managed tier of an organization's infrastructure. +The organization may create client identities for the organization at large, or client identities for individual end users of the application. +Client identities only have permission to submit transactions and query the ledger, they do not have administrative or operational permissions on channels or nodes. + +In some use cases the application tier may persist user credentials including the private key and sign transactions. +In other use cases end users of the application may want to keep their private key secret. +To support these use cases, the Node.js SDK supports offline signing of transactions. +In both cases, a Hardware Security Module can be used to store private keys meaning that the client application does not have access to them. + +Regardless of application design, the SDKs do not have any privileged access to peer or orderer services other than that provided by the client identity. 
+From a security perspective, the SDKs are merely a set of language specific convenience functions for interacting with the gRPC services exposed by the Fabric peers and orderers. +All security enforcement is carried out by Fabric nodes as highlighted earlier in this topic, not the client SDK. + +For more information see the [Applications topic](./developapps/application.html) and [Offline Signing tutorial](https://hyperledger.github.io/fabric-sdk-node/release-2.2/tutorial-sign-transaction-offline.html). + + diff --git a/docs/source/security_model.rst b/docs/source/security_model.rst deleted file mode 100644 index 3ec440b1918..00000000000 --- a/docs/source/security_model.rst +++ /dev/null @@ -1,20 +0,0 @@ -Security Model -============== - -[WIP] - -Hyperledger Fabric allows for different organizations and participants -in a common network to utilize their own certificate authority, and as a -byproduct, implement varying cryptographic algorithms for -signing/verifying/identity attestation. This is done through an MSP -process running on both the ordering service and channel levels. - -Membership service provider (MSP): A set of cryptographic mechanisms and -protocols for issuing and validating certificates and identities -throughout the blockchain network. Identities issued in the scope of a -membership service provider can be evaluated within that membership -service provider’s rules validation policies. - -.. Licensed under Creative Commons Attribution 4.0 International License - https://creativecommons.org/licenses/by/4.0/ - diff --git a/docs/source/test_network.md b/docs/source/test_network.md index f7c6bb53c9c..c18a95e6878 100644 --- a/docs/source/test_network.md +++ b/docs/source/test_network.md @@ -2,19 +2,21 @@ After you have downloaded the Hyperledger Fabric Docker images and samples, you can deploy a test network by using scripts that are provided in the -`fabric-samples` repository. 
You can use the test network to learn about Fabric -by running nodes on your local machine. More experienced developers can use the +`fabric-samples` repository. The test network is provided for learning about Fabric +by running nodes on your local machine. Developers can use the network to test their smart contracts and applications. The network is meant to -be used only as a tool for education and testing. It should not be used as a -template for deploying a production network. The test network is being introduced -in Fabric v2.0 as the long term replacement for the `first-network` sample. +be used only as a tool for education and testing and not as a model for how to set up +a network. In general, modifications to the scripts are discouraged and could break the network. It is based on a limited configuration that should not be used as a template for deploying a production network: +- It includes two peer organizations and an ordering organization. +- For simplicity, a single node Raft ordering service is configured. +- To reduce complexity, a TLS Certificate Authority (CA) is not deployed. All certificates are issued by the root CAs. +- The sample network deploys a Fabric network with Docker Compose. Because the +nodes are isolated within a Docker Compose network, the test network is not configured to connect to other running Fabric nodes. -The sample network deploys a Fabric network with Docker Compose. Because the -nodes are isolated within a Docker Compose network, the test network is not -configured to connect to other running fabric nodes. +To learn how to use Fabric in production, see [Deploying a production network](deployment_guide_overview.html). **Note:** These instructions have been verified to work against the -latest stable Docker images and the pre-compiled setup utilities within the +latest stable Fabric Docker images and the pre-compiled setup utilities within the supplied tar file. 
If you run these commands with images or tools from the current master branch, it is possible that you will encounter errors. @@ -24,6 +26,8 @@ Before you can run the test network, you need to clone the `fabric-samples` repository and download the Fabric images. Make sure that you have installed the [Prerequisites](prereqs.html) and [Installed the Samples, Binaries and Docker Images](install.html). +**Note:** The test network has been successfully verified with Docker Desktop version 2.5.0.1 and is the recommended version at this time. Higher versions may not work. + ## Bring up the test network You can find the scripts to bring up the network in the `test-network` directory @@ -60,11 +64,11 @@ Usage: Used with network.sh deployCC -c - Name of channel to deploy chaincode to - -ccn - Chaincode name. This flag can be used to deploy one of the asset transfer samples to a channel. Sample options: basic (default),ledger, private, sbe, secured + -ccn - Chaincode name. -ccl - Programming language of the chaincode to deploy: go (default), java, javascript, typescript -ccv - Chaincode version. 1.0 (default), v2, version3.x, etc -ccs - Chaincode definition sequence. Must be an integer, 1 (default), 2, 3, etc - -ccp - (Optional) File path to the chaincode. When provided, the -ccn flag will be used only for the chaincode name. + -ccp - File path to the chaincode. -ccep - (Optional) Chaincode endorsement policy using signature policy syntax. The default policy requires an endorsement from Org1 and Org2 -cccg - (Optional) File path to private data collections configuration file -cci - (Optional) Name of chaincode initialization function. When a function is provided, the execution of init will be requested and the function will be invoked. 
@@ -80,7 +84,7 @@ Usage: Examples: network.sh up createChannel -ca -c mychannel -s couchdb -i 2.0.0 network.sh createChannel -c channelName - network.sh deployCC -ccn basic -ccl javascript + network.sh deployCC -ccn basic -ccp ../asset-transfer-basic/chaincode-javascript/ -ccl javascript network.sh deployCC -ccn mychaincode -ccp ./user/mychaincode -ccv 1 -ccl javascript ``` @@ -105,13 +109,15 @@ Creating network "net_test" with the default driver Creating volume "net_orderer.example.com" with default driver Creating volume "net_peer0.org1.example.com" with default driver Creating volume "net_peer0.org2.example.com" with default driver -Creating orderer.example.com ... done Creating peer0.org2.example.com ... done +Creating orderer.example.com ... done Creating peer0.org1.example.com ... done -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -8d0c74b9d6af hyperledger/fabric-orderer:latest "orderer" 4 seconds ago Up Less than a second 0.0.0.0:7050->7050/tcp orderer.example.com -ea1cf82b5b99 hyperledger/fabric-peer:latest "peer node start" 4 seconds ago Up Less than a second 0.0.0.0:7051->7051/tcp peer0.org1.example.com -cd8d9b23cb56 hyperledger/fabric-peer:latest "peer node start" 4 seconds ago Up 1 second 7051/tcp, 0.0.0.0:9051->9051/tcp peer0.org2.example.com +Creating cli ... 
done +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +1667543b5634 hyperledger/fabric-tools:latest "/bin/bash" 1 second ago Up Less than a second cli +b6b117c81c7f hyperledger/fabric-peer:latest "peer node start" 2 seconds ago Up 1 second 0.0.0.0:7051->7051/tcp peer0.org1.example.com +703ead770e05 hyperledger/fabric-orderer:latest "orderer" 2 seconds ago Up Less than a second 0.0.0.0:7050->7050/tcp, 0.0.0.0:7053->7053/tcp orderer.example.com +718d43f5f312 hyperledger/fabric-peer:latest "peer node start" 2 seconds ago Up 1 second 7051/tcp, 0.0.0.0:9051->9051/tcp peer0.org2.example.com ``` If you don't get this result, jump down to [Troubleshooting](#troubleshooting) @@ -130,9 +136,8 @@ docker ps -a ``` Each node and user that interacts with a Fabric network needs to belong to an -organization that is a network member. The group of organizations that are -members of a Fabric network are often referred to as the consortium. The test -network has two consortium members, Org1 and Org2. The network also includes one +organization in order to participate in the network. The test +network includes two peer organizations, Org1 and Org2. It also includes a single orderer organization that maintains the ordering service of the network. [Peers](peers/peers.html) are the fundamental components of any Fabric network. @@ -140,7 +145,7 @@ Peers store the blockchain ledger and validate transactions before they are committed to the ledger. Peers run the smart contracts that contain the business logic that is used to manage the assets on the blockchain ledger. -Every peer in the network needs to belong to a member of the consortium. In the +Every peer in the network needs to belong to an organization. In the test network, each organization operates one peer each, `peer0.org1.example.com` and `peer0.org2.example.com`. @@ -156,15 +161,12 @@ An ordering service allows peers to focus on validating transactions and committing them to the ledger. 
After ordering nodes receive endorsed transactions from clients, they come to consensus on the order of transactions and then add them to blocks. The blocks are then distributed to peer nodes, which add the -blocks the blockchain ledger. Ordering nodes also operate the system channel -that defines the capabilities of a Fabric network, such as how blocks are made -and which version of Fabric that nodes can use. The system channel defines which -organizations are members of the consortium. +blocks to the blockchain ledger. The sample network uses a single node Raft ordering service that is operated by -the ordering organization. You can see the ordering node running on your machine +the orderer organization. You can see the ordering node running on your machine as `orderer.example.com`. While the test network only uses a single node ordering -service, a real network would have multiple ordering nodes, operated by one or +service, a production network would have multiple ordering nodes, operated by one or multiple orderer organizations. The different ordering nodes would use the Raft consensus algorithm to come to agreement on the order of transactions across the network. @@ -245,15 +247,14 @@ chaincode is ready to be used. After you have used the `network.sh` to create a channel, you can start a chaincode on the channel using the following command: ``` -./network.sh deployCC +./network.sh deployCC -ccn basic -ccp ../asset-transfer-basic/chaincode-go -ccl go ``` The `deployCC` subcommand will install the **asset-transfer (basic)** chaincode on ``peer0.org1.example.com`` and ``peer0.org2.example.com`` and then deploy the chaincode on the channel specified using the channel flag (or `mychannel` if no channel is specified). If you are deploying a chaincode for the first -time, the script will install the chaincode dependencies. By default, The script -installs the Go version of the asset-transfer (basic) chaincode. 
However, you can use the -language flag, `-l`, to install the typescript or javascript versions of the chaincode. +time, the script will install the chaincode dependencies. You can use the +language flag, `-ccl`, to install the Go, typescript or javascript versions of the chaincode. You can find the asset-transfer (basic) chaincode in the `asset-transfer-basic` folder of the `fabric-samples` directory. This folder contains sample chaincode that are provided as examples and used by tutorials to highlight Fabric features. @@ -291,7 +292,7 @@ export CORE_PEER_ADDRESS=localhost:7051 The `CORE_PEER_TLS_ROOTCERT_FILE` and `CORE_PEER_MSPCONFIGPATH` environment variables point to the Org1 crypto material in the `organizations` folder. -If you used `./network.sh deployCC` to install and start the asset-transfer (basic) chaincode, you can invoke the `InitLedger` function of the (Go) chaincode to put an initial list of assets on the ledger (if using typescript or javascript `./network.sh deployCC -l javascript` for example, you will invoke the `InitLedger` function of the respective chaincodes). +If you used `./network.sh deployCC -ccl go` to install and start the asset-transfer (basic) chaincode, you can invoke the `InitLedger` function of the (Go) chaincode to put an initial list of assets on the ledger (if using typescript or javascript `./network.sh deployCC -ccl javascript` for example, you will invoke the `InitLedger` function of the respective chaincodes). Run the following command to initialize the ledger with assets: ``` @@ -522,13 +523,7 @@ below provide a guided tour of what happens when you issue the command of the crypto material and MSP folders for all three organizations in the `organizations` folder. -- The script uses configtxgen tool to create the system channel genesis block. - Configtxgen consumes the `TwoOrgsOrdererGenesis` channel profile in the - `configtx/configtx.yaml` file to create the genesis block. 
The block is stored - in the `system-genesis-block` folder. - -- Once the organization crypto material and the system channel genesis block have - been generated, the `network.sh` can bring up the nodes of the network. The +- Once the organization crypto material has been generated, the `network.sh` can bring up the nodes of the network. The script uses the ``docker-compose-test-net.yaml`` file in the `docker` folder to create the peer and orderer nodes. The `docker` folder also contains the ``docker-compose-e2e.yaml`` file that brings up the nodes of the network @@ -538,11 +533,8 @@ below provide a guided tour of what happens when you issue the command of - If you use the `createChannel` subcommand, `./network.sh` runs the `createChannel.sh` script in the `scripts` folder to create a channel - using the supplied channel name. The script uses the `configtx.yaml` file to - create the channel creation transaction, as well as two anchor peer update - transactions. The script uses the peer cli to create the channel, join - ``peer0.org1.example.com`` and ``peer0.org2.example.com`` to the channel, and - make both of the peers anchor peers. + using the supplied channel name. The script uses the `configtxgen` tool to create the channel genesis block + based on the `TwoOrgsApplicationGenesis` channel profile in the `configtx/configtx.yaml` file. After creating the channel, the script uses the peer cli to join ``peer0.org1.example.com`` and ``peer0.org2.example.com`` to the channel, and make both of the peers anchor peers. - If you issue the `deployCC` command, `./network.sh` runs the ``deployCC.sh`` script to install the **asset-transfer (basic)** chaincode on both peers and then define then @@ -625,6 +617,17 @@ If you have any problems with the tutorial, review the following: ``` Select ``y``. 
+- If you try to create a channel with the command `./network.sh createChannel`, + and it fails with the following error: + ``` + [comm.tls] ClientHandshake -> ERRO 003 Client TLS handshake failed after 1.908956ms with error: EOF remoteaddress=127.0.0.1:7051 + Error: error getting endorser client for channel: endorser client failed to connect to localhost:7051: failed to create new connection: context deadline exceeded + After 5 attempts, peer0.org1 has failed to join channel 'mychannel' + ``` + + You need to uninstall Docker Desktop and reinstall the recommended version 2.5.0.1. Then, reclone the `fabric-samples` + repository before reattempting the commands. + - If you see an error similar to the following: ``` /bin/bash: ./scripts/createChannel.sh: /bin/bash^M: bad interpreter: No such file or directory @@ -644,16 +647,6 @@ If you have any problems with the tutorial, review the following: :set ff=unix ``` -- If your orderer exits upon creation or if you see that the create channel - command fails due to an inability to connect to your ordering service, use - the `docker logs` command to read the logs from the ordering node. You may see - the following message: - ``` - PANI 007 [channel system-channel] config requires unsupported orderer capabilities: Orderer capability V2_0 is required but not supported: Orderer capability V2_0 is required but not supported - ``` - This occurs when you are trying to run the network using Fabric version 1.4.x - docker images. The test network needs to run using Fabric version 2.x. - If you continue to see errors, share your logs on the **fabric-questions** channel on [Hyperledger Rocket Chat](https://chat.hyperledger.org/home) or on [StackOverflow](https://stackoverflow.com/questions/tagged/hyperledger-fabric). 
diff --git a/docs/source/tutorial/commercial_paper.diagram.testnet.png b/docs/source/tutorial/commercial_paper.diagram.testnet.png index 320f5956a26..a2c21361b9a 100644 Binary files a/docs/source/tutorial/commercial_paper.diagram.testnet.png and b/docs/source/tutorial/commercial_paper.diagram.testnet.png differ diff --git a/docs/source/tutorial/commercial_paper.md b/docs/source/tutorial/commercial_paper.md index 114c643eccc..d81fe0d8757 100644 --- a/docs/source/tutorial/commercial_paper.md +++ b/docs/source/tutorial/commercial_paper.md @@ -180,10 +180,10 @@ need to horizontally scroll to locate the information): * The Ordering Org CA, `ca_orderer`, is running in container `7b01f5454832` These containers all form a [Docker network](https://docs.docker.com/network/) -called `net_test`. You can view the network with the `docker network` command: +called `fabric_test`. You can view the network with the `docker network` command: ``` -$ docker network inspect net_test +$ docker network inspect fabric_test [ { @@ -404,7 +404,7 @@ and approve the chaincode as administrators of both MagnetoCorp and DigiBank. administrator installs a copy of the `papercontract` onto a MagnetoCorp peer.* Smart contracts are the focus of application development, and are contained -within a Hyperledger Fabric artifact called [chaincode](../chaincode.html). One +within a Hyperledger Fabric artifact called [chaincode](../chaincode4ade.html). One or more smart contracts can be defined within a single chaincode, and installing a chaincode will allow them to be consumed by the different organizations in PaperNet. 
It means that only administrators need to worry about chaincode; @@ -854,7 +854,7 @@ All the time, the underlying Fabric SDK handles the transaction endorsement, ordering and notification process, making the application's logic straightforward; the SDK uses a [gateway](../developapps/gateway.html) to abstract away network details and -[connectionOptions](../developapps/connectoptions.html) to declare more advanced +[connectionOptions](../developapps/connectionoptions.html) to declare more advanced processing strategies such as transaction retry. Let's now follow the lifecycle of MagnetoCorp 00001 by switching our emphasis diff --git a/docs/source/updating_capabilities.md b/docs/source/updating_capabilities.md index 52cafcb12c9..aedfcae428a 100644 --- a/docs/source/updating_capabilities.md +++ b/docs/source/updating_capabilities.md @@ -2,9 +2,11 @@ *Audience: network administrators, node administrators* +Note: this topic describes the process for updating capabilities on a network that has not been bootstrapped with a system channel genesis block. For a version of this topic that includes information about updating system channel capabilities, check out [Updating the capability level of a channel](https://hyperledger-fabric.readthedocs.io/en/release-2.2/updating_capabilities.html). + If you're not familiar with capabilities, check out [Capabilities](./capabilities_concept.html) before proceeding, paying particular attention to the fact that **peers and orderers that belong to the channel must be upgraded before enabling capabilities**. -For information about any new capability levels in the latest release of Fabric, check out [Upgrading your components](./upgrade_to_newest_version.html#Capabilities). +For information about any new capability levels in the latest release of Fabric, check out [Upgrading your components](./upgrade_to_newest_version.html#capabilities). 
Note: when we use the term “upgrade” in Hyperledger Fabric, we’re referring to changing the version of a component (for example, going from one version of a binary to the next version). The term “update,” on the other hand, refers not to versions but to configuration changes, such as updating a channel configuration or a deployment script. As there is no data migration, technically speaking, in Fabric, we will not use the term "migration" or "migrate" here. @@ -20,8 +22,6 @@ For this reason, think of enabling channel capabilities as a point of no return. ## Overview -In this tutorial, we will show the process for updating capabilities in all of the parts of the configuration of both the ordering system channel and any application channels. - Whether you will need to update every part of the configuration for all of your channels will depend on the contents of the latest release as well as your own use case. For more information, check out [Upgrading to the latest version of Fabric](./upgrade_to_newest_version.html). Note that it may be necessary to update to the newest capability levels before using the features in the latest release, and it is considered a best practice to always be at the latest binary versions and capability levels. Because updating the capability level of a channel involves the configuration update transaction process, we will be relying on our [Updating a channel configuration](./config_update.html) topic for many of the commands. @@ -32,24 +32,17 @@ As with any channel configuration update, updating capabilities is, at a high le 2. Create a modified channel config 3. Create a config update transaction -We will enable these capabilities in the following order: - -1. [Orderer system channel](#orderer-system-channel-capabilities) - - * Orderer group - * Channel group - -2. 
[Application channels](#enable-capabilities-on-existing-channels) +Recall from the conceptual topic on capabilities that three types of capabilities exist, reflecting the three areas of administration: * Orderer group * Channel group * Application group -While it is possible to edit multiple parts of the configuration of a channel at the same time, in this tutorial we will show how this process is done incrementally. In other words, we will not bundle a change to the `Orderer` group and the `Channel` group of the system channel into one configuration change. This is because not every release will have both a new `Orderer` group capability and a `Channel` group capability. +While it is possible to edit multiple parts of the configuration of a channel at the same time, in this tutorial we will show how this process is done incrementally. In other words, we will not bundle a change to the `Orderer` group and the `Channel` group into one configuration change. This is because not every release will have both a new `Orderer` group capability and a `Channel` group capability. -Note that in production networks, it will not be possible or desirable for one user to be able to update all of these channels (and parts of configurations) unilaterally. The orderer system channel, for example, is administered exclusively by ordering organization admins (though it is possible to add peer organizations as ordering service organizations). Similarly, updating either the `Orderer` or `Channel` groups of a channel configuration requires the signature of an ordering service organization in addition to peer organizations. Distributed systems require collaborative management. +Note that in production networks, it will not be possible or desirable for one user to be able to update all of these channels (and parts of configurations) unilaterally. 
Updating either the `Orderer` or `Channel` groups of a channel configuration, for example, requires the signature of an ordering service organization in addition to peer organizations. Distributed systems require collaborative management. -#### Create a capabilities config file +## Create a capabilities config file Note that this tutorial presumes that a file called `capabilities.json` has been created and includes the capability updates you want to make to the various sections of the config. It also uses `jq` to apply the edits to the modified config file. @@ -91,57 +84,9 @@ In this example, the ``capabilities.json`` file looks like this (note: if you ar } ``` -Note that by default peer organizations are not admins of the orderer system channel and will therefore be unable to propose configuration updates to it. An orderer organization admin would have to create a file like this (without the `application` group capability, which does not exist in the system channel) to propose updating the system channel configuration. Note that because application channel copy the system channel configuration by default, unless a different channel profile is created which specifies capability levels, the `Channel` and `Orderer` group capabilities for the application channel will be the same as those in the network's system channel. - -## Orderer system channel capabilities - -Because application channels copy the configuration of the orderer system channel by default, it is considered a best practice to update the capabilities of the system channel before any application channels. This mirrors the process of updating ordering nodes to the newest version before peers, as described in [Upgrading your components](./upgrading_your_components.html). - -Note that the orderer system channel is administered by ordering service organizations. 
By default this will be a single organization (the organization that created the initial nodes in the ordering service), but more organizations can be added here (for example, if multiple organizations have contributed nodes to the ordering service). - -Make sure all of the ordering nodes in your ordering service have been upgraded to the required binary level before updating the `Orderer` and `Channel` capability. If an ordering node is not at the required level, it will be unable to process the config block with the capability and will crash. Similarly, note that if a new channel is created on this ordering service, all of the peers that will be joined to it must be at least to the node level corresponding to the `Channel` and `Application` capabilities, otherwise they will also crash when attempting to process the config block. For more information, check out [Capabilities](./capabilities_concept.html). - -### Set environment variables - -You will need to export the following variables: - -* `CH_NAME`: the name of the system channel being updated. -* `CORE_PEER_LOCALMSPID`: the MSP ID of the organization proposing the channel update. This will be the MSP of one of the orderer organizations. -* `TLS_ROOT_CA`: the absolute path to the TLS cert of your ordering node(s). -* `CORE_PEER_MSPCONFIGPATH`: the absolute path to the MSP representing your organization. -* `ORDERER_CONTAINER`: the name of an ordering node container. When targeting the ordering service, you can target any particular node in the ordering service. Your requests will be forwarded to the leader automatically. - -### `Orderer` group - -For the commands on how to pull, translate, and scope the channel config, navigate to [Step 1: Pull and translate the config](./config_update.html#step-1-pull-and-translate-the-config). 
Once you have a `modified_config.json`, add the capabilities to the `Orderer` group of the config (as listed in `capabilities.json`) using this command: - -``` -jq -s '.[0] * {"channel_group":{"groups":{"Orderer": {"values": {"Capabilities": .[1].orderer}}}}}' config.json ./capabilities.json > modified_config.json -``` - -Then, follow the steps at [Step 3: Re-encode and submit the config](./config_update.html#step-3-re-encode-and-submit-the-config). - -Note that because you are updating the system channel, the `mod_policy` for the system channel will only require the signature of ordering service organization admins. - -### `Channel` group - -Once again, navigate to [Step 1: Pull and translate the config](./config_update.html#step-1-pull-and-translate-the-config). Once you have a `modified_config.json`, add the capabilities to the `Channel` group of the config (as listed in `capabilities.json`) using this command: - -``` -jq -s '.[0] * {"channel_group":{"values": {"Capabilities": .[1].channel}}}' config.json ./capabilities.json > modified_config.json -``` - -Then, follow the steps at [Step 3: Re-encode and submit the config](./config_update.html#step-3-re-encode-and-submit-the-config). - -Note that because you are updating the system channel, the `mod_policy` for the system channel will only require the signature of ordering service organization admins. In an application channel, as you'll see, you would normally need to satisfy both the `MAJORITY` `Admins` policy of both the `Application` group (consisting of the MSPs of peer organizations) and the `Orderer` group (consisting of ordering service organizations), assuming you have not changed the default values. - -## Enable capabilities on existing channels - -Now that we have updating the capabilities on the orderer system channel, we need to updating the configuration of any existing application channels you want to update. 
- -As you will see, the configuration of application channels is very similar to that of the system channel. This is what allows us to re-use `capabilities.json` and the same commands we used for updating the system channel (using different environment variables which we will discuss below). +## Update capabilities on existing channels -**Make sure all of the ordering nodes in your ordering service and peers on the channel have been upgraded to the required binary level before updating capabilities. If a peer or an ordering node is not at the required level, it will be unable to process the config block with the capability and will crash**. For more information, check out [Capabilities](./capabilities_concept.html). +Make sure all of the ordering nodes in your ordering service and peers on the channel have been upgraded to the required binary level before updating capabilities. If a peer or an ordering node is not at the required level, it will be unable to process the config block with the capability and will crash. For more information, check out [Capabilities](./capabilities_concept.html). ### Set environment variables @@ -163,7 +108,7 @@ jq -s '.[0] * {"channel_group":{"groups":{"Orderer": {"values": {"Capabilities": Then, follow the steps at [Step 3: Re-encode and submit the config](./config_update.html#step-3-re-encode-and-submit-the-config). -Note the `mod_policy` for this capability defaults to the `MAJORITY` of the `Admins` of the `Orderer` group (in other words, a majority of the admins of the ordering service). Peer organizations can propose an update to this capability, but their signatures will not satisfy the relevant policy in this case. +Note the `mod_policy` for this capability defaults to the `MAJORITY` of the `Admins` of the `Orderer` group (in other words, a majority of the admins of the ordering service). Peer organizations can propose an update to this capability, but their signatures will not satisfy the default policy in this case. 
### `Channel` group diff --git a/docs/source/upgrade.rst b/docs/source/upgrade.rst index 227ded5b753..617108eb663 100644 --- a/docs/source/upgrade.rst +++ b/docs/source/upgrade.rst @@ -12,6 +12,8 @@ high level, a four step process. capability levels, where available. Note that some releases will have capabilities in all groups while other releases may have few or even no new capabilities at all. +5. If you want to migrate your network by removing the orderer system channel, + check out :doc:`create_channel/create_channel_participation`. For more information about capabilities, check out :doc:`capabilities_concept`. diff --git a/docs/source/upgrade_to_newest_version.md b/docs/source/upgrade_to_newest_version.md index 0fc24062915..5d6e94e74c5 100644 --- a/docs/source/upgrade_to_newest_version.md +++ b/docs/source/upgrade_to_newest_version.md @@ -4,7 +4,16 @@ In this topic we'll cover recommendations for upgrading to the newest release fr ## Upgrading from 2.1 to 2.2 -The 2.1 and 2.2 releases of Fabric are stabilization releases, featuring bug fixes and other forms of code hardening. As such there are no particular considerations needed for upgrade, and no new capability levels requiring particular image versions or channel configuration updates. +The v2.1 and v2.2 releases of Fabric are stabilization releases, featuring bug fixes and other forms of code hardening. As such there are no particular considerations needed for upgrade, and no new capability levels requiring particular image versions or channel configuration updates. + +### Upgrading from 2.2 to 2.3 + +The v2.3 release of Fabric includes two main new features: + +1. The ability to take snapshots of the ledgers on peers (and to bootstrap a new peer from a snapshot). For more information, check out [Taking ledger snapshots and using them to join channels](./peer_ledger_snapshot.html). +2. Channels can now be created without first creating a system channel. 
For more information, check out [Creating a channel without a system channel](./create_channel/create_channel_participation.html). + +Neither of these features require channel updates to capability in order to function. However, you will need to upgrade to v2.3 to take advantage of both features and, in the case of the new channel creation process, need to migrate away from the system channel, which is covered in the [Creating a channel without a system channel](./create_channel/create_channel_participation.html) tutorial. ## Upgrading to 2.2 from the 1.4.x long term support release @@ -24,7 +33,8 @@ For information about how to edit the relevant channel configurations to enable ### Chaincode shim changes (Go chaincode only) -The recommended approach is to vendor the shim in your v1.4 Go chaincode before making upgrades to the peers and channels. If you do this, you do not need to make any additional changes to your chaincode. +The v2.x `ccenv` image that is used to build Go chaincodes no longer automatically vendors the Go chaincode shim dependency like the v1.4 `ccenv` image did. +The recommended approach is to vendor the shim in your v1.4 Go chaincode before making upgrades to the peers and channels, since this approach works with both a v1.4.x and v2.x peer. If you are already using an existing tool such as ``govendor`` you may continue using it to vendor the chaincode shim. Best practice, however, would be to use Go modules to vendor the chaincode shim, as modules are now the de facto standard for dependency management in the Go ecosystem. Note that since Fabric v2.0, chaincode using Go modules without vendored dependencies is also supported. If you do this, you do not need to make any additional changes to your chaincode. If you did not vendor the shim in your v1.4 chaincode, the old v1.4 chaincode images will still technically work after upgrade, but you are in a risky state. 
If the chaincode image gets deleted from your environment for whatever reason, the next invoke on v2.x peer will try to rebuild the chaincode image and you'll get an error that the shim cannot be found. @@ -86,7 +96,7 @@ For information about how to set new capabilities, check out [Updating the capab ### Define ordering node endpoint per org (recommend) -Starting with version v1.4.2, it was recommended to define orderer endpoints in both the system channel and in all application channels at the organization level by adding a new `OrdererEndpoints` stanza within the channel configuration of an organization, replacing the global `OrdererAddresses` section of channel configuration. If at least one organization has an ordering service endpoint defined at an organizational level, all orderers and peers will ignore the channel level endpoints when connecting to ordering nodes. +Starting with version v1.4.2, it was recommended to define orderer endpoints in all channels at the organization level by adding a new `OrdererEndpoints` stanza within the channel configuration of an organization, replacing the global `OrdererAddresses` section of channel configuration. If at least one organization has an ordering service endpoint defined at an organizational level, all orderers and peers will ignore the channel level endpoints when connecting to ordering nodes. Utilizing organization level orderer endpoints is required when using service discovery with ordering nodes provided by multiple organizations. This allows clients to provide the correct organization TLS certificates. @@ -111,10 +121,10 @@ In this example, we will create a stanza for a single org called `OrdererOrg`. N Then, export the following environment variables: -* `CH_NAME`: the name of the channel being updated. Note that all system channels and application channels should contain organization endpoints for ordering nodes. +* `CH_NAME`: the name of the channel being updated. 
* `CORE_PEER_LOCALMSPID`: the MSP ID of the organization proposing the channel update. This will be the MSP of one of the orderer organizations. * `CORE_PEER_MSPCONFIGPATH`: the absolute path to the MSP representing your organization. -* `TLS_ROOT_CA`: the absolute path to the root CA certificate of the organization proposing the system channel update. +* `TLS_ROOT_CA`: the absolute path to the root CA certificate of the organization proposing the channel update. * `ORDERER_CONTAINER`: the name of an ordering node container. When targeting the ordering service, you can target any particular node in the ordering service. Your requests will be forwarded to the leader automatically. * `ORGNAME`: The name of the organization you are currently updating. For example, `OrdererOrg`. diff --git a/docs/source/whatsnew.rst b/docs/source/whatsnew.rst index 8cc9a45ea62..64e6b2ea9c0 100644 --- a/docs/source/whatsnew.rst +++ b/docs/source/whatsnew.rst @@ -272,7 +272,11 @@ announced in each of the v2.x releases. * `Fabric v2.1.1 release notes `_. * `Fabric v2.2.0 release notes `_. * `Fabric v2.2.1 release notes `_. +* `Fabric v2.2.2 release notes `_. * `Fabric v2.3.0 release notes `_. +* `Fabric v2.3.1 release notes `_. +* `Fabric v2.3.2 release notes `_. +* `Fabric v2.3.3 release notes `_. .. Licensed under Creative Commons Attribution 4.0 International License https://creativecommons.org/licenses/by/4.0/ diff --git a/docs/source/write_first_app.rst b/docs/source/write_first_app.rst index 3f285d916f2..c4046a51556 100644 --- a/docs/source/write_first_app.rst +++ b/docs/source/write_first_app.rst @@ -138,7 +138,7 @@ Next, let's deploy the chaincode by calling the ``./network.sh`` script with the .. code:: bash - ./network.sh deployCC -ccn basic -ccl javascript + ./network.sh deployCC -ccn basic -ccp ../asset-transfer-basic/chaincode-javascript/ -ccl javascript .. 
note:: Behind the scenes, this script uses the chaincode lifecycle to package, install, query installed chaincode, approve chaincode for both Org1 and Org2, and finally commit the chaincode. @@ -293,7 +293,7 @@ to interact with the blockchain network. The section of the application code is // in a real application this would be done only when a new user was required to be added // and would be part of an administrative flow - await registerUser(caClient, wallet, userId, 'org1.department1'); + await registerAndEnrollUser(caClient, wallet, mspOrg1, org1UserId, 'org1.department1'); Similar to the admin enrollment, this function uses a CSR to register and enroll ``appUser`` and store its credentials alongside those of ``admin`` in the wallet. We now have diff --git a/docs/wrappers/osnadmin_channel_postscript.md b/docs/wrappers/osnadmin_channel_postscript.md index 83a9436251e..25c1825fd2f 100644 --- a/docs/wrappers/osnadmin_channel_postscript.md +++ b/docs/wrappers/osnadmin_channel_postscript.md @@ -4,13 +4,13 @@ Here's an example of the `osnadmin channel join` command. -* Create and join a sample channel `mychannel` defined by the application channel genesis +* Create and join a sample channel `mychannel` defined by the application channel genesis block contained in file `mychannel-genesis-block.pb`. Use the orderer admin endpoint at `orderer.example.com:9443`. ``` - osnadmin channel join -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channel-id mychannel --config-block mychannel-genesis-block.pb + osnadmin channel join -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channelID mychannel --config-block mychannel-genesis-block.pb Status: 201 { @@ -34,7 +34,7 @@ Here are some examples of the `osnadmin channel list` command. system channel (if one exists) and all of the application channels. 
``` - osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY + osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY Status: 200 { @@ -49,12 +49,12 @@ Here are some examples of the `osnadmin channel list` command. ``` - Status 200 and the list of channels are returned. + Status 200 and the list of channels are returned. -* Using the `--channel-id` flag to list more details for `mychannel`. +* Using the `--channelID` flag to list more details for `mychannel`. ``` - osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channel-id mychannel + osnadmin channel list -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channelID mychannel Status: 200 { @@ -67,7 +67,7 @@ Here are some examples of the `osnadmin channel list` command. ``` - Status 200 and the details of the channels are returned. + Status 200 and the details of the channels are returned. ### osnadmin channel remove example @@ -76,11 +76,11 @@ Here's an example of the `osnadmin channel remove` command. * Removing channel `mychannel` from the orderer at `orderer.example.com:9443`. ``` - osnadmin channel remove -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channel-id mychannel + osnadmin channel remove -o orderer.example.com:9443 --ca-file $CA_FILE --client-cert $CLIENT_CERT --client-key $CLIENT_KEY --channelID mychannel Status: 204 ``` - Status 204 is returned upon successful removal of a channel. + Status 204 is returned upon successful removal of a channel. Creative Commons License
This work is licensed under a Creative Commons Attribution 4.0 International License. diff --git a/docs/wrappers/peer_chaincode_preamble.md b/docs/wrappers/peer_chaincode_preamble.md index aac07f6b327..e55beedb09b 100644 --- a/docs/wrappers/peer_chaincode_preamble.md +++ b/docs/wrappers/peer_chaincode_preamble.md @@ -23,6 +23,18 @@ different chaincode operations that are relevant to a peer. For example, use the the `peer chaincode query` subcommand option to query a chaincode for the current value on a peer's ledger. +Some subcommands take flag `--ctor`, of which the value must be a JSON string +that has either key 'Args' or 'Function' and 'Args'. These keys are +case-insensitive. + +If the JSON string only has the Args key, the key value is an array, where the +first array element is the target function to call, and the subsequent elements +are arguments of the function. If the JSON string has both 'Function' and +'Args', the value of Function is the target function to call, and the value of +Args is an array of arguments of the function. For instance, +`{"Args":["GetAllAssets"]}` is equivalent to +`{"Function":"GetAllAssets", "Args":[]}`. + Each peer chaincode subcommand is described together with its options in its own section in this topic. diff --git a/docs/wrappers/peer_lifecycle_chaincode_postscript.md b/docs/wrappers/peer_lifecycle_chaincode_postscript.md index 1b4fc1627e1..76c9d46c4fa 100644 --- a/docs/wrappers/peer_lifecycle_chaincode_postscript.md +++ b/docs/wrappers/peer_lifecycle_chaincode_postscript.md @@ -228,7 +228,7 @@ also outputs which organizations have approved the chaincode definition. If an organization has approved the chaincode definition specified in the command, the command will return a value of true. 
You can use this command to learn whether enough channel members have approved a chaincode definition to meet the -`Application/Channel/Endorsement` policy (a majority by default) before the +`/Channel/Application/Endorsement` policy (a majority by default) before the definition can be committed to a channel. * Here is an example of the `peer lifecycle chaincode checkcommitreadiness` command, diff --git a/go.mod b/go.mod index e02b3b122f9..e4ec0242497 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ replace golang.org/x/sys => golang.org/x/sys v0.0.0-20190920190810-ef0ce1748380 require ( code.cloudfoundry.org/clock v1.0.0 + github.com/BDLS-bft/bdls v0.0.0-20230116214250-19cec756c989 github.com/DataDog/zstd v1.4.0 // indirect github.com/Knetic/govaluate v3.0.0+incompatible github.com/Microsoft/hcsshim v0.8.6 // indirect @@ -43,15 +44,14 @@ require ( github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/kr/pretty v0.2.0 github.com/magiconair/properties v1.8.1 // indirect - github.com/mattn/go-runewidth v0.0.4 // indirect github.com/miekg/pkcs11 v1.0.3 github.com/mitchellh/mapstructure v1.3.2 - github.com/onsi/ginkgo v1.8.0 - github.com/onsi/gomega v1.9.0 + github.com/onsi/ginkgo v1.16.4 + github.com/onsi/gomega v1.16.0 github.com/opencontainers/runc v1.0.0-rc8 // indirect github.com/pelletier/go-toml v1.8.0 // indirect github.com/pierrec/lz4 v2.5.0+incompatible // indirect - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.1.0 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/spf13/afero v1.3.1 // indirect @@ -60,20 +60,19 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.1.1 - github.com/stretchr/testify v1.5.1 + github.com/stretchr/testify v1.7.1-0.20210116013205-6990a05d54c2 // includes ErrorContains github.com/sykesm/zap-logfmt v0.0.2 - github.com/syndtr/goleveldb 
v1.0.1-0.20190625010220-02440ea7a285 + github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc github.com/willf/bitset v1.1.10 go.etcd.io/etcd v0.5.0-alpha.5.0.20181228115726-23731bf9ba55 go.uber.org/zap v1.14.1 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc // indirect - golang.org/x/sys v0.0.0-20200819091447-39769834ee22 // indirect - golang.org/x/text v0.3.3 // indirect - golang.org/x/tools v0.0.0-20200131233409-575de47986ce + golang.org/x/tools v0.0.0-20210106214847-113979e3529a google.golang.org/grpc v1.31.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/cheggaaa/pb.v1 v1.0.28 gopkg.in/yaml.v2 v2.3.0 ) + +replace github.com/onsi/gomega => github.com/onsi/gomega v1.9.0 diff --git a/go.sum b/go.sum index ab84afd3f0c..cadeb2f8721 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +code.cloudfoundry.org/bytefmt v0.0.0-20211005130812-5bb3c17173e5 h1:tM5+dn2C9xZw1RzgI6WTQW1rGqdUimKB3RFbyu4h6Hc= +code.cloudfoundry.org/bytefmt v0.0.0-20211005130812-5bb3c17173e5/go.mod h1:v4VVB6oBMz/c9fRY6vZrwr5xKRWOH5NPDjQZlPk0Gbs= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BDLS-bft/bdls v0.0.0-20230116214250-19cec756c989 h1:tYmUIxd732iUTZ3R0ewPy7O7Y5CNivrcFgYVPJ0tWGQ= +github.com/BDLS-bft/bdls v0.0.0-20230116214250-19cec756c989/go.mod h1:hesZ3fp+xEnW6dqvjJrbKru9K0v7y/kQloGL7iD4vR4= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo= @@ -21,6 +25,7 @@ github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQu github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= @@ -32,6 +37,19 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= +github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= +github.com/btcsuite/go-socks 
v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -47,9 +65,12 @@ github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/lru v1.0.0/go.mod 
h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -76,8 +97,9 @@ github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/frankban/quicktest v1.9.0 h1:jfEA+Psfr/pHsRJYPpHiNu7PGJnGctNxvTaM3K1EyXk= github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsouza/go-dockerclient v1.4.1 h1:W7wuJ3IB48WYZv/UBk9dCTIb9oX805+L9KIm65HcUYs= github.com/fsouza/go-dockerclient v1.4.1/go.mod h1:PUNHxbowDqRXfRgZqMz1OeGtbWC6VKyZvJ99hDjB0qs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -88,10 +110,12 @@ github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80n github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
-github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -101,14 +125,12 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -129,7 +151,6 @@ 
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+d github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hyperledger/fabric-amcl v0.0.0-20200128223036-d1aa2665426a h1:HgdNn3UYz8PdcZrLEk0IsSU4LRHp7yY2rgjIKcSiJaA= github.com/hyperledger/fabric-amcl v0.0.0-20200128223036-d1aa2665426a/go.mod h1:X+DIyUsaTmalOpmpQfIvFZjKHQedrURQ5t4YqquX7lE= @@ -147,12 +168,17 @@ github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd h1:anPrsicrIi2ColgWTVPk+ github.com/ijc/Gotty v0.0.0-20170406111628-a8b993ba6abd/go.mod h1:3LVOLeyx9XVvwPgrt2be44XgSqndprz1G18rSk8KD84= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -173,8 +199,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -187,13 +213,17 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack 
v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= @@ -209,8 +239,9 @@ github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bA github.com/pierrec/lz4 v2.5.0+incompatible h1:MbdIZ43A//duwOjQqK3nP+up+65yraNFyX3Vp6Rwues= github.com/pierrec/lz4 v2.5.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -236,6 +267,8 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -261,20 +294,25 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= 
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1-0.20210116013205-6990a05d54c2 h1:oevpAKCW58ZYJe1hqfgLqg+1zXmYrQ9xf7HLUdfS+qM= +github.com/stretchr/testify v1.7.1-0.20210116013205-6990a05d54c2/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/sykesm/zap-logfmt v0.0.2 h1:czSzn+PIXCOAP/4NAIHTTziIKB8201PzoDkKTn+VR/8= github.com/sykesm/zap-logfmt v0.0.2/go.mod h1:TerDJT124HaO8UTpZ2wJCipJRAKQ9XONM1mzUabIh6M= -github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q= -github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs= +github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc h1:LUUe4cdABGrIJAhl1P1ZpWY76AwukVszFdwkVFVLwIk= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.1-etcd.7/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.5.0-alpha.5.0.20181228115726-23731bf9ba55 h1:YTC92EdyM9lnD0aVRqN28/CILPLZSzdmoomOFAjxBbk= go.etcd.io/etcd v0.5.0-alpha.5.0.20181228115726-23731bf9ba55/go.mod h1:weASp41xM3dk0YHg1s/W8ecdGP5G4teSTMBPpYAaUgA= @@ -291,11 +329,15 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -305,6 +347,10 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -314,19 +360,23 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190920190810-ef0ce1748380 h1:pWpXa9KDLGZQe5bzYO50QaTE0ddH+vzBKtP32311cD8= golang.org/x/sys v0.0.0-20190920190810-ef0ce1748380/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -339,11 +389,16 @@ golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200131233409-575de47986ce h1:Uglradbb4KfUWaYasZhlsDsGRwHHvRsHoNAEONef0W8= -golang.org/x/tools v0.0.0-20200131233409-575de47986ce/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -367,17 +422,18 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git 
a/gossip/discovery/discovery_impl.go b/gossip/discovery/discovery_impl.go index 1da96254ea4..988c0d7bdb3 100644 --- a/gossip/discovery/discovery_impl.go +++ b/gossip/discovery/discovery_impl.go @@ -869,11 +869,7 @@ func (d *gossipDiscoveryImpl) learnExistingMembers(aliveArr []*protoext.SignedGo alive.lastSeen = time.Now() alive.seqNum = am.Timestamp.SeqNum - if am := d.aliveMembership.MsgByID(m.GetAliveMsg().Membership.PkiId); am == nil { - d.logger.Debug("Adding", am, "to aliveMembership") - msg := &protoext.SignedGossipMessage{GossipMessage: m.GossipMessage, Envelope: am.Envelope} - d.aliveMembership.Put(m.GetAliveMsg().Membership.PkiId, msg) - } else { + if am := d.aliveMembership.MsgByID(m.GetAliveMsg().Membership.PkiId); am != nil { d.logger.Debug("Replacing", am, "in aliveMembership") am.GossipMessage = m.GossipMessage am.Envelope = m.Envelope diff --git a/gossip/gossip/gossip_test.go b/gossip/gossip/gossip_test.go index 5f8985d52e2..8024258f062 100644 --- a/gossip/gossip/gossip_test.go +++ b/gossip/gossip/gossip_test.go @@ -670,7 +670,7 @@ func TestNoMessagesSelfLoop(t *testing.T) { case msg := <-ch: { if protoext.IsDataMsg(msg.GetGossipMessage().GossipMessage) { - t.Fatal("Should not receive data message back, got", msg) + t.Errorf("Should not receive data message back, got %s", msg) } } // Waiting for 2 seconds to make sure we won't diff --git a/gossip/identity/identity.go b/gossip/identity/identity.go index 671044dcbde..217fa16b4e5 100644 --- a/gossip/identity/identity.go +++ b/gossip/identity/identity.go @@ -135,7 +135,7 @@ func (is *identityMapperImpl) Put(pkiID common.PKIidType, identity api.PeerIdent var expirationTimer *time.Timer if !expirationDate.IsZero() { if time.Now().After(expirationDate) { - return errors.New("identity expired") + return errors.New("gossipping peer identity expired") } // Identity would be wiped out a millisecond after its expiration date timeToLive := time.Until(expirationDate.Add(time.Millisecond)) diff --git 
a/gossip/identity/identity_test.go b/gossip/identity/identity_test.go index 39b3a47c6a8..28da61c0ac5 100644 --- a/gossip/identity/identity_test.go +++ b/gossip/identity/identity_test.go @@ -268,7 +268,7 @@ func TestExpiration(t *testing.T) { err := idStore.Put(x509PkiID, x509Identity) require.NoError(t, err) err = idStore.Put(expiredX509PkiID, expiredX509Identity) - require.Equal(t, "identity expired", err.Error()) + require.Equal(t, "gossipping peer identity expired", err.Error()) err = idStore.Put(nonX509PkiID, nonX509Identity) require.NoError(t, err) err = idStore.Put(notSupportedPkiID, notSupportedIdentity) diff --git a/gossip/privdata/dataretriever.go b/gossip/privdata/dataretriever.go index ac6cbe76bc8..afe2552dd8f 100644 --- a/gossip/privdata/dataretriever.go +++ b/gossip/privdata/dataretriever.go @@ -55,14 +55,27 @@ func (dr *dataRetriever) CollectionRWSet(digests []*protosgossip.PvtDataDigest, // if there is an error getting info from the ledger, we need to try to read from transient store return nil, false, errors.Wrap(err, "wasn't able to read ledger height") } - if height <= blockNum { + + // The condition may be true for either commit or reconciliation case when another peer sends a request to retrieve private data. + // For the commit case, get the private data from the transient store because the block has not been committed. + // For the reconciliation case, this peer is further behind the ledger height than the peer that requested for the private data. + // In this case, the ledger does not have the requested private data. Also, the data cannot be queried in the transient store, + // as the txID in the digest will be missing. + if height <= blockNum { // Check whenever current ledger height is equal or below block sequence num. 
dr.logger.Debug("Current ledger height ", height, "is below requested block sequence number", blockNum, "retrieving private data from transient store") - } - if height <= blockNum { // Check whenever current ledger height is equal or below block sequence num. results := make(Dig2PvtRWSetWithConfig) for _, dig := range digests { + // skip retrieving from transient store if txid is not available + if dig.TxId == "" { + dr.logger.Infof("Skip querying transient store for chaincode %s, collection name %s, block number %d, sequence in block %d, "+ + "as the txid is missing, perhaps because it is a reconciliation request", + dig.Namespace, dig.Collection, blockNum, dig.SeqInBlock) + + continue + } + filter := map[string]ledger.PvtCollFilter{ dig.Namespace: map[string]bool{ dig.Collection: true, @@ -191,7 +204,7 @@ func (dr *dataRetriever) fromTransientStore(dig *protosgossip.PvtDataDigest, fil colConfigs, found := rws.CollectionConfigs[dig.Namespace] if !found { dr.logger.Error("No collection config was found for chaincode", dig.Namespace, "collection name", - dig.Namespace, "txID", dig.TxId) + dig.Collection, "txID", dig.TxId) continue } diff --git a/gossip/state/config.go b/gossip/state/config.go index e2a3337e714..18071972d98 100644 --- a/gossip/state/config.go +++ b/gossip/state/config.go @@ -30,6 +30,8 @@ type StateConfig struct { StateBlockBufferSize int StateChannelSize int StateEnabled bool + UseLeaderElection bool + OrgLeader bool } func GlobalConfig() *StateConfig { @@ -68,4 +70,8 @@ func (c *StateConfig) loadStateConfig() { if viper.IsSet("peer.gossip.state.enabled") { c.StateEnabled = viper.GetBool("peer.gossip.state.enabled") } + // The below two configuration parameters are used for straggler() which warns + // if our peer is lagging behind the rest and has no way to catch up. 
+ c.UseLeaderElection = viper.GetBool("peer.gossip.useLeaderElection") + c.OrgLeader = viper.GetBool("peer.gossip.orgLeader") } diff --git a/gossip/state/state.go b/gossip/state/state.go index b7f0a4fc363..d05783b2944 100644 --- a/gossip/state/state.go +++ b/gossip/state/state.go @@ -41,9 +41,9 @@ type GossipStateProvider interface { } const ( - defAntiEntropyBatchSize = 10 - - defMaxBlockDistance = 20 + stragglerWarningThreshold = 100 + defAntiEntropyBatchSize = 10 + defMaxBlockDistance = 20 blocking = true nonBlocking = false @@ -761,6 +761,13 @@ func (s *GossipStateProviderImpl) addPayload(payload *proto.Payload, blockingMod } if !blockingMode && payload.SeqNum-height >= uint64(s.config.StateBlockBufferSize) { + if s.straggler(height, payload) { + s.logger.Warningf("[%s] Current block height (%d) is too far behind other peers at height (%d) to be able to receive blocks "+ + "without state transfer which is disabled in the configuration "+ + "(peer.gossip.state.enabled = false). Consider enabling it or setting the peer explicitly to be a leader (peer.gossip.orgLeader = true) "+ + "in order to pull blocks directly from the ordering service.", + s.chainID, height, payload.SeqNum+1) + } return errors.Errorf("Ledger height is at %d, cannot enqueue block with sequence of %d", height, payload.SeqNum) } @@ -773,6 +780,16 @@ func (s *GossipStateProviderImpl) addPayload(payload *proto.Payload, blockingMod return nil } +func (s *GossipStateProviderImpl) straggler(currHeight uint64, receivedPayload *proto.Payload) bool { + // If state transfer is disabled, there is no way to request blocks from peers that their ledger has advanced too far. + stateDisabled := !s.config.StateEnabled + // We are too far behind if we received a block with a sequence number more than stragglerWarningThreshold ahead of our height. 
+ tooFarBehind := currHeight+stragglerWarningThreshold < receivedPayload.SeqNum + // We depend on other peers for blocks if we use leader election, or we are not explicitly configured to be an org leader. + peerDependent := s.config.UseLeaderElection || !s.config.OrgLeader + return stateDisabled && tooFarBehind && peerDependent +} + func (s *GossipStateProviderImpl) commitBlock(block *common.Block, pvtData util.PvtDataCollections) error { t1 := time.Now() diff --git a/gossip/state/state_test.go b/gossip/state/state_test.go index 3ea3a26c4ea..bbd4afd3f95 100644 --- a/gossip/state/state_test.go +++ b/gossip/state/state_test.go @@ -484,6 +484,61 @@ func newBootNode(id int, committer committer.Committer, acceptor peerIdentityAcc return newPeerNodeWithGossipWithValidatorWithMetrics(logger, id, committer, acceptor, nil, v, gossipMetrics) } +func TestStraggler(t *testing.T) { + for _, testCase := range []struct { + stateEnabled bool + orgLeader bool + leaderElection bool + height uint64 + receivedSeq uint64 + expected bool + }{ + { + height: 100, + receivedSeq: 300, + leaderElection: true, + expected: true, + }, + { + height: 100, + receivedSeq: 300, + expected: true, + }, + { + height: 100, + receivedSeq: 300, + orgLeader: true, + }, + { + height: 100, + receivedSeq: 105, + leaderElection: true, + }, + { + height: 100, + receivedSeq: 300, + leaderElection: true, + stateEnabled: true, + }, + } { + description := fmt.Sprintf("%+v", testCase) + t.Run(description, func(t *testing.T) { + s := &GossipStateProviderImpl{ + config: &StateConfig{ + StateEnabled: testCase.stateEnabled, + OrgLeader: testCase.orgLeader, + UseLeaderElection: testCase.leaderElection, + }, + } + + s.straggler(testCase.height, &proto.Payload{ + SeqNum: testCase.receivedSeq, + }) + }) + } + +} + func TestNilDirectMsg(t *testing.T) { mc := &mockCommitter{Mock: &mock.Mock{}} mc.On("LedgerHeight", mock.Anything).Return(uint64(1), nil) diff --git a/images/orderer/Dockerfile b/images/orderer/Dockerfile index 
1ca6adf0ffe..1356afd7e17 100644 --- a/images/orderer/Dockerfile +++ b/images/orderer/Dockerfile @@ -9,7 +9,7 @@ RUN apk add --no-cache tzdata # set up nsswitch.conf for Go's "netgo" implementation # - https://github.com/golang/go/blob/go1.9.1/src/net/conf.go#L194-L275 # - docker run --rm debian:stretch grep '^hosts:' /etc/nsswitch.conf -RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf +RUN echo 'hosts: files dns' > /etc/nsswitch.conf FROM golang:${GO_VER}-alpine${ALPINE_VER} as golang RUN apk add --no-cache \ diff --git a/images/tools/Dockerfile b/images/tools/Dockerfile index eb3c154bada..7e76943161e 100644 --- a/images/tools/Dockerfile +++ b/images/tools/Dockerfile @@ -17,7 +17,8 @@ ADD . $GOPATH/src/github.com/hyperledger/fabric WORKDIR $GOPATH/src/github.com/hyperledger/fabric FROM golang as tools -RUN make configtxgen configtxlator cryptogen peer discover idemixgen +ARG GO_TAGS +RUN make configtxgen configtxlator cryptogen peer discover osnadmin idemixgen GO_TAGS=${GO_TAGS} FROM golang:${GO_VER}-alpine # git is required to support `go list -m` diff --git a/internal/configtxlator/integration/cors_test.go b/integration/configtxlator/cors_test.go similarity index 100% rename from internal/configtxlator/integration/cors_test.go rename to integration/configtxlator/cors_test.go diff --git a/internal/configtxlator/integration/integration_suite_test.go b/integration/configtxlator/integration_suite_test.go similarity index 100% rename from internal/configtxlator/integration/integration_suite_test.go rename to integration/configtxlator/integration_suite_test.go diff --git a/integration/discovery/discovery_test.go b/integration/discovery/discovery_test.go index 3af7c19d8df..bec2f08eb4b 100644 --- a/integration/discovery/discovery_test.go +++ b/integration/discovery/discovery_test.go @@ -153,12 +153,13 @@ var _ = Describe("DiscoveryService", func() { It("discovers network configuration, endorsers, and peer membership", func() { By("Updating 
anchor peers") + initialHeight := nwo.GetLedgerHeight(network, org1Peer0, "testchannel") network.UpdateChannelAnchors(orderer, "testchannel") - + // wait for anchor peer config updates to be committed + nwo.WaitUntilEqualLedgerHeight(network, "testchannel", initialHeight+2, org1Peer0) // // bootstrapping a peer from snapshot // - By("generating a snapshot at current block number on org1Peer0") blockNum := nwo.GetLedgerHeight(network, org1Peer0, "testchannel") - 1 submitSnapshotRequest(network, "testchannel", 0, org1Peer0, "Snapshot request submitted successfully") diff --git a/integration/helpers/images.go b/integration/helpers/images.go index b559a9cd076..343f44ceda2 100644 --- a/integration/helpers/images.go +++ b/integration/helpers/images.go @@ -23,7 +23,7 @@ func AssertImagesExist(imageNames ...string) { for _, imageName := range imageNames { images, err := dockerClient.ListImages(docker.ListImagesOptions{ - Filter: imageName, + Filters: map[string][]string{"reference": {imageName}}, }) ExpectWithOffset(1, err).NotTo(HaveOccurred()) diff --git a/integration/nwo/fabricconfig/core.go b/integration/nwo/fabricconfig/core.go index 106083961dc..f8c674ce639 100644 --- a/integration/nwo/fabricconfig/core.go +++ b/integration/nwo/fabricconfig/core.go @@ -184,6 +184,14 @@ type PKCS11 struct { Pin string `yaml:"Pin,omitempty"` Label string `yaml:"Label,omitempty"` Library string `yaml:"Library,omitempty"` + + AltID string `yaml:"AltID,omitempty"` + KeyIDs []KeyIDMapping `yaml:"KeyIDs,omitempty"` +} + +type KeyIDMapping struct { + SKI string `yaml:"SKI,omitempty"` + ID string `yaml:"ID,omitempty"` } type DeliveryClient struct { diff --git a/integration/pkcs11/pkcs11_test.go b/integration/pkcs11/pkcs11_test.go index f8105543d69..d1f5c8fe285 100644 --- a/integration/pkcs11/pkcs11_test.go +++ b/integration/pkcs11/pkcs11_test.go @@ -35,9 +35,10 @@ import ( var _ = Describe("PKCS11 enabled network", func() { var ( - tempDir string - network *nwo.Network - process ifrit.Process 
+ tempDir string + network *nwo.Network + chaincode nwo.Chaincode + process ifrit.Process ) BeforeEach(func() { @@ -49,26 +50,7 @@ var _ = Describe("PKCS11 enabled network", func() { network.GenerateConfigTree() network.Bootstrap() - By("configuring PKCS11 artifacts") - setupPKCS11(network) - - By("starting fabric processes") - networkRunner := network.NetworkGroupRunner() - process = ifrit.Invoke(networkRunner) - Eventually(process.Ready(), network.EventuallyTimeout).Should(BeClosed()) - }) - - AfterEach(func() { - if process != nil { - process.Signal(syscall.SIGTERM) - Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive()) - } - network.Cleanup() - os.RemoveAll(tempDir) - }) - - It("executes transactions against a basic solo network", func() { - chaincode := nwo.Chaincode{ + chaincode = nwo.Chaincode{ Name: "mycc", Version: "0.0", Path: components.Build("github.com/hyperledger/fabric/integration/chaincode/simple/cmd"), @@ -80,17 +62,68 @@ var _ = Describe("PKCS11 enabled network", func() { InitRequired: true, Label: "my_prebuilt_chaincode", } + }) - orderer := network.Orderer("orderer") - network.CreateAndJoinChannels(orderer) + AfterEach(func() { + if process != nil { + process.Signal(syscall.SIGTERM) + Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive()) + } + network.Cleanup() + os.RemoveAll(tempDir) + }) + + Describe("without mapping", func() { + BeforeEach(func() { + By("configuring PKCS11 artifacts") + setupPKCS11(network, noMapping) + + By("starting fabric processes") + networkRunner := network.NetworkGroupRunner() + process = ifrit.Invoke(networkRunner) + Eventually(process.Ready(), network.EventuallyTimeout).Should(BeClosed()) + }) + + It("executes transactions against a basic solo network", func() { + orderer := network.Orderer("orderer") + network.CreateAndJoinChannels(orderer) + + nwo.EnableCapabilities(network, "testchannel", "Application", "V2_0", orderer, network.PeersWithChannel("testchannel")...) 
+ nwo.DeployChaincode(network, "testchannel", orderer, chaincode) + runQueryInvokeQuery(network, orderer, network.Peer("Org1", "peer0"), "testchannel") + }) + }) - nwo.EnableCapabilities(network, "testchannel", "Application", "V2_0", orderer, network.PeersWithChannel("testchannel")...) - nwo.DeployChaincode(network, "testchannel", orderer, chaincode) - runQueryInvokeQuery(network, orderer, network.Peer("Org1", "peer0"), "testchannel") + Describe("mapping everything", func() { + BeforeEach(func() { + By("configuring PKCS11 artifacts") + setupPKCS11(network, mapAll) + + By("starting fabric processes") + networkRunner := network.NetworkGroupRunner() + process = ifrit.Invoke(networkRunner) + Eventually(process.Ready(), network.EventuallyTimeout).Should(BeClosed()) + }) + + It("executes transactions against a basic solo network", func() { + orderer := network.Orderer("orderer") + network.CreateAndJoinChannels(orderer) + + nwo.EnableCapabilities(network, "testchannel", "Application", "V2_0", orderer, network.PeersWithChannel("testchannel")...) 
+ nwo.DeployChaincode(network, "testchannel", orderer, chaincode) + runQueryInvokeQuery(network, orderer, network.Peer("Org1", "peer0"), "testchannel") + }) }) }) -func setupPKCS11(network *nwo.Network) { +type model uint8 + +const ( + noMapping = model(iota) + mapAll +) + +func setupPKCS11(network *nwo.Network, model model) { lib, pin, label := bpkcs11.FindPKCS11Lib() By("establishing a PKCS11 session") @@ -98,8 +131,22 @@ func setupPKCS11(network *nwo.Network) { defer ctx.Destroy() defer ctx.CloseSession(sess) - configurePeerPKCS11(ctx, sess, network) - configureOrdererPKCS11(ctx, sess, network) + serialNumbers := map[string]*big.Int{} + configurePeerPKCS11(ctx, sess, network, serialNumbers) + configureOrdererPKCS11(ctx, sess, network, serialNumbers) + + var keyConfig []fabricconfig.KeyIDMapping + switch model { + case noMapping: + case mapAll: + updateKeyIdentifiers(ctx, sess, serialNumbers) + for ski, serial := range serialNumbers { + keyConfig = append(keyConfig, fabricconfig.KeyIDMapping{ + SKI: ski, + ID: serial.String(), + }) + } + } bccspConfig := &fabricconfig.BCCSP{ Default: "PKCS11", @@ -109,6 +156,7 @@ func setupPKCS11(network *nwo.Network) { Pin: pin, Label: label, Library: lib, + KeyIDs: keyConfig, }, } @@ -126,7 +174,7 @@ func setupPKCS11(network *nwo.Network) { network.WriteOrdererConfig(orderer, ordererConfig) } -func configurePeerPKCS11(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle, network *nwo.Network) { +func configurePeerPKCS11(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle, network *nwo.Network, serialNumbers map[string]*big.Int) { for _, peer := range network.Peers { orgName := peer.Organization @@ -144,20 +192,23 @@ func configurePeerPKCS11(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle, network *nw By("Updating the peer signcerts") newOrdererPemCert := buildCert(caBytes, orgCAPath, peerCSR, peerSerial, peerPubKey) updateMSPFolder(network.PeerLocalMSPDir(peer), fmt.Sprintf("peer.%s-cert.pem", domain), newOrdererPemCert) + 
serialNumbers[hex.EncodeToString(skiForKey(peerPubKey))] = peerSerial By("Updating the peer admin user signcerts") newAdminPemCert := buildCert(caBytes, orgCAPath, adminCSR, adminSerial, adminPubKey) orgAdminMSPPath := network.PeerUserMSPDir(peer, "Admin") updateMSPFolder(orgAdminMSPPath, fmt.Sprintf("Admin@%s-cert.pem", domain), newAdminPemCert) + serialNumbers[hex.EncodeToString(skiForKey(adminPubKey))] = adminSerial By("Updating the peer user1 signcerts") newUserPemCert := buildCert(caBytes, orgCAPath, userCSR, userSerial, userPubKey) orgUserMSPPath := network.PeerUserMSPDir(peer, "User1") updateMSPFolder(orgUserMSPPath, fmt.Sprintf("User1@%s-cert.pem", domain), newUserPemCert) + serialNumbers[hex.EncodeToString(skiForKey(userPubKey))] = userSerial } } -func configureOrdererPKCS11(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle, network *nwo.Network) { +func configureOrdererPKCS11(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle, network *nwo.Network, serialNumbers map[string]*big.Int) { orderer := network.Orderer("orderer") orgName := orderer.Organization domain := network.Organization(orgName).Domain @@ -173,19 +224,24 @@ func configureOrdererPKCS11(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle, network By("Updating the orderer signcerts") newOrdererPemCert := buildCert(caBytes, orgCAPath, ordererCSR, ordererSerial, ordererPubKey) updateMSPFolder(network.OrdererLocalMSPDir(orderer), fmt.Sprintf("orderer.%s-cert.pem", domain), newOrdererPemCert) + serialNumbers[hex.EncodeToString(skiForKey(ordererPubKey))] = ordererSerial By("Updating the orderer admin user signcerts") newAdminPemCert := buildCert(caBytes, orgCAPath, adminCSR, adminSerial, adminPubKey) orgAdminMSPPath := network.OrdererUserMSPDir(orderer, "Admin") updateMSPFolder(orgAdminMSPPath, fmt.Sprintf("Admin@%s-cert.pem", domain), newAdminPemCert) + serialNumbers[hex.EncodeToString(skiForKey(adminPubKey))] = adminSerial } // Creates pkcs11 context and session func setupPKCS11Ctx(lib, label, pin string) (*pkcs11.Ctx, 
pkcs11.SessionHandle) { ctx := pkcs11.New(lib) - err := ctx.Initialize() - Expect(err).NotTo(HaveOccurred()) + if err := ctx.Initialize(); err != nil { + Expect(err).To(Equal(pkcs11.Error(pkcs11.CKR_CRYPTOKI_ALREADY_INITIALIZED))) + } else { + Expect(err).NotTo(HaveOccurred()) + } slot := findPKCS11Slot(ctx, label) Expect(slot).Should(BeNumerically(">", 0), "Could not find slot with label %s", label) @@ -357,7 +413,7 @@ func generateKeyPair(ctx *pkcs11.Ctx, sess pkcs11.SessionHandle) (*ecdsa.PublicK err = ctx.SetAttributeValue(sess, privK, setskiT) Expect(err).NotTo(HaveOccurred()) - // convert pub key to rsa types + // convert pub key to ansi types nistCurve := elliptic.P256() x, y := elliptic.Unmarshal(nistCurve, ecpt) if x == nil { @@ -406,3 +462,39 @@ func ecPoint(pkcs11lib *pkcs11.Ctx, session pkcs11.SessionHandle, key pkcs11.Obj return ecpt } + +func skiForKey(pk *ecdsa.PublicKey) []byte { + ski := sha256.Sum256(elliptic.Marshal(pk.Curve, pk.X, pk.Y)) + return ski[:] +} + +func updateKeyIdentifiers(pctx *pkcs11.Ctx, sess pkcs11.SessionHandle, serialNumbers map[string]*big.Int) { + for ks, serial := range serialNumbers { + ski, err := hex.DecodeString(ks) + Expect(err).NotTo(HaveOccurred()) + + updateKeyIdentifier(pctx, sess, pkcs11.CKO_PUBLIC_KEY, ski, []byte(serial.String())) + updateKeyIdentifier(pctx, sess, pkcs11.CKO_PRIVATE_KEY, ski, []byte(serial.String())) + } +} + +func updateKeyIdentifier(pctx *pkcs11.Ctx, sess pkcs11.SessionHandle, class uint, currentID, newID []byte) { + pkt := []*pkcs11.Attribute{ + pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), + pkcs11.NewAttribute(pkcs11.CKA_ID, currentID), + } + err := pctx.FindObjectsInit(sess, pkt) + Expect(err).NotTo(HaveOccurred()) + + objs, _, err := pctx.FindObjects(sess, 1) + Expect(err).NotTo(HaveOccurred()) + Expect(objs).To(HaveLen(1)) + + err = pctx.FindObjectsFinal(sess) + Expect(err).NotTo(HaveOccurred()) + + err = pctx.SetAttributeValue(sess, objs[0], []*pkcs11.Attribute{ + 
pkcs11.NewAttribute(pkcs11.CKA_ID, newID), + }) + Expect(err).NotTo(HaveOccurred()) +} diff --git a/integration/raft/cft_test.go b/integration/raft/cft_test.go index 519c2573437..f3f8279e02d 100644 --- a/integration/raft/cft_test.go +++ b/integration/raft/cft_test.go @@ -27,6 +27,7 @@ import ( conftx "github.com/hyperledger/fabric-config/configtx" "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/msp" + "github.com/hyperledger/fabric-protos-go/orderer/etcdraft" "github.com/hyperledger/fabric/cmd/common/signer" "github.com/hyperledger/fabric/common/configtx" "github.com/hyperledger/fabric/common/util" @@ -233,13 +234,222 @@ var _ = Describe("EndToEnd Crash Fault Tolerance", func() { env = CreateBroadcastEnvelope(network, o1, channelID, make([]byte, 1000)) resp, err := ordererclient.Broadcast(network, o1, env) Expect(err).NotTo(HaveOccurred()) - Expect(resp.Status).To(Equal(common.Status_SUCCESS)) + Eventually(resp.Status, network.EventuallyTimeout).Should(Equal(common.Status_SUCCESS)) - blko1 := FetchBlock(network, o1, 5, channelID) - blko2 := FetchBlock(network, o2, 5, channelID) + for i := 1; i <= 5; i++ { + blko1 := FetchBlock(network, o1, uint64(i), channelID) + blko2 := FetchBlock(network, o2, uint64(i), channelID) - Expect(blko1.Header.DataHash).To(Equal(blko2.Header.DataHash)) + Expect(blko1.Header.DataHash).To(Equal(blko2.Header.DataHash)) + metao1, err := protoutil.GetConsenterMetadataFromBlock(blko1) + Expect(err).NotTo(HaveOccurred()) + metao2, err := protoutil.GetConsenterMetadataFromBlock(blko2) + Expect(err).NotTo(HaveOccurred()) + + bmo1 := &etcdraft.BlockMetadata{} + proto.Unmarshal(metao1.Value, bmo1) + bmo2 := &etcdraft.BlockMetadata{} + proto.Unmarshal(metao2.Value, bmo2) + + Expect(bmo2).To(Equal(bmo1)) + } }) + + It("catches up and replicates consenters metadata", func() { + network = nwo.New(nwo.MultiNodeEtcdRaft(), testDir, client, StartPort(), components) + orderers := 
[]*nwo.Orderer{network.Orderer("orderer1"), network.Orderer("orderer2"), network.Orderer("orderer3")} + peer = network.Peer("Org1", "peer0") + + network.GenerateConfigTree() + network.Bootstrap() + + ordererRunners := []*ginkgomon.Runner{} + orderersMembers := grouper.Members{} + for _, o := range orderers { + runner := network.OrdererRunner(o) + ordererRunners = append(ordererRunners, runner) + orderersMembers = append(orderersMembers, grouper.Member{ + Name: o.ID(), + Runner: runner, + }) + } + + By("Starting ordering service cluster") + ordererGroup := grouper.NewParallel(syscall.SIGTERM, orderersMembers) + ordererProc = ifrit.Invoke(ordererGroup) + Eventually(ordererProc.Ready(), network.EventuallyTimeout).Should(BeClosed()) + + By("Setting up new OSN to be added to the cluster") + o4 := &nwo.Orderer{ + Name: "orderer4", + Organization: "OrdererOrg", + } + ports := nwo.Ports{} + for _, portName := range nwo.OrdererPortNames() { + ports[portName] = network.ReservePort() + } + + network.PortsByOrdererID[o4.ID()] = ports + network.Orderers = append(network.Orderers, o4) + network.GenerateOrdererConfig(o4) + extendNetwork(network) + + ordererCertificatePath := filepath.Join(network.OrdererLocalTLSDir(o4), "server.crt") + ordererCert, err := ioutil.ReadFile(ordererCertificatePath) + Expect(err).NotTo(HaveOccurred()) + + By("Adding new ordering service node") + addConsenter(network, peer, orderers[0], "systemchannel", etcdraft.Consenter{ + ServerTlsCert: ordererCert, + ClientTlsCert: ordererCert, + Host: "127.0.0.1", + Port: uint32(network.OrdererPort(o4, nwo.ClusterPort)), + }) + + // Get the last config block of the system channel + configBlock := nwo.GetConfigBlock(network, peer, orderers[0], "systemchannel") + // Plant it in the file system of orderer, the new node to be onboarded. 
+ err = ioutil.WriteFile(filepath.Join(testDir, "systemchannel_block.pb"), protoutil.MarshalOrPanic(configBlock), 0o644) + + Expect(err).NotTo(HaveOccurred()) + By("Starting new ordering service node") + r4 := network.OrdererRunner(o4) + orderers = append(orderers, o4) + ordererRunners = append(ordererRunners, r4) + o4process := ifrit.Invoke(r4) + Eventually(o4process.Ready(), network.EventuallyTimeout).Should(BeClosed()) + + By("Pick ordering service node to be evicted") + victimIdx := findLeader(ordererRunners) - 1 + victim := orderers[victimIdx] + victimCertBytes, err := ioutil.ReadFile(filepath.Join(network.OrdererLocalTLSDir(victim), "server.crt")) + Expect(err).NotTo(HaveOccurred()) + + assertBlockReception(map[string]int{ + "systemchannel": 1, + }, orderers, peer, network) + + By("Removing OSN from the channel") + remainedOrderers := []*nwo.Orderer{} + remainedRunners := []*ginkgomon.Runner{} + + for i, o := range orderers { + if i == victimIdx { + continue + } + remainedOrderers = append(remainedOrderers, o) + remainedRunners = append(remainedRunners, ordererRunners[i]) + } + + removeConsenter(network, peer, remainedOrderers[0], "systemchannel", victimCertBytes) + + By("Asserting all remaining nodes got last block") + assertBlockReception(map[string]int{ + "systemchannel": 2, + }, remainedOrderers, peer, network) + By("Making sure OSN was evicted and configuration applied") + findLeader(remainedRunners) + + By("Restarting all nodes") + o4process.Signal(syscall.SIGTERM) + Eventually(o4process.Wait(), network.EventuallyTimeout).Should(Receive()) + ordererProc.Signal(syscall.SIGTERM) + Eventually(ordererProc.Wait(), network.EventuallyTimeout).Should(Receive()) + + r1 := network.OrdererRunner(remainedOrderers[1]) + r2 := network.OrdererRunner(remainedOrderers[2]) + orderersMembers = grouper.Members{ + {Name: remainedOrderers[1].ID(), Runner: r1}, + {Name: remainedOrderers[2].ID(), Runner: r2}, + } + + ordererGroup = grouper.NewParallel(syscall.SIGTERM, 
orderersMembers) + ordererProc = ifrit.Invoke(ordererGroup) + Eventually(ordererProc.Ready(), network.EventuallyTimeout).Should(BeClosed()) + findLeader([]*ginkgomon.Runner{r1, r2}) + + By("Submitting several transactions to trigger snapshot") + env := CreateBroadcastEnvelope(network, remainedOrderers[1], "systemchannel", make([]byte, 2000)) + for i := 3; i <= 10; i++ { + // Note that MaxMessageCount is 1 be default, so every tx results in a new block + resp, err := ordererclient.Broadcast(network, remainedOrderers[1], env) + Expect(err).NotTo(HaveOccurred()) + Expect(resp.Status).To(Equal(common.Status_SUCCESS)) + } + + assertBlockReception(map[string]int{ + "systemchannel": 10, + }, []*nwo.Orderer{remainedOrderers[1], remainedOrderers[2]}, peer, network) + + By("Clean snapshot folder of lagging behind node") + snapDir := path.Join(network.RootDir, "orderers", remainedOrderers[0].ID(), "etcdraft", "snapshot") + snapshots, err := ioutil.ReadDir(snapDir) + Expect(err).NotTo(HaveOccurred()) + + for _, snap := range snapshots { + os.RemoveAll(path.Join(snapDir, snap.Name())) + } + + ordererProc.Signal(syscall.SIGTERM) + Eventually(ordererProc.Wait(), network.EventuallyTimeout).Should(Receive()) + + r0 := network.OrdererRunner(remainedOrderers[0]) + r1 = network.OrdererRunner(remainedOrderers[1]) + orderersMembers = grouper.Members{ + {Name: remainedOrderers[0].ID(), Runner: r0}, + {Name: remainedOrderers[1].ID(), Runner: r1}, + } + + ordererGroup = grouper.NewParallel(syscall.SIGTERM, orderersMembers) + ordererProc = ifrit.Invoke(ordererGroup) + Eventually(ordererProc.Ready(), network.EventuallyTimeout).Should(BeClosed()) + findLeader([]*ginkgomon.Runner{r0, r1}) + + By("Asserting that orderer1 receives and persists snapshot") + Eventually(func() int { + files, err := ioutil.ReadDir(path.Join(snapDir, "systemchannel")) + Expect(err).NotTo(HaveOccurred()) + return len(files) + }, network.EventuallyTimeout).Should(BeNumerically(">", 0)) + + 
assertBlockReception(map[string]int{ + "systemchannel": 10, + }, []*nwo.Orderer{remainedOrderers[0]}, peer, network) + + By("Make sure we can restart and connect to orderer1 with orderer4") + ordererProc.Signal(syscall.SIGTERM) + Eventually(ordererProc.Wait(), network.EventuallyTimeout).Should(Receive()) + + r0 = network.OrdererRunner(remainedOrderers[0]) + r2 = network.OrdererRunner(remainedOrderers[2]) + orderersMembers = grouper.Members{ + {Name: remainedOrderers[0].ID(), Runner: r0}, + {Name: remainedOrderers[2].ID(), Runner: r2}, + } + + ordererGroup = grouper.NewParallel(syscall.SIGTERM, orderersMembers) + ordererProc = ifrit.Invoke(ordererGroup) + Eventually(ordererProc.Ready(), network.EventuallyTimeout).Should(BeClosed()) + findLeader([]*ginkgomon.Runner{r0, r2}) + + for i := 1; i <= 10; i++ { + blko1 := FetchBlock(network, remainedOrderers[0], uint64(i), "systemchannel") + blko2 := FetchBlock(network, remainedOrderers[2], uint64(i), "systemchannel") + Expect(blko1.Header.DataHash).To(Equal(blko2.Header.DataHash)) + metao1, err := protoutil.GetConsenterMetadataFromBlock(blko1) + Expect(err).NotTo(HaveOccurred()) + metao2, err := protoutil.GetConsenterMetadataFromBlock(blko2) + Expect(err).NotTo(HaveOccurred()) + + bmo1 := &etcdraft.BlockMetadata{} + proto.Unmarshal(metao1.Value, bmo1) + bmo2 := &etcdraft.BlockMetadata{} + proto.Unmarshal(metao2.Value, bmo2) + + Expect(bmo2).To(Equal(bmo1)) + } + }) + }) When("The leader dies", func() { @@ -725,7 +935,7 @@ var _ = Describe("EndToEnd Crash Fault Tolerance", func() { p, err := ordererclient.Broadcast(network, orderer, channelCreateTxn) Expect(err).NotTo(HaveOccurred()) Expect(p.Status).To(Equal(common.Status_BAD_REQUEST)) - Expect(p.Info).To(ContainSubstring("identity expired")) + Expect(p.Info).To(ContainSubstring("broadcast client identity expired")) By("Attempting to fetch a block from orderer and failing") denv := CreateDeliverEnvelope(network, orderer, 0, network.SystemChannel.Name) @@ -734,7 +944,7 @@ 
var _ = Describe("EndToEnd Crash Fault Tolerance", func() { block, err := ordererclient.Deliver(network, orderer, denv) Expect(err).To(HaveOccurred()) Expect(block).To(BeNil()) - Eventually(runner.Err(), time.Minute, time.Second).Should(gbytes.Say("client identity expired")) + Eventually(runner.Err(), time.Minute, time.Second).Should(gbytes.Say("deliver client identity expired")) By("Killing orderer") ordererProc.Signal(syscall.SIGTERM) diff --git a/integration/raft/config_test.go b/integration/raft/config_test.go index cf4674b8cec..250bbd18df2 100644 --- a/integration/raft/config_test.go +++ b/integration/raft/config_test.go @@ -162,7 +162,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { By("Starting orderer with malformed genesis block") ordererRunner := network.OrdererGroupRunner() process := ifrit.Invoke(ordererRunner) - Eventually(process.Wait, network.EventuallyTimeout).Should(Receive()) // orderer process should exit + Eventually(process.Wait(), network.EventuallyTimeout).Should(Receive()) // orderer process should exit network.Cleanup() os.RemoveAll(testDir) @@ -208,7 +208,7 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { exitCode := network.CreateChannelExitCode(channel, orderer, org1Peer0, org1Peer0, org2Peer0, orderer) Expect(exitCode).NotTo(Equal(0)) - Consistently(process.Wait).ShouldNot(Receive()) // malformed tx should not crash orderer + Consistently(process.Wait()).ShouldNot(Receive()) // malformed tx should not crash orderer Expect(runner.Err()).To(gbytes.Say(`invalid new config metadata: ElectionTick \(10\) must be greater than HeartbeatTick \(10\)`)) By("Submitting channel config update with illegal value") @@ -403,6 +403,9 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { newConsenterCert, err := x509.ParseCertificate(newConsenterCertPem.Bytes) Expect(err).NotTo(HaveOccurred()) + newConsenterHost := "127.0.0.1" + newConsenterPort := uint32(network.OrdererPort(orderer3, 
nwo.ListenPort)) + current, updated := consenterAdder( network, peer, @@ -411,13 +414,13 @@ var _ = Describe("EndToEnd reconfiguration and onboarding", func() { etcdraft.Consenter{ ServerTlsCert: client.Cert, ClientTlsCert: client.Cert, - Host: "127.0.0.1", - Port: uint32(network.OrdererPort(orderer3, nwo.ListenPort)), + Host: newConsenterHost, + Port: newConsenterPort, }, ) sess = nwo.UpdateOrdererConfigSession(network, orderer, network.SystemChannel.Name, current, updated, peer, orderer) Eventually(sess, network.EventuallyTimeout).Should(gexec.Exit(1)) - Expect(sess.Err).To(gbytes.Say(fmt.Sprintf("BAD_REQUEST -- error applying config update to existing channel 'systemchannel': consensus metadata update for channel config update is invalid: invalid new config metadata: verifying tls client cert with serial number %d: x509: certificate signed by unknown authority", newConsenterCert.SerialNumber))) + Expect(sess.Err).To(gbytes.Say(fmt.Sprintf("BAD_REQUEST -- error applying config update to existing channel 'systemchannel': consensus metadata update for channel config update is invalid: invalid new config metadata: consenter %s:%d has invalid certificate: verifying tls client cert with serial number %d: x509: certificate signed by unknown authority", newConsenterHost, newConsenterPort, newConsenterCert.SerialNumber))) }) }) diff --git a/internal/cryptogen/ca/ca.go b/internal/cryptogen/ca/ca.go index 95ea31e0ffc..f9cacddb81a 100644 --- a/internal/cryptogen/ca/ca.go +++ b/internal/cryptogen/ca/ca.go @@ -167,7 +167,7 @@ func (ca *CA) SignCertificate( return cert, nil } -// compute Subject Key Identifier +// compute Subject Key Identifier using RFC 7093, Section 2, Method 4 func computeSKI(privKey *ecdsa.PrivateKey) []byte { // Marshall the public key raw := elliptic.Marshal(privKey.Curve, privKey.PublicKey.X, privKey.PublicKey.Y) diff --git a/internal/peer/common/common.go b/internal/peer/common/common.go index 46654fdc454..6d87f033e59 100644 --- 
a/internal/peer/common/common.go +++ b/internal/peer/common/common.go @@ -271,6 +271,14 @@ func configFromEnv(prefix string) (address, override string, clientConfig comm.C secOpts.Certificate = certPEM } clientConfig.SecOpts = secOpts + clientConfig.MaxRecvMsgSize = comm.DefaultMaxRecvMsgSize + if viper.IsSet(prefix + ".maxRecvMsgSize") { + clientConfig.MaxRecvMsgSize = int(viper.GetInt32(prefix + ".maxRecvMsgSize")) + } + clientConfig.MaxSendMsgSize = comm.DefaultMaxSendMsgSize + if viper.IsSet(prefix + ".maxSendMsgSize") { + clientConfig.MaxSendMsgSize = int(viper.GetInt32(prefix + ".maxSendMsgSize")) + } return } diff --git a/internal/peer/common/common_test.go b/internal/peer/common/common_test.go index e3ef3890b02..ffb9c81400c 100644 --- a/internal/peer/common/common_test.go +++ b/internal/peer/common/common_test.go @@ -14,18 +14,21 @@ import ( "path/filepath" "strings" "testing" + "time" cb "github.com/hyperledger/fabric-protos-go/common" pb "github.com/hyperledger/fabric-protos-go/peer" "github.com/hyperledger/fabric/bccsp/factory" "github.com/hyperledger/fabric/bccsp/sw" "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/crypto/tlsgen" "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/util" "github.com/hyperledger/fabric/core/config/configtest" "github.com/hyperledger/fabric/internal/configtxgen/encoder" "github.com/hyperledger/fabric/internal/configtxgen/genesisconfig" "github.com/hyperledger/fabric/internal/peer/common" + "github.com/hyperledger/fabric/internal/pkg/comm" "github.com/hyperledger/fabric/msp" msptesttools "github.com/hyperledger/fabric/msp/mgmt/testtools" "github.com/hyperledger/fabric/protoutil" @@ -348,3 +351,63 @@ func TestGetOrdererEndpointFromConfigTx(t *testing.T) { require.EqualError(t, err, "error loading channel config: config must contain a channel group") }) } + +func TestConfigFromEnv(t *testing.T) { + tempdir, err := ioutil.TempDir("", 
"peer-clientcert") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + // peer client config + address, _, clientConfig, err := common.ConfigFromEnv("peer") + require.NoError(t, err) + require.Equal(t, "", address, "ClientConfig.address by default not set") + require.Equal(t, common.DefaultConnTimeout, clientConfig.Timeout, "ClientConfig.Timeout should be set to default value of %v", common.DefaultConnTimeout) + require.Equal(t, false, clientConfig.SecOpts.UseTLS, "ClientConfig.SecOpts.UseTLS default value should be false") + require.Equal(t, comm.DefaultMaxRecvMsgSize, clientConfig.MaxRecvMsgSize, "ServerConfig.MaxRecvMsgSize should be set to default value %v", comm.DefaultMaxRecvMsgSize) + require.Equal(t, comm.DefaultMaxSendMsgSize, clientConfig.MaxSendMsgSize, "ServerConfig.MaxSendMsgSize should be set to default value %v", comm.DefaultMaxSendMsgSize) + + viper.Set("peer.address", "127.0.0.1") + viper.Set("peer.client.connTimeout", "30s") + viper.Set("peer.maxRecvMsgSize", "1024") + viper.Set("peer.maxSendMsgSize", "2048") + address, _, clientConfig, err = common.ConfigFromEnv("peer") + require.NoError(t, err) + require.Equal(t, "127.0.0.1", address, "ClientConfig.address should be set to 127.0.0.1") + require.Equal(t, 30*time.Second, clientConfig.Timeout, "ClientConfig.Timeout should be set to default value of 30s") + require.Equal(t, 1024, clientConfig.MaxRecvMsgSize, "ClientConfig.MaxRecvMsgSize should be set to 1024") + require.Equal(t, 2048, clientConfig.MaxSendMsgSize, "ClientConfig.maxSendMsgSize should be set to 2048") + + viper.Set("peer.tls.enabled", true) + viper.Set("peer.tls.rootcert.file", "./filenotfound.pem") + _, _, _, err = common.ConfigFromEnv("peer") + require.Error(t, err, "ClientConfig should return with bad root cert file path") + + viper.Set("peer.tls.enabled", false) + viper.Set("peer.tls.clientAuthRequired", true) + viper.Set("peer.tls.clientKey.file", "./filenotfound.pem") + _, _, clientConfig, err = 
common.ConfigFromEnv("peer") + require.Equal(t, false, clientConfig.SecOpts.UseTLS, "ClientConfig.SecOpts.UseTLS should be false") + require.Error(t, err, "ClientConfig should return with client key file path") + + org1CA, err := tlsgen.NewCA() + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org1-ca-cert.pem"), org1CA.CertBytes(), 0o644) + require.NoError(t, err) + org1ServerKP, err := org1CA.NewServerCertKeyPair("localhost") + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org1-peer1-cert.pem"), org1ServerKP.Cert, 0o644) + require.NoError(t, err) + err = ioutil.WriteFile(filepath.Join(tempdir, "org1-peer1-key.pem"), org1ServerKP.Key, 0o600) + require.NoError(t, err) + + viper.Set("peer.tls.enabled", true) + viper.Set("peer.tls.clientAuthRequired", true) + viper.Set("peer.tls.rootcert.file", filepath.Join(tempdir, "org1-ca-cert.pem")) + viper.Set("peer.tls.clientCert.file", filepath.Join(tempdir, "org1-peer1-cert.pem")) + viper.Set("peer.tls.clientKey.file", filepath.Join(tempdir, "org1-peer1-key.pem")) + _, _, clientConfig, err = common.ConfigFromEnv("peer") + require.NoError(t, err) + require.Equal(t, 1, len(clientConfig.SecOpts.ServerRootCAs), "ClientConfig.SecOpts.ServerRootCAs should contain 1 entries") + require.Equal(t, org1ServerKP.Key, clientConfig.SecOpts.Key, "Client.SecOpts.Key should be set to configured key") + require.Equal(t, org1ServerKP.Cert, clientConfig.SecOpts.Certificate, "Client.SecOpts.Certificate shoulbe bet set to configured certificate") +} diff --git a/internal/peer/common/export_test.go b/internal/peer/common/export_test.go new file mode 100644 index 00000000000..e120231a177 --- /dev/null +++ b/internal/peer/common/export_test.go @@ -0,0 +1,10 @@ +/* + SPDX-License-Identifier: Apache-2.0 +*/ + +package common + +var ( + ConfigFromEnv = configFromEnv + DefaultConnTimeout = defaultConnTimeout +) diff --git a/internal/peer/common/peerclient.go b/internal/peer/common/peerclient.go index 
9c6d5548159..89c84be9d2e 100644 --- a/internal/peer/common/peerclient.go +++ b/internal/peer/common/peerclient.go @@ -77,6 +77,16 @@ func NewPeerClientForAddress(address, tlsRootCertFile string) (*PeerClient, erro } clientConfig.SecOpts.ServerRootCAs = [][]byte{caPEM} } + + clientConfig.MaxRecvMsgSize = comm.DefaultMaxRecvMsgSize + if viper.IsSet("peer.maxRecvMsgSize") { + clientConfig.MaxRecvMsgSize = int(viper.GetInt32("peer.maxRecvMsgSize")) + } + clientConfig.MaxSendMsgSize = comm.DefaultMaxSendMsgSize + if viper.IsSet("peer.maxSendMsgSize") { + clientConfig.MaxSendMsgSize = int(viper.GetInt32("peer.maxSendMsgSize")) + } + return newPeerClientForClientConfig(address, override, clientConfig) } diff --git a/internal/peer/node/start.go b/internal/peer/node/start.go index ea1dfa76f78..79c576c0174 100644 --- a/internal/peer/node/start.go +++ b/internal/peer/node/start.go @@ -1133,9 +1133,17 @@ func secureDialOpts(credSupport *comm.CredentialSupport) func() []grpc.DialOptio return func() []grpc.DialOption { var dialOpts []grpc.DialOption // set max send/recv msg sizes + maxRecvMsgSize := comm.DefaultMaxRecvMsgSize + if viper.IsSet("peer.maxRecvMsgSize") { + maxRecvMsgSize = int(viper.GetInt32("peer.maxRecvMsgSize")) + } + maxSendMsgSize := comm.DefaultMaxSendMsgSize + if viper.IsSet("peer.maxSendMsgSize") { + maxSendMsgSize = int(viper.GetInt32("peer.maxSendMsgSize")) + } dialOpts = append( dialOpts, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(comm.MaxRecvMsgSize), grpc.MaxCallSendMsgSize(comm.MaxSendMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRecvMsgSize), grpc.MaxCallSendMsgSize(maxSendMsgSize)), ) // set the keepalive options kaOpts := comm.DefaultKeepaliveOptions diff --git a/internal/pkg/comm/client.go b/internal/pkg/comm/client.go index 191802d2cc8..7fa375e3ad2 100644 --- a/internal/pkg/comm/client.go +++ b/internal/pkg/comm/client.go @@ -24,10 +24,6 @@ type GRPCClient struct { dialOpts []grpc.DialOption // Duration for which 
to block while established a new connection timeout time.Duration - // Maximum message size the client can receive - maxRecvMsgSize int - // Maximum message size the client can send - maxSendMsgSize int } // NewGRPCClient creates a new implementation of GRPCClient given an address @@ -57,8 +53,18 @@ func NewGRPCClient(config ClientConfig) (*GRPCClient, error) { } client.timeout = config.Timeout // set send/recv message size to package defaults - client.maxRecvMsgSize = MaxRecvMsgSize - client.maxSendMsgSize = MaxSendMsgSize + maxRecvMsgSize := DefaultMaxRecvMsgSize + if config.MaxRecvMsgSize != 0 { + maxRecvMsgSize = config.MaxRecvMsgSize + } + maxSendMsgSize := DefaultMaxSendMsgSize + if config.MaxSendMsgSize != 0 { + maxSendMsgSize = config.MaxSendMsgSize + } + client.dialOpts = append(client.dialOpts, grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(maxRecvMsgSize), + grpc.MaxCallSendMsgSize(maxSendMsgSize), + )) return client, nil } @@ -131,16 +137,6 @@ func (client *GRPCClient) MutualTLSRequired() bool { len(client.tlsConfig.Certificates) > 0 } -// SetMaxRecvMsgSize sets the maximum message size the client can receive -func (client *GRPCClient) SetMaxRecvMsgSize(size int) { - client.maxRecvMsgSize = size -} - -// SetMaxSendMsgSize sets the maximum message size the client can send -func (client *GRPCClient) SetMaxSendMsgSize(size int) { - client.maxSendMsgSize = size -} - // SetServerRootCAs sets the list of authorities used to verify server // certificates based on a list of PEM-encoded X509 certificate authorities func (client *GRPCClient) SetServerRootCAs(serverRoots [][]byte) error { @@ -195,11 +191,6 @@ func (client *GRPCClient) NewConnection(address string, tlsOptions ...TLSOption) dialOpts = append(dialOpts, grpc.WithInsecure()) } - dialOpts = append(dialOpts, grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(client.maxRecvMsgSize), - grpc.MaxCallSendMsgSize(client.maxSendMsgSize), - )) - ctx, cancel := 
context.WithTimeout(context.Background(), client.timeout) defer cancel() conn, err := grpc.DialContext(ctx, address, dialOpts...) diff --git a/internal/pkg/comm/client_test.go b/internal/pkg/comm/client_test.go index fa5c7338e8c..0714490da10 100644 --- a/internal/pkg/comm/client_test.go +++ b/internal/pkg/comm/client_test.go @@ -477,25 +477,19 @@ func TestSetMessageSize(t *testing.T) { }, } - // set up test client - client, err := comm.NewGRPCClient(comm.ClientConfig{ - Timeout: testTimeout, - }) - if err != nil { - t.Fatalf("error creating test client: %v", err) - } // run tests for _, test := range tests { test := test address := lis.Addr().String() t.Run(test.name, func(t *testing.T) { t.Log(test.name) - if test.maxRecvSize > 0 { - client.SetMaxRecvMsgSize(test.maxRecvSize) - } - if test.maxSendSize > 0 { - client.SetMaxSendMsgSize(test.maxSendSize) - } + // set up test client + client, err := comm.NewGRPCClient(comm.ClientConfig{ + Timeout: testTimeout, + MaxRecvMsgSize: test.maxRecvSize, + MaxSendMsgSize: test.maxSendSize, + }) + require.NoError(t, err, "error creating test client") conn, err := client.NewConnection(address) require.NoError(t, err) defer conn.Close() diff --git a/internal/pkg/comm/config.go b/internal/pkg/comm/config.go index e5c0f71cf57..99fa33c3ca8 100644 --- a/internal/pkg/comm/config.go +++ b/internal/pkg/comm/config.go @@ -18,10 +18,14 @@ import ( ) // Configuration defaults + +// Max send and receive bytes for grpc clients and servers +const ( + DefaultMaxRecvMsgSize = 100 * 1024 * 1024 + DefaultMaxSendMsgSize = 100 * 1024 * 1024 +) + var ( - // Max send and receive bytes for grpc clients and servers - MaxRecvMsgSize = 100 * 1024 * 1024 - MaxSendMsgSize = 100 * 1024 * 1024 // Default peer keepalive options DefaultKeepaliveOptions = KeepaliveOptions{ ClientInterval: time.Duration(1) * time.Minute, // 1 min @@ -64,6 +68,10 @@ type ServerConfig struct { HealthCheckEnabled bool // ServerStatsHandler should be set if metrics on connections 
are to be reported. ServerStatsHandler *ServerStatsHandler + // Maximum message size the server can receive + MaxRecvMsgSize int + // Maximum message size the server can send + MaxSendMsgSize int } // ClientConfig defines the parameters for configuring a GRPCClient instance @@ -77,6 +85,10 @@ type ClientConfig struct { Timeout time.Duration // AsyncConnect makes connection creation non blocking AsyncConnect bool + // Maximum message size the client can receive + MaxRecvMsgSize int + // Maximum message size the client can send + MaxSendMsgSize int } // Clone clones this ClientConfig diff --git a/internal/pkg/comm/creds_test.go b/internal/pkg/comm/creds_test.go index 68c1e6cbe10..48496f5b753 100644 --- a/internal/pkg/comm/creds_test.go +++ b/internal/pkg/comm/creds_test.go @@ -125,38 +125,25 @@ func TestAddRootCA(t *testing.T) { t.Parallel() caPEM, err := ioutil.ReadFile(filepath.Join("testdata", "certs", "Org1-cert.pem")) - if err != nil { - t.Fatalf("failed to read root certificate: %v", err) - } - - cert := &x509.Certificate{ - EmailAddresses: []string{"test@foobar.com"}, - } + require.NoError(t, err, "failed to read root certificate") expectedCertPool := x509.NewCertPool() ok := expectedCertPool.AppendCertsFromPEM(caPEM) - if !ok { - t.Fatalf("failed to create expected certPool") - } + require.True(t, ok, "failed to create expected certPool") + cert := &x509.Certificate{EmailAddresses: []string{"test@foobar.com"}} expectedCertPool.AddCert(cert) certPool := x509.NewCertPool() ok = certPool.AppendCertsFromPEM(caPEM) - if !ok { - t.Fatalf("failed to create certPool") - } + require.True(t, ok, "failed to create certPool") - tlsConfig := &tls.Config{ - ClientCAs: certPool, - } - config := comm.NewTLSConfig(tlsConfig) - - require.Equal(t, config.Config().ClientCAs, certPool) + config := comm.NewTLSConfig(&tls.Config{ClientCAs: certPool}) + require.Same(t, config.Config().ClientCAs, certPool) + // https://go-review.googlesource.com/c/go/+/229917 
config.AddClientRootCA(cert) - - require.Equal(t, config.Config().ClientCAs, expectedCertPool, "The CertPools should be equal") + require.Equal(t, certPool.Subjects(), expectedCertPool.Subjects(), "subjects in the pool should be equal") } func TestSetClientCAs(t *testing.T) { diff --git a/internal/pkg/comm/server.go b/internal/pkg/comm/server.go index 3b1f6c1ddfa..02e79b4f60c 100644 --- a/internal/pkg/comm/server.go +++ b/internal/pkg/comm/server.go @@ -124,9 +124,18 @@ func NewGRPCServerFromListener(listener net.Listener, serverConfig ServerConfig) return nil, errors.New("serverConfig.SecOpts must contain both Key and Certificate when UseTLS is true") } } + // set max send and recv msg sizes - serverOpts = append(serverOpts, grpc.MaxSendMsgSize(MaxSendMsgSize)) - serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(MaxRecvMsgSize)) + maxSendMsgSize := DefaultMaxSendMsgSize + if serverConfig.MaxSendMsgSize != 0 { + maxSendMsgSize = serverConfig.MaxSendMsgSize + } + maxRecvMsgSize := DefaultMaxRecvMsgSize + if serverConfig.MaxRecvMsgSize != 0 { + maxRecvMsgSize = serverConfig.MaxRecvMsgSize + } + serverOpts = append(serverOpts, grpc.MaxSendMsgSize(maxSendMsgSize)) + serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(maxRecvMsgSize)) // set the keepalive options serverOpts = append(serverOpts, ServerKeepaliveOptions(serverConfig.KaOpts)...) 
// set connection timeout diff --git a/internal/pkg/peer/blocksprovider/blocksprovider.go b/internal/pkg/peer/blocksprovider/blocksprovider.go index fb88596521c..788d8656fa1 100644 --- a/internal/pkg/peer/blocksprovider/blocksprovider.go +++ b/internal/pkg/peer/blocksprovider/blocksprovider.go @@ -96,9 +96,10 @@ type Deliverer struct { Logger *flogging.FabricLogger YieldLeadership bool - MaxRetryDelay time.Duration - InitialRetryDelay time.Duration - MaxRetryDuration time.Duration + BlockGossipDisabled bool + MaxRetryDelay time.Duration + InitialRetryDelay time.Duration + MaxRetryDuration time.Duration // TLSCertHash should be nil when TLS is not enabled TLSCertHash []byte // util.ComputeSHA256(b.credSupport.GetClientCertificate().Certificate[0]) @@ -111,6 +112,9 @@ const backoffExponentBase = 1.2 // DeliverBlocks used to pull out blocks from the ordering service to // distributed them across peers func (d *Deliverer) DeliverBlocks() { + if d.BlockGossipDisabled { + d.Logger.Infof("Will pull blocks without forwarding them to remote peers via gossip") + } failureCounter := 0 totalDuration := time.Duration(0) @@ -256,7 +260,9 @@ func (d *Deliverer) processMsg(msg *orderer.DeliverResponse) error { d.Logger.Warningf("Block [%d] received from ordering service wasn't added to payload buffer: %v", blockNum, err) return errors.WithMessage(err, "could not add block as payload") } - + if d.BlockGossipDisabled { + return nil + } // Gossip messages with other nodes d.Logger.Debugf("Gossiping block [%d]", blockNum) d.Gossip.Gossip(gossipMsg) diff --git a/internal/pkg/peer/blocksprovider/blocksprovider_test.go b/internal/pkg/peer/blocksprovider/blocksprovider_test.go index fa54367be8a..978b1ab2dfb 100644 --- a/internal/pkg/peer/blocksprovider/blocksprovider_test.go +++ b/internal/pkg/peer/blocksprovider/blocksprovider_test.go @@ -533,6 +533,28 @@ var _ = Describe("Blocksprovider", func() { }, })) }) + + When("gossip dissemination is disabled", func() { + BeforeEach(func() { + 
d.BlockGossipDisabled = true + }) + + It("doesn't gossip, only adds to the payload buffer", func() { + Eventually(fakeGossipServiceAdapter.AddPayloadCallCount).Should(Equal(1)) + channelID, payload := fakeGossipServiceAdapter.AddPayloadArgsForCall(0) + Expect(channelID).To(Equal("channel-id")) + Expect(payload).To(Equal(&gossip.Payload{ + Data: protoutil.MarshalOrPanic(&common.Block{ + Header: &common.BlockHeader{ + Number: 8, + }, + }), + SeqNum: 8, + })) + + Consistently(fakeGossipServiceAdapter.GossipCallCount).Should(Equal(0)) + }) + }) }) When("the deliver client returns a status", func() { diff --git a/msp/configbuilder.go b/msp/configbuilder.go index 05397e7e9b6..92ee0273db0 100644 --- a/msp/configbuilder.go +++ b/msp/configbuilder.go @@ -354,11 +354,12 @@ func getMspConfig(dir string, ID string, sigid *msp.SigningIdentityInfo) (*msp.M FabricNodeOus: nodeOUs, } - fmpsjs, _ := proto.Marshal(fmspconf) - - mspconf := &msp.MSPConfig{Config: fmpsjs, Type: int32(FABRIC)} + fmpsjs, err := proto.Marshal(fmspconf) + if err != nil { + return nil, err + } - return mspconf, nil + return &msp.MSPConfig{Config: fmpsjs, Type: int32(FABRIC)}, nil } func loadCertificateAt(dir, certificatePath string, ouType string) []byte { diff --git a/msp/msp_test.go b/msp/msp_test.go index 1560b7410c5..ff8d465f553 100644 --- a/msp/msp_test.go +++ b/msp/msp_test.go @@ -9,7 +9,11 @@ package msp import ( "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" "crypto/x509" + "crypto/x509/pkix" "encoding/asn1" "encoding/hex" "encoding/pem" @@ -29,6 +33,7 @@ import ( "github.com/hyperledger/fabric/bccsp/sw" "github.com/hyperledger/fabric/bccsp/utils" "github.com/hyperledger/fabric/core/config/configtest" + "github.com/hyperledger/fabric/protoutil" "github.com/stretchr/testify/require" ) @@ -282,6 +287,162 @@ func TestSerializeIdentities(t *testing.T) { } } +func computeSKI(key *ecdsa.PublicKey) []byte { + raw := elliptic.Marshal(key.Curve, key.X, key.Y) + hash := 
sha256.Sum256(raw) + return hash[:] +} + +func TestValidHostname(t *testing.T) { + tests := []struct { + name string + valid bool + }{ + {"", false}, + {".", false}, + {"example.com", true}, + {"example.com.", true}, + {"*.example.com", true}, + {".example.com", false}, + {"host.*.example.com", false}, + {"localhost", true}, + {"-localhost", false}, + {"Not_Quite.example.com", true}, + {"weird:colon.example.com", true}, + {"1-2-3.example.com", true}, + } + for _, tt := range tests { + if tt.valid { + require.True(t, validHostname(tt.name), "expected %s to be a valid hostname", tt.name) + } else { + require.False(t, validHostname(tt.name), "expected %s to be an invalid hostname", tt.name) + } + } +} + +func TestValidateCANameConstraintsMitigation(t *testing.T) { + // Prior to Go 1.15, if a signing certificate contains a name constraint, the + // leaf certificate does not include a SAN, and the leaf common name looks + // like a valid hostname, the certificate chain would fail to validate. + // (This behavior may have been introduced with Go 1.10.) + // + // In Go 1.15, the behavior has changed and, by default, the same structure + // will validate. This test asserts on the old behavior. 
+ caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + caKeyUsage := x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign | x509.KeyUsageCRLSign + caTemplate := x509.Certificate{ + Subject: pkix.Name{CommonName: "TestCA"}, + SerialNumber: big.NewInt(1), + NotBefore: time.Now().Add(-1 * time.Hour), + NotAfter: time.Now().Add(2 * time.Hour), + ExcludedDNSDomains: []string{"example.com"}, + PermittedDNSDomainsCritical: true, + IsCA: true, + BasicConstraintsValid: true, + KeyUsage: caKeyUsage, + SubjectKeyId: computeSKI(caKey.Public().(*ecdsa.PublicKey)), + } + caCertBytes, err := x509.CreateCertificate(rand.Reader, &caTemplate, &caTemplate, caKey.Public(), caKey) + require.NoError(t, err) + ca, err := x509.ParseCertificate(caCertBytes) + require.NoError(t, err) + + leafTemplate := x509.Certificate{ + Subject: pkix.Name{CommonName: "localhost"}, + SerialNumber: big.NewInt(2), + NotBefore: time.Now().Add(-1 * time.Hour), + NotAfter: time.Now().Add(2 * time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature, + SubjectKeyId: computeSKI(leafKey.Public().(*ecdsa.PublicKey)), + } + leafCertBytes, err := x509.CreateCertificate(rand.Reader, &leafTemplate, ca, leafKey.Public(), caKey) + require.NoError(t, err) + + keyBytes, err := x509.MarshalPKCS8PrivateKey(leafKey) + require.NoError(t, err) + + caCertPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCertBytes}) + leafCertPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: leafCertBytes}) + leafKeyPem := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}) + + t.Run("VerifyNameConstraintsSingleCert", func(t *testing.T) { + for _, der := range [][]byte{caCertBytes, leafCertBytes} { + cert, err := x509.ParseCertificate(der) + require.NoError(t, err, "failed to parse certificate") + + err = 
verifyLegacyNameConstraints([]*x509.Certificate{cert}) + require.NoError(t, err, "single certificate should not trigger legacy constraints") + } + }) + + t.Run("VerifyNameConstraints", func(t *testing.T) { + var certs []*x509.Certificate + for _, der := range [][]byte{leafCertBytes, caCertBytes} { + cert, err := x509.ParseCertificate(der) + require.NoError(t, err, "failed to parse certificate") + certs = append(certs, cert) + } + + err = verifyLegacyNameConstraints(certs) + require.Error(t, err, "certificate chain should trigger legacy constraints") + var cie x509.CertificateInvalidError + require.True(t, errors.As(err, &cie)) + require.Equal(t, x509.NameConstraintsWithoutSANs, cie.Reason) + }) + + t.Run("VerifyNameConstraintsWithSAN", func(t *testing.T) { + caCert, err := x509.ParseCertificate(caCertBytes) + require.NoError(t, err) + + leafTemplate := leafTemplate + leafTemplate.DNSNames = []string{"localhost"} + + leafCertBytes, err := x509.CreateCertificate(rand.Reader, &leafTemplate, caCert, leafKey.Public(), caKey) + require.NoError(t, err) + + leafCert, err := x509.ParseCertificate(leafCertBytes) + require.NoError(t, err) + + err = verifyLegacyNameConstraints([]*x509.Certificate{leafCert, caCert}) + require.NoError(t, err, "signer with name constraints and leaf with SANs should be valid") + }) + + t.Run("ValidationAtSetup", func(t *testing.T) { + fabricMSPConfig := &msp.FabricMSPConfig{ + Name: "ConstraintsMSP", + RootCerts: [][]byte{caCertPem}, + SigningIdentity: &msp.SigningIdentityInfo{ + PublicSigner: leafCertPem, + PrivateSigner: &msp.KeyInfo{ + KeyIdentifier: "Certificate Without SAN", + KeyMaterial: leafKeyPem, + }, + }, + } + mspConfig := &msp.MSPConfig{ + Config: protoutil.MarshalOrPanic(fabricMSPConfig), + } + + ks, err := sw.NewFileBasedKeyStore(nil, filepath.Join(configtest.GetDevMspDir(), "keystore"), true) + require.NoError(t, err) + cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(ks) + require.NoError(t, err) + + testMSP, err := 
NewBccspMspWithKeyStore(MSPv1_0, ks, cryptoProvider) + require.NoError(t, err) + + err = testMSP.Setup(mspConfig) + require.Error(t, err) + var cie x509.CertificateInvalidError + require.True(t, errors.As(err, &cie)) + require.Equal(t, x509.NameConstraintsWithoutSANs, cie.Reason) + }) +} + func TestIsWellFormed(t *testing.T) { mspMgr := NewMSPManager() diff --git a/msp/mspimpl.go b/msp/mspimpl.go index bbe55adfe2d..e068350d459 100644 --- a/msp/mspimpl.go +++ b/msp/mspimpl.go @@ -10,8 +10,10 @@ import ( "bytes" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" "encoding/hex" "encoding/pem" + "strings" "github.com/golang/protobuf/proto" m "github.com/hyperledger/fabric-protos-go/msp" @@ -735,9 +737,107 @@ func (msp *bccspmsp) getUniqueValidationChain(cert *x509.Certificate, opts x509. return nil, errors.Errorf("this MSP only supports a single validation chain, got %d", len(validationChains)) } + // Make the additional verification checks that were done in Go 1.14. + err = verifyLegacyNameConstraints(validationChains[0]) + if err != nil { + return nil, errors.WithMessage(err, "the supplied identity is not valid") + } + return validationChains[0], nil } +var ( + oidExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} + oidExtensionNameConstraints = asn1.ObjectIdentifier{2, 5, 29, 30} +) + +// verifyLegacyNameConstraints exercises the name constraint validation rules +// that were part of the certificate verification process in Go 1.14. +// +// If a signing certificate contains a name constraint, the leaf certificate +// does not include SAN extensions, and the leaf's common name looks like a +// host name, the validation would fail with an x509.CertificateInvalidError +// and a reason of x509.NameConstraintsWithoutSANs. +func verifyLegacyNameConstraints(chain []*x509.Certificate) error { + if len(chain) < 2 { + return nil + } + + // Leaf certificates with SANs are fine. 
+ if oidInExtensions(oidExtensionSubjectAltName, chain[0].Extensions) { + return nil + } + // Leaf certificates without a hostname in CN are fine. + if !validHostname(chain[0].Subject.CommonName) { + return nil + } + // If an intermediate or root has a name constraint, validation + // would fail in Go 1.14. + for _, c := range chain[1:] { + if oidInExtensions(oidExtensionNameConstraints, c.Extensions) { + return x509.CertificateInvalidError{Cert: chain[0], Reason: x509.NameConstraintsWithoutSANs} + } + } + return nil +} + +func oidInExtensions(oid asn1.ObjectIdentifier, exts []pkix.Extension) bool { + for _, ext := range exts { + if ext.Id.Equal(oid) { + return true + } + } + return false +} + +// validHostname reports whether host is a valid hostname that can be matched or +// matched against according to RFC 6125 2.2, with some leniency to accommodate +// legacy values. +// +// This implementation is sourced from the standard library. +func validHostname(host string) bool { + host = strings.TrimSuffix(host, ".") + + if len(host) == 0 { + return false + } + + for i, part := range strings.Split(host, ".") { + if part == "" { + // Empty label. + return false + } + if i == 0 && part == "*" { + // Only allow full left-most wildcards, as those are the only ones + // we match, and matching literal '*' characters is probably never + // the expected behavior. + continue + } + for j, c := range part { + if 'a' <= c && c <= 'z' { + continue + } + if '0' <= c && c <= '9' { + continue + } + if 'A' <= c && c <= 'Z' { + continue + } + if c == '-' && j != 0 { + continue + } + if c == '_' || c == ':' { + // Not valid characters in hostnames, but commonly + // found in deployments outside the WebPKI. 
+ continue + } + return false + } + } + + return true +} + func (msp *bccspmsp) getValidationChain(cert *x509.Certificate, isIntermediateChain bool) ([]*x509.Certificate, error) { validationChain, err := msp.getUniqueValidationChain(cert, msp.getValidityOptsForCert(cert)) if err != nil { diff --git a/msp/mspimplsetup.go b/msp/mspimplsetup.go index 66c641299da..73ce30146f3 100644 --- a/msp/mspimplsetup.go +++ b/msp/mspimplsetup.go @@ -10,12 +10,14 @@ import ( "bytes" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" "fmt" "time" "github.com/golang/protobuf/proto" m "github.com/hyperledger/fabric-protos-go/msp" "github.com/hyperledger/fabric/bccsp" + "github.com/hyperledger/fabric/bccsp/utils" errors "github.com/pkg/errors" ) @@ -194,6 +196,23 @@ func (msp *bccspmsp) setupAdminsV142(conf *m.FabricMSPConfig) error { return nil } +func isECDSASignatureAlgorithm(algid asn1.ObjectIdentifier) bool { + // This is the set of ECDSA algorithms supported by Go 1.14 for CRL + // signatures. + ecdsaSignaureAlgorithms := []asn1.ObjectIdentifier{ + {1, 2, 840, 10045, 4, 1}, // oidSignatureECDSAWithSHA1 + {1, 2, 840, 10045, 4, 3, 2}, // oidSignatureECDSAWithSHA256 + {1, 2, 840, 10045, 4, 3, 3}, // oidSignatureECDSAWithSHA384 + {1, 2, 840, 10045, 4, 3, 4}, // oidSignatureECDSAWithSHA512 + } + for _, id := range ecdsaSignaureAlgorithms { + if id.Equal(algid) { + return true + } + } + return false +} + func (msp *bccspmsp) setupCRLs(conf *m.FabricMSPConfig) error { // setup the CRL (if present) msp.CRL = make([]*pkix.CertificateList, len(conf.RevocationList)) @@ -203,6 +222,19 @@ func (msp *bccspmsp) setupCRLs(conf *m.FabricMSPConfig) error { return errors.Wrap(err, "could not parse RevocationList") } + // Massage the ECDSA signature values + if isECDSASignatureAlgorithm(crl.SignatureAlgorithm.Algorithm) { + r, s, err := utils.UnmarshalECDSASignature(crl.SignatureValue.RightAlign()) + if err != nil { + return err + } + sig, err := utils.MarshalECDSASignature(r, s) + if err != nil { + 
return err + } + crl.SignatureValue = asn1.BitString{Bytes: sig, BitLength: 8 * len(sig)} + } + // TODO: pre-verify the signature on the CRL and create a map // of CA certs to respective CRLs so that later upon // validation we can already look up the CRL given the @@ -463,6 +495,7 @@ func (msp *bccspmsp) setupTLSCAs(conf *m.FabricMSPConfig) error { return errors.WithMessagef(err, "CA Certificate problem with Subject Key Identifier extension, (SN: %x)", cert.SerialNumber) } + opts.CurrentTime = cert.NotBefore.Add(time.Second) if err := msp.validateTLSCAIdentity(cert, opts); err != nil { return errors.WithMessagef(err, "CA Certificate is not valid, (SN: %s)", cert.SerialNumber) } diff --git a/msp/mspimplsetup_test.go b/msp/mspimplsetup_test.go index 4553fef83be..4b2ca4014de 100644 --- a/msp/mspimplsetup_test.go +++ b/msp/mspimplsetup_test.go @@ -64,6 +64,21 @@ f0wttSk8l5LfPAvLfL3/NwTT2YcyICA0glWF4D8FDUPKRTiOerR9KByrn4ktIjzd vpx58pjg15TqKgrZF2h+TJ5jFa48O1wBvtMhP8WL6/6O+NjOEP56UnXPGie/3HLC yvhEkMILRkzGUfd091cpuNxd+aGA37mZbwc+8UBpYbZFhq3NORL8zSxUQLzm1NcV U98sznvJPRCkRiwYp5L9C5Xq72CHG/3M6cmoN0Cl0xjZicfpfnZSA/ix +-----END CERTIFICATE-----` + + caExpired = `-----BEGIN CERTIFICATE----- +MIICODCCAd+gAwIBAgIUCpmti37GM0i87c7H9JXnAnXlkeQwCgYIKoZIzj0EAwIw +WDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNh +biBGcmFuY2lzY28xDTALBgNVBAoMBE9yZzIxDTALBgNVBAMMBE9yZzIwHhcNMjIw +MjE1MjA1NzQ5WhcNMjIwMjE2MjA1NzQ5WjBYMQswCQYDVQQGEwJVUzETMBEGA1UE +CAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwE +T3JnMjENMAsGA1UEAwwET3JnMjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD9x +9DArA8shjxhqajd9OjTThUoJAHMCKXEORYaN8p/sofXJYYBJvg9y2zEOuevB7++p +PxhMmNISxt0U5IGlOlSjgYYwgYMwHQYDVR0OBBYEFLqzZtVcEWu2pw4IkpClBg9f +S4EEMB8GA1UdIwQYMBaAFLqzZtVcEWu2pw4IkpClBg9fS4EEMA8GA1UdEwEB/wQF +MAMBAf8wCwYDVR0PBAQDAgGmMA8GA1UdEQQIMAaCBE9yZzIwEgYDVR0TAQH/BAgw +BgEB/wIBADAKBggqhkjOPQQDAgNHADBEAiAccYeHn6h6Q1AA2fZc88sYgReSDSGY +MsALS92an024EQIgcFMjj0D0j2NhcjULCu0L7aGKac1q8XuCcvzfUdfbsdM= -----END 
CERTIFICATE-----` ) @@ -81,6 +96,17 @@ func TestTLSCAValidation(t *testing.T) { gt.Expect(err).NotTo(gomega.HaveOccurred()) }) + t.Run("ExpiredCert", func(t *testing.T) { + mspImpl := &bccspmsp{ + opts: &x509.VerifyOptions{Roots: x509.NewCertPool(), Intermediates: x509.NewCertPool()}, + } + + err := mspImpl.setupTLSCAs(&msp.FabricMSPConfig{ + TlsRootCerts: [][]byte{[]byte(caExpired)}, + }) + gt.Expect(err).NotTo(gomega.HaveOccurred()) + }) + t.Run("NonCACert", func(t *testing.T) { mspImpl := &bccspmsp{ opts: &x509.VerifyOptions{Roots: x509.NewCertPool(), Intermediates: x509.NewCertPool()}, diff --git a/msp/revocation_test.go b/msp/revocation_test.go index 55f28c32333..b778a6786c2 100644 --- a/msp/revocation_test.go +++ b/msp/revocation_test.go @@ -17,39 +17,110 @@ limitations under the License. package msp import ( + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "math/big" "path/filepath" "testing" + "github.com/golang/protobuf/proto" "github.com/hyperledger/fabric-protos-go/msp" "github.com/hyperledger/fabric/bccsp/sw" "github.com/stretchr/testify/require" ) func TestRevocation(t *testing.T) { - // testdata/revocation - // 1) a key and a signcert (used to populate the default signing identity); - // 2) cacert is the CA that signed the intermediate; - // 3) a revocation list that revokes signcert - thisMSP := getLocalMSP(t, "testdata/revocation") - - id, err := thisMSP.GetDefaultSigningIdentity() - require.NoError(t, err) - - // the certificate associated to this id is revoked and so validation should fail! 
- err = id.Validate() - require.Error(t, err) - - // This MSP is identical to the previous one, with only 1 difference: - // the signature on the CRL is invalid - thisMSP = getLocalMSP(t, "testdata/revocation2") - - id, err = thisMSP.GetDefaultSigningIdentity() - require.NoError(t, err) - - // the certificate associated to this id is revoked but the signature on the CRL is invalid - // so validation should succeed - err = id.Validate() - require.NoError(t, err, "Identity found revoked although the signature over the CRL is invalid") + t.Run("ValidCRLSignature", func(t *testing.T) { + // testdata/revocation + // 1) a key and a signcert (used to populate the default signing identity); + // 2) cacert is the CA that signed the intermediate; + // 3) a revocation list that revokes signcert + thisMSP := getLocalMSP(t, "testdata/revocation") + + id, err := thisMSP.GetDefaultSigningIdentity() + require.NoError(t, err) + + // the certificate associated to this id is revoked and so validation should fail! + err = id.Validate() + require.Error(t, err) + }) + + t.Run("MalformedCRLSignature", func(t *testing.T) { + // This test appends an extra int to the CRL signature. This extra data is + // ignored in go 1.14, the signature is considered valid, and the identity + // is treated as revoked. + // + // In go 1.15 the CheckCRLSignature implementation is more strict and the + // CRL signature is treated as invalid and the identity is not treated as + // revoked. + // + // This behavior change needs to be mitigated between the two versions. 
+ conf, err := GetLocalMspConfig("testdata/revocation", nil, "SampleOrg") + require.NoError(t, err) + + // Unmarshal the config + var mspConfig msp.FabricMSPConfig + err = proto.Unmarshal(conf.Config, &mspConfig) + require.NoError(t, err) + require.Len(t, mspConfig.RevocationList, 1) + crl, err := x509.ParseCRL(mspConfig.RevocationList[0]) + require.NoError(t, err) + + // Decode the CRL signature + var sig struct{ R, S *big.Int } + _, err = asn1.Unmarshal(crl.SignatureValue.Bytes, &sig) + require.NoError(t, err) + + // Extend the signature with another value + extendedSig := struct{ R, S, T *big.Int }{sig.R, sig.S, big.NewInt(100)} + longSigBytes, err := asn1.Marshal(extendedSig) + require.NoError(t, err) + + // Use the extended signature in the CRL + crl.SignatureValue.Bytes = longSigBytes + crl.SignatureValue.BitLength = 8 * len(longSigBytes) + crlBytes, err := asn1.Marshal(*crl) + require.NoError(t, err) + mspConfig.RevocationList[0] = pem.EncodeToMemory(&pem.Block{Type: "X509 CRL", Bytes: crlBytes}) + + // Remarshal the configuration + conf.Config, err = proto.Marshal(&mspConfig) + require.NoError(t, err) + + ks, err := sw.NewFileBasedKeyStore(nil, filepath.Join("testdata/revocation", "keystore"), true) + require.NoError(t, err) + cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(ks) + require.NoError(t, err) + + thisMSP, err := NewBccspMspWithKeyStore(MSPv1_0, ks, cryptoProvider) + require.NoError(t, err) + + err = thisMSP.Setup(conf) + require.NoError(t, err) + + id, err := thisMSP.GetDefaultSigningIdentity() + require.NoError(t, err) + + // the cert associated with this id is revoked and the extra info on the + // signature is ignored so validation should fail! 
+ err = id.Validate() + require.EqualError(t, err, "could not validate identity against certification chain: The certificate has been revoked") + }) + + t.Run("InvalidCRLSignature", func(t *testing.T) { + // This MSP is identical to the previous one, with only 1 difference: + // the signature on the CRL is invalid + thisMSP := getLocalMSP(t, "testdata/revocation2") + + id, err := thisMSP.GetDefaultSigningIdentity() + require.NoError(t, err) + + // the certificate associated to this id is revoked but the signature on the CRL is invalid + // so validation should succeed + err = id.Validate() + require.NoError(t, err, "Identity found revoked although the signature over the CRL is invalid") + }) } func TestIdentityPolicyPrincipalAgainstRevokedIdentity(t *testing.T) { diff --git a/orderer/common/cluster/comm.go b/orderer/common/cluster/comm.go index 67e4b0a9ce4..c2ba1987c72 100644 --- a/orderer/common/cluster/comm.go +++ b/orderer/common/cluster/comm.go @@ -461,8 +461,11 @@ type RemoteContext struct { // Stream is used to send/receive messages to/from the remote cluster member. type Stream struct { - abortChan <-chan struct{} - sendBuff chan *orderer.StepRequest + abortChan <-chan struct{} + sendBuff chan struct { + request *orderer.StepRequest + report func(error) + } commShutdown chan struct{} abortReason *atomic.Value metrics *Metrics @@ -488,6 +491,11 @@ func (stream *Stream) Canceled() bool { // Send sends the given request to the remote cluster member. func (stream *Stream) Send(request *orderer.StepRequest) error { + return stream.SendWithReport(request, func(_ error) {}) +} + +// SendWithReport sends the given request to the remote cluster member and invokes report on the send result. 
+func (stream *Stream) SendWithReport(request *orderer.StepRequest, report func(error)) error { if stream.Canceled() { return errors.New(stream.abortReason.Load().(string)) } @@ -498,12 +506,12 @@ func (stream *Stream) Send(request *orderer.StepRequest) error { allowDrop = true } - return stream.sendOrDrop(request, allowDrop) + return stream.sendOrDrop(request, allowDrop, report) } // sendOrDrop sends the given request to the remote cluster member, or drops it // if it is a consensus request and the queue is full. -func (stream *Stream) sendOrDrop(request *orderer.StepRequest, allowDrop bool) error { +func (stream *Stream) sendOrDrop(request *orderer.StepRequest, allowDrop bool, report func(error)) error { msgType := "transaction" if allowDrop { msgType = "consensus" @@ -520,7 +528,10 @@ func (stream *Stream) sendOrDrop(request *orderer.StepRequest, allowDrop bool) e select { case <-stream.abortChan: return errors.Errorf("stream %d aborted", stream.ID) - case stream.sendBuff <- request: + case stream.sendBuff <- struct { + request *orderer.StepRequest + report func(error) + }{request: request, report: report}: return nil case <-stream.commShutdown: return nil @@ -528,19 +539,17 @@ func (stream *Stream) sendOrDrop(request *orderer.StepRequest, allowDrop bool) e } // sendMessage sends the request down the stream -func (stream *Stream) sendMessage(request *orderer.StepRequest) { +func (stream *Stream) sendMessage(request *orderer.StepRequest, report func(error)) { start := time.Now() var err error defer func() { - if !stream.Logger.IsEnabledFor(zap.DebugLevel) { - return - } - var result string + message := fmt.Sprintf("Send of %s to %s(%s) took %v", + requestAsString(request), stream.NodeName, stream.Endpoint, time.Since(start)) if err != nil { - result = fmt.Sprintf("but failed due to %s", err.Error()) + stream.Logger.Warnf("%s but failed due to %s", message, err.Error()) + } else { + stream.Logger.Debug(message) } - stream.Logger.Debugf("Send of %s to %s(%s) took 
%v %s", requestAsString(request), - stream.NodeName, stream.Endpoint, time.Since(start), result) }() f := func() (*orderer.StepResponse, error) { @@ -551,16 +560,21 @@ func (stream *Stream) sendMessage(request *orderer.StepRequest) { return nil, err } - _, err = stream.operateWithTimeout(f) + _, err = stream.operateWithTimeout(f, report) } func (stream *Stream) serviceStream() { - defer stream.Cancel(errAborted) + streamStartTime := time.Now() + defer func() { + stream.Cancel(errAborted) + stream.Logger.Debugf("Stream %d to (%s) terminated with total lifetime of %s", + stream.ID, stream.Endpoint, time.Since(streamStartTime)) + }() for { select { - case msg := <-stream.sendBuff: - stream.sendMessage(msg) + case reqReport := <-stream.sendBuff: + stream.sendMessage(reqReport.request, reqReport.report) case <-stream.abortChan: return case <-stream.commShutdown: @@ -583,11 +597,11 @@ func (stream *Stream) Recv() (*orderer.StepResponse, error) { return stream.Cluster_StepClient.Recv() } - return stream.operateWithTimeout(f) + return stream.operateWithTimeout(f, func(_ error) {}) } // operateWithTimeout performs the given operation on the stream, and blocks until the timeout expires. 
-func (stream *Stream) operateWithTimeout(invoke StreamOperation) (*orderer.StepResponse, error) { +func (stream *Stream) operateWithTimeout(invoke StreamOperation, report func(error)) (*orderer.StepResponse, error) { timer := time.NewTimer(stream.Timeout) defer timer.Stop() @@ -610,11 +624,13 @@ func (stream *Stream) operateWithTimeout(invoke StreamOperation) (*orderer.StepR select { case r := <-responseChan: + report(r.err) if r.err != nil { stream.Cancel(r.err) } return r.res, r.err case <-timer.C: + report(errTimeout) stream.Logger.Warningf("Stream %d to %s(%s) was forcibly terminated because timeout (%v) expired", stream.ID, stream.NodeName, stream.Endpoint, stream.Timeout) stream.Cancel(errTimeout) @@ -660,32 +676,34 @@ func (rc *RemoteContext) NewStream(timeout time.Duration) (*Stream, error) { var canceled uint32 abortChan := make(chan struct{}) - - abort := func() { - cancel() - rc.streamsByID.Delete(streamID) - rc.Metrics.reportEgressStreamCount(rc.Channel, atomic.LoadUint32(&rc.streamsByID.size)) - rc.Logger.Debugf("Stream %d to %s(%s) is aborted", streamID, nodeName, rc.endpoint) - atomic.StoreUint32(&canceled, 1) - close(abortChan) - } + abortReason := &atomic.Value{} once := &sync.Once{} - abortReason := &atomic.Value{} + cancelWithReason := func(err error) { - abortReason.Store(err.Error()) - once.Do(abort) + once.Do(func() { + abortReason.Store(err.Error()) + cancel() + rc.streamsByID.Delete(streamID) + rc.Metrics.reportEgressStreamCount(rc.Channel, atomic.LoadUint32(&rc.streamsByID.size)) + rc.Logger.Debugf("Stream %d to %s(%s) is aborted", streamID, nodeName, rc.endpoint) + atomic.StoreUint32(&canceled, 1) + close(abortChan) + }) } logger := flogging.MustGetLogger("orderer.common.cluster.step") stepLogger := logger.WithOptions(zap.AddCallerSkip(1)) s := &Stream{ - Channel: rc.Channel, - metrics: rc.Metrics, - abortReason: abortReason, - abortChan: abortChan, - sendBuff: make(chan *orderer.StepRequest, rc.SendBuffSize), + Channel: rc.Channel, + 
metrics: rc.Metrics, + abortReason: abortReason, + abortChan: abortChan, + sendBuff: make(chan struct { + request *orderer.StepRequest + report func(error) + }, rc.SendBuffSize), commShutdown: rc.shutdownSignal, NodeName: nodeName, Logger: stepLogger, diff --git a/orderer/common/cluster/comm_test.go b/orderer/common/cluster/comm_test.go index f22896deaa9..3adf931c15b 100644 --- a/orderer/common/cluster/comm_test.go +++ b/orderer/common/cluster/comm_test.go @@ -530,6 +530,55 @@ func TestUnavailableHosts(t *testing.T) { require.Contains(t, err.Error(), "connection") } +func TestStreamAbortReportCorrectError(t *testing.T) { + // Scenario: node 1 acquires a stream to node 2 and then the stream + // encounters an error and as a result, the stream is aborted. + // We ensure the error reported is the first error, even after + // multiple attempts of using it. + + node1 := newTestNode(t) + defer node1.stop() + + node2 := newTestNode(t) + defer node2.stop() + + node1.c.Configure(testChannel, []cluster.RemoteNode{node2.nodeInfo}) + node2.c.Configure(testChannel, []cluster.RemoteNode{node1.nodeInfo}) + + node2.handler.On("OnSubmit", testChannel, node1.nodeInfo.ID, mock.Anything).Return(errors.Errorf("whoops")).Once() + + rm1, err := node1.c.Remote(testChannel, node2.nodeInfo.ID) + require.NoError(t, err) + var streamTerminated sync.WaitGroup + streamTerminated.Add(1) + + stream := assertEventualEstablishStream(t, rm1) + + l, err := zap.NewDevelopment() + require.NoError(t, err) + stream.Logger = flogging.NewFabricLogger(l, zap.Hooks(func(entry zapcore.Entry) error { + if strings.Contains(entry.Message, "Stream 1 to") && strings.Contains(entry.Message, "terminated") { + streamTerminated.Done() + } + return nil + })) + + // Probe the stream for the first time + err = stream.Send(wrapSubmitReq(testReq)) + require.NoError(t, err) + + // We should receive back the crafted error + _, err = stream.Recv() + require.Contains(t, err.Error(), "whoops") + + // Wait for the stream to be 
terminated from within the communication infrastructure + streamTerminated.Wait() + + // We should still receive the original crafted error despite the stream being terminated + err = stream.Send(wrapSubmitReq(testReq)) + require.Contains(t, err.Error(), "whoops") +} + func TestStreamAbort(t *testing.T) { // Scenarios: node 1 is connected to node 2 in 2 channels, // and the consumer of the communication calls receive. diff --git a/orderer/common/cluster/rpc.go b/orderer/common/cluster/rpc.go index 6504c8fedae..8aff4791d3b 100644 --- a/orderer/common/cluster/rpc.go +++ b/orderer/common/cluster/rpc.go @@ -8,6 +8,7 @@ package cluster import ( "context" + "io" "sync" "time" @@ -64,6 +65,14 @@ const ( SubmitOperation ) +func (ot OperationType) String() string { + if ot == SubmitOperation { + return "transaction" + } + + return "consensus" +} + // SendConsensus passes the given ConsensusRequest message to the raft.Node instance. func (s *RPC) SendConsensus(destination uint64, msg *orderer.ConsensusRequest) error { if s.Logger.IsEnabledFor(zapcore.DebugLevel) { @@ -86,14 +95,14 @@ func (s *RPC) SendConsensus(destination uint64, msg *orderer.ConsensusRequest) e err = stream.Send(req) if err != nil { - s.unMapStream(destination, ConsensusOperation) + s.unMapStream(destination, ConsensusOperation, stream.ID) } return err } // SendSubmit sends a SubmitRequest to the given destination node. 
-func (s *RPC) SendSubmit(destination uint64, request *orderer.SubmitRequest) error { +func (s *RPC) SendSubmit(destination uint64, request *orderer.SubmitRequest, report func(error)) error { if s.Logger.IsEnabledFor(zapcore.DebugLevel) { defer s.submitSent(time.Now(), destination, request) } @@ -109,12 +118,20 @@ func (s *RPC) SendSubmit(destination uint64, request *orderer.SubmitRequest) err }, } + unmapOnFailure := func(err error) { + if err != nil && err.Error() == io.EOF.Error() { + s.Logger.Infof("Un-mapping transaction stream to %d because encountered a stale stream", destination) + s.unMapStream(destination, SubmitOperation, stream.ID) + } + report(err) + } + s.submitLock.Lock() defer s.submitLock.Unlock() - err = stream.Send(req) + err = stream.SendWithReport(req, unmapOnFailure) if err != nil { - s.unMapStream(destination, SubmitOperation) + s.unMapStream(destination, SubmitOperation, stream.ID) } return err } @@ -128,7 +145,7 @@ func (s *RPC) consensusSent(start time.Time, to uint64, msg *orderer.ConsensusRe } // getOrCreateStream obtains a Submit stream for the given destination node -func (s *RPC) getOrCreateStream(destination uint64, operationType OperationType) (orderer.Cluster_StepClient, error) { +func (s *RPC) getOrCreateStream(destination uint64, operationType OperationType) (*Stream, error) { stream := s.getStream(destination, operationType) if stream != nil { return stream, nil @@ -158,9 +175,21 @@ func (s *RPC) mapStream(destination uint64, stream *Stream, operationType Operat s.cleanCanceledStreams(operationType) } -func (s *RPC) unMapStream(destination uint64, operationType OperationType) { +func (s *RPC) unMapStream(destination uint64, operationType OperationType, streamIDToUnmap uint64) { s.lock.Lock() defer s.lock.Unlock() + + stream, exists := s.StreamsByType[operationType][destination] + if !exists { + s.Logger.Debugf("No %s stream to %d found, nothing to unmap", operationType, destination) + return + } + + if stream.ID != 
streamIDToUnmap { + s.Logger.Debugf("Stream for %s to %d has an ID of %d, not %d", operationType, destination, stream.ID, streamIDToUnmap) + return + } + delete(s.StreamsByType[operationType], destination) } diff --git a/orderer/common/cluster/rpc_test.go b/orderer/common/cluster/rpc_test.go index edf21f34832..30daa77ed79 100644 --- a/orderer/common/cluster/rpc_test.go +++ b/orderer/common/cluster/rpc_test.go @@ -26,6 +26,80 @@ import ( "google.golang.org/grpc" ) +func noopReport(_ error) { +} + +func TestSendSubmitWithReport(t *testing.T) { + t.Parallel() + node1 := newTestNode(t) + node2 := newTestNode(t) + + var receptionWaitGroup sync.WaitGroup + receptionWaitGroup.Add(1) + node2.handler.On("OnSubmit", testChannel, mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + receptionWaitGroup.Done() + }) + + defer node1.stop() + defer node2.stop() + + config := []cluster.RemoteNode{node1.nodeInfo, node2.nodeInfo} + node1.c.Configure(testChannel, config) + node2.c.Configure(testChannel, config) + + node1RPC := &cluster.RPC{ + Logger: flogging.MustGetLogger("test"), + Timeout: time.Hour, + StreamsByType: cluster.NewStreamsByType(), + Channel: testChannel, + Comm: node1.c, + } + + // Wait for connections to be established + time.Sleep(time.Second * 5) + + err := node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("1")}}, noopReport) + require.NoError(t, err) + receptionWaitGroup.Wait() // Wait for message to be received + + // Restart the node + node2.stop() + node2.resurrect() + + /* + * allow the node2 to restart completely + * if restart not complete, the existing stream able to successfully send + * the next SubmitRequest which makes the testcase fails. 
Hence this delay + * required + */ + time.Sleep(time.Second * 5) + + var wg2 sync.WaitGroup + wg2.Add(1) + + reportSubmitFailed := func(err error) { + defer wg2.Done() + require.EqualError(t, err, io.EOF.Error()) + } + + err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("2")}}, reportSubmitFailed) + require.NoError(t, err) + + wg2.Wait() + + // Ensure stale stream is cleaned up and removed from the mapping + require.Len(t, node1RPC.StreamsByType[cluster.SubmitOperation], 0) + + // Wait for connection to be re-established + time.Sleep(time.Second * 5) + + // Send again, this time it should be received + receptionWaitGroup.Add(1) + err = node1RPC.SendSubmit(node2.nodeInfo.ID, &orderer.SubmitRequest{Channel: testChannel, Payload: &common.Envelope{Payload: []byte("3")}}, noopReport) + require.NoError(t, err) + receptionWaitGroup.Wait() +} + func TestRPCChangeDestination(t *testing.T) { // We send a Submit() to 2 different nodes - 1 and 2. 
// The first invocation of Submit() establishes a stream with node 1 @@ -82,8 +156,8 @@ func TestRPCChangeDestination(t *testing.T) { streamToNode1.On("Recv").Return(nil, io.EOF) streamToNode2.On("Recv").Return(nil, io.EOF) - rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}) - rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}) + rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) + rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) sent.Wait() streamToNode1.AssertNumberOfCalls(t, "Send", 1) @@ -111,7 +185,7 @@ func TestSend(t *testing.T) { } submit := func(rpc *cluster.RPC) error { - err := rpc.SendSubmit(1, submitRequest) + err := rpc.SendSubmit(1, submitRequest, noopReport) return err } @@ -291,7 +365,7 @@ func TestRPCGarbageCollection(t *testing.T) { defineMocks(1) - rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}) + rpc.SendSubmit(1, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) // Wait for the message to arrive sent.Wait() // Ensure the stream is initialized in the mapping @@ -311,7 +385,7 @@ func TestRPCGarbageCollection(t *testing.T) { defineMocks(2) // Send a message to a different node. - rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}) + rpc.SendSubmit(2, &orderer.SubmitRequest{Channel: "mychannel"}, noopReport) // The mapping should be now cleaned from the previous stream. 
require.Len(t, mapping[cluster.SubmitOperation], 1) require.Equal(t, uint64(2), mapping[cluster.SubmitOperation][2].ID) diff --git a/orderer/common/localconfig/config.go b/orderer/common/localconfig/config.go index e28c462d406..09ea4ebedf6 100644 --- a/orderer/common/localconfig/config.go +++ b/orderer/common/localconfig/config.go @@ -15,6 +15,7 @@ import ( "github.com/hyperledger/fabric/common/flogging" "github.com/hyperledger/fabric/common/viperutil" coreconfig "github.com/hyperledger/fabric/core/config" + "github.com/hyperledger/fabric/internal/pkg/comm" ) var logger = flogging.MustGetLogger("localconfig") @@ -49,6 +50,8 @@ type General struct { LocalMSPID string BCCSP *bccsp.FactoryOpts Authentication Authentication + MaxRecvMsgSize int32 + MaxSendMsgSize int32 } type Cluster struct { @@ -240,6 +243,8 @@ var Defaults = TopLevel{ Authentication: Authentication{ TimeWindow: time.Duration(15 * time.Minute), }, + MaxRecvMsgSize: comm.DefaultMaxRecvMsgSize, + MaxSendMsgSize: comm.DefaultMaxSendMsgSize, }, FileLedger: FileLedger{ Location: "/var/hyperledger/production/orderer", @@ -489,6 +494,12 @@ func (c *TopLevel) completeInitialization(configDir string) { case c.Admin.TLS.Enabled && !c.Admin.TLS.ClientAuthRequired: logger.Panic("Admin.TLS.ClientAuthRequired must be set to true if Admin.TLS.Enabled is set to true") + case c.General.MaxRecvMsgSize == 0: + logger.Infof("General.MaxRecvMsgSize is unset, setting to %v", Defaults.General.MaxRecvMsgSize) + c.General.MaxRecvMsgSize = Defaults.General.MaxRecvMsgSize + case c.General.MaxSendMsgSize == 0: + logger.Infof("General.MaxSendMsgSize is unset, setting to %v", Defaults.General.MaxSendMsgSize) + c.General.MaxSendMsgSize = Defaults.General.MaxSendMsgSize default: return } diff --git a/orderer/common/msgprocessor/expiration.go b/orderer/common/msgprocessor/expiration.go index 796828d7c24..09d6e603ab4 100644 --- a/orderer/common/msgprocessor/expiration.go +++ b/orderer/common/msgprocessor/expiration.go @@ -51,5 +51,5 
@@ func (exp *expirationRejectRule) Apply(message *common.Envelope) error { if expirationTime.IsZero() || time.Now().Before(expirationTime) { return nil } - return errors.New("identity expired") + return errors.New("broadcast client identity expired") } diff --git a/orderer/common/msgprocessor/expiration_test.go b/orderer/common/msgprocessor/expiration_test.go index 1b5086ef021..c4adad44943 100644 --- a/orderer/common/msgprocessor/expiration_test.go +++ b/orderer/common/msgprocessor/expiration_test.go @@ -109,7 +109,7 @@ func TestExpirationRejectRule(t *testing.T) { mockCapabilities.ExpirationCheckReturns(true) err := NewExpirationRejectRule(mockResources).Apply(env) require.Error(t, err) - require.Equal(t, err.Error(), "identity expired") + require.Equal(t, err.Error(), "broadcast client identity expired") mockCapabilities.ExpirationCheckReturns(false) err = NewExpirationRejectRule(mockResources).Apply(env) diff --git a/orderer/common/multichannel/mocks/read_writer.go b/orderer/common/multichannel/mocks/read_writer.go index a82ab3cd5d2..be2768bc0c5 100644 --- a/orderer/common/multichannel/mocks/read_writer.go +++ b/orderer/common/multichannel/mocks/read_writer.go @@ -44,6 +44,19 @@ type ReadWriter struct { result1 blockledger.Iterator result2 uint64 } + RetrieveBlockByNumberStub func(uint64) (*common.Block, error) + retrieveBlockByNumberMutex sync.RWMutex + retrieveBlockByNumberArgsForCall []struct { + arg1 uint64 + } + retrieveBlockByNumberReturns struct { + result1 *common.Block + result2 error + } + retrieveBlockByNumberReturnsOnCall map[int]struct { + result1 *common.Block + result2 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -54,15 +67,16 @@ func (fake *ReadWriter) Append(arg1 *common.Block) error { fake.appendArgsForCall = append(fake.appendArgsForCall, struct { arg1 *common.Block }{arg1}) + stub := fake.AppendStub + fakeReturns := fake.appendReturns fake.recordInvocation("Append", []interface{}{arg1}) 
fake.appendMutex.Unlock() - if fake.AppendStub != nil { - return fake.AppendStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1 } - fakeReturns := fake.appendReturns return fakeReturns.result1 } @@ -113,15 +127,16 @@ func (fake *ReadWriter) Height() uint64 { ret, specificReturn := fake.heightReturnsOnCall[len(fake.heightArgsForCall)] fake.heightArgsForCall = append(fake.heightArgsForCall, struct { }{}) + stub := fake.HeightStub + fakeReturns := fake.heightReturns fake.recordInvocation("Height", []interface{}{}) fake.heightMutex.Unlock() - if fake.HeightStub != nil { - return fake.HeightStub() + if stub != nil { + return stub() } if specificReturn { return ret.result1 } - fakeReturns := fake.heightReturns return fakeReturns.result1 } @@ -166,15 +181,16 @@ func (fake *ReadWriter) Iterator(arg1 *orderer.SeekPosition) (blockledger.Iterat fake.iteratorArgsForCall = append(fake.iteratorArgsForCall, struct { arg1 *orderer.SeekPosition }{arg1}) + stub := fake.IteratorStub + fakeReturns := fake.iteratorReturns fake.recordInvocation("Iterator", []interface{}{arg1}) fake.iteratorMutex.Unlock() - if fake.IteratorStub != nil { - return fake.IteratorStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.iteratorReturns return fakeReturns.result1, fakeReturns.result2 } @@ -223,6 +239,70 @@ func (fake *ReadWriter) IteratorReturnsOnCall(i int, result1 blockledger.Iterato }{result1, result2} } +func (fake *ReadWriter) RetrieveBlockByNumber(arg1 uint64) (*common.Block, error) { + fake.retrieveBlockByNumberMutex.Lock() + ret, specificReturn := fake.retrieveBlockByNumberReturnsOnCall[len(fake.retrieveBlockByNumberArgsForCall)] + fake.retrieveBlockByNumberArgsForCall = append(fake.retrieveBlockByNumberArgsForCall, struct { + arg1 uint64 + }{arg1}) + stub := fake.RetrieveBlockByNumberStub + fakeReturns := fake.retrieveBlockByNumberReturns + 
fake.recordInvocation("RetrieveBlockByNumber", []interface{}{arg1}) + fake.retrieveBlockByNumberMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *ReadWriter) RetrieveBlockByNumberCallCount() int { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + return len(fake.retrieveBlockByNumberArgsForCall) +} + +func (fake *ReadWriter) RetrieveBlockByNumberCalls(stub func(uint64) (*common.Block, error)) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = stub +} + +func (fake *ReadWriter) RetrieveBlockByNumberArgsForCall(i int) uint64 { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + argsForCall := fake.retrieveBlockByNumberArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReadWriter) RetrieveBlockByNumberReturns(result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + fake.retrieveBlockByNumberReturns = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + +func (fake *ReadWriter) RetrieveBlockByNumberReturnsOnCall(i int, result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + if fake.retrieveBlockByNumberReturnsOnCall == nil { + fake.retrieveBlockByNumberReturnsOnCall = make(map[int]struct { + result1 *common.Block + result2 error + }) + } + fake.retrieveBlockByNumberReturnsOnCall[i] = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + func (fake *ReadWriter) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -232,6 +312,8 @@ func (fake 
*ReadWriter) Invocations() map[string][][]interface{} { defer fake.heightMutex.RUnlock() fake.iteratorMutex.RLock() defer fake.iteratorMutex.RUnlock() + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/orderer/common/multichannel/registrar.go b/orderer/common/multichannel/registrar.go index 568f72cea24..be78763d69d 100644 --- a/orderer/common/multichannel/registrar.go +++ b/orderer/common/multichannel/registrar.go @@ -32,6 +32,7 @@ import ( "github.com/hyperledger/fabric/orderer/common/msgprocessor" "github.com/hyperledger/fabric/orderer/common/types" "github.com/hyperledger/fabric/orderer/consensus" + "github.com/hyperledger/fabric/orderer/consensus/etcdraft" "github.com/hyperledger/fabric/protoutil" "github.com/pkg/errors" ) @@ -73,16 +74,18 @@ type Registrar struct { // ConfigBlockOrPanic retrieves the last configuration block from the given ledger. // Panics on failure. 
func ConfigBlockOrPanic(reader blockledger.Reader) *cb.Block { - lastBlock := blockledger.GetBlock(reader, reader.Height()-1) + lastBlock, err := blockledger.GetBlockByNumber(reader, reader.Height()-1) + if err != nil { + logger.Panicw("Failed to retrieve block", "blockNum", reader.Height()-1, "error", err) + } index, err := protoutil.GetLastConfigIndexFromBlock(lastBlock) if err != nil { - logger.Panicf("Chain did not have appropriately encoded last config in its latest block: %s", err) + logger.Panicw("Chain did not have appropriately encoded last config in its latest block", "error", err) } - configBlock := blockledger.GetBlock(reader, index) - if configBlock == nil { - logger.Panicf("Config block does not exist") + configBlock, err := blockledger.GetBlockByNumber(reader, index) + if err != nil { + logger.Panicw("Failed to retrieve config block", "blockNum", index, "error", err) } - return configBlock } @@ -531,7 +534,10 @@ func (r *Registrar) CreateChain(chainName string) { if chain != nil { logger.Infof("A chain of type %T for channel %s already exists. 
"+ "Halting it.", chain.Chain, chainName) + r.lock.Lock() chain.Halt() + delete(r.chains, chainName) + r.lock.Unlock() } r.newChain(configTx(lf)) } @@ -540,6 +546,20 @@ func (r *Registrar) newChain(configtx *cb.Envelope) { r.lock.Lock() defer r.lock.Unlock() + channelName, err := channelNameFromConfigTx(configtx) + if err != nil { + logger.Warnf("Failed extracting channel name: %v", err) + return + } + + // fixes https://github.com/hyperledger/fabric/issues/2931 + if existingChain, exists := r.chains[channelName]; exists { + if _, isRaftChain := existingChain.Chain.(*etcdraft.Chain); isRaftChain { + logger.Infof("Channel %s already created, skipping its creation", channelName) + return + } + } + cs := r.createNewChain(configtx) cs.start() logger.Infof("Created and started new channel %s", cs.ChannelID()) @@ -1110,3 +1130,21 @@ func (r *Registrar) ReportConsensusRelationAndStatusMetrics(channelID string, re r.channelParticipationMetrics.reportConsensusRelation(channelID, relation) r.channelParticipationMetrics.reportStatus(channelID, status) } + +func channelNameFromConfigTx(configtx *cb.Envelope) (string, error) { + payload, err := protoutil.UnmarshalPayload(configtx.Payload) + if err != nil { + return "", errors.WithMessage(err, "error umarshaling envelope to payload") + } + + if payload.Header == nil { + return "", errors.New("missing channel header") + } + + chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader) + if err != nil { + return "", errors.WithMessage(err, "error unmarshalling channel header") + } + + return chdr.ChannelId, nil +} diff --git a/orderer/common/multichannel/registrar_test.go b/orderer/common/multichannel/registrar_test.go index 352fd864348..5124ef507e1 100644 --- a/orderer/common/multichannel/registrar_test.go +++ b/orderer/common/multichannel/registrar_test.go @@ -38,8 +38,10 @@ import ( "github.com/hyperledger/fabric/orderer/common/multichannel/mocks" "github.com/hyperledger/fabric/orderer/common/types" 
"github.com/hyperledger/fabric/orderer/consensus" + "github.com/hyperledger/fabric/orderer/consensus/etcdraft" "github.com/hyperledger/fabric/protoutil" "github.com/pkg/errors" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -657,6 +659,35 @@ func TestCreateChain(t *testing.T) { close(chain2.Chain.(*mockChainCluster).queue) }) + t.Run("chain of type etcdraft.Chain is already created", func(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "registrar_test-") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + lf, _ := newLedgerAndFactory(tmpdir, "testchannelid", genesisBlockSys) + + consenter := &mocks.Consenter{} + consenter.HandleChainCalls(handleChain) + consenters := map[string]consensus.Consenter{confSys.Orderer.OrdererType: consenter} + + manager := NewRegistrar(localconfig.TopLevel{}, lf, mockCrypto(), &disabled.Provider{}, cryptoProvider, nil) + manager.Initialize(consenters) + + testChainSupport := &ChainSupport{Chain: &etcdraft.Chain{}} + manager.chains["test"] = testChainSupport + + orglessChannelConf := genesisconfig.Load(genesisconfig.SampleSingleMSPChannelProfile, configtest.GetDevConfigDir()) + envConfigUpdate, err := encoder.MakeChannelCreationTransaction("test", mockCrypto(), orglessChannelConf) + require.NoError(t, err, "Constructing chain creation tx") + + manager.newChain(envConfigUpdate) + + testChainSupport2 := manager.GetChain("test") + require.NotNil(t, testChainSupport2) + + assert.Same(t, testChainSupport, testChainSupport2) + }) + // This test brings up the entire system, with the mock consenter, including the broadcasters etc. 
and creates a new chain t.Run("New chain", func(t *testing.T) { expectedLastConfigSeq := uint64(1) @@ -1785,3 +1816,66 @@ func createLedgerAndChain(t *testing.T, r *Registrar, lf blockledger.Factory, b r.CreateChain(channel) require.NotNil(t, r.GetChain(channel)) } + +func TestRegistrar_ConfigBlockOrPanic(t *testing.T) { + t.Run("Panics when ledger is empty", func(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "file-ledger") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + _, l := newLedgerAndFactory(tmpdir, "testchannelid", nil) + + require.PanicsWithValue(t, "Failed to retrieve block", func() { + ConfigBlockOrPanic(l) + }) + }) + + t.Run("Panics when config block not complete", func(t *testing.T) { + block := protoutil.NewBlock(0, nil) + block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = []byte("bad metadata") + + tmpdir, err := ioutil.TempDir("", "file-ledger") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + _, l := newLedgerAndFactory(tmpdir, "testchannelid", block) + + require.PanicsWithValue(t, "Chain did not have appropriately encoded last config in its latest block", func() { + ConfigBlockOrPanic(l) + }) + }) + + t.Run("Panics when block referenes invalid config block", func(t *testing.T) { + block := protoutil.NewBlock(0, nil) + block.Metadata.Metadata[cb.BlockMetadataIndex_SIGNATURES] = protoutil.MarshalOrPanic(&cb.Metadata{ + Value: protoutil.MarshalOrPanic(&cb.OrdererBlockMetadata{ + LastConfig: &cb.LastConfig{Index: 2}, + }), + }) + + tmpdir, err := ioutil.TempDir("", "file-ledger") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + _, l := newLedgerAndFactory(tmpdir, "testchannelid", block) + + require.PanicsWithValue(t, "Failed to retrieve config block", func() { + ConfigBlockOrPanic(l) + }) + }) + + t.Run("Returns valid config block", func(t *testing.T) { + confSys := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir()) + genesisBlockSys := 
encoder.New(confSys).GenesisBlock() + + tmpdir, err := ioutil.TempDir("", "file-ledger") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + + _, l := newLedgerAndFactory(tmpdir, "testchannelid", genesisBlockSys) + + cBlock := ConfigBlockOrPanic(l) + assert.Equal(t, genesisBlockSys.Header, cBlock.Header) + assert.Equal(t, genesisBlockSys.Data, cBlock.Data) + }) +} diff --git a/orderer/common/onboarding/mocks/read_writer.go b/orderer/common/onboarding/mocks/read_writer.go index a82ab3cd5d2..be2768bc0c5 100644 --- a/orderer/common/onboarding/mocks/read_writer.go +++ b/orderer/common/onboarding/mocks/read_writer.go @@ -44,6 +44,19 @@ type ReadWriter struct { result1 blockledger.Iterator result2 uint64 } + RetrieveBlockByNumberStub func(uint64) (*common.Block, error) + retrieveBlockByNumberMutex sync.RWMutex + retrieveBlockByNumberArgsForCall []struct { + arg1 uint64 + } + retrieveBlockByNumberReturns struct { + result1 *common.Block + result2 error + } + retrieveBlockByNumberReturnsOnCall map[int]struct { + result1 *common.Block + result2 error + } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -54,15 +67,16 @@ func (fake *ReadWriter) Append(arg1 *common.Block) error { fake.appendArgsForCall = append(fake.appendArgsForCall, struct { arg1 *common.Block }{arg1}) + stub := fake.AppendStub + fakeReturns := fake.appendReturns fake.recordInvocation("Append", []interface{}{arg1}) fake.appendMutex.Unlock() - if fake.AppendStub != nil { - return fake.AppendStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1 } - fakeReturns := fake.appendReturns return fakeReturns.result1 } @@ -113,15 +127,16 @@ func (fake *ReadWriter) Height() uint64 { ret, specificReturn := fake.heightReturnsOnCall[len(fake.heightArgsForCall)] fake.heightArgsForCall = append(fake.heightArgsForCall, struct { }{}) + stub := fake.HeightStub + fakeReturns := fake.heightReturns fake.recordInvocation("Height", []interface{}{}) 
fake.heightMutex.Unlock() - if fake.HeightStub != nil { - return fake.HeightStub() + if stub != nil { + return stub() } if specificReturn { return ret.result1 } - fakeReturns := fake.heightReturns return fakeReturns.result1 } @@ -166,15 +181,16 @@ func (fake *ReadWriter) Iterator(arg1 *orderer.SeekPosition) (blockledger.Iterat fake.iteratorArgsForCall = append(fake.iteratorArgsForCall, struct { arg1 *orderer.SeekPosition }{arg1}) + stub := fake.IteratorStub + fakeReturns := fake.iteratorReturns fake.recordInvocation("Iterator", []interface{}{arg1}) fake.iteratorMutex.Unlock() - if fake.IteratorStub != nil { - return fake.IteratorStub(arg1) + if stub != nil { + return stub(arg1) } if specificReturn { return ret.result1, ret.result2 } - fakeReturns := fake.iteratorReturns return fakeReturns.result1, fakeReturns.result2 } @@ -223,6 +239,70 @@ func (fake *ReadWriter) IteratorReturnsOnCall(i int, result1 blockledger.Iterato }{result1, result2} } +func (fake *ReadWriter) RetrieveBlockByNumber(arg1 uint64) (*common.Block, error) { + fake.retrieveBlockByNumberMutex.Lock() + ret, specificReturn := fake.retrieveBlockByNumberReturnsOnCall[len(fake.retrieveBlockByNumberArgsForCall)] + fake.retrieveBlockByNumberArgsForCall = append(fake.retrieveBlockByNumberArgsForCall, struct { + arg1 uint64 + }{arg1}) + stub := fake.RetrieveBlockByNumberStub + fakeReturns := fake.retrieveBlockByNumberReturns + fake.recordInvocation("RetrieveBlockByNumber", []interface{}{arg1}) + fake.retrieveBlockByNumberMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1, ret.result2 + } + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *ReadWriter) RetrieveBlockByNumberCallCount() int { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + return len(fake.retrieveBlockByNumberArgsForCall) +} + +func (fake *ReadWriter) RetrieveBlockByNumberCalls(stub func(uint64) (*common.Block, error)) { + 
fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = stub +} + +func (fake *ReadWriter) RetrieveBlockByNumberArgsForCall(i int) uint64 { + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() + argsForCall := fake.retrieveBlockByNumberArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ReadWriter) RetrieveBlockByNumberReturns(result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + fake.retrieveBlockByNumberReturns = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + +func (fake *ReadWriter) RetrieveBlockByNumberReturnsOnCall(i int, result1 *common.Block, result2 error) { + fake.retrieveBlockByNumberMutex.Lock() + defer fake.retrieveBlockByNumberMutex.Unlock() + fake.RetrieveBlockByNumberStub = nil + if fake.retrieveBlockByNumberReturnsOnCall == nil { + fake.retrieveBlockByNumberReturnsOnCall = make(map[int]struct { + result1 *common.Block + result2 error + }) + } + fake.retrieveBlockByNumberReturnsOnCall[i] = struct { + result1 *common.Block + result2 error + }{result1, result2} +} + func (fake *ReadWriter) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() @@ -232,6 +312,8 @@ func (fake *ReadWriter) Invocations() map[string][][]interface{} { defer fake.heightMutex.RUnlock() fake.iteratorMutex.RLock() defer fake.iteratorMutex.RUnlock() + fake.retrieveBlockByNumberMutex.RLock() + defer fake.retrieveBlockByNumberMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/orderer/common/server/main.go b/orderer/common/server/main.go index 89a6b1dc536..f53a8a0a09f 100644 --- a/orderer/common/server/main.go +++ b/orderer/common/server/main.go @@ -533,10 +533,12 @@ func 
configureClusterListener(conf *localconfig.TopLevel, generalConf comm.Serve func initializeClusterClientConfig(conf *localconfig.TopLevel) comm.ClientConfig { cc := comm.ClientConfig{ - AsyncConnect: true, - KaOpts: comm.DefaultKeepaliveOptions, - Timeout: conf.General.Cluster.DialTimeout, - SecOpts: comm.SecureOptions{}, + AsyncConnect: true, + KaOpts: comm.DefaultKeepaliveOptions, + Timeout: conf.General.Cluster.DialTimeout, + SecOpts: comm.SecureOptions{}, + MaxRecvMsgSize: int(conf.General.MaxRecvMsgSize), + MaxSendMsgSize: int(conf.General.MaxSendMsgSize), } reuseGrpcListener := reuseListener(conf) @@ -667,6 +669,8 @@ func initializeServerConfig(conf *localconfig.TopLevel, metricsProvider metrics. grpclogging.WithLeveler(grpclogging.LevelerFunc(grpcLeveler)), ), }, + MaxRecvMsgSize: int(conf.General.MaxRecvMsgSize), + MaxSendMsgSize: int(conf.General.MaxSendMsgSize), } } diff --git a/orderer/consensus/bdls/chain.go b/orderer/consensus/bdls/chain.go new file mode 100644 index 00000000000..cc6a7776b5a --- /dev/null +++ b/orderer/consensus/bdls/chain.go @@ -0,0 +1,46 @@ +/* + Copyright {} All Rights Reserved. + + SPDX-License-Identifier: Apache-2.0 +*/ + +package bdls + +import ( + bdls "github.com/BDLS-bft/bdls" + "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/orderer" + "github.com/hyperledger/fabric/common/flogging" + "github.com/hyperledger/fabric/orderer/consensus" +) + +// Chain implements consensus.Chain interface. +type Chain struct { + consensus *bdls.Consensus + Logger *flogging.FabricLogger + support consensus.ConsenterSupport + Config *bdls.Config + Metrics *Metrics + submitC chan *submit + channelID string +} + +type submit struct { + req *orderer.SubmitRequest + leader chan uint64 +} + +// Order submits normal type transactions for ordering. 
+func (c *Chain) Order(env *common.Envelope, configSeq uint64) error { + + //TODO + c.Metrics.NormalProposalsReceived.Add(1) + return nil // c.Submit(&orderer.SubmitRequest{LastValidationSeq: configSeq, Payload: env, Channel: c.channelID}, 0) +} + +// Configure submits config type transactions for ordering. +func (c *Chain) Configure(env *common.Envelope, configSeq uint64) error { + // create a consensus config to validate this message at the correct height + + return nil +} diff --git a/orderer/consensus/bdls/metrics.go b/orderer/consensus/bdls/metrics.go new file mode 100644 index 00000000000..2db70d2c963 --- /dev/null +++ b/orderer/consensus/bdls/metrics.go @@ -0,0 +1,120 @@ +/* + Copyright {} All Rights Reserved. + + SPDX-License-Identifier: Apache-2.0 +*/ + +package bdls + +import "github.com/hyperledger/fabric/common/metrics" + +var ( + clusterSizeOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "cluster_size", + Help: "Number of nodes in this channel.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + isLeaderOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "is_leader", + Help: "The leadership status of the current node: 1 if it is the leader else 0.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + ActiveNodesOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "active_nodes", + Help: "Number of active nodes in this channel.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + committedBlockNumberOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "committed_block_number", + Help: "The block number of the latest block committed.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + snapshotBlockNumberOpts = metrics.GaugeOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "snapshot_block_number", + Help: 
"The block number of the latest snapshot.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + leaderChangesOpts = metrics.CounterOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "leader_changes", + Help: "The number of leader changes since process start.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + proposalFailuresOpts = metrics.CounterOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "proposal_failures", + Help: "The number of proposal failures.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + dataPersistDurationOpts = metrics.HistogramOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "data_persist_duration", + Help: "The time taken for bdls data to be persisted in storage (in seconds).", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + normalProposalsReceivedOpts = metrics.CounterOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "normal_proposals_received", + Help: "The total number of proposals received for normal type transactions.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } + configProposalsReceivedOpts = metrics.CounterOpts{ + Namespace: "consensus", + Subsystem: "bdls", + Name: "config_proposals_received", + Help: "The total number of proposals received for config type transactions.", + LabelNames: []string{"channel"}, + StatsdFormat: "%{#fqname}.%{channel}", + } +) + +type Metrics struct { + ClusterSize metrics.Gauge + IsLeader metrics.Gauge + ActiveNodes metrics.Gauge + CommittedBlockNumber metrics.Gauge + SnapshotBlockNumber metrics.Gauge + LeaderChanges metrics.Counter + ProposalFailures metrics.Counter + DataPersistDuration metrics.Histogram + NormalProposalsReceived metrics.Counter + ConfigProposalsReceived metrics.Counter +} + +func NewMetrics(p metrics.Provider) *Metrics { + return &Metrics{ + ClusterSize: 
p.NewGauge(clusterSizeOpts), + IsLeader: p.NewGauge(isLeaderOpts), + ActiveNodes: p.NewGauge(ActiveNodesOpts), + CommittedBlockNumber: p.NewGauge(committedBlockNumberOpts), + SnapshotBlockNumber: p.NewGauge(snapshotBlockNumberOpts), + LeaderChanges: p.NewCounter(leaderChangesOpts), + ProposalFailures: p.NewCounter(proposalFailuresOpts), + DataPersistDuration: p.NewHistogram(dataPersistDurationOpts), + NormalProposalsReceived: p.NewCounter(normalProposalsReceivedOpts), + ConfigProposalsReceived: p.NewCounter(configProposalsReceivedOpts), + } +} diff --git a/orderer/consensus/etcdraft/chain.go b/orderer/consensus/etcdraft/chain.go index 788d71dda6d..26c7a047264 100644 --- a/orderer/consensus/etcdraft/chain.go +++ b/orderer/consensus/etcdraft/chain.go @@ -74,7 +74,7 @@ type Configurator interface { // RPC is used to mock the transport layer in tests. type RPC interface { SendConsensus(dest uint64, msg *orderer.ConsensusRequest) error - SendSubmit(dest uint64, request *orderer.SubmitRequest) error + SendSubmit(dest uint64, request *orderer.SubmitRequest, report func(err error)) error } //go:generate counterfeiter -o mocks/mock_blockpuller.go . BlockPuller @@ -92,7 +92,8 @@ type CreateBlockPuller func() (BlockPuller, error) // Options contains all the configurations relevant to the chain. 
type Options struct { - RaftID uint64 + RPCTimeout time.Duration + RaftID uint64 Clock clock.Clock @@ -542,8 +543,7 @@ func (c *Chain) Submit(req *orderer.SubmitRequest, sender uint64) error { } if lead != c.raftID { - if err := c.rpc.SendSubmit(lead, req); err != nil { - c.Metrics.ProposalFailures.Add(1) + if err := c.forwardToLeader(lead, req); err != nil { return err } } @@ -556,6 +556,38 @@ func (c *Chain) Submit(req *orderer.SubmitRequest, sender uint64) error { return nil } +func (c *Chain) forwardToLeader(lead uint64, req *orderer.SubmitRequest) error { + c.logger.Infof("Forwarding transaction to the leader %d", lead) + timer := time.NewTimer(c.opts.RPCTimeout) + defer timer.Stop() + + sentChan := make(chan struct{}) + atomicErr := &atomic.Value{} + + report := func(err error) { + if err != nil { + atomicErr.Store(err.Error()) + c.Metrics.ProposalFailures.Add(1) + } + close(sentChan) + } + + c.rpc.SendSubmit(lead, req, report) + + select { + case <-sentChan: + case <-c.doneC: + return errors.Errorf("chain is stopped") + case <-timer.C: + return errors.Errorf("timed out (%v) waiting on forwarding to %d", c.opts.RPCTimeout, lead) + } + + if atomicErr.Load() != nil { + return errors.Errorf(atomicErr.Load().(string)) + } + return nil +} + type apply struct { entries []raftpb.Entry soft *raft.SoftState @@ -953,12 +985,18 @@ func (c *Chain) catchUp(snap *raftpb.Snapshot) error { } func (c *Chain) commitBlock(block *common.Block) { + // read consenters metadata to write into the replicated block + blockMeta, err := protoutil.GetConsenterMetadataFromBlock(block) + if err != nil { + c.logger.Panicf("Failed to obtain metadata: %s", err) + } + if !protoutil.IsConfigBlock(block) { - c.support.WriteBlock(block, nil) + c.support.WriteBlock(block, blockMeta.Value) return } - c.support.WriteConfigBlock(block, nil) + c.support.WriteConfigBlock(block, blockMeta.Value) configMembership := c.detectConfChange(block) diff --git a/orderer/consensus/etcdraft/chain_test.go 
b/orderer/consensus/etcdraft/chain_test.go index 0f136846d0a..6c0a12f9244 100644 --- a/orderer/consensus/etcdraft/chain_test.go +++ b/orderer/consensus/etcdraft/chain_test.go @@ -184,6 +184,7 @@ var _ = Describe("Chain", func() { fakeFields = newFakeMetricsFields() opts = etcdraft.Options{ + RPCTimeout: time.Second * 5, RaftID: 1, Clock: clock, TickInterval: interval, @@ -2640,7 +2641,28 @@ var _ = Describe("Chain", func() { }) }) + When("gRPC stream to leader is stuck", func() { + BeforeEach(func() { + c2.opts.RPCTimeout = time.Second + network.Lock() + network.delayWG.Add(1) + network.Unlock() + }) + It("correctly times out", func() { + err := c2.Order(env, 0) + Expect(err).To(MatchError("timed out (1s) waiting on forwarding to 1")) + network.delayWG.Done() + }) + }) + When("leader is disconnected", func() { + It("correctly returns a failure to the client when forwarding from a follower", func() { + network.disconnect(1) + + err := c2.Order(env, 0) + Expect(err).To(MatchError("connection lost")) + }) + It("proactively steps down to follower", func() { network.disconnect(1) @@ -3377,6 +3399,7 @@ func newChain( fakeFields := newFakeMetricsFields() opts := etcdraft.Options{ + RPCTimeout: timeout, RaftID: uint64(id), Clock: clock, TickInterval: interval, @@ -3543,6 +3566,7 @@ func (c *chain) getStepFunc() stepFunc { } type network struct { + delayWG sync.WaitGroup sync.RWMutex leader uint64 @@ -3621,21 +3645,30 @@ func (n *network) addChain(c *chain) { return c.step(dest, msg) } - c.rpc.SendSubmitStub = func(dest uint64, msg *orderer.SubmitRequest) error { + c.rpc.SendSubmitStub = func(dest uint64, msg *orderer.SubmitRequest, f func(error)) error { if !n.linked(c.id, dest) { - return errors.Errorf("connection refused") + err := errors.Errorf("connection refused") + f(err) + return err } if !n.connected(c.id) || !n.connected(dest) { - return errors.Errorf("connection lost") + err := errors.Errorf("connection lost") + f(err) + return err } n.RLock() target := 
n.chains[dest] n.RUnlock() go func() { + n.Lock() + n.delayWG.Wait() + n.Unlock() + defer GinkgoRecover() target.Submit(msg, c.id) + f(nil) }() return nil } diff --git a/orderer/consensus/etcdraft/consenter.go b/orderer/consensus/etcdraft/consenter.go index a37eb312942..b34e14030b1 100644 --- a/orderer/consensus/etcdraft/consenter.go +++ b/orderer/consensus/etcdraft/consenter.go @@ -194,12 +194,13 @@ func (c *Consenter) HandleChain(support consensus.ConsenterSupport, metadata *co } else { tickInterval, err = time.ParseDuration(c.EtcdRaftConfig.TickIntervalOverride) if err != nil { - return nil, errors.Errorf("failed parsing Consensus.TickIntervalOverride: %s: %v", c.EtcdRaftConfig.TickIntervalOverride, err) + return nil, errors.WithMessage(err, "failed parsing Consensus.TickIntervalOverride") } c.Logger.Infof("TickIntervalOverride is set, overriding channel configuration tick interval to %v", tickInterval) } opts := Options{ + RPCTimeout: c.OrdererConfig.General.Cluster.RPCTimeout, RaftID: id, Clock: clock.NewClock(), MemoryStorage: raft.NewMemoryStorage(), diff --git a/orderer/consensus/etcdraft/consenter_test.go b/orderer/consensus/etcdraft/consenter_test.go index b5fab1618b8..604eca907bf 100644 --- a/orderer/consensus/etcdraft/consenter_test.go +++ b/orderer/consensus/etcdraft/consenter_test.go @@ -537,7 +537,8 @@ var _ = Describe("Consenter", func() { consenter.EtcdRaftConfig.TickIntervalOverride = "seven" _, err := consenter.HandleChain(support, nil) - Expect(err).To(MatchError("failed parsing Consensus.TickIntervalOverride: seven: time: invalid duration seven")) + Expect(err).To(MatchError(HavePrefix("failed parsing Consensus.TickIntervalOverride:"))) + Expect(err).To(MatchError(ContainSubstring("seven"))) }) }) diff --git a/orderer/consensus/etcdraft/mocks/mock_rpc.go b/orderer/consensus/etcdraft/mocks/mock_rpc.go index 9c42fc13500..91d28129171 100644 --- a/orderer/consensus/etcdraft/mocks/mock_rpc.go +++ b/orderer/consensus/etcdraft/mocks/mock_rpc.go @@ 
-21,11 +21,12 @@ type FakeRPC struct { sendConsensusReturnsOnCall map[int]struct { result1 error } - SendSubmitStub func(uint64, *orderer.SubmitRequest) error + SendSubmitStub func(uint64, *orderer.SubmitRequest, func(err error)) error sendSubmitMutex sync.RWMutex sendSubmitArgsForCall []struct { arg1 uint64 arg2 *orderer.SubmitRequest + arg3 func(err error) } sendSubmitReturns struct { result1 error @@ -98,17 +99,18 @@ func (fake *FakeRPC) SendConsensusReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeRPC) SendSubmit(arg1 uint64, arg2 *orderer.SubmitRequest) error { +func (fake *FakeRPC) SendSubmit(arg1 uint64, arg2 *orderer.SubmitRequest, arg3 func(err error)) error { fake.sendSubmitMutex.Lock() ret, specificReturn := fake.sendSubmitReturnsOnCall[len(fake.sendSubmitArgsForCall)] fake.sendSubmitArgsForCall = append(fake.sendSubmitArgsForCall, struct { arg1 uint64 arg2 *orderer.SubmitRequest - }{arg1, arg2}) - fake.recordInvocation("SendSubmit", []interface{}{arg1, arg2}) + arg3 func(err error) + }{arg1, arg2, arg3}) + fake.recordInvocation("SendSubmit", []interface{}{arg1, arg2, arg3}) fake.sendSubmitMutex.Unlock() if fake.SendSubmitStub != nil { - return fake.SendSubmitStub(arg1, arg2) + return fake.SendSubmitStub(arg1, arg2, arg3) } if specificReturn { return ret.result1 @@ -123,17 +125,17 @@ func (fake *FakeRPC) SendSubmitCallCount() int { return len(fake.sendSubmitArgsForCall) } -func (fake *FakeRPC) SendSubmitCalls(stub func(uint64, *orderer.SubmitRequest) error) { +func (fake *FakeRPC) SendSubmitCalls(stub func(uint64, *orderer.SubmitRequest, func(err error)) error) { fake.sendSubmitMutex.Lock() defer fake.sendSubmitMutex.Unlock() fake.SendSubmitStub = stub } -func (fake *FakeRPC) SendSubmitArgsForCall(i int) (uint64, *orderer.SubmitRequest) { +func (fake *FakeRPC) SendSubmitArgsForCall(i int) (uint64, *orderer.SubmitRequest, func(err error)) { fake.sendSubmitMutex.RLock() defer fake.sendSubmitMutex.RUnlock() argsForCall := 
fake.sendSubmitArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 } func (fake *FakeRPC) SendSubmitReturns(result1 error) { diff --git a/orderer/consensus/etcdraft/util.go b/orderer/consensus/etcdraft/util.go index f7cdd6c9931..50a110b2a9f 100644 --- a/orderer/consensus/etcdraft/util.go +++ b/orderer/consensus/etcdraft/util.go @@ -247,7 +247,7 @@ func VerifyConfigMetadata(metadata *etcdraft.ConfigMetadata, verifyOpts x509.Ver return errors.Errorf("metadata has nil consenter") } if err := validateConsenterTLSCerts(consenter, verifyOpts, true); err != nil { - return err + return errors.WithMessagef(err, "consenter %s:%d has invalid certificate", consenter.Host, consenter.Port) } } diff --git a/protoutil/commonutils.go b/protoutil/commonutils.go index 4b4874682fd..76c903cdbd9 100644 --- a/protoutil/commonutils.go +++ b/protoutil/commonutils.go @@ -212,6 +212,10 @@ func IsConfigBlock(block *cb.Block) bool { // ChannelHeader returns the *cb.ChannelHeader for a given *cb.Envelope. func ChannelHeader(env *cb.Envelope) (*cb.ChannelHeader, error) { + if env == nil { + return nil, errors.New("Invalid envelope payload. can't be nil") + } + envPayload, err := UnmarshalPayload(env.Payload) if err != nil { return nil, err diff --git a/release_notes/v2.3.1.md b/release_notes/v2.3.1.md new file mode 100644 index 00000000000..d83d4ed9d6e --- /dev/null +++ b/release_notes/v2.3.1.md @@ -0,0 +1,114 @@ +v2.3.1 Wednesday, February 3, 2021 +================================== + +Fixes +----- + +**peer - incorrect handling of values set to empty byte array in node chaincode** + +Peer should handle key values set to nil or empty byte arrays as a delete of the key. +While the behavior worked as expected when using Go chaincode and Java chaincode, +if using node chaincode it did not work correctly when setting key values to empty byte arrays. 
+This fix ensures that peer will interpret empty byte arrays as deletes even for node chaincodes. +If using node chaincode with private data, if you had set private data values to an empty +byte array, the private data hash would have been committed incorrectly to the state database. +To repair the state database, after applying the fix, with the peer stopped, request that +the state database be rebuilt by calling "peer node rebuild-dbs" or by deleting the state database. +Upon the next start, the peer will rebuild the state database from the already processed block store. +If subsequent transactions had referenced the existence of such a private data hash by calling +GetPrivateDataHash, then the subsequent transactions may have been processed incorrectly and +the peer will need to additionally reprocess blocks, which can be triggered by calling +"peer node reset" instead of "peer node rebuild-dbs". +If the peer joined channels from a snapshot, "peer node rebuild-dbs" and "peer node reset" +are not available since the peer does not have all blocks since the genesis block. In +these cases the peer will need to be replaced with a new peer that re-joins from the snapshots. +If using regular channel data only and not private data, the empty byte array will not +have been committed, and therefore no action is required on the peer beyond applying the fix. + +**orderer - incorrect osnadmin flag --channel-id** + +The osnadmin CLI introduced in v2.3.0 used an incorrect flag --channel-id. +The flag has been corrected to be --channelID in order to be consistent +with other CLIs. 
+ + +Dependencies +------------ +Fabric v2.3.1 has been tested with the following dependencies: +* Go 1.14.12 +* CouchDB v3.1.1 + + +Deprecations (existing) +----------------------- + +**FAB-15754: The 'Solo' consensus type is deprecated.** + +The 'Solo' consensus type has always been marked non-production and should be in +use only in test environments, however for compatibility it is still available, +but may be removed entirely in a future release. + +**FAB-16408: The 'Kafka' consensus type is deprecated.** + +The 'Raft' consensus type was introduced in v1.4.1 and has become the preferred +production consensus type. There is a documented and tested migration path from +Kafka to Raft, and existing users should migrate to the newer Raft consensus type. +For compatibility with existing deployments, Kafka is still supported, +but may be removed entirely in a future release. +Additionally, the fabric-kafka and fabric-zookeeper docker images are no longer updated, maintained, or published. + +**Fabric CouchDB image is deprecated** + +v2.2.0 added support for CouchDB 3.1.0 as the recommended and tested version of CouchDB. +If prior versions are utilized, a Warning will appear in peer log. +Note that CouchDB 3.1.0 requires that an admin username and password be set, +while this was optional in CouchDB v2.x. See the +[Fabric CouchDB documentation](https://hyperledger-fabric.readthedocs.io/en/v2.2.0/couchdb_as_state_database.html#couchdb-configuration) +for configuration details. +Also note that CouchDB 3.1.0 default max_document_size is reduced to 8MB. Set a higher value if needed in your environment. +Finally, the fabric-couchdb docker image will not be updated to v3.1.0 and will no longer be updated, maintained, or published. +Users can utilize the official CouchDB docker image maintained by the Apache CouchDB project instead. 
+ +**FAB-7559: Support for specifying orderer endpoints at the global level in channel configuration is deprecated.** + +Utilize the new 'OrdererEndpoints' stanza within the channel configuration of an organization instead. +Configuring orderer endpoints at the organization level accommodates +scenarios where orderers are run by different organizations. Using +this configuration ensures that only the TLS CA certificates of that organization +are used for orderer communications, in contrast to the global channel level endpoints which +would cause an aggregation of all orderer TLS CA certificates across +all orderer organizations to be used for orderer communications. + +**FAB-17428: Support for configtxgen flag `--outputAnchorPeersUpdate` is deprecated.** + +The `--outputAnchorPeersUpdate` mechanism for updating anchor peers has always had +limitations (for instance, it only works the first time anchor peers are updated). +Instead, anchor peer updates should be performed through the normal config update flow. + +**FAB-15406: The fabric-tools docker image is deprecated** + +The fabric-tools docker image will not be published in future Fabric releases. +Instead of using the fabric-tools docker image, users should utilize the +published Fabric binaries. The Fabric binaries can be used to make client calls +to Fabric runtime components, regardless of where the Fabric components are running. + +**FAB-15317: Block dissemination via gossip is deprecated** + +Block dissemination via gossip is deprecated and may be removed in a future release. +Fabric peers can be configured to receive blocks directly from an ordering service +node by using the following configuration: +``` +peer.gossip.orgLeader: true +peer.gossip.useLeaderElection: false +peer.gossip.state.enabled: false +``` + +**FAB-15061: Legacy chaincode lifecycle is deprecated** + +The legacy chaincode lifecycle from v1.x is deprecated and will be removed +in a future release. 
To prepare for the eventual removal, utilize the v2.x +chaincode lifecycle instead, by enabling V2_0 application capability on all +channels, and redeploying all chaincodes using the v2.x lifecycle. The new +chaincode lifecycle provides a more flexible and robust governance model +for chaincodes. For more details see the +[documentation for enabling the new lifecycle](https://hyperledger-fabric.readthedocs.io/en/release-2.2/enable_cc_lifecycle.html). diff --git a/release_notes/v2.3.2.md b/release_notes/v2.3.2.md new file mode 100644 index 00000000000..52f5ba3959c --- /dev/null +++ b/release_notes/v2.3.2.md @@ -0,0 +1,121 @@ +v2.3.2 Release Notes - April 23, 2021 +===================================== + +Improvements +------------ + +**peer and orderer - Implement legacy name constraints verification for Go 1.15** + +These changes reproduce the Go 1.14 name constraint verification in the MSP. +Without these changes, certificate chains that would fail verification in Go 1.14 would +successfully validate in Go 1.15 due to the change mentioned in the [Go 1.15 release notes](https://golang.org/doc/go1.15#commonname). +Specifically, if a signing certificate contains a name constraint, the leaf certificate +does not include SAN extensions, and the leaf's common name looks like a host name, +then the additional verification is performed to ensure deterministic behavior relative +to prior Fabric releases. + + +Fixes +----- + +**FAB-18427: orderer - Report correct reason of stream abort in orderer cluster** + +This commit fixes a bug that makes the cluster communication infrastructure +always report an "aborted" reason after a stream terminates. + +**FAB-18424: peer - Ledger snapshot request submission with special value "blockNumber 0"** + +If a ledger snapshot request is submitted with the special value "blockNumber 0", peer is expected to translate the request to last committed block. 
+This patch fixes the issue where it may sometimes happen that the request is translated to block number 1 instead of the last committed block.
+This leads to the situation where no snapshot gets generated, including any future snapshot requests.
+If you have ever used this special value, we encourage you to check the list of pending snapshot requests with `peer snapshot listpending`.
+If you notice one or more pending requests that are for block numbers lower than the latest committed block, cancel such requests with `peer snapshot cancelrequest` to enable further snapshot requests to be processed.
+
+**FAB-18304: peer and orderer - Fix leveldb manifest corruption**
+
+This fix updates the version of goleveldb. The prior version of goleveldb had a bug which
+could cause manifest corruption in crash scenarios, especially in disk full crash scenarios.
+With a corrupted goleveldb database, the peer or orderer would fail to start with error
+"panic: Error opening leveldb: leveldb: manifest corrupted".
+
+
+Dependencies
+------------
+Fabric v2.3.2 has been tested with the following dependencies:
+* Go 1.15.7
+* CouchDB v3.1.1
+
+
+Deprecations (existing)
+-----------------------
+
+**FAB-15754: The 'Solo' consensus type is deprecated.**
+
+The 'Solo' consensus type has always been marked non-production and should be in
+use only in test environments, however for compatibility it is still available,
+but may be removed entirely in a future release.
+
+**FAB-16408: The 'Kafka' consensus type is deprecated.**
+
+The 'Raft' consensus type was introduced in v1.4.1 and has become the preferred
+production consensus type. There is a documented and tested migration path from
+Kafka to Raft, and existing users should migrate to the newer Raft consensus type.
+For compatibility with existing deployments, Kafka is still supported,
+but may be removed entirely in a future release.
+Additionally, the fabric-kafka and fabric-zookeeper docker images are no longer updated, maintained, or published. + +**Fabric CouchDB image is deprecated** + +v2.2.0 added support for CouchDB 3.1.0 as the recommended and tested version of CouchDB. +If prior versions are utilized, a Warning will appear in peer log. +Note that CouchDB 3.1.0 requires that an admin username and password be set, +while this was optional in CouchDB v2.x. See the +[Fabric CouchDB documentation](https://hyperledger-fabric.readthedocs.io/en/v2.2.0/couchdb_as_state_database.html#couchdb-configuration) +for configuration details. +Also note that CouchDB 3.1.0 default max_document_size is reduced to 8MB. Set a higher value if needed in your environment. +Finally, the fabric-couchdb docker image will not be updated to v3.1.0 and will no longer be updated, maintained, or published. +Users can utilize the official CouchDB docker image maintained by the Apache CouchDB project instead. + +**FAB-7559: Support for specifying orderer endpoints at the global level in channel configuration is deprecated.** + +Utilize the new 'OrdererEndpoints' stanza within the channel configuration of an organization instead. +Configuring orderer endpoints at the organization level accommodates +scenarios where orderers are run by different organizations. Using +this configuration ensures that only the TLS CA certificates of that organization +are used for orderer communications, in contrast to the global channel level endpoints which +would cause an aggregation of all orderer TLS CA certificates across +all orderer organizations to be used for orderer communications. + +**FAB-17428: Support for configtxgen flag `--outputAnchorPeersUpdate` is deprecated.** + +The `--outputAnchorPeersUpdate` mechanism for updating anchor peers has always had +limitations (for instance, it only works the first time anchor peers are updated). +Instead, anchor peer updates should be performed through the normal config update flow. 
+ +**FAB-15406: The fabric-tools docker image is deprecated** + +The fabric-tools docker image will not be published in future Fabric releases. +Instead of using the fabric-tools docker image, users should utilize the +published Fabric binaries. The Fabric binaries can be used to make client calls +to Fabric runtime components, regardless of where the Fabric components are running. + +**FAB-15317: Block dissemination via gossip is deprecated** + +Block dissemination via gossip is deprecated and may be removed in a future release. +Fabric peers can be configured to receive blocks directly from an ordering service +node by using the following configuration: +``` +peer.gossip.orgLeader: true +peer.gossip.useLeaderElection: false +peer.gossip.state.enabled: false +``` + +**FAB-15061: Legacy chaincode lifecycle is deprecated** + +The legacy chaincode lifecycle from v1.x is deprecated and will be removed +in a future release. To prepare for the eventual removal, utilize the v2.x +chaincode lifecycle instead, by enabling V2_0 application capability on all +channels, and redeploying all chaincodes using the v2.x lifecycle. The new +chaincode lifecycle provides a more flexible and robust governance model +for chaincodes. For more details see the +[documentation for enabling the new lifecycle](https://hyperledger-fabric.readthedocs.io/en/release-2.2/enable_cc_lifecycle.html). diff --git a/release_notes/v2.3.3.md b/release_notes/v2.3.3.md new file mode 100644 index 00000000000..f5164f66e7e --- /dev/null +++ b/release_notes/v2.3.3.md @@ -0,0 +1,127 @@ +v2.3.3 Release Notes - September 8, 2021 +======================================== + +Improvements +------------ + +**peer - New configuration option to disable gossip block forwarding** + +If all peers in an organization explicitly set `peer.deliveryclient.blockGossipEnabled` to false, +no peer in the organization gossips blocks to any other peer in that organization. 
+Use this setting when all peers pull blocks from ordering service. For more +information see deprecation announcement below: **FAB-15317: Block dissemination via gossip is deprecated**. + +**orderer - [FAB-18484] Return transaction forwarding result back to the client synchronously** + +With this improvement a Raft follower waits for the transaction to be forwarded to the Raft leader, +and returns the result (success or failure) back to the client accordingly. +Prior to this improvement, the Raft follower returned success after enqueueing it into the message queue, +which might have resulted in the transaction being dropped but a success being returned to the client. +Application clients should still monitor transaction commit events, since the Raft leader is not guaranteed +to deliver the transaction into a block in exception scenarios, but this improvement avoids +transactions from being dropped when there are connection issues between a Raft follower and Raft leader. + +**peer and orderer - Make gRPC maximum message size configurable** + +This improvement makes gRPC maximum message size configurable in peer and orderer. +Previously the maximum message size was hardcoded to 100 megabytes. +Since all nodes should be consistent it is recommended to keep +the default value of 100 megabytes for MaxRecvMsgSize & MaxSendMsgSize. +The value can be configured if needed however. +Configure in peer core.yaml with `peer.maxRecvMsgSize` and `peer.maxSendMsgSize`. +Configure in orderer orderer.yaml with `General.MaxRecvMsgSize` and `General.MaxSendMsgSize`. + + +Fixes +----- + +**orderer - [FAB-18521] Consenters' metadata is not replicated while OSN catches up with snapshot** + +If an ordering service node crashes while replicating blocks from another ordering service, +the consenters metadata will not be available and the ordering service node will not be +able to reconnect to the consenter set upon restart. 
This fix ensures that an ordering +service node that is replicating blocks persists the consenters metadata so that it +can reconnect to the consenter set. + + +Dependencies +------------ +Fabric v2.3.3 has been tested with the following dependencies: +* Go 1.16.7 +* CouchDB v3.1.1 + + +Deprecations (existing) +----------------------- + +**FAB-15754: The 'Solo' consensus type is deprecated.** + +The 'Solo' consensus type has always been marked non-production and should be in +use only in test environments, however for compatibility it is still available, +but may be removed entirely in a future release. + +**FAB-16408: The 'Kafka' consensus type is deprecated.** + +The 'Raft' consensus type was introduced in v1.4.1 and has become the preferred +production consensus type. There is a documented and tested migration path from +Kafka to Raft, and existing users should migrate to the newer Raft consensus type. +For compatibility with existing deployments, Kafka is still supported, +but may be removed entirely in a future release. +Additionally, the fabric-kafka and fabric-zookeeper docker images are no longer updated, maintained, or published. + +**Fabric CouchDB image is deprecated** + +v2.2.0 added support for CouchDB 3.1.0 as the recommended and tested version of CouchDB. +If prior versions are utilized, a Warning will appear in peer log. +Note that CouchDB 3.1.0 requires that an admin username and password be set, +while this was optional in CouchDB v2.x. See the +[Fabric CouchDB documentation](https://hyperledger-fabric.readthedocs.io/en/v2.2.0/couchdb_as_state_database.html#couchdb-configuration) +for configuration details. +Also note that CouchDB 3.1.0 default max_document_size is reduced to 8MB. Set a higher value if needed in your environment. +Finally, the fabric-couchdb docker image will not be updated to v3.1.0 and will no longer be updated, maintained, or published. 
+Users can utilize the official CouchDB docker image maintained by the Apache CouchDB project instead. + +**FAB-7559: Support for specifying orderer endpoints at the global level in channel configuration is deprecated.** + +Utilize the new 'OrdererEndpoints' stanza within the channel configuration of an organization instead. +Configuring orderer endpoints at the organization level accommodates +scenarios where orderers are run by different organizations. Using +this configuration ensures that only the TLS CA certificates of that organization +are used for orderer communications, in contrast to the global channel level endpoints which +would cause an aggregation of all orderer TLS CA certificates across +all orderer organizations to be used for orderer communications. + +**FAB-17428: Support for configtxgen flag `--outputAnchorPeersUpdate` is deprecated.** + +The `--outputAnchorPeersUpdate` mechanism for updating anchor peers has always had +limitations (for instance, it only works the first time anchor peers are updated). +Instead, anchor peer updates should be performed through the normal config update flow. + +**FAB-15406: The fabric-tools docker image is deprecated** + +The fabric-tools docker image will not be published in future Fabric releases. +Instead of using the fabric-tools docker image, users should utilize the +published Fabric binaries. The Fabric binaries can be used to make client calls +to Fabric runtime components, regardless of where the Fabric components are running. + +**FAB-15317: Block dissemination via gossip is deprecated** + +Block dissemination via gossip is deprecated and may be removed in a future release. 
+Fabric peers can be configured to receive blocks directly from an ordering service +node by using the following configuration: +``` +peer.gossip.orgLeader: true +peer.gossip.useLeaderElection: false +peer.gossip.state.enabled: false +peer.deliveryclient.blockGossipEnabled: false +``` + +**FAB-15061: Legacy chaincode lifecycle is deprecated** + +The legacy chaincode lifecycle from v1.x is deprecated and will be removed +in a future release. To prepare for the eventual removal, utilize the v2.x +chaincode lifecycle instead, by enabling V2_0 application capability on all +channels, and redeploying all chaincodes using the v2.x lifecycle. The new +chaincode lifecycle provides a more flexible and robust governance model +for chaincodes. For more details see the +[documentation for enabling the new lifecycle](https://hyperledger-fabric.readthedocs.io/en/release-2.2/enable_cc_lifecycle.html). diff --git a/sampleconfig/configtx.yaml b/sampleconfig/configtx.yaml index 86d64de17f9..b901c1b12c7 100644 --- a/sampleconfig/configtx.yaml +++ b/sampleconfig/configtx.yaml @@ -287,6 +287,8 @@ Orderer: &OrdererDefaults # this value will be rejected by ordering. If the "kafka" OrdererType is # selected, set 'message.max.bytes' and 'replica.fetch.max.bytes' on # the Kafka brokers to a value that is larger than this one. + # Based on networking configuration the value needs to be tuned. With + # default 100 MB node grpc msg configuration, 49 MB is the max can be set. AbsoluteMaxBytes: 10 MB # Preferred Max Bytes: The preferred maximum number of bytes allowed diff --git a/sampleconfig/core.yaml b/sampleconfig/core.yaml index 9cdc483e21e..d21a9f50740 100644 --- a/sampleconfig/core.yaml +++ b/sampleconfig/core.yaml @@ -230,7 +230,13 @@ peer: # indicates whenever state transfer is enabled or not # default value is true, i.e. 
state transfer is active # and takes care to sync up missing blocks allowing - # lagging peer to catch up to speed with rest network + # lagging peer to catch up to speed with rest network. + # Keep in mind that when peer.gossip.useLeaderElection is true + # and there are several peers in the organization, + # or peer.gossip.useLeaderElection is false alongside with + # peer.gossip.orgleader being false, the peer's ledger may lag behind + # the rest of the peers and will never catch up due to state transfer + # being disabled. enabled: false # checkInterval interval to check whether peer is lagging behind enough to # request blocks via state transfer from another peer. @@ -342,6 +348,11 @@ peer: # Delivery service related config deliveryclient: + # Enables this peer to disseminate blocks it pulled from the ordering service + # via gossip. + # Note that 'gossip.state.enabled' controls point to point block replication + # of blocks committed in the past. + blockGossipEnabled: true # It sets the total time the delivery service may spend in reconnection # attempts until its retry logic gives up and returns an error reconnectTotalTimeThreshold: 3600s @@ -457,6 +468,13 @@ peer: # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. deliverService: 2500 + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes GRPC server and client can receive + maxRecvMsgSize: 104857600 + # Max message size in bytes GRPC server and client can send + maxSendMsgSize: 104857600 + ############################################################################### # # VM section @@ -551,6 +569,7 @@ chaincode: # List of directories to treat as external builders and launchers for # chaincode. The external builder detection processing will iterate over the # builders in the order specified below. 
+ # To override this property via env variable use CORE_CHAINCODE_EXTERNALBUILDERS: [{name: x, path: dir1}, {name: y, path: dir2}] externalBuilders: [] # - path: /path/to/directory # name: descriptive-builder-name diff --git a/sampleconfig/orderer.yaml b/sampleconfig/orderer.yaml index a1021bd7ca0..ffaa58d450d 100644 --- a/sampleconfig/orderer.yaml +++ b/sampleconfig/orderer.yaml @@ -50,6 +50,14 @@ General: # ServerTimeout is the duration the server waits for a response from # a client before closing the connection. ServerTimeout: 20s + + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes the GRPC server and client can receive + MaxRecvMsgSize: 104857600 + # Max message size in bytes the GRPC server and client can send + MaxSendMsgSize: 104857600 + # Cluster settings for ordering service nodes that communicate with other ordering service nodes # such as Raft based ordering service. Cluster: diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh index ce30a4afca3..bba17ea6d41 100755 --- a/scripts/bootstrap.sh +++ b/scripts/bootstrap.sh @@ -6,9 +6,9 @@ # # if version not passed in, default to latest released version -VERSION=2.2.1 +VERSION=2.3.3 # if ca version not passed in, default to latest released version -CA_VERSION=1.4.9 +CA_VERSION=1.5.2 ARCH=$(echo "$(uname -s|tr '[:upper:]' '[:lower:]'|sed 's/mingw64_nt.*/windows/')-$(uname -m | sed 's/x86_64/amd64/g')") MARCH=$(uname -m) @@ -21,8 +21,8 @@ printHelp() { echo "-s : bypass fabric-samples repo clone" echo "-b : bypass download of platform-specific binaries" echo - echo "e.g. bootstrap.sh 2.2.1 1.4.9 -s" - echo "will download docker images and binaries for Fabric v2.2.1 and Fabric CA v1.4.9" + echo "e.g. 
bootstrap.sh 2.3.3 1.5.2 -s" + echo "will download docker images and binaries for Fabric v2.3.3 and Fabric CA v1.5.2" } # dockerPull() pulls docker images from fabric and chaincode repositories diff --git a/scripts/generateHelpDocs.sh b/scripts/generateHelpDocs.sh index e2da4cf58a5..8aea5057a52 100755 --- a/scripts/generateHelpDocs.sh +++ b/scripts/generateHelpDocs.sh @@ -29,7 +29,7 @@ generateHelpText(){ ## $x \`\`\` -$($x --help 2>&1) +$($x --help 2>&1 | sed -E 's/[[:space:]]+$//g') \`\`\` EOF diff --git a/scripts/run-integration-tests.sh b/scripts/run-integration-tests.sh index 75a8db1f7db..3d1c978cb0d 100755 --- a/scripts/run-integration-tests.sh +++ b/scripts/run-integration-tests.sh @@ -14,11 +14,16 @@ fabric_dir="$(cd "$(dirname "$0")/.." && pwd)" cd "$fabric_dir" declare -a test_dirs -while IFS='' read -r line; do test_dirs+=("$line"); done < <( - go list -f '{{ if or (len .TestGoFiles | ne 0) (len .XTestGoFiles | ne 0) }}{{ println .Dir }}{{ end }}' ./... | \ - grep integration | \ - sed s,"${fabric_dir}",.,g -) +if [ $# -eq 0 ] + then + while IFS='' read -r line; do test_dirs+=("$line"); done < <( + go list -f '{{ if or (len .TestGoFiles | ne 0) (len .XTestGoFiles | ne 0) }}{{ println .Dir }}{{ end }}' ./... 
| \ + grep integration | \ + sed s,"${fabric_dir}",.,g + ) +else + for arg in "$@"; do test_dirs+=("./integration/$arg"); done +fi total_agents=${SYSTEM_TOTALJOBSINPHASE:-1} # standard VSTS variables available using parallel execution; total number of parallel jobs running agent_number=${SYSTEM_JOBPOSITIONINPHASE:-1} # current job position diff --git a/vagrant/golang.sh b/vagrant/golang.sh index ec2411a72e3..218e77c5fda 100644 --- a/vagrant/golang.sh +++ b/vagrant/golang.sh @@ -5,7 +5,7 @@ # SPDX-License-Identifier: Apache-2.0 GOROOT='/opt/go' -GO_VERSION=1.14.12 +GO_VERSION=1.16.7 # ---------------------------------------------------------------- # Install Golang diff --git a/vendor/github.com/BDLS-bft/bdls/.gitattributes b/vendor/github.com/BDLS-bft/bdls/.gitattributes new file mode 100644 index 00000000000..dfe0770424b --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/vendor/github.com/BDLS-bft/bdls/.gitignore b/vendor/github.com/BDLS-bft/bdls/.gitignore new file mode 100644 index 00000000000..2f44eb9dc75 --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/.gitignore @@ -0,0 +1,3 @@ + +bdls.test +cpu.out diff --git a/vendor/github.com/BDLS-bft/bdls/CONTRIBUTING.md b/vendor/github.com/BDLS-bft/bdls/CONTRIBUTING.md new file mode 100644 index 00000000000..06ed5f25bd2 --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/CONTRIBUTING.md @@ -0,0 +1,17 @@ +We welcome contributions to this project, please find the bi-weekly lab meeting. +``` +Next meeting: Thursday, December-08, 2022 +9:30am to 10:00am Eastern Time +Repeats: +Every 2 weeks on Thursday +Location: +https://zoom.us/my/hyperledger.community.3?pwd=UE90WHhEaHRqOGEyMkV3cldKa2d2dz09 +``` + +The lab runs bi-weekly open community calls and you are welcome to join to learn more about what the lab is, how to use it and how to get involved and help out. 
+You can join the calls every other Thursday at 9:30 AM Eastern at: + + +You can also talk with the developers of the lab in the **`#bdls`** channel on the Hyperledger Discord server at: + +https://chat.hyperledger.org/ diff --git a/vendor/github.com/BDLS-bft/bdls/LICENSE b/vendor/github.com/BDLS-bft/bdls/LICENSE new file mode 100644 index 00000000000..da6debcd2c7 --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/BDLS-bft/bdls/MAINTAINERS.md b/vendor/github.com/BDLS-bft/bdls/MAINTAINERS.md new file mode 100644 index 00000000000..fb269b9db67 --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/MAINTAINERS.md @@ -0,0 +1,12 @@ +## Maintainers + +### Active Maintainers +| name | Github | Discord | +|-------------------|-----------|----------------| +| Yongge Wang | [@yonggewang](https://github.com/yonggewang) | | +| Ahmed Al Salih | [@ahmed82](https://github.com/ahmed82) | | + +### Emeritus Maintainers +| name | Github | Discord | +|-------------------|-----------|----------------| +| Li Fu | [@xtaci](https://github.com/xtaci) | | diff --git a/vendor/github.com/BDLS-bft/bdls/README.md b/vendor/github.com/BDLS-bft/bdls/README.md new file mode 100644 index 00000000000..c054b7f48ef --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/README.md @@ -0,0 +1,111 @@ +# BDLS +Efficient BFT in partial synchronous networks + +[![GoDoc][1]][2] [![License][3]][4] [![Build Status][5]][6] [![Go Report Card][7]][8] [![Coverage Statusd][9]][10] [![Sourcegraph][11]][12] + +[1]: https://godoc.org/github.com/Sperax/bdls?status.svg +[2]: https://godoc.org/github.com/Sperax/bdls +[3]: https://img.shields.io/badge/License-Apache_2.0-blue.svg +[4]: LICENSE +[5]: https://travis-ci.org/Sperax/bdls.svg?branch=master +[6]: https://travis-ci.org/Sperax/bdls +[7]: 
https://goreportcard.com/badge/github.com/Sperax/bdls?bdls +[8]: https://goreportcard.com/report/github.com/Sperax/bdls +[9]: https://codecov.io/gh/Sperax/bdls/branch/master/graph/badge.svg +[10]: https://codecov.io/gh/Sperax/bdls +[11]: https://sourcegraph.com/github.com/Sperax/bdls/-/badge.svg +[12]: https://sourcegraph.com/github.com/Sperax/bdls?badge + +# BDLS Consensus + +## Introduction + +BDLS is an innovative BFT consensus algorithm that features safety and liveness by +presenting a mathematically proven secure BFT protocol that is resilient in open networks such as +the Internet. BDLS overcomes many problems, such as the deadlock problem caused by unreliable +p2p/broadcast channels. These problems are all very relevant to existing realistic open +network scenarios, and are the focus of extensive work in improving Internet security, but it +is an area largely ignored by most in mainstream BFT protocol design. +(Paper: https://eprint.iacr.org/2019/1460.pdf or https://dl.acm.org/doi/abs/10.1145/3538227 or https://doi.org/10.1145/3538227 or https://www.doi.org/10.1007/978-3-030-91859-0_2 ) + +For this library, to make the runtime behavior of consensus algorithm predictable as a function: +y = f(x, t), where 'x' is the message it received, and 't' is the time while being called, + then 'y' is the deterministic status of consensus after 'x' and 't' applied to 'f', +it has been designed in a deterministic scheme, without parallel computing, networking, and +the correctness of program implementation can be proven with proper test cases. + +## Features + +1. Pure algorithm implementation in deterministic and predictable behavior, easy to integrate into existing projects, refer to [DFA](https://en.wikipedia.org/wiki/Deterministic_finite_automaton) for more. +2. Well-tested on various platforms with complicated cases. +3. Auto back-off under heavy payload, guaranteed finalization (worst case guarantee). +4. 
Easy integration into Blockchain & non-Blockchain consensus, like [WAL replication](https://en.wikipedia.org/wiki/Replication_(computing)#Database_replication) in database. +5. Builtin network emulation for various network latency with comprehensive statistics. + +## Documentation + +For complete documentation, see the associated [Godoc](https://pkg.go.dev/github.com/Sperax/bdls). + + +## Install BDLS on Ubuntu Server 20.04 + +``` +sudo apt-get update +sudo apt-get -y upgrade +sudo apt-get install autoconf automake libtool curl make g++ unzip +cd /tmp +wget https://go.dev/dl/go1.17.5.linux-amd64.tar.gz +sudo tar -xvf go1.17.5.linux-amd64.tar.gz +sudo mv go /usr/local +cd +echo 'export GOROOT=/usr/local/go' >> .profile +echo 'export GOPATH=$HOME/go' >> .profile +echo 'export PATH=$GOPATH/bin:$GOROOT/bin:$PATH' >> .profile +source ~/.profile +go version +go env +git clone https://github.com/hyperledger-labs/bdls.git +cd bdls/ +git checkout master +cd cmd/emucon/ +go build . +./emucon help genkeys +./emucon genkeys --count 4 + +[open four terminals to run four participants. if you log to remote Linux, +you may use tmux commands. In tmux, you can switch terminal using "ctrl+b d" +and use "tmux attach -t 0" to enter the terminal. Use "tmux list-session" +to check the current active terminals] + + +./emucon run --id 0 --listen ":4680" +./emucon run --id 1 --listen ":4681" +./emucon run --id 2 --listen ":4682" +./emucon run --id 3 --listen ":4683" + +cd ../.. +go test -v -cpuprofile=cpu.out -memprofile=mem.out -timeout 2h +``` +## Regenerate go.mod and go.sum +``` +rm go.* +go mod init github.com/hyperledger-labs/bdls +go mod tidy +go mod vendor +``` + +See benchmark output at: [AMD-NORMAL.TXT](benchmarks/AMD-NORMAL.TXT) and [PI4-OVERLOAD.TXT](benchmarks/PI4-OVERLOAD.TXT) + +## Specification + +1. Consensus messages are specified in [message.proto](message.proto), users of this library can encapsulate this message in a carrier message, like gossip in TCP. +2. 
Consensus algorithm is **NOT** thread-safe, it **MUST** be protected by some synchronization mechanism, like `sync.Mutex` or `chan` + `goroutine`. + +## Usage + +1. A testing IPC peer -- [ipc_peer.go](ipc_peer.go) +2. A testing TCP node -- [TCP based Consensus Emulator](cmd/emucon) + +## Status + +On-going diff --git a/vendor/github.com/BDLS-bft/bdls/config.go b/vendor/github.com/BDLS-bft/bdls/config.go new file mode 100644 index 00000000000..14167a4b0ba --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/config.go @@ -0,0 +1,72 @@ + +package bdls + +import ( + "crypto/ecdsa" + "time" +) + +const ( + // ConfigMinimumParticipants is the minimum number of participants allowed in the consensus protocol + ConfigMinimumParticipants = 4 +) + +// Config is to config the parameters of BDLS consensus protocol +type Config struct { + // the starting time point for consensus + Epoch time.Time + // CurrentHeight + CurrentHeight uint64 + // PrivateKey + PrivateKey *ecdsa.PrivateKey + // Consensus Group + Participants []Identity + // EnableCommitUnicast sets to true to enable message to be delivered via unicast + // if not(by default), message will be broadcasted + EnableCommitUnicast bool + + // StateCompare is a function from user to compare states, + // The result will be 0 if a==b, -1 if a < b, and +1 if a > b. + // Usually this will lead to block header comparison in blockchain, or replication log in database, + // users should check fields in block header to make comparison. + StateCompare func(a State, b State) int + + // StateValidate is a function from user to validate the integrity of + // state data.
+ StateValidate func(State) bool + + // MessageValidator is an external validator to be called when a message inputs into ReceiveMessage + MessageValidator func(c *Consensus, m *Message, signed *SignedProto) bool + + // MessageOutCallback will be called if not nil before a message send out + MessageOutCallback func(m *Message, signed *SignedProto) + + // Identity derviation from ecdsa.PublicKey + // (optional). Default to DefaultPubKeyToIdentity + PubKeyToIdentity func(pubkey *ecdsa.PublicKey) (ret Identity) +} + +// VerifyConfig verifies the integrity of this config when creating new consensus object +func VerifyConfig(c *Config) error { + if c.Epoch.IsZero() { + return ErrConfigEpoch + } + + if c.StateCompare == nil { + return ErrConfigStateCompare + } + + if c.StateValidate == nil { + return ErrConfigStateValidate + } + + if c.PrivateKey == nil { + return ErrConfigPrivateKey + } + + if len(c.Participants) < ConfigMinimumParticipants { + return ErrConfigParticipants + } + + return nil +} diff --git a/vendor/github.com/BDLS-bft/bdls/consensus.go b/vendor/github.com/BDLS-bft/bdls/consensus.go new file mode 100644 index 00000000000..6f0f9af3bdd --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/consensus.go @@ -0,0 +1,1661 @@ +package bdls + +import ( + "bytes" + "container/list" + "crypto/ecdsa" + "crypto/elliptic" + "net" + "sort" + "time" + + //"fmt" + + + "github.com/BDLS-bft/bdls/crypto/blake2b" + + proto "github.com/gogo/protobuf/proto" +) + +const ( + // the current BDLS protocol version, + // version will be sent along with messages for protocol upgrading. 
+ ProtocolVersion = 1 + // DefaultConsensusLatency is the default propagation latency setting for + // consensus protocol, user can adjust consensus object's latency setting + // via Consensus.SetLatency() + DefaultConsensusLatency = 300 * time.Millisecond + + // MaxConsensusLatency is the ceiling of latencies + MaxConsensusLatency = 10 * time.Second +) + +type ( + // State is the data to participant in consensus. This could be candidate + // blocks in blockchain systems + State []byte + // StateHash = H(State) + StateHash [blake2b.Size256]byte +) + +// defaultHash is the system default hash function +func defaultHash(s State) StateHash { return blake2b.Sum256(s) } + +type ( + // consensusStage defines the status of consensus automata + consensusStage byte +) + +// status definitions for consensus state machine +const ( + // stages are strictly ordered, do not change! + stageRoundChanging consensusStage = iota + stageLock + stageCommit + stageLockRelease +) + +type messageTuple struct { + StateHash StateHash // computed while adding + Message *Message // the decoded message + Signed *SignedProto // the encoded message with signature +} + +// a sorter for messageTuple slice +type tupleSorter struct { + tuples []messageTuple + by func(t1, t2 *messageTuple) bool +} + +// Len implements sort.Interface +func (s *tupleSorter) Len() int { return len(s.tuples) } + +// Swap implements sort.Interface +func (s *tupleSorter) Swap(i, j int) { s.tuples[i], s.tuples[j] = s.tuples[j], s.tuples[i] } + +// Less implements sort.Interface +func (s *tupleSorter) Less(i, j int) bool { return s.by(&s.tuples[i], &s.tuples[j]) } + +// consensusRound maintains exchanging messages in a round. 
+type consensusRound struct { + c *Consensus // the consensus object belongs to + Stage consensusStage // indicates current status in consensus automata + RoundNumber uint64 // round number + LockedState State // leader's locked state + LockedStateHash StateHash // hash of the leaders's locked state + RoundChangeSent bool // mark if the message of this round has sent + CommitSent bool // mark if this round has sent commit message once + + // NOTE: we MUST keep the original message, to re-marshal the message may + // result in different BITS LAYOUT, and different hash of course. + roundChanges []messageTuple // stores message tuples of this round + commits []messageTuple // stores message tuples of this round + + // track current max proposed state in , we don't have to compute this for + // a non-leader participant, or if there're no more than 2t+1 messages for leader. + MaxProposedState State + MaxProposedCount int +} + +// newConsensusRound creates a new round, and sets the round number +func newConsensusRound(round uint64, c *Consensus) *consensusRound { + r := new(consensusRound) + r.RoundNumber = round + r.c = c + return r +} + +// AddRoundChange adds a message to this round, and +// checks to accept only one message from one participant, +// to prevent multiple proposals attack. 
+func (r *consensusRound) AddRoundChange(sp *SignedProto, m *Message) bool { + for k := range r.roundChanges { + if r.roundChanges[k].Signed.X == sp.X && r.roundChanges[k].Signed.Y == sp.Y { + return false + } + } + + r.roundChanges = append(r.roundChanges, messageTuple{StateHash: r.c.stateHash(m.State), Message: m, Signed: sp}) + return true +} + +// FindRoundChange will try to find a from a given participant, +// and returns index, -1 if not found +func (r *consensusRound) FindRoundChange(X PubKeyAxis, Y PubKeyAxis) int { + for k := range r.roundChanges { + if r.roundChanges[k].Signed.X == X && r.roundChanges[k].Signed.Y == Y { + return k + } + } + return -1 +} + +// RemoveRoundChange removes the given message at idx +func (r *consensusRound) RemoveRoundChange(idx int) { + // swap to the end and shrink slice + n := len(r.roundChanges) - 1 + r.roundChanges[idx], r.roundChanges[n] = r.roundChanges[n], r.roundChanges[idx] + r.roundChanges[n] = messageTuple{} // set to nil to avoid memory leak + r.roundChanges = r.roundChanges[:n] +} + +// NumRoundChanges returns count of messages. 
+func (r *consensusRound) NumRoundChanges() int { return len(r.roundChanges) } + +// SignedRoundChanges converts and returns []*SignedProto(as slice) +func (r *consensusRound) SignedRoundChanges() []*SignedProto { + proof := make([]*SignedProto, 0, len(r.roundChanges)) + for k := range r.roundChanges { + proof = append(proof, r.roundChanges[k].Signed) + } + return proof +} + +// RoundChangeStates returns all non-nil state in exchanging round change message as slice +func (r *consensusRound) RoundChangeStates() []State { + states := make([]State, 0, len(r.roundChanges)) + for k := range r.roundChanges { + if r.roundChanges[k].Message.State != nil { + states = append(states, r.roundChanges[k].Message.State) + } + } + return states +} + +// AddCommit adds decoded messages along with its original signed message unchanged, +// also, messages will be de-duplicated to prevent multiple proposals attack. +func (r *consensusRound) AddCommit(sp *SignedProto, m *Message) bool { + for k := range r.commits { + if r.commits[k].Signed.X == sp.X && r.commits[k].Signed.Y == sp.Y { + return false + } + } + r.commits = append(r.commits, messageTuple{StateHash: r.c.stateHash(m.State), Message: m, Signed: sp}) + return true +} + +// NumCommitted counts messages which points to what the leader has locked. +func (r *consensusRound) NumCommitted() int { + var count int + for k := range r.commits { + if r.commits[k].StateHash == r.LockedStateHash { + count++ + } + } + return count +} + +// SignedCommits converts and returns []*SignedProto +func (r *consensusRound) SignedCommits() []*SignedProto { + proof := make([]*SignedProto, 0, len(r.commits)) + for k := range r.commits { + proof = append(proof, r.commits[k].Signed) + } + return proof +} + +// GetMaxProposed finds the most agreed-on non-nil state, if these is any. 
+func (r *consensusRound) GetMaxProposed() (s State, count int) { + if len(r.roundChanges) == 0 { + return nil, 0 + } + + // sort by hash, to group identical hashes together + // O(n*logn) + sorter := tupleSorter{ + tuples: r.roundChanges, + // sort by it's hash lexicographically + by: func(t1, t2 *messageTuple) bool { + return bytes.Compare(t1.StateHash[:], t2.StateHash[:]) < 0 + }, + } + sort.Sort(&sorter) + + // find the maximum occurred hash + // O(n) + maxCount := 1 + maxState := r.roundChanges[0] + curCount := 1 + + n := len(r.roundChanges) + for i := 1; i < n; i++ { + if r.roundChanges[i].StateHash == r.roundChanges[i-1].StateHash { + curCount++ + } else { + if curCount > maxCount { + maxCount = curCount + maxState = r.roundChanges[i-1] + } + curCount = 1 + } + } + + // if the last hash is the maximum occurred + if curCount > maxCount { + maxCount = curCount + maxState = r.roundChanges[n-1] + } + + return maxState.Message.State, maxCount +} + +// Consensus implements a deterministic BDLS consensus protocol. +// +// It has no internal clocking or IO, and no parallel processing. +// The runtime behavior is predictable and deterministic. +// Users should write their own timing and IO function to feed in +// messages and ticks to trigger timeouts. 
+type Consensus struct { + latestState State // latest confirmed state of current height + latestHeight uint64 // latest confirmed height + latestRound uint64 // latest confirmed round + latestProof *SignedProto // latest message to prove the state + + unconfirmed []State // data awaiting to be confirmed at next height + + rounds list.List // all rounds at next height(consensus round in progress) + currentRound *consensusRound // current round which has collected >=2t+1 + + // timeouts in different stage + rcTimeout time.Time // roundchange status timeout: Delta_0 + lockTimeout time.Time // lock status timeout: Delta_1 + commitTimeout time.Time // commit status timeout: Delta_2 + lockReleaseTimeout time.Time // lock-release status timeout: Delta_3 + + // locked states, along with its signatures and hashes in tuple + locks []messageTuple + + // the StateCompare function from config + stateCompare func(State, State) int + // the StateValidate function from config + stateValidate func(State) bool + // message in callback + messageValidator func(c *Consensus, m *Message, sp *SignedProto) bool + // message out callback + messageOutCallback func(m *Message, sp *SignedProto) + // public key to identity function + pubKeyToIdentity func(pubkey *ecdsa.PublicKey) Identity + + // the StateHash function to identify a state + stateHash func(State) StateHash + + // private key + privateKey *ecdsa.PrivateKey + // my publickey coodinate + identity Identity + // curve retrieved from private key + curve elliptic.Curve + + // transmission delay + latency time.Duration + + // all connected peers + peers []PeerInterface + + // participants is the consensus group, current leader is r % quorum + participants []Identity + + // count num of individual identities + numIdentities int //[YONGGE WANG' comments:] make sure this is synchronized with []Identity + + // set to true to enable message unicast + enableCommitUnicast bool + + // NOTE: fixed leader for testing purpose + fixedLeader 
*Identity + + // broadcasting messages being sent to myself + loopback [][]byte + + // the last message which caused round change + lastRoundChangeProof []*SignedProto +} + +// NewConsensus creates a BDLS consensus object to participant in consensus procedure, +// the consensus object returned is data in memory without goroutines or other +// non-deterministic objects, and errors will be returned if there is problem, with +// the given config. +func NewConsensus(config *Config) (*Consensus, error) { + err := VerifyConfig(config) + if err != nil { + return nil, err + } + + c := new(Consensus) + c.init(config) + return c, nil +} + +// init consensus with config +func (c *Consensus) init(config *Config) { + // setting current state & height + c.latestHeight = config.CurrentHeight + c.participants = config.Participants + c.stateCompare = config.StateCompare + c.stateValidate = config.StateValidate + c.messageValidator = config.MessageValidator + c.messageOutCallback = config.MessageOutCallback + c.privateKey = config.PrivateKey + c.pubKeyToIdentity = config.PubKeyToIdentity + c.enableCommitUnicast = config.EnableCommitUnicast + + // if config has not set hash function, use the default + if c.stateHash == nil { + c.stateHash = defaultHash + } + // if config has not set public key to identity function, use the default + if c.pubKeyToIdentity == nil { + c.pubKeyToIdentity = DefaultPubKeyToIdentity + } + c.identity = c.pubKeyToIdentity(&c.privateKey.PublicKey) + c.curve = c.privateKey.Curve + + // initial default parameters settings + c.latency = DefaultConsensusLatency + + // and initiated the first proposal + c.switchRound(0) + c.currentRound.Stage = stageRoundChanging + c.broadcastRoundChange() + // set rcTimeout to lockTimeout + c.rcTimeout = config.Epoch.Add(c.roundchangeDuration(0)) + + // count number of individual identites + ids := make(map[Identity]bool) + for _, id := range c.participants { + ids[id] = true + } + c.numIdentities = len(ids) +} + +// calculates 
roundchangeDuration +func (c *Consensus) roundchangeDuration(round uint64) time.Duration { + d := 2 * c.latency * (1 << round) + if d > MaxConsensusLatency { + d = MaxConsensusLatency + } + return d +} + +// calculates collectDuration +func (c *Consensus) collectDuration(round uint64) time.Duration { + d := 2 * c.latency * (1 << round) + if d > MaxConsensusLatency { + d = MaxConsensusLatency + } + return d +} + +// calculates lockDuration +func (c *Consensus) lockDuration(round uint64) time.Duration { + d := 4 * c.latency * (1 << round) + if d > MaxConsensusLatency { + d = MaxConsensusLatency + } + return d +} + +// calculates commitDuration +func (c *Consensus) commitDuration(round uint64) time.Duration { + d := 2 * c.latency * (1 << round) + if d > MaxConsensusLatency { + d = MaxConsensusLatency + } + return d +} + +// calculates lockReleaseDuration +func (c *Consensus) lockReleaseDuration(round uint64) time.Duration { + d := 2 * c.latency * (1 << round) + if d > MaxConsensusLatency { + d = MaxConsensusLatency + } + return d +} + +// maximalLocked finds the maximum locked data in this round, +// with regard to StateCompare function in config. +func (c *Consensus) maximalLocked() State { + if len(c.locks) > 0 { + maxState := c.locks[0].Message.State + for i := 1; i < len(c.locks); i++ { + if c.stateCompare(maxState, c.locks[i].Message.State) < 0 { + maxState = c.locks[i].Message.State + } + } + return maxState + } + return nil +} + +// maximalUnconfirmed finds the maximal unconfirmed data with, +// regard to the StateCompare function in config. +func (c *Consensus) maximalUnconfirmed() State { + if len(c.unconfirmed) > 0 { + maxState := c.unconfirmed[0] + for i := 1; i < len(c.unconfirmed); i++ { + if c.stateCompare(maxState, c.unconfirmed[i]) < 0 { + maxState = c.unconfirmed[i] + } + } + return maxState + } + return nil +} + +// verifyMessage verifies message signature against it's & , +// and also checks if the signer is a valid participant. 
+// returns it's decoded 'Message' object if signature has proved authentic. +// returns nil and error if message has not been correctly signed or from an unknown participant. +func (c *Consensus) verifyMessage(signed *SignedProto) (*Message, error) { + if signed == nil { + return nil, ErrMessageIsEmpty + } + + // check signer's identity, all participants have proven + // public key + knownParticipants := false + coord := c.pubKeyToIdentity(signed.PublicKey(c.curve)) + for k := range c.participants { + if coord == c.participants[k] { + knownParticipants = true + } + } + + if !knownParticipants { + return nil, ErrMessageUnknownParticipant + } + + /* + // public key validation + p := defaultCurve.Params().P + x := new(big.Int).SetBytes(signed.X[:]) + y := new(big.Int).SetBytes(signed.Y[:]) + if x.Cmp(p) >= 0 || y.Cmp(p) >= 0 { + return nil, ErrMessageSignature + } + if !defaultCurve.IsOnCurve(x, y) { + return nil, ErrMessageSignature + } + */ + + // as public key is proven , we don't have to verify the public key + if !signed.Verify(c.curve) { + return nil, ErrMessageSignature + } + + // decode message + m := new(Message) + err := proto.Unmarshal(signed.Message, m) + if err != nil { + return nil, err + } + return m, nil +} + +// verify message +func (c *Consensus) verifyRoundChangeMessage(m *Message) error { + // check message height + if m.Height != c.latestHeight+1 { + return ErrRoundChangeHeightMismatch + } + + // check round in protocol + if m.Round < c.currentRound.RoundNumber { + return ErrRoundChangeRoundLower + } + + // state data validation for non-null + if m.State != nil { + if !c.stateValidate(m.State) { + return ErrRoundChangeStateValidation + } + } + + return nil +} + +// verifyLockMessage verifies proofs from messages, +// a lock message must contain at least 2t+1 individual +// messages on B' +func (c *Consensus) verifyLockMessage(m *Message, signed *SignedProto) error { + // check message height + if m.Height != c.latestHeight+1 { + return 
ErrLockHeightMismatch + } + + // check round in protocol + if m.Round < c.currentRound.RoundNumber { + return ErrLockRoundLower + } + + // a message from leader MUST include data along with the message + if m.State == nil { + return ErrLockEmptyState + } + + // state data validation + if !c.stateValidate(m.State) { + return ErrLockStateValidation + } + + // make sure this message has been signed by the leader + leaderKey := c.roundLeader(m.Round) + if c.pubKeyToIdentity(signed.PublicKey(c.curve)) != leaderKey { + return ErrLockNotSignedByLeader + } + + // validate proofs enclosed in the message one by one + rcs := make(map[Identity]State) + for _, proof := range m.Proof { + // first we need to verify the signature,and identity of this proof + mProof, err := c.verifyMessage(proof) + if err != nil { + if err == ErrMessageUnknownParticipant { + return ErrLockProofUnknownParticipant + } + return err + } + + // then we need to check the message type + if mProof.Type != MessageType_RoundChange { + return ErrLockProofTypeMismatch + } + + // and we also need to check the height & round field, + // all messages must be in the same round as the lock message + if mProof.Height != m.Height { + return ErrLockProofHeightMismatch + } + + if mProof.Round != m.Round { + return ErrLockProofRoundMismatch + } + + // state data validation in proofs + if mProof.State != nil { + if !c.stateValidate(mProof.State) { + return ErrLockProofStateValidation + } + } + + // use map to guarantee we will only accept at most 1 message from one + // individual participant + rcs[c.pubKeyToIdentity(proof.PublicKey(c.curve))] = mProof.State + } + + // count individual proofs to B', which has already guaranteed to be the maximal one. 
+ var numValidateProofs int + mHash := c.stateHash(m.State) + for _, v := range rcs { + if c.stateHash(v) == mHash { // B' + numValidateProofs++ + } + } + + // check if valid proofs count is less that 2*t+1 + if numValidateProofs < 2*c.t()+1 { + return ErrLockProofInsufficient + } + return nil +} + +// verifyLockReleaseMessage will verify LockRelease field in a messages, +// returns the embedded message if valid +func (c *Consensus) verifyLockReleaseMessage(signed *SignedProto) (*Message, error) { + // not in lock release status, omit this message + if c.currentRound.Stage != stageLockRelease { + return nil, ErrLockReleaseStatus + } + + // verify and decode the embedded lock message + lockmsg, err := c.verifyMessage(signed) + if err != nil { + return nil, err + } + + // recursively verify proofs in lock message + err = c.verifyLockMessage(lockmsg, signed) + if err != nil { + return nil, err + } + return lockmsg, nil +} + +// verifySelectMessage verifies proofs from message MUST contain at least 2t+1 individual messages, but +// proofs from + if m.State != nil { + if !c.stateValidate(m.State) { + return ErrSelectStateValidation + } + } + + // make sure this message has been signed by the leader + leaderKey := c.roundLeader(m.Round) + if c.pubKeyToIdentity(signed.PublicKey(c.curve)) != leaderKey { + return ErrSelectNotSignedByLeader + } + + rcs := make(map[Identity]State) + for _, proof := range m.Proof { + mProof, err := c.verifyMessage(proof) + if err != nil { + if err == ErrMessageUnknownParticipant { + return ErrSelectProofUnknownParticipant + } + return err + } + + if mProof.Type != MessageType_RoundChange { + return ErrSelectProofTypeMismatch + } + + if mProof.Height != m.Height { + return ErrSelectProofHeightMismatch + } + + if mProof.Round != m.Round { + return ErrSelectProofRoundMismatch + } + + // state data validation in proofs + if mProof.State != nil { + if !c.stateValidate(mProof.State) { + return ErrSelectProofStateValidation + } + } + + // we also 
need to check the B'' selected by leader is the maximal one, + // if data has been proposed. + if mProof.State != nil && m.State != nil { + if c.stateCompare(m.State, mProof.State) < 0 { + return ErrSelectProofNotTheMaximal + } + } + + // we also stores B'' == NULL for counting + rcs[c.pubKeyToIdentity(proof.PublicKey(c.curve))] = mProof.State + } + + // check we have at least 2*t+1 proof + if len(rcs) < 2*c.t()+1 { + return ErrSelectProofInsufficient + } + + // count maximum proofs with B' != NULL with identical data hash, + // to prevent leader cheating on select. + dataProposals := make(map[StateHash]int) + for _, data := range rcs { + if data != nil { + dataProposals[c.stateHash(data)]++ + } + } + + // if m.State == NULL, but there are non-NULL proofs, + // the leader may be cheating + if m.State == nil && len(dataProposals) > 0 { + return ErrSelectStateMismatch + } + + // find the highest proposed B'(not NULL) + var maxProposed int + for _, count := range dataProposals { + if count > maxProposed { + maxProposed = count + } + } + + // if these are more than 2*t+1 valid proofs to B', + // this also suggests that the leader may cheat. 
+ if maxProposed >= 2*c.t()+1 { + return ErrSelectProofExceeded + } + + return nil +} + +// verifyCommitMessage will check if this message is acceptable to consensus +func (c *Consensus) verifyCommitMessage(m *Message) error { + // the leader has to be in COMMIT status to process this message + if c.currentRound.Stage != stageCommit { + return ErrCommitStatus + } + + // a message from participants MUST includes data along with the message + if m.State == nil { + return ErrCommitEmptyState + } + + // state data validation + if !c.stateValidate(m.State) { + return ErrCommitStateValidation + } + + // check height + if m.Height != c.latestHeight+1 { + return ErrCommitHeightMismatch + } + + // only accept commits to current round + if c.currentRound.RoundNumber != m.Round { + return ErrCommitRoundMismatch + } + + // check state match + if c.stateHash(m.State) != c.currentRound.LockedStateHash { + return ErrCommitStateMismatch + } + + return nil +} + +// ValidateDecideMessage validates a message for non-participants, +// the consensus core must be correctly initialized to validate. +// the targetState is to compare the target state enclosed in decide message +func (c *Consensus) ValidateDecideMessage(bts []byte, targetState []byte) error { + signed, err := DecodeSignedMessage(bts) + if err != nil { + return err + } + + return c.validateDecideMessage(signed, targetState) +} + +// DecodeSignedMessage decodes a binary representation of signed consensus message. +func DecodeSignedMessage(bts []byte) (*SignedProto, error) { + signed := new(SignedProto) + err := proto.Unmarshal(bts, signed) + if err != nil { + return nil, err + } + return signed, nil +} + +// DecodeMessage decodes a binary representation of consensus message. 
+func DecodeMessage(bts []byte) (*Message, error) { + msg := new(Message) + err := proto.Unmarshal(bts, msg) + if err != nil { + return nil, err + } + return msg, nil +} + +// validateDecideMessage validates a decoded message for non-participants, +// the consensus core must be correctly initialized to validate. +func (c *Consensus) validateDecideMessage(signed *SignedProto, targetState []byte) error { + // check message version + if signed.Version != ProtocolVersion { + return ErrMessageVersion + } + + // check message signature & qualifications + m, err := c.verifyMessage(signed) + if err != nil { + return err + } + + // compare state + if !bytes.Equal(m.State, targetState) { + return ErrMismatchedTargetState + } + + // verify decide message + if m.Type == MessageType_Decide { + err := c.verifyDecideMessage(m, signed) + if err != nil { + return err + } + return nil + } + return ErrMessageUnknownMessageType +} + +// verifyDecideMessage verifies proofs from message, which MUST +// contain at least 2t+1 individual messages to B'. 
+func (c *Consensus) verifyDecideMessage(m *Message, signed *SignedProto) error { + // a message from leader MUST include data along with the message + if m.State == nil { + return ErrDecideEmptyState + } + + // state data validation + if !c.stateValidate(m.State) { + return ErrDecideStateValidation + } + + // check height + if m.Height <= c.latestHeight { + return ErrDecideHeightLower + } + + // make sure this message has been signed by the leader + leaderKey := c.roundLeader(m.Round) + if c.pubKeyToIdentity(signed.PublicKey(c.curve)) != leaderKey { + return ErrDecideNotSignedByLeader + } + + commits := make(map[Identity]State) + for _, proof := range m.Proof { + mProof, err := c.verifyMessage(proof) + if err != nil { + if err == ErrMessageUnknownParticipant { + return ErrDecideProofUnknownParticipant + } + return err + } + + if mProof.Type != MessageType_Commit { + return ErrDecideProofTypeMismatch + } + + if mProof.Height != m.Height { + return ErrDecideProofHeightMismatch + } + + if mProof.Round != m.Round { + return ErrDecideProofRoundMismatch + } + + if !c.stateValidate(mProof.State) { + return ErrDecideProofStateValidation + } + + // state data validation in proofs + if mProof.State != nil { + if !c.stateValidate(mProof.State) { + return ErrSelectProofStateValidation + } + } + + commits[c.pubKeyToIdentity(proof.PublicKey(c.curve))] = mProof.State + } + + // count proofs to m.State + var numValidateProofs int + mHash := c.stateHash(m.State) + for _, v := range commits { + if c.stateHash(v) == mHash { + numValidateProofs++ + } + } + + // check to see if the message has at least 2*t+1 valid proofs, + // if not, the leader may cheat. + if numValidateProofs < 2*c.t()+1 { + return ErrDecideProofInsufficient + } + return nil +} + +// broadcastRoundChange will broadcast messages on +// current round, taking the maximal B' from unconfirmed data. +func (c *Consensus) broadcastRoundChange() { + // if has sent in this round, + // then just ignore. 
But if we are in roundchanging state, + // we should send repeatedly, for boostrap process. + if c.currentRound.RoundChangeSent && c.currentRound.Stage != stageRoundChanging { + return + } + + // first we need to check if there is any locked data, + // locked data must be sent if there is any. + data := c.maximalLocked() + if data == nil { + // if there's none locked data, we pick the maximum unconfirmed data to propose + data = c.maximalUnconfirmed() + // if still null, return + if data == nil { + return + } + } + + var m Message + m.Type = MessageType_RoundChange + m.Height = c.latestHeight + 1 + m.Round = c.currentRound.RoundNumber + m.State = data + c.broadcast(&m) + c.currentRound.RoundChangeSent = true + //log.Println("broadcast:") +} + +// broadcastLock will broadcast messages on current round, +// the currentRound should have a chosen data in this round. +func (c *Consensus) broadcastLock() { + var m Message + m.Type = MessageType_Lock + m.Height = c.latestHeight + 1 + m.Round = c.currentRound.RoundNumber + m.State = c.currentRound.LockedState + m.Proof = c.currentRound.SignedRoundChanges() + c.broadcast(&m) + //log.Println("broadcast:") +} + +// broadcastLockRelease will broadcast messages, +func (c *Consensus) broadcastLockRelease(signed *SignedProto) { + var m Message + m.Type = MessageType_LockRelease + m.Height = c.latestHeight + 1 + m.Round = c.currentRound.RoundNumber + m.LockRelease = signed + c.broadcast(&m) + //log.Println("broadcast:") +} + +// broadcastSelect will broadcast a ", m.State) +} + +// broadcastDecide will broadcast a message by the leader, +// from current round with proofs. 
+func (c *Consensus) broadcastDecide() *SignedProto { + var m Message + m.Type = MessageType_Decide + m.Height = c.latestHeight + 1 + m.Round = c.currentRound.RoundNumber + m.State = c.currentRound.LockedState + m.Proof = c.currentRound.SignedCommits() + return c.broadcast(&m) + //log.Println("broadcast:") +} + +// broadcastResync will broadcast a message by the leader, +// from current round with proofs. +func (c *Consensus) broadcastResync() { + if c.lastRoundChangeProof == nil { + return + } + + var m Message + m.Type = MessageType_Resync + // we only care about messages in resync + m.Proof = c.lastRoundChangeProof + c.broadcast(&m) + //log.Println("broadcast:") +} + +// sendCommit will send a message by participants to the leader +// from received message. +func (c *Consensus) sendCommit(msgLock *Message) { + if c.currentRound.CommitSent { + return + } + + var m Message + m.Type = MessageType_Commit + m.Height = msgLock.Height // h + m.Round = msgLock.Round // r + m.State = msgLock.State // B'j + if c.enableCommitUnicast { + c.sendTo(&m, c.roundLeader(m.Round)) + } else { + c.broadcast(&m) + } + c.currentRound.CommitSent = true + //log.Println("send:") +} + +// broadcast signs the message with private key before broadcasting to all peers. +func (c *Consensus) broadcast(m *Message) *SignedProto { + // sign + sp := new(SignedProto) + sp.Version = ProtocolVersion + sp.Sign(m, c.privateKey) + + // message callback + if c.messageOutCallback != nil { + c.messageOutCallback(m, sp) + } + // protobuf marshalling + out, err := proto.Marshal(sp) + if err != nil { + panic(err) + } + + // send to peers one by one + for _, peer := range c.peers { + _ = peer.Send(out) + } + + // we also need to send this message to myself + c.loopback = append(c.loopback, out) + return sp +} + +// sendTo signs the message with private key before transmitting to the peer. 
+func (c *Consensus) sendTo(m *Message, leader Identity) { + // sign + sp := new(SignedProto) + sp.Version = ProtocolVersion + sp.Sign(m, c.privateKey) + + // message callback + if c.messageOutCallback != nil { + c.messageOutCallback(m, sp) + } + + // protobuf marshalling + out, err := proto.Marshal(sp) + if err != nil { + panic(err) + } + + // we need to send this message to myself (via loopback) if i'm the leader + if leader == c.identity { + c.loopback = append(c.loopback, out) + return + } + + // otherwise, find and transmit to the leader + for _, peer := range c.peers { + if pk := peer.GetPublicKey(); pk != nil { + coord := c.pubKeyToIdentity(pk) + if coord == leader { + // we do not return here to avoid missing re-connected peer. + peer.Send(out) + } + } + } +} + +// propagate broadcasts signed message UNCHANGED to peers. +func (c *Consensus) propagate(bts []byte) { + // send to peers one by one + for _, peer := range c.peers { + _ = peer.Send(bts) + } +} + +// getRound returns the consensus round with given idx, create one if not exists +// if purgeLower has set, all lower rounds will be cleared +func (c *Consensus) getRound(idx uint64, purgeLower bool) *consensusRound { + var next *list.Element + for elem := c.rounds.Front(); elem != nil; elem = next { + next = elem.Next() + r := elem.Value.(*consensusRound) + + if r.RoundNumber < idx { // lower round + // if remove flag has set, remove this round safely, + // usually used by switchRound + if purgeLower { + c.rounds.Remove(elem) + } + continue + } else if idx < r.RoundNumber { // higher round + // insert a new round entry before this round + // to make sure the list is ordered + newr := newConsensusRound(idx, c) + c.rounds.InsertBefore(newr, elem) + return newr + } else if r.RoundNumber == idx { // found entry + return r + } + } + + // looped to the end, we create and push back + newr := newConsensusRound(idx, c) + c.rounds.PushBack(newr) + return newr +} + +// lockRelease updates locks while entering 
lock-release status
// and will broadcast its max B' if there is any.
func (c *Consensus) lockRelease() {
	// only keep the locked B' with the max round number
	// while switching to lock-release status
	if len(c.locks) > 0 {
		max := c.locks[0]
		for i := 1; i < len(c.locks); i++ {
			if max.Message.Round < c.locks[i].Message.Round {
				max = c.locks[i]
			}
		}
		c.locks = []messageTuple{max}
		c.broadcastLockRelease(max.Signed)
	}
}

// switchRound sets currentRound to the given idx, and creates new a consensusRound
// if it's not been initialized.
// and all lower rounds will be cleared while switching.
func (c *Consensus) switchRound(round uint64) { c.currentRound = c.getRound(round, true) }

// roundLeader returns leader's identity for a given round
func (c *Consensus) roundLeader(round uint64) Identity {
	// NOTE: fixed leader is for testing
	if c.fixedLeader != nil {
		return *c.fixedLeader
	}
	// FIX: reduce modulo in uint64 BEFORE converting to an index.
	// The original int(round)%len(c.participants) truncates round on
	// conversion, which can select the wrong leader (or a negative index)
	// for round numbers exceeding the int range, e.g. on 32-bit platforms.
	return c.participants[round%uint64(len(c.participants))]
}

// heightSync changes current height to the given height with state
// resets all fields to this new height.
func (c *Consensus) heightSync(height uint64, round uint64, s State, now time.Time) {
	c.latestHeight = height // set height
	c.latestRound = round   // set round
	c.latestState = s       // set state

	c.currentRound = nil         // clean current round pointer
	c.lastRoundChangeProof = nil // clean round change proof
	c.rounds.Init()              // clean all round
	c.locks = nil                // clean locks
	c.unconfirmed = nil          // clean all unconfirmed states from previous heights
	c.switchRound(0)             // start new round at new height
	c.currentRound.Stage = stageRoundChanging
}

// t calculates (n-1)/3
func (c *Consensus) t() int { return (c.numIdentities - 1) / 3 }

// Propose adds a new state to unconfirmed queue to participate in
// consensus at next height.
+func (c *Consensus) Propose(s State) { + if s == nil { + return + } + + sHash := c.stateHash(s) + for k := range c.unconfirmed { + if c.stateHash(c.unconfirmed[k]) == sHash { + return + } + } + c.unconfirmed = append(c.unconfirmed, s) +} + +// ReceiveMessage processes incoming consensus messages, and returns error +// if message cannot be processed for some reason. +func (c *Consensus) ReceiveMessage(bts []byte, now time.Time) (err error) { + // messages broadcasted to myself may be queued recursively, and + // we only process these messages in defer to avoid side effects + // while processing. + defer func() { + for len(c.loopback) > 0 { + bts := c.loopback[0] + c.loopback = c.loopback[1:] + // NOTE: message directed to myself ignores error. + _ = c.receiveMessage(bts, now) + } + }() + + return c.receiveMessage(bts, now) +} + +func (c *Consensus) receiveMessage(bts []byte, now time.Time) error { + // unmarshal signed message + signed := new(SignedProto) + err := proto.Unmarshal(bts, signed) + if err != nil { + return err + } + + // check message version + if signed.Version != ProtocolVersion { + return ErrMessageVersion + } + + // check message signature & qualifications + m, err := c.verifyMessage(signed) + if err != nil { + return err + } + + // callback for incoming message + if c.messageValidator != nil { + if !c.messageValidator(c, m, signed) { + return ErrMessageValidator + } + } + + // message switch + switch m.Type { + case MessageType_Nop: + // nop does nothing + return nil + case MessageType_RoundChange: + err := c.verifyRoundChangeMessage(m) + if err != nil { + return err + } + + // for message, we need to find in each round + // to check if this sender has already sent + // we only keep the message from the max round. + // NOTE: we don't touch current round to prevent removing + // valid proofs. 
+ // NOTE: the total messages are bounded to max 2*participants + // at any time, so the loop has O(n) time complexity + var next *list.Element + for elem := c.rounds.Front(); elem != nil; elem = next { + next = elem.Next() + cr := elem.Value.(*consensusRound) + if idx := cr.FindRoundChange(signed.X, signed.Y); idx != -1 { // located! + if m.Round == c.currentRound.RoundNumber { // don't remove now! + continue + } else if cr.RoundNumber > m.Round { + // existing message is higher than incoming message, + // just ignore. + return nil + } else if cr.RoundNumber < m.Round { + // existing message is lower than incoming message, + // remove the existing message from this round. + cr.RemoveRoundChange(idx) + // if no message remained in this round, release + // the round resources too, to prevent OOM attack + if cr.NumRoundChanges() == 0 { + c.rounds.Remove(elem) + } + } + } + } + + // locate to round m.Round. + // NOTE: getRound must not be called before previous checks done + // in order to prevent OOM attack by creating round objects. + round := c.getRound(m.Round, false) + // as we cleared all lower rounds message, we handle the message + // at round m.Round. if this message is not duplicated in m.Round, + // round records message along with its signed message + // to provide proofs in the future. 
+ if round.AddRoundChange(signed, m) { + // During any time of the protocol, if a the Pacemaker of Pj (including Pi) + // receives at least 2t + 1 round-change message (including round-change + // message from himself) for round r (which is larger than its current round + // status), it enters lock status of round r + // + // NOTE: m.Round lower than currentRound.RoundNumber has been tested by + // verifyRoundChangeMessage + // NOTE: lock stage can only be entered once for a single round, malicious + // participant can keep on broadcasting increasing to everyone, + // and old messages will be removed from previous rounds in such + // case, so rounds may possibly satisify 2*t+1 more than once. + // + // Example: P sends r+1 to remove from r, and sends to r again to trigger 2t+1 once + // more to reset timeout. + if round.NumRoundChanges() == 2*c.t()+1 && round.Stage < stageLock { + // switch to this round + c.switchRound(m.Round) + // record this round change proof for resyncing + c.lastRoundChangeProof = c.currentRound.SignedRoundChanges() + + // If Pj has not broadcasted the round-change message yet, + // it broadcasts now. 
+ c.broadcastRoundChange() + + // leader of this round MUST wait on collectDuration, + // to decide to broadcast or message + err := c.verifySelectMessage(m, signed) + if err != nil { + return err + } + + // round will be increased monotonically + if m.Round > c.currentRound.RoundNumber { + c.switchRound(m.Round) + c.lastRoundChangeProof = []*SignedProto{signed} // record this proof for resyncing + } + + // for rounds r' >= r, we must check c.stage to stageLockRelease + // only once to prevent resetting lockReleaseTimeout or shifting c.cstage + if c.currentRound.Stage < stageLockRelease { + c.currentRound.Stage = stageLockRelease + c.lockReleaseTimeout = now.Add(c.commitDuration(m.Round)) + c.lockRelease() + // add to Blockj + c.Propose(m.State) + } + + case MessageType_Lock: + // verify message + err := c.verifyLockMessage(m, signed) + if err != nil { + return err + } + + // round will be increased monotonically + if m.Round > c.currentRound.RoundNumber { + c.switchRound(m.Round) + c.lastRoundChangeProof = []*SignedProto{signed} // record this proof for resyncing + } + + // for rounds r' >= r, we must check to enter commit status + // only once to prevent resetting commitTimeout or shifting c.cstage + if c.currentRound.Stage < stageCommit { + c.currentRound.Stage = stageCommit + c.commitTimeout = now.Add(c.commitDuration(m.Round)) + + mHash := c.stateHash(m.State) + // release any potential lock on B' in this round + // in-place deletion + o := 0 + for i := 0; i < len(c.locks); i++ { + if c.locks[i].StateHash != mHash { + c.locks[o] = c.locks[i] + o++ // o is the new length of c.locks + } + } + c.locks = c.locks[:o] + // append the new element + c.locks = append(c.locks, messageTuple{StateHash: mHash, Message: m, Signed: signed}) + } + + // for any incoming message with r=r', sendCommit will send + // once. + c.sendCommit(m) + + case MessageType_LockRelease: + // verifies the LockRelease field in message. 
+ lockmsg, err := c.verifyLockReleaseMessage(m.LockRelease) + if err != nil { + return err + } + + // length of locks is 0, append and return. + if len(c.locks) == 0 { + c.locks = append(c.locks, messageTuple{StateHash: c.stateHash(lockmsg.State), Message: lockmsg, Signed: m.LockRelease}) + return nil + } + + // remove any locks if lockmsg.r > r' and keep lockmsg.r, + o := 0 + for i := 0; i < len(c.locks); i++ { + if !(lockmsg.Round > c.locks[i].Message.Round) { + // if the round of this lock is not larger than what we + // have kept, ignore and continue. + c.locks[o] = c.locks[i] + o++ + } + } + + // some locks have been removed if o is smaller than original locks length, + // then we keep this lock. + if o < len(c.locks) { + c.locks = c.locks[:o] + c.locks = append(c.locks, messageTuple{StateHash: c.stateHash(lockmsg.State), Message: lockmsg, Signed: m.LockRelease}) + } + + case MessageType_Commit: + // leader process commits message from all participants, + // check to see if I'm the leader of this round to process this message. + leaderKey := c.roundLeader(m.Round) + if leaderKey == c.identity { + // verify commit message. + // NOTE: leader only accept commits for current height & round. + err := c.verifyCommitMessage(m) + if err != nil { + return err + } + + // verifyCommitMessage can guarantee that the message is to currentRound, + // so we're safe to process in current round. + if c.currentRound.AddCommit(signed, m) { + // NOTE: we proceed the following only when AddCommit returns true. + // NumCommitted will only return commits with locked B' + // and ignore non-B' commits. 
+ if c.currentRound.NumCommitted() >= 2*c.t()+1 { + /* + log.Println("======= LEADER'S DECIDE=====") + log.Println("Height:", c.currentHeight+1) + log.Println("Round:", c.currentRound.RoundNumber) + log.Println("State:", State(c.currentRound.LockedState).hash()) + */ + + // broadcast decide will return what it has sent + c.latestProof = c.broadcastDecide() + c.heightSync(c.latestHeight+1, c.currentRound.RoundNumber, c.currentRound.LockedState, now) + // leader should wait for 1 more latency + c.rcTimeout = now.Add(c.roundchangeDuration(0) + c.latency) + // broadcast at new height + c.broadcastRoundChange() + } + } + } + + case MessageType_Decide: + err := c.verifyDecideMessage(m, signed) + if err != nil { + return err + } + + // record this proof for chaining + c.latestProof = signed + + // propagate this message to my neighbour. + // NOTE: verifyDecideMessage() can stop broadcast storm. + c.propagate(bts) + // passive confirmation from the leader. + c.heightSync(m.Height, m.Round, m.State, now) + // non-leader starts waiting for rcTimeout + c.rcTimeout = now.Add(c.roundchangeDuration(0)) + // we sync our height and broadcast new . + c.broadcastRoundChange() + + case MessageType_Resync: + // push the proofs in loopback device + for k := range m.Proof { + // protobuf marshalling + out, err := proto.Marshal(m.Proof[k]) + if err != nil { + panic(err) + } + c.loopback = append(c.loopback, out) + } + + default: + return ErrMessageUnknownMessageType + } + return nil +} + +// Update will process timing event for the state machine, callers +// from outside MUST call this function periodically(like 20ms). +func (c *Consensus) Update(now time.Time) error { + // as in ReceiveMessage, we also need to handle broadcasting messages + // directed to myself. 
+ defer func() { + for len(c.loopback) > 0 { + bts := c.loopback[0] + c.loopback = c.loopback[1:] + _ = c.receiveMessage(bts, now) + } + }() + + // stage switch + switch c.currentRound.Stage { + case stageRoundChanging: + if c.rcTimeout.IsZero() { + panic("roundchanging stage entered, but lockTimeout not set") + } + + if now.After(c.rcTimeout) { + c.broadcastRoundChange() + c.broadcastResync() // we also need to broadcast the round change event message if there is any + c.rcTimeout = now.Add(c.roundchangeDuration(c.currentRound.RoundNumber)) + } + case stageLock: + if c.lockTimeout.IsZero() { + panic("lock stage entered, but lockTimeout not set") + } + // leader's collection, we perform periodically check for or message to participants. + // enqueue all received non-NULL data + states := c.currentRound.RoundChangeStates() + for k := range states { + c.Propose(states[k]) + } + + // broadcast this related + ErrSelectStateValidation = errors.New("the state data validation failed message has another height than expected") + ErrSelectRoundLower = errors.New("the message is not signed by leader") + ErrSelectStateMismatch = errors.New("the message has unknown participant") + ErrSelectProofTypeMismatch = errors.New("the proofs in message has mismatched height") + ErrSelectProofRoundMismatch = errors.New("the proofs in message has invalid state data") + ErrSelectProofNotTheMaximal = errors.New("the proposed state is not the maximal one in the message has insufficient overall proofs") + ErrSelectProofExceeded = errors.New("the message + MessageType_Select MessageType = 3 + // MessageCommit = message + MessageType_Commit MessageType = 4 + // MessageLockRelease = message + MessageType_LockRelease MessageType = 5 + // MessageDecide = message + MessageType_Decide MessageType = 6 + // MessageResync= message + MessageType_Resync MessageType = 7 +) + +var MessageType_name = map[int32]string{ + 0: "Nop", + 1: "RoundChange", + 2: "Lock", + 3: "Select", + 4: "Commit", + 5: 
"LockRelease", + 6: "Decide", + 7: "Resync", +} + +var MessageType_value = map[string]int32{ + "Nop": 0, + "RoundChange": 1, + "Lock": 2, + "Select": 3, + "Commit": 4, + "LockRelease": 5, + "Decide": 6, + "Resync": 7, +} + +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} + +func (MessageType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0} +} + +// SignedProto defines a message with signature and it's publickey +type SignedProto struct { + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // the Message encoded raw protobuf in bytes + Message []byte `protobuf:"bytes,2,opt,name=Message,proto3" json:"Message,omitempty"` + // signer's public key + X PubKeyAxis `protobuf:"bytes,3,opt,name=x,proto3,customtype=PubKeyAxis" json:"x"` + Y PubKeyAxis `protobuf:"bytes,4,opt,name=y,proto3,customtype=PubKeyAxis" json:"y"` + // signature r,s for prefix+messages+version+x+y above + R []byte `protobuf:"bytes,5,opt,name=r,proto3" json:"r,omitempty"` + S []byte `protobuf:"bytes,6,opt,name=s,proto3" json:"s,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignedProto) Reset() { *m = SignedProto{} } +func (m *SignedProto) String() string { return proto.CompactTextString(m) } +func (*SignedProto) ProtoMessage() {} +func (*SignedProto) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0} +} +func (m *SignedProto) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedProto.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedProto) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SignedProto.Merge(m, src) +} +func (m *SignedProto) XXX_Size() int { + return m.Size() +} +func (m *SignedProto) XXX_DiscardUnknown() { + xxx_messageInfo_SignedProto.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedProto proto.InternalMessageInfo + +func (m *SignedProto) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *SignedProto) GetMessage() []byte { + if m != nil { + return m.Message + } + return nil +} + +func (m *SignedProto) GetR() []byte { + if m != nil { + return m.R + } + return nil +} + +func (m *SignedProto) GetS() []byte { + if m != nil { + return m.S + } + return nil +} + +// Message defines a consensus message +type Message struct { + // Type of this message + Type MessageType `protobuf:"varint,1,opt,name=Type,proto3,enum=bdls.MessageType" json:"Type,omitempty"` + // Height in consensus + Height uint64 `protobuf:"varint,2,opt,name=Height,proto3" json:"Height,omitempty"` + // Round in consensus + Round uint64 `protobuf:"varint,3,opt,name=Round,proto3" json:"Round,omitempty"` + // Proposed state (optional) + State []byte `protobuf:"bytes,4,opt,name=State,proto3" json:"State,omitempty"` + // Proofs related + Proof []*SignedProto `protobuf:"bytes,5,rep,name=Proof,proto3" json:"Proof,omitempty"` + // for lock-release, it's an embeded message + LockRelease *SignedProto `protobuf:"bytes,6,opt,name=LockRelease,proto3" json:"LockRelease,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{1} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetType() MessageType { + if m != nil { + return m.Type + } + return MessageType_Nop +} + +func (m *Message) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Message) GetRound() uint64 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *Message) GetState() []byte { + if m != nil { + return m.State + } + return nil +} + +func (m *Message) GetProof() []*SignedProto { + if m != nil { + return m.Proof + } + return nil +} + +func (m *Message) GetLockRelease() *SignedProto { + if m != nil { + return m.LockRelease + } + return nil +} + +func init() { + proto.RegisterEnum("bdls.MessageType", MessageType_name, MessageType_value) + proto.RegisterType((*SignedProto)(nil), "bdls.SignedProto") + proto.RegisterType((*Message)(nil), "bdls.Message") +} + +func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } + +var fileDescriptor_33c57e4bae7b9afd = []byte{ + // 381 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcd, 0xaa, 0xda, 0x40, + 0x14, 0xc7, 0x1d, 0xf3, 0x61, 0x39, 0xd1, 0x76, 0x3a, 0x94, 0x32, 0x74, 0xa1, 0x41, 0x28, 0x95, + 0x42, 0x23, 0xd4, 0x27, 0xa8, 0x76, 0x51, 0xe8, 0x07, 0x32, 0xf6, 0x05, 0xf2, 0x71, 0x8c, 0xa1, + 0x26, 0x23, 0x99, 0xa4, 0x98, 0xc7, 0xe9, 0xdb, 0xb8, 0x2c, 0x5d, 0x76, 0x21, 0x17, 0x9f, 0xe4, + 0x32, 0x13, 0xbd, 0x64, 0x71, 0xef, 0xee, 0xfc, 0xce, 0xff, 0x7f, 0xce, 0xf9, 0x27, 0x0c, 0x8c, + 0x72, 0x54, 0x2a, 0x4c, 
0x31, 0x38, 0x94, 0xb2, 0x92, 0xcc, 0x8e, 0x92, 0xbd, 0x7a, 0xf3, 0x21, + 0xcd, 0xaa, 0x5d, 0x1d, 0x05, 0xb1, 0xcc, 0xe7, 0xa9, 0x4c, 0xe5, 0xdc, 0x88, 0x51, 0xbd, 0x35, + 0x64, 0xc0, 0x54, 0xed, 0xd0, 0xf4, 0x0f, 0x01, 0x6f, 0x93, 0xa5, 0x05, 0x26, 0x6b, 0xb3, 0x84, + 0xc3, 0xe0, 0x37, 0x96, 0x2a, 0x93, 0x05, 0x27, 0x3e, 0x99, 0x8d, 0xc4, 0x0d, 0xb5, 0xf2, 0xbd, + 0xbd, 0xc7, 0xfb, 0x3e, 0x99, 0x0d, 0xc5, 0x0d, 0x99, 0x0f, 0xe4, 0xc8, 0x2d, 0xdd, 0x5b, 0xb2, + 0xd3, 0x79, 0xd2, 0xfb, 0x7f, 0x9e, 0xc0, 0xba, 0x8e, 0xbe, 0x62, 0xf3, 0xe9, 0x98, 0x29, 0x41, + 0x8e, 0xda, 0xd1, 0x70, 0xfb, 0x69, 0x47, 0xc3, 0x86, 0x40, 0x4a, 0xee, 0x98, 0xbd, 0xa4, 0xd4, + 0xa4, 0xb8, 0xdb, 0x92, 0x9a, 0xfe, 0x23, 0x0f, 0xa7, 0xd9, 0x5b, 0xb0, 0x7f, 0x36, 0x07, 0x34, + 0xe1, 0x9e, 0x7f, 0x7c, 0x19, 0xe8, 0x6f, 0x0e, 0xae, 0xa2, 0x16, 0x84, 0x91, 0xd9, 0x6b, 0x70, + 0xbf, 0x60, 0x96, 0xee, 0x2a, 0x93, 0xd5, 0x16, 0x57, 0x62, 0xaf, 0xc0, 0x11, 0xb2, 0x2e, 0x12, + 0x13, 0xd7, 0x16, 0x2d, 0xe8, 0xee, 0xa6, 0x0a, 0x2b, 0x6c, 0x23, 0x8a, 0x16, 0xd8, 0x3b, 0x70, + 0xd6, 0xa5, 0x94, 0x5b, 0xee, 0xf8, 0xd6, 0xcc, 0xbb, 0xdd, 0xea, 0xfc, 0x2c, 0xd1, 0xea, 0x6c, + 0x01, 0xde, 0x37, 0x19, 0xff, 0x12, 0xb8, 0xc7, 0x50, 0xa1, 0xc9, 0xfd, 0xa8, 0xbd, 0xeb, 0x7a, + 0x5f, 0x82, 0xd7, 0x89, 0xcd, 0x06, 0x60, 0xfd, 0x90, 0x07, 0xda, 0x63, 0x2f, 0xc0, 0x33, 0xa1, + 0x56, 0xbb, 0xb0, 0x48, 0x91, 0x12, 0xf6, 0x0c, 0x6c, 0x3d, 0x47, 0xfb, 0x0c, 0xc0, 0xdd, 0xe0, + 0x1e, 0xe3, 0x8a, 0x5a, 0xba, 0x5e, 0xc9, 0x3c, 0xcf, 0x2a, 0x6a, 0xeb, 0x91, 0xce, 0x66, 0xea, + 0x68, 0xf1, 0x33, 0xc6, 0x59, 0x82, 0xd4, 0xd5, 0xb5, 0x40, 0xd5, 0x14, 0x31, 0x1d, 0x2c, 0x87, + 0xa7, 0xcb, 0x98, 0xfc, 0xbd, 0x8c, 0xc9, 0xdd, 0x65, 0x4c, 0x22, 0xd7, 0xbc, 0x80, 0xc5, 0x7d, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x43, 0xc3, 0x64, 0x6c, 0x47, 0x02, 0x00, 0x00, +} + +func (m *SignedProto) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedProto) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedProto) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.S) > 0 { + i -= len(m.S) + copy(dAtA[i:], m.S) + i = encodeVarintMessage(dAtA, i, uint64(len(m.S))) + i-- + dAtA[i] = 0x32 + } + if len(m.R) > 0 { + i -= len(m.R) + copy(dAtA[i:], m.R) + i = encodeVarintMessage(dAtA, i, uint64(len(m.R))) + i-- + dAtA[i] = 0x2a + } + { + size := m.Y.Size() + i -= size + if _, err := m.Y.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size := m.X.Size() + i -= size + if _, err := m.X.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.Version != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.LockRelease != nil { + { + size, err := m.LockRelease.MarshalToSizedBuffer(dAtA[:i]) 
+ if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Proof) > 0 { + for iNdEx := len(m.Proof) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Proof[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintMessage(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x22 + } + if m.Round != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x18 + } + if m.Height != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { + offset -= sovMessage(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SignedProto) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + sovMessage(uint64(m.Version)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + l = m.X.Size() + n += 1 + l + sovMessage(uint64(l)) + l = m.Y.Size() + n += 1 + l + sovMessage(uint64(l)) + l = len(m.R) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + l = len(m.S) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovMessage(uint64(m.Type)) + } + if m.Height != 0 { + n += 1 + sovMessage(uint64(m.Height)) + } + if m.Round != 0 { + n += 1 + sovMessage(uint64(m.Round)) + } + l = len(m.State) + if l > 0 { + n 
+= 1 + l + sovMessage(uint64(l)) + } + if len(m.Proof) > 0 { + for _, e := range m.Proof { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.LockRelease != nil { + l = m.LockRelease.Size() + n += 1 + l + sovMessage(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMessage(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessage(x uint64) (n int) { + return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SignedProto) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedProto: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedProto: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + 
postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = append(m.Message[:0], dAtA[iNdEx:postIndex]...) + if m.Message == nil { + m.Message = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field X", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.X.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Y", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Y.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field R", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return 
ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.R = append(m.R[:0], dAtA[iNdEx:postIndex]...) + if m.R == nil { + m.R = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.S = append(m.S[:0], dAtA[iNdEx:postIndex]...) + if m.S == nil { + m.S = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MessageType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var byteLen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = append(m.State[:0], dAtA[iNdEx:postIndex]...) + if m.State == nil { + m.State = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proof = append(m.Proof, &SignedProto{}) + if err := m.Proof[len(m.Proof)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LockRelease", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LockRelease == nil { + m.LockRelease = &SignedProto{} + } + if err := m.LockRelease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessage + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessage + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: 
integer overflow") + ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/BDLS-bft/bdls/message.proto b/vendor/github.com/BDLS-bft/bdls/message.proto new file mode 100644 index 00000000000..fa65dcd248e --- /dev/null +++ b/vendor/github.com/BDLS-bft/bdls/message.proto @@ -0,0 +1,54 @@ + +syntax = "proto3"; +package bdls; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +// SignedProto defines a message with signature and it's publickey +message SignedProto { + uint32 version=1; + // the Message encoded raw protobuf in bytes + bytes Message=2; + // signer's public key + bytes x = 3 [(gogoproto.customtype) = "PubKeyAxis", (gogoproto.nullable) = false]; + bytes y = 4 [(gogoproto.customtype) = "PubKeyAxis", (gogoproto.nullable) = false]; + // signature r,s for prefix+messages+version+x+y above + bytes r = 5; + bytes s = 6; +} + +// MessageType defines supported message types +enum MessageType{ + // No operation, for default message type, and keepalive connection + Nop = 0; + // MessageRoundChange = message + RoundChange = 1; + // MessageLock = message + Lock = 2; + // MessageSelect =