diff --git a/buildkite/scripts/export-git-env-vars.sh b/buildkite/scripts/export-git-env-vars.sh
index 5e628e03e7a..a21c3389406 100755
--- a/buildkite/scripts/export-git-env-vars.sh
+++ b/buildkite/scripts/export-git-env-vars.sh
@@ -27,6 +27,7 @@ set -u
 export MINA_DEB_CODENAME=${MINA_DEB_CODENAME:=bullseye}
 [[ -n "$BUILDKITE_BRANCH" ]] && export GITBRANCH=$(echo "$BUILDKITE_BRANCH" | sed 's!/!-!g; s!_!-!g; s!#!-!g')
+export RELEASE=unstable
 
 if [ "${BUILDKITE_REPO}" != "${MINA_REPO}" ]; then
   # Abort if `BUILDKITE_REPO` doesn't have the expected format
@@ -41,47 +42,10 @@ if [ "${BUILDKITE_REPO}" != "${MINA_REPO}" ]; then
   # For example: for given repo 'https://github.com/dkijania/mina.git' we convert it to 'dkijania_mina'
   export GITTAG=1.0.0$(echo ${BUILDKITE_REPO} | sed -e 's/^.*github.com[:\/]\(.*\)\.git$/\1/' -e 's/\//-/')
   export THIS_COMMIT_TAG=""
-  RELEASE=unstable
 else
   # GITTAG is the closest tagged commit to this commit, while THIS_COMMIT_TAG only has a value when the current commit is tagged
   export GITTAG=$(find_most_recent_numeric_tag HEAD)
-
-  # Determine deb repo to use
-  case $GITBRANCH in
-    berkeley|rampup|compatible|master|release/*) # whitelist of branches that can be tagged
-      case "${THIS_COMMIT_TAG}" in
-        *alpha*) # any tag including the string `alpha`
-          RELEASE=alpha ;;
-        *beta*) # any tag including the string `beta`
-          RELEASE=beta ;;
-        *berkeley*) # any tag including the string `berkeley`
-          RELEASE=berkeley ;;
-        *rampup*) # any tag including the string `rampup`
-          RELEASE=rampup ;;
-        *devnet*)
-          RELEASE=devnet ;;
-        ?*)
-          # if the tag is a version number sans any suffix, then it's a stable release
-          if grep -qP '^\d+\.\d+\.\d+$' <<< "${THIS_COMMIT_TAG}"; then
-            RELEASE=stable
-          else
-            RELEASE=unstable
-          fi ;;
-        "") # No tag
-          RELEASE="unstable" ;;
-          # real soon now:
-          # RELEASE="${GITHASH}" ;; # avoids deb-s3 concurrency issues between PRs
-        *) # The above set of cases should be exhaustive, if they're not then still set RELEASE=unstable
-          RELEASE=unstable
-          echo "git tag --points-at HEAD may have failed, falling back to unstable. Value: \"$(git tag --points-at HEAD)\""
-          ;;
-      esac ;;
-    release-automation-testing/*) # whitelist of branches that can be tagged
-      RELEASE=prerelease ;;
-    *)
-      RELEASE=unstable ;;
-  esac
 fi
 
 if [[ -n "${THIS_COMMIT_TAG}" ]]; then # If the commit is tagged
@@ -100,6 +64,6 @@ case $GITBRANCH in
     MINA_BUILD_MAINNET=false ;;
 esac
 
-echo "Publishing on release channel \"${RELEASE}\" based on branch \"${GITBRANCH}\" and tag \"${THIS_COMMIT_TAG}\""
+echo "Publishing on release channel \"${RELEASE}\""
 [[ -n ${THIS_COMMIT_TAG} ]] && export MINA_COMMIT_TAG="${THIS_COMMIT_TAG}"
 export MINA_DEB_RELEASE="${RELEASE}"
diff --git a/dockerfiles/scripts/daemon-entrypoint.sh b/dockerfiles/scripts/daemon-entrypoint.sh
index a812cd1ace8..c1cb56fb30e 100755
--- a/dockerfiles/scripts/daemon-entrypoint.sh
+++ b/dockerfiles/scripts/daemon-entrypoint.sh
@@ -12,7 +12,7 @@ INPUT_ARGS="$@"
 declare -a VERBOSE_LOG_FILES=('mina-stderr.log' '.mina-config/mina-prover.log' '.mina-config/mina-verifier.log')
 
 # Attempt to execute or source custom entrypoint scripts accordingly
-for script in /entrypoint.d/* /entrypoint.d/.*; do
+for script in /entrypoint.d/*; do
   if [[ "$( basename "${script}")" == *mina-env ]]; then
     source "${script}"
   elif [[ -f "${script}" ]] && [[ ! -x "${script}" ]]; then
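
Note on the daemon-entrypoint.sh change above: in bash, a `.*` glob expands to the `.` and `..` directory entries as well as dotfiles, so the old loop could end up treating the directory itself (or its parent) as a script. A minimal sketch of the pitfall, with illustrative paths; the trade-off is that hidden files under /entrypoint.d/ are no longer picked up:

    # Illustrative only: why globbing dotfiles with `.*` is risky in bash.
    mkdir -p /tmp/entrypoint.d
    touch /tmp/entrypoint.d/10-custom /tmp/entrypoint.d/.hidden-mina-env
    for script in /tmp/entrypoint.d/* /tmp/entrypoint.d/.*; do
      echo "would handle: ${script}"
    done
    # Output includes /tmp/entrypoint.d/. and /tmp/entrypoint.d/.. --
    # the loop body would then try to source or execute directories.
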
-x "${script}" ]]; then diff --git a/scripts/archive/migration/mina-berkeley-migration-script b/scripts/archive/migration/mina-berkeley-migration-script index 44ee9f20751..a0180715bb6 100755 --- a/scripts/archive/migration/mina-berkeley-migration-script +++ b/scripts/archive/migration/mina-berkeley-migration-script @@ -20,6 +20,7 @@ declare -r CLI_NAME="$0"; declare -r PS4='debug($LINENO) ${FUNCNAME[0]:+${FUNCNAME[0]}}(): '; CHECKPOINT_PREFIX=migration +CHECKPOINT_INTERVAL=1000 ################################################################################ # functions @@ -87,6 +88,7 @@ function initial_help(){ printf " %-25s %s\n" "-n | --network" "[string] network name when determining precomputed blocks. NOTICE: there is an assumption that precomputed blocks are named with format: {network}-{height}-{state_hash}.json"; printf " %-25s %s\n" "-d | --delete-blocks" "[flag] delete blocks after they are processed (saves space with -sb)" printf " %-25s %s\n" "-p | --prefetch-blocks" "[flag] downloads all blocks at once instead of incrementally" + printf " %-25s %s\n" "-i | --checkpoint-interval" "[int] replayer checkpoint interval. Default: 1000" printf " %-25s %s\n" "-c | --checkpoint-output-path" "[file] output folder for replayer checkpoints" printf " %-25s %s\n" "-l | --precomputed-blocks-local-path" "[file] on-disk precomputed blocks location" echo "" @@ -119,7 +121,8 @@ function initial(){ local __network='' local __checkpoint_output_path='.' local __precomputed_blocks_local_path='.' - + local __checkpoint_interval=$CHECKPOINT_INTERVAL + while [ ${#} -gt 0 ]; do error_message="Error: a value is needed for '$1'"; case $1 in @@ -162,6 +165,10 @@ function initial(){ __checkpoint_output_path=${2:?$error_message} shift 2; ;; + -i | --checkpoint-interval ) + __checkpoint_interval=${2:?$error_message} + shift 2; + ;; -l | --precomputed-blocks-local-path ) __precomputed_blocks_local_path=${2:?$error_message} shift 2; @@ -217,7 +224,8 @@ function initial(){ "$__stream_blocks" \ "$__network" \ "$__checkpoint_output_path" \ - "$__precomputed_blocks_local_path" + "$__precomputed_blocks_local_path" \ + "$__checkpoint_interval" } function check_log_for_error() { @@ -263,6 +271,7 @@ function run_initial_migration() { local __network=$8 local __checkpoint_output_path=$9 local __precomputed_blocks_local_path=${10} + local __checkpoint_interval=${11} local __date=$(date '+%Y-%m-%d_%H_%M_%S') local __berkely_migration_log="berkeley_migration_$__date.log" @@ -295,7 +304,7 @@ function run_initial_migration() { --migration-mode \ --archive-uri "$__migrated_archive_uri" \ --input-file "$__config_file" \ - --checkpoint-interval 1000 \ + --checkpoint-interval "$__checkpoint_interval" \ --checkpoint-file-prefix "$CHECKPOINT_PREFIX" \ --checkpoint-output-folder "$__checkpoint_output_path" \ --log-file "$__replayer_log" @@ -331,6 +340,7 @@ function incremental_help(){ printf " %-25s %s\n" "-d | --delete-blocks" "delete blocks after they are processed (saves space with -sb)" printf " %-25s %s\n" "-p | --prefetch-blocks" "downloads all blocks at once instead of incrementally" printf " %-25s %s\n" "-c | --checkpoint-output-path" "[file] output folder for replayer checkpoints" + printf " %-25s %s\n" "-i | --checkpoint-interval" "[int] replayer checkpoint interval. 
Default: 1000" printf " %-25s %s\n" "-l | --precomputed-blocks-local-path" "[file] on-disk precomputed blocks location" echo "" echo "Example:" @@ -388,7 +398,7 @@ function incremental(){ local __keep_precomputed_blocks=true local __stream_blocks=true local __network='' - local __checkpoint_interval=1000 + local __checkpoint_interval=$CHECKPOINT_INTERVAL local __checkpoint_output_path='.' local __precomputed_blocks_local_path='.' @@ -584,6 +594,7 @@ function final_help(){ printf " %-25s %s\n" "-d | --delete-blocks" "delete blocks after they are processed (saves space with -sb)" printf " %-25s %s\n" "-p | --prefetch-blocks" "downloads all blocks at once instead of incrementally" printf " %-25s %s\n" "-c | --checkpoint-output-path" "[file] output folder for replayer checkpoints" + printf " %-25s %s\n" "-i | --checkpoint-interval" "[int] replayer checkpoint interval. Default: 1000" printf " %-25s %s\n" "-l | --precomputed-blocks-local-path" "[file] on-disk precomputed blocks location" echo "" echo "Example:" @@ -612,7 +623,7 @@ function final(){ local __keep_precomputed_blocks=true local __stream_blocks=true local __network='' - local __checkpoint_interval=1000 + local __checkpoint_interval=$CHECKPOINT_INTERVAL local __fork_genesis_config='' local __checkpoint_output_path='.' local __precomputed_blocks_local_path='.' diff --git a/scripts/release-docker.sh b/scripts/release-docker.sh index 197057964e5..e3136b65552 100755 --- a/scripts/release-docker.sh +++ b/scripts/release-docker.sh @@ -175,15 +175,4 @@ if [[ -z "$NOUPLOAD" ]] || [[ "$NOUPLOAD" -eq 0 ]]; then docker tag "${TAG}" "${HASHTAG}" docker push "${HASHTAG}" - echo "Release Env Var: ${DEB_RELEASE}" - echo "Release: ${DEB_RELEASE##*=}" - - if [[ "${DEB_RELEASE##*=}" = "unstable" ]]; then - echo "Release is unstable: not pushing to docker hub" - else - echo "Release is public (alpha, beta, berkeley, or stable): pushing image to docker hub" - # tag and push to dockerhub - docker tag "${TAG}" "minaprotocol/${SERVICE}:${VERSION}" - docker push "minaprotocol/${SERVICE}:${VERSION}" - fi fi diff --git a/src/app/berkeley_migration/berkeley_migration.ml b/src/app/berkeley_migration/berkeley_migration.ml index e8377480cea..ddabb420f2f 100644 --- a/src/app/berkeley_migration/berkeley_migration.ml +++ b/src/app/berkeley_migration/berkeley_migration.ml @@ -507,70 +507,54 @@ let main ~mainnet_archive_uri ~migrated_archive_uri ~runtime_config_file [%log info] "Prefetching all required precomputed blocks" ; fetch_precomputed_blocks_for mainnet_blocks_to_migrate ) in - (* 3 * because we want to report download, migration, and reupload separately *) - let total_reports_expected = 3 * List.length mainnet_blocks_to_migrate in + [%log info] "Migrating mainnet blocks" ; let%bind () = - Progress.with_reporter - Progress.Line.( - list - [ const "Migrating blocks" - ; spinner () - ; bar total_reports_expected - ; eta total_reports_expected - ; percentage_of total_reports_expected - ]) - (fun f -> - List.chunks_of ~length:batch_size mainnet_blocks_to_migrate - |> Deferred.List.iter ~f:(fun (blocks : Sql.Mainnet.Block.t list) -> - [%log debug] - "Migrating %d blocks starting at height %Ld (%s..%s)" - (List.length blocks) (List.hd_exn blocks).height - (List.hd_exn blocks).state_hash - (List.last_exn blocks).state_hash ; - let%bind precomputed_blocks = - if stream_precomputed_blocks then ( - [%log debug] "Fetching batch of precomputed blocks" ; - fetch_precomputed_blocks_for blocks ) - else return prefetched_precomputed_blocks - in - f (Map.length 
diff --git a/src/app/berkeley_migration/berkeley_migration.ml b/src/app/berkeley_migration/berkeley_migration.ml
index e8377480cea..ddabb420f2f 100644
--- a/src/app/berkeley_migration/berkeley_migration.ml
+++ b/src/app/berkeley_migration/berkeley_migration.ml
@@ -507,70 +507,54 @@ let main ~mainnet_archive_uri ~migrated_archive_uri ~runtime_config_file
       [%log info] "Prefetching all required precomputed blocks" ;
       fetch_precomputed_blocks_for mainnet_blocks_to_migrate )
   in
-  (* 3 * because we want to report download, migration, and reupload separately *)
-  let total_reports_expected = 3 * List.length mainnet_blocks_to_migrate in
+  [%log info] "Migrating mainnet blocks" ;
   let%bind () =
-    Progress.with_reporter
-      Progress.Line.(
-        list
-          [ const "Migrating blocks"
-          ; spinner ()
-          ; bar total_reports_expected
-          ; eta total_reports_expected
-          ; percentage_of total_reports_expected
-          ])
-      (fun f ->
-        List.chunks_of ~length:batch_size mainnet_blocks_to_migrate
-        |> Deferred.List.iter ~f:(fun (blocks : Sql.Mainnet.Block.t list) ->
-               [%log debug]
-                 "Migrating %d blocks starting at height %Ld (%s..%s)"
-                 (List.length blocks) (List.hd_exn blocks).height
-                 (List.hd_exn blocks).state_hash
-                 (List.last_exn blocks).state_hash ;
-               let%bind precomputed_blocks =
-                 if stream_precomputed_blocks then (
-                   [%log debug] "Fetching batch of precomputed blocks" ;
-                   fetch_precomputed_blocks_for blocks )
-                 else return prefetched_precomputed_blocks
-               in
-               f (Map.length precomputed_blocks) ;
-               [%log debug] "Converting blocks to extensional format..." ;
-               let%bind extensional_blocks =
-                 mainnet_block_to_extensional_batch ~logger ~mainnet_pool
-                   ~genesis_block ~precomputed_blocks blocks
-               in
-               f (List.length extensional_blocks) ;
-               [%log debug] "Adding blocks to migrated database..." ;
-               let%bind () =
-                 query_migrated_db ~f:(fun db ->
-                     match%map
-                       Archive_lib.Processor.Block
-                       .add_from_extensional_batch db extensional_blocks
-                         ~v1_transaction_hash:true
-                     with
-                     | Ok _id ->
-                         f (List.length extensional_blocks) ;
-                         Ok ()
-                     | Error (`Congested _) ->
-                         failwith
-                           "Could not archive extensional block batch: \
-                            congested"
-                     | Error (`Decode_rejected _ as err)
-                     | Error (`Encode_failed _ as err)
-                     | Error (`Encode_rejected _ as err)
-                     | Error (`Request_failed _ as err)
-                     | Error (`Request_rejected _ as err)
-                     | Error (`Response_failed _ as err)
-                     | Error (`Response_rejected _ as err) ->
-                         failwithf
-                           "Could not archive extensional block batch: %s"
-                           (Caqti_error.show err) () )
-               in
-               if stream_precomputed_blocks && not keep_precomputed_blocks
-               then
-                 Precomputed_block.delete_fetched_concrete ~network
-                   (required_precomputed_blocks blocks)
-               else return () ) )
+    List.chunks_of ~length:batch_size mainnet_blocks_to_migrate
+    |> Deferred.List.iter ~f:(fun (blocks : Sql.Mainnet.Block.t list) ->
+           [%log info] "Migrating %d blocks starting at height %Ld (%s..%s)"
+             (List.length blocks) (List.hd_exn blocks).height
+             (List.hd_exn blocks).state_hash
+             (List.last_exn blocks).state_hash ;
+           let%bind precomputed_blocks =
+             if stream_precomputed_blocks then (
+               [%log info] "Fetching batch of precomputed blocks" ;
+               fetch_precomputed_blocks_for blocks )
+             else return prefetched_precomputed_blocks
+           in
+           [%log info] "Converting blocks to extensional format..." ;
+           let%bind extensional_blocks =
+             mainnet_block_to_extensional_batch ~logger ~mainnet_pool
+               ~genesis_block ~precomputed_blocks blocks
+           in
+           [%log info] "Adding blocks to migrated database..." ;
+           let%bind () =
+             query_migrated_db ~f:(fun db ->
+                 match%map
+                   Archive_lib.Processor.Block.add_from_extensional_batch db
+                     extensional_blocks ~v1_transaction_hash:true
+                 with
+                 | Ok _id ->
+                     Ok ()
+                 | Error (`Congested _) ->
+                     failwith
+                       "Could not archive extensional block batch: \
+                        congested"
+                 | Error (`Decode_rejected _ as err)
+                 | Error (`Encode_failed _ as err)
+                 | Error (`Encode_rejected _ as err)
+                 | Error (`Request_failed _ as err)
+                 | Error (`Request_rejected _ as err)
+                 | Error (`Response_failed _ as err)
+                 | Error (`Response_rejected _ as err) ->
+                     failwithf
+                       "Could not archive extensional block batch: %s"
+                       (Caqti_error.show err) () )
+           in
+           if stream_precomputed_blocks && not keep_precomputed_blocks then
+             Precomputed_block.delete_fetched_concrete ~network
+               ~local_path:precomputed_blocks_local_path
+               (required_precomputed_blocks blocks)
+           else return () )
   in
   let%bind () =
     (* this will still run even if we are downloading precomputed blocks in batches, to handle any leftover blocks from prior runs *)
@@ -643,12 +627,15 @@ let () =
       and precomputed_blocks_local_path =
         Param.flag "--precomputed-blocks-local-path"
           ~aliases:[ "-precomputed-blocks-local-path" ]
-          Param.(required string)
+          Param.(optional string)
          ~doc:"PATH the precomputed blocks on-disk location"
      and log_json = Cli_lib.Flag.Log.json
      and log_level = Cli_lib.Flag.Log.level
      and file_log_level = Cli_lib.Flag.Log.file_log_level
      and log_filename = Cli_lib.Flag.Log.file in
+     let precomputed_blocks_local_path =
+       Option.value precomputed_blocks_local_path ~default:"."
+     in
      main ~mainnet_archive_uri ~migrated_archive_uri ~runtime_config_file
        ~fork_state_hash ~mina_network_blocks_bucket ~batch_size ~network
        ~stream_precomputed_blocks ~keep_precomputed_blocks ~log_json
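
The new `Option.value ... ~default:"."` wrapper above is what makes `--precomputed-blocks-local-path` optional: when the flag is omitted, the migration falls back to the current directory. For comparison, the equivalent shell idiom (variable name illustrative):

    # Shell analogue of `Option.value precomputed_blocks_local_path ~default:"."`:
    # use "." whenever the caller did not supply a path.
    precomputed_blocks_local_path="${precomputed_blocks_local_path:-.}"
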
diff --git a/src/app/berkeley_migration/precomputed_block.ml b/src/app/berkeley_migration/precomputed_block.ml
index 3749b607bcd..bd9a88dd33c 100644
--- a/src/app/berkeley_migration/precomputed_block.ml
+++ b/src/app/berkeley_migration/precomputed_block.ml
@@ -219,7 +219,8 @@ let concrete_fetch_batch ~logger ~bucket ~network targets ~local_path =
   else
     let gsutil_input = String.concat ~sep:"\n" block_uris_to_download in
     let gsutil_process =
-      Process.run ~prog:"gsutil" ~args:[ "-m"; "cp"; "-I"; "." ]
+      Process.run ~prog:"gsutil"
+        ~args:[ "-m"; "cp"; "-I"; local_path ]
         ~stdin:gsutil_input ()
     in
     don't_wait_for
@@ -248,21 +249,18 @@ let concrete_fetch_batch ~logger ~bucket ~network targets ~local_path =
   let file_throttle =
     Throttle.create ~continue_on_error:false ~max_concurrent_jobs:100
   in
-
-  Progress.with_reporter
-    (bar ~data:`Sum ~total:(List.length targets) "Parsing blocks for metadata")
-    (fun progress ->
-      Deferred.List.map targets ~how:`Parallel ~f:(fun target ->
-          let _, state_hash = target in
-          let%map contents =
-            Throttle.enqueue file_throttle (fun () ->
-                Reader.file_contents (Id.filename ~network target) )
-          in
-          let block = of_yojson (Yojson.Safe.from_string contents) in
-          progress 1 ; (state_hash, block) ) )
+  Deferred.List.map targets ~how:`Parallel ~f:(fun target ->
+      let _, state_hash = target in
+      let%map contents =
+        Throttle.enqueue file_throttle (fun () ->
+            Reader.file_contents
+              (sprintf "%s/%s" local_path (Id.filename ~network target)) )
+      in
+      let block = of_yojson (Yojson.Safe.from_string contents) in
+      (state_hash, block) )
   >>| Mina_base.State_hash.Map.of_alist_exn
 
-let delete_fetched_concrete ~network targets : unit Deferred.t =
+let delete_fetched_concrete ~local_path ~network targets : unit Deferred.t =
   (* not perfect, but this is a reasonably portable default *)
   let max_args_size = (*16kb*) 16 * 1024 in
   (* break a list up into chunks using a fold operation *)
@@ -284,25 +282,21 @@ let delete_fetched_concrete ~network targets : unit Deferred.t =
     in
     List.rev (loop list init [] [])
  in
-  let batches =
-    List.map targets ~f:(Id.filename ~network)
-    |> chunk_using ~init:0 ~f:(fun accumulated_size block_id ->
-           let arg_size = String.length block_id in
-           let size_with_new_arg = accumulated_size + String.length block_id in
-           if size_with_new_arg > max_args_size then `Emit arg_size
-           else `Accumulate size_with_new_arg )
-  in
-  Progress.with_reporter
-    (bar ~data:`Sum ~total:(List.length targets) "Removing blocks")
-    (fun progress ->
-      Deferred.List.iter batches ~f:(fun files ->
-          match%map Process.run ~prog:"rm" ~args:files () with
-          | Ok _ ->
-              progress (List.length files)
-          | Error err ->
-              failwithf "Could not delete fetched precomputed blocks, error %s"
-                (Error.to_string_hum err) () ) )
+  List.map targets ~f:(fun target ->
+      sprintf "%s/%s" local_path (Id.filename ~network target) )
+  |> chunk_using ~init:0 ~f:(fun accumulated_size block_id ->
+         let arg_size = String.length block_id in
+         let size_with_new_arg = accumulated_size + String.length block_id in
+         if size_with_new_arg > max_args_size then `Emit arg_size
+         else `Accumulate size_with_new_arg )
+  |> Deferred.List.iter ~f:(fun files ->
+         match%map Process.run ~prog:"rm" ~args:files () with
+         | Ok _ ->
+             ()
+         | Error err ->
+             failwithf "Could not delete fetched precomputed blocks, error %s"
+               (Error.to_string_hum err) () )
 
 let delete_fetched ~network ~path : unit Deferred.t =
   let%bind block_ids = list_directory ~network ~path in
-  delete_fetched_concrete ~network (Set.to_list block_ids)
+  delete_fetched_concrete ~local_path:path ~network (Set.to_list block_ids)
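
On the gsutil call above: `cp -I` reads the object URIs from stdin (which is why the code passes `~stdin:gsutil_input`), so the change only redirects the copy destination from `.` to `local_path`. A standalone sketch of the equivalent pipeline; the bucket and paths are placeholders, and the `{network}-{height}-{state_hash}.json` naming follows the help text earlier in this patch:

    # Illustrative equivalent of the OCaml Process.run call:
    printf '%s\n' \
      "gs://example-bucket/mainnet-100000-STATE_HASH.json" \
      "gs://example-bucket/mainnet-100001-STATE_HASH.json" \
      | gsutil -m cp -I /data/precomputed-blocks
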
diff --git a/src/lib/exit_handlers/exit_handlers.ml b/src/lib/exit_handlers/exit_handlers.ml
index eb9135f4197..98d9ee1db5b 100644
--- a/src/lib/exit_handlers/exit_handlers.ml
+++ b/src/lib/exit_handlers/exit_handlers.ml
@@ -25,7 +25,7 @@ let register_handler ~logger ~description (f : unit -> unit) =
 (* register a Deferred.t thunk to be called at Async shutdown; log registration and execution *)
 let register_async_shutdown_handler ~logger ~description
     (f : unit -> unit Deferred.t) =
-  [%log info] "Registering async shutdown handler: $description"
+  [%log debug] "Registering async shutdown handler: $description"
     ~metadata:[ ("description", `String description) ] ;
   let logging_thunk () =
     [%log info] "Running async shutdown handler: $description"
diff --git a/src/lib/mina_graphql/mina_graphql.ml b/src/lib/mina_graphql/mina_graphql.ml
index 9ce4204ae4c..3a450cb849a 100644
--- a/src/lib/mina_graphql/mina_graphql.ml
+++ b/src/lib/mina_graphql/mina_graphql.ml
@@ -2382,27 +2382,12 @@ module Queries = struct
           Deferred.Result.fail "Daemon is bootstrapping"
       | `Active breadcrumb -> (
           let txn_stop_slot_opt =
-            match runtime_config.daemon with
-            | Some daemon ->
-                daemon.slot_tx_end
-            | None ->
-                None
+            Runtime_config.slot_tx_end_or_default runtime_config
           in
           match txn_stop_slot_opt with
           | None ->
               return breadcrumb
-          | Some txn_stop_slot ->
-              (* NB: Here we use the correct notion of the stop slot: we
-                 want to stop at an offset from genesis. This is
-                 inconsistent with the uses across the rest of the code
-                 -- the stop slot is being used as since hard-fork
-                 instead, which is the incorrect version -- but I refuse
-                 to propagate that error to here.
-              *)
-              let stop_slot =
-                Mina_numbers.Global_slot_since_genesis.of_int
-                  txn_stop_slot
-              in
+          | Some stop_slot ->
               let rec find_block_older_than_stop_slot breadcrumb =
                 let protocol_state =
                   Transition_frontier.Breadcrumb.protocol_state
@@ -2411,11 +2396,10 @@ module Queries = struct
                 let global_slot =
                   Mina_state.Protocol_state.consensus_state protocol_state
-                  |> Consensus.Data.Consensus_state
-                     .global_slot_since_genesis
+                  |> Consensus.Data.Consensus_state.curr_global_slot
                 in
                 if
-                  Mina_numbers.Global_slot_since_genesis.( < )
+                  Mina_numbers.Global_slot_since_hard_fork.( < )
                     global_slot stop_slot
                 then return breadcrumb
                 else
diff --git a/src/lib/mina_lib/conf_dir.ml b/src/lib/mina_lib/conf_dir.ml
index 5966337ae73..a03b400841b 100644
--- a/src/lib/mina_lib/conf_dir.ml
+++ b/src/lib/mina_lib/conf_dir.ml
@@ -18,7 +18,7 @@ let check_and_set_lockfile ~logger conf_dir =
             return (Writer.writef writer "%d\n" (Pid.to_int pid)) ) )
       with
       | Ok () ->
-          [%log info] "Created daemon lockfile $lockfile"
+          [%log debug] "Created daemon lockfile $lockfile"
            ~metadata:[ ("lockfile", `String lockfile) ] ;
          Exit_handlers.register_async_shutdown_handler ~logger
            ~description:"Remove daemon lockfile" (fun () ->